| text (string, length 12-1.05M) | repo_name (string, length 5-86) | path (string, length 4-191) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, length 64) |
|---|---|---|---|---|---|---|---|
"""this is python equivalent of ./Wrapping/Tcl/vtktesting/backdrop.tcl
This script is used while running python tests translated from Tcl."""
import vtk
basePlane = None
baseMapper = None
base = None
backPlane = None
backMapper = None
back = None
leftPlane = None
leftMapper = None
left = None
def BuildBackdrop (minX, maxX, minY, maxY, minZ, maxZ, thickness):
global basePlane
global baseMapper
global base
global backPlane
global backMapper
global back
global left
global leftPlane
global leftMapper
if not basePlane:
basePlane = vtk.vtkCubeSource()
basePlane.SetCenter( (maxX + minX)/2.0, minY, (maxZ + minZ)/2.0)
basePlane.SetXLength(maxX-minX)
basePlane.SetYLength(thickness)
basePlane.SetZLength(maxZ - minZ)
if not baseMapper:
baseMapper = vtk.vtkPolyDataMapper()
baseMapper.SetInputConnection(basePlane.GetOutputPort())
if not base:
base = vtk.vtkActor()
base.SetMapper(baseMapper)
if not backPlane:
backPlane = vtk.vtkCubeSource()
backPlane.SetCenter( (maxX + minX)/2.0, (maxY + minY)/2.0, minZ)
backPlane.SetXLength(maxX-minX)
backPlane.SetYLength(maxY - minY)
backPlane.SetZLength(thickness)
if not backMapper:
backMapper = vtk.vtkPolyDataMapper()
backMapper.SetInputConnection(backPlane.GetOutputPort())
if not back:
back = vtk.vtkActor()
back.SetMapper(backMapper)
if not leftPlane:
leftPlane = vtk.vtkCubeSource()
leftPlane.SetCenter( minX, (maxY+minY)/2.0, (maxZ+minZ)/2.0)
leftPlane.SetXLength(thickness)
leftPlane.SetYLength(maxY-minY)
leftPlane.SetZLength(maxZ-minZ)
if not leftMapper:
leftMapper = vtk.vtkPolyDataMapper()
leftMapper.SetInputConnection(leftPlane.GetOutputPort())
if not left:
left = vtk.vtkActor()
left.SetMapper(leftMapper)
return [base, back, left]
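# A minimal usage sketch, assumed rather than taken from the original test suite: the
# three actors returned by BuildBackdrop would typically be added to a vtkRenderer by
# the calling test.
if __name__ == "__main__":
    renderer = vtk.vtkRenderer()
    for actor in BuildBackdrop(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0, 0.1):
        renderer.AddActor(actor)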
| cjh1/vtkmodular | Utilities/vtkTclTest2Py/backdrop.py | Python | bsd-3-clause | 1,927 | ["VTK"] | 51b2d29d9f00c0302f17a3d0243ff28fabd13e4588ba0088fd3c1cb16d1d26b0 |
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
import os
sample, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
"@SAMPLES_DIR@/ekboundaries.py", gpu=True, n_int_cycles=50)
@skipIfMissingFeatures
class Sample(ut.TestCase):
system = sample.system
def test_file_generation(self):
# test that the .vtk output files exist
for basename in ["pos_dens_0.vtk", "pos_flux_0.vtk", "ekv_0.vtk",
"neg_dens_0.vtk", "neg_flux_0.vtk", "ekb_0.vtk"]:
filepath = os.path.join("ek", basename)
self.assertTrue(
os.path.isfile(filepath),
filepath + " not created")
if __name__ == "__main__":
ut.main()
| espressomd/espresso | testsuite/scripts/samples/test_ekboundaries.py | Python | gpl-3.0 | 1,413 | ["ESPResSo", "VTK"] | caf9d404bd088dc0f2bc77bb02292dbfabd9842ecf3e8cfb961b06b63109e038 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import pytest
import psi4
import forte
def test_spinorbital_mp2():
import forte
import forte.utils
from math import isclose
import numpy as np
geom = """0 1
H
F 1 1.0
symmetry c1
"""
basis = '6-31g'
Escf, wfn = forte.utils.psi4_scf(geom, basis, 'rhf')
forte_objects = forte.utils.prepare_forte_objects(wfn, mo_spaces={'RESTRICTED_DOCC': [5], 'ACTIVE': [0]})
mo_space_info = forte_objects['mo_space_info']
core = mo_space_info.corr_absolute_mo('RESTRICTED_DOCC')
virt = mo_space_info.corr_absolute_mo('RESTRICTED_UOCC')
ints = forte_objects['ints']
H = {
'cc': forte.spinorbital_oei(ints, core, core),
'cccc': forte.spinorbital_tei(ints, core, core, core, core),
'ccvv': forte.spinorbital_tei(ints, core, core, virt, virt)
}
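# Closed-shell reference energy from the core block:
#   E_ref = E_nuc + sum_m h_mm + 1/2 * sum_mn <mn||mn>
# assembled term by term below and compared against the known HF energy.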
Eref_test = ints.nuclear_repulsion_energy()
Eref_test += np.einsum('mm->', H['cc'])
Eref_test += 0.5 * np.einsum('mnmn->', H['cccc'])
assert math.isclose(Eref_test, -99.97763667846159)
Fc = forte.spinorbital_fock(ints, core, core, core).diagonal()
Fv = forte.spinorbital_fock(ints, virt, virt, core).diagonal()
ncoreso = 2 * len(core)
nvirtso = 2 * len(virt)
d = np.zeros((ncoreso, ncoreso, nvirtso, nvirtso))
for i in range(ncoreso):
for j in range(ncoreso):
for a in range(nvirtso):
for b in range(nvirtso):
d[i][j][a][b] = 1. / (Fc[i] + Fc[j] - Fv[a] - Fv[b])
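# Spin-orbital MP2: amplitudes t_ij^ab = <ij||ab> / (e_i + e_j - e_a - e_b) and
# correlation energy E = 1/4 * sum_ijab <ij||ab> t_ij^ab, evaluated by the two
# einsum contractions below.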
T = {'ccvv': np.einsum("ijab,ijab->ijab", d, H['ccvv'])}
E = 0.25 * np.einsum("ijab,ijab->", T['ccvv'], H['ccvv'])
assert math.isclose(E, -0.13305567213152, abs_tol=1e-09)
psi4.core.clean()
if __name__ == "__main__":
test_spinorbital_mp2()
| evangelistalab/forte | tests/pytest/helpers/test_spinorbital.py | Python | lgpl-3.0 | 1,809 | ["Psi4"] | 709df33ca405dfc9a00b7d876cc1c36eb916efc2d045960dec31f5384b4b6437 |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from urllib import quote
from Plugins.Extensions.OpenWebif.local import tstrings
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885499.309086
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:39 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/ajax/bouquets.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class bouquets(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(bouquets, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''<script>
$(function() { InitAccordeon("#accordion");});
</script>
<div id="accordion">
''')
for bouquet in VFFSL(SL,"bouquets",True): # generated from line 7, col 1
write(u'''<h1>
<div>
<img onclick="window.open(\'/web/services.m3u?bRef=''')
_v = VFFSL(SL,"quote",False)(VFFSL(SL,"bouquet",True)[0]) # u'$quote($bouquet[0])' on line 10, col 51
if _v is not None: write(_filter(_v, rawExpr=u'$quote($bouquet[0])')) # from line 10, col 51.
write(u'''\',\'_blank\');return false;" style="margin-left:30px;" src="/public/images/ico_stream.png" title="''')
_v = VFFSL(SL,"tstrings",True)['download_playlist'] # u"$tstrings['download_playlist']" on line 10, col 166
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['download_playlist']")) # from line 10, col 166.
write(u''' ''')
_v = VFFSL(SL,"bouquet",True)[1] # u'$bouquet[1]' on line 10, col 197
if _v is not None: write(_filter(_v, rawExpr=u'$bouquet[1]')) # from line 10, col 197.
write(u'''" border="0" alt="''')
_v = VFFSL(SL,"tstrings",True)['playlist'] # u"$tstrings['playlist']" on line 10, col 226
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['playlist']")) # from line 10, col 226.
write(u'''">
<a style="display: inline-block;" href="#" id="ajax/channels?id=''')
_v = VFFSL(SL,"quote",False)(VFFSL(SL,"bouquet",True)[0]) # u'$quote($bouquet[0])' on line 11, col 65
if _v is not None: write(_filter(_v, rawExpr=u'$quote($bouquet[0])')) # from line 11, col 65.
write(u'''&stype=''')
_v = VFFSL(SL,"stype",True) # u'$stype' on line 11, col 91
if _v is not None: write(_filter(_v, rawExpr=u'$stype')) # from line 11, col 91.
write(u'''">''')
_v = VFFSL(SL,"bouquet",True)[1] # u'$bouquet[1]' on line 11, col 99
if _v is not None: write(_filter(_v, rawExpr=u'$bouquet[1]')) # from line 11, col 99.
write(u'''</a>
</div></h1><div>''')
_v = VFFSL(SL,"tstrings",True)["loading"] # u'$tstrings["loading"]' on line 12, col 17
if _v is not None: write(_filter(_v, rawExpr=u'$tstrings["loading"]')) # from line 12, col 17.
write(u''' ...</div>
''')
write(u'''</div>
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_bouquets= 'respond'
## END CLASS DEFINITION
if not hasattr(bouquets, '_initCheetahAttributes'):
templateAPIClass = getattr(bouquets, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(bouquets)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=bouquets()).run()
| MOA-2011/enigma2-plugin-extensions-openwebif | plugin/controllers/views/ajax/bouquets.py | Python | gpl-2.0 | 6,693 | ["VisIt"] | 9be85ed11b8023d7a84ff9700fd90b43a39d475fa66bd7825cd964e4401a817b |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test Utilities Trapped Services
-------------------------------
"""
import unittest
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.comm import ServiceAccessPoint, ApplicationServiceElement, bind
from ..trapped_classes import TrappedServiceAccessPoint, \
TrappedApplicationServiceElement
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# globals
sap = None
ase = None
@bacpypes_debugging
class EchoAccessPoint(ServiceAccessPoint):
def sap_indication(self, pdu):
if _debug: EchoAccessPoint._debug("sap_indication %r", pdu)
self.sap_response(pdu)
def sap_confirmation(self, pdu):
if _debug: EchoAccessPoint._debug("sap_confirmation %r", pdu)
pass
class TrappedEchoAccessPoint(TrappedServiceAccessPoint, EchoAccessPoint):
pass
@bacpypes_debugging
class EchoServiceElement(ApplicationServiceElement):
def indication(self, pdu):
if _debug: EchoServiceElement._debug("indication %r", pdu)
self.response(pdu)
def confirmation(self, pdu):
if _debug: EchoServiceElement._debug("confirmation %r", pdu)
pass
class TrappedEchoServiceElement(TrappedApplicationServiceElement, EchoServiceElement):
pass
@bacpypes_debugging
def setup_module():
if _debug: setup_module._debug("setup_module")
global sap, ase
# verify the echo access point is trapped correctly
assert TrappedEchoAccessPoint.__mro__ == (
TrappedEchoAccessPoint,
TrappedServiceAccessPoint,
EchoAccessPoint,
ServiceAccessPoint,
object,
)
# create an access point
sap = TrappedEchoAccessPoint()
# verify the echo service element is trapped correctly
assert TrappedEchoServiceElement.__mro__ == (
TrappedEchoServiceElement,
TrappedApplicationServiceElement,
EchoServiceElement,
ApplicationServiceElement,
object,
)
# create a service element
ase = TrappedEchoServiceElement()
# bind them together
bind(ase, sap)
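# Once bound, a request made on the access point surfaces as an indication on the
# service element and vice versa, while responses come back as confirmations; the
# two tests below assert exactly that mirroring.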
@bacpypes_debugging
def teardown_module():
if _debug: teardown_module._debug("teardown_module")
global sap, ase
# toss the objects into the garbage
sap = None
ase = None
@bacpypes_debugging
class TestApplicationService(unittest.TestCase):
def test_sap_request(self):
if _debug: TestApplicationService._debug("test_sap_request")
global sap, ase
# make a pdu object
pdu = object()
# service access point is going to request something
sap.sap_request(pdu)
# make sure the request was sent and received
assert sap.sap_request_sent is pdu
assert ase.indication_received is pdu
# make sure the echo response was sent and received
assert ase.response_sent is pdu
assert sap.sap_confirmation_received is pdu
def test_ase_request(self):
if _debug: TestApplicationService._debug("test_ase_request")
global sap, ase
# make a pdu object
pdu = object()
# service element is going to request something
ase.request(pdu)
# make sure the request was sent and received
assert ase.request_sent is pdu
assert sap.sap_indication_received is pdu
# make sure the echo response was sent and received
assert sap.sap_response_sent is pdu
assert ase.confirmation_received is pdu
| JoelBender/bacpypes | tests/test_utilities/test_service_access_point.py | Python | mit | 3,492 | ["ASE"] | f4dc15f5aafc77c06cf2138eacb861a2cc8bf680bd7cfff074be368a0e28b2f5 |
import cStringIO
import numpy as np
import theano.tensor as T
from theano.tests import disturb_mem
from theano.tests.record import Record, RecordMode
import warnings
from pylearn2.costs.cost import Cost, SumOfCosts, DefaultDataSpecsMixin
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.models.model import Model
from pylearn2.monitor import Monitor
from pylearn2.space import CompositeSpace, Conv2DSpace, VectorSpace
from pylearn2.termination_criteria import EpochCounter
from pylearn2.testing.cost import CallbackCost, SumOfParams
from pylearn2.testing.datasets import ArangeDataset
from pylearn2.train import Train
from pylearn2.training_algorithms.sgd import (ExponentialDecay,
PolyakAveraging,
LinearDecay,
LinearDecayOverEpoch,
MonitorBasedLRAdjuster,
SGD,
AnnealedLearningRate)
from pylearn2.training_algorithms.learning_rule import (Momentum,
MomentumAdjustor)
from pylearn2.utils.iteration import _iteration_schemes
from pylearn2.utils import safe_izip, safe_union, sharedX
from pylearn2.utils.exc import reraise_as
class SupervisedDummyCost(DefaultDataSpecsMixin, Cost):
supervised = True
def expr(self, model, data):
space, sources = self.get_data_specs(model)
space.validate(data)
(X, Y) = data
return T.square(model(X) - Y).mean()
class DummyCost(DefaultDataSpecsMixin, Cost):
def expr(self, model, data):
space, sources = self.get_data_specs(model)
space.validate(data)
X = data
return T.square(model(X) - X).mean()
class DummyModel(Model):
def __init__(self, shapes, lr_scalers=None):
super(DummyModel, self).__init__()
self._params = [sharedX(np.random.random(shape)) for shape in shapes]
self.input_space = VectorSpace(1)
self.lr_scalers = lr_scalers
def __call__(self, X):
# Implemented only so that DummyCost would work
return X
def get_lr_scalers(self):
if self.lr_scalers:
return dict(zip(self._params, self.lr_scalers))
else:
return dict()
class SoftmaxModel(Model):
"""A dummy model used for testing.
Important properties:
has a parameter (P) for SGD to act on
has a get_output_space method, so it can tell the
algorithm what kind of space the targets for supervised
learning live in
has a get_input_space method, so it can tell the
algorithm what kind of space the features live in
"""
def __init__(self, dim):
super(SoftmaxModel, self).__init__()
self.dim = dim
rng = np.random.RandomState([2012, 9, 25])
self.P = sharedX(rng.uniform(-1., 1., (dim, )))
def get_params(self):
return [self.P]
def get_input_space(self):
return VectorSpace(self.dim)
def get_output_space(self):
return VectorSpace(self.dim)
def __call__(self, X):
# Make the test fail if algorithm does not
# respect get_input_space
assert X.ndim == 2
# Multiplying by P ensures the shape as well
# as ndim is correct
return T.nnet.softmax(X*self.P)
class TopoSoftmaxModel(Model):
"""A dummy model used for testing.
Like SoftmaxModel but its features have 2 topological
dimensions. This tests that the training algorithm
will provide topological data correctly.
"""
def __init__(self, rows, cols, channels):
super(TopoSoftmaxModel, self).__init__()
dim = rows * cols * channels
self.input_space = Conv2DSpace((rows, cols), channels)
self.dim = dim
rng = np.random.RandomState([2012, 9, 25])
self.P = sharedX(rng.uniform(-1., 1., (dim, )))
def get_params(self):
return [self.P]
def get_output_space(self):
return VectorSpace(self.dim)
def __call__(self, X):
# Make the test fail if algorithm does not
# respect get_input_space
assert X.ndim == 4
# Multiplying by P ensures the shape as well
# as ndim is correct
return T.nnet.softmax(X.reshape((X.shape[0], self.dim)) * self.P)
def test_sgd_unspec_num_mon_batch():
# tests that if you don't specify a number of
# monitoring batches, SGD configures the monitor
# to run on all the data
m = 25
visited = [False] * m
rng = np.random.RandomState([25, 9, 2012])
X = np.zeros((m, 1))
X[:, 0] = np.arange(m)
dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(1)
learning_rate = 1e-3
batch_size = 5
cost = DummyCost()
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=None,
monitoring_dataset=dataset,
termination_criterion=None,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
algorithm.setup(dataset=dataset, model=model)
monitor = Monitor.get_monitor(model)
X = T.matrix()
def tracker(*data):
X, = data
assert X.shape[1] == 1
for i in xrange(X.shape[0]):
visited[int(X[i, 0])] = True
monitor.add_channel(name='tracker',
ipt=X,
val=0.,
prereqs=[tracker],
data_specs=(model.get_input_space(),
model.get_input_source()))
monitor()
if False in visited:
print visited
assert False
def test_sgd_sup():
# tests that we can run the sgd algorithm
# on a supervised cost.
# does not test for correctness at all, just
# that the algorithm runs without dying
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
idx = rng.randint(0, dim, (m, ))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
dataset = DenseDesignMatrix(X=X, y=Y)
m = 15
X = rng.randn(m, dim)
idx = rng.randint(0, dim, (m,))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
# Including a monitoring dataset lets us test that
# the monitor works with supervised data
monitoring_dataset = DenseDesignMatrix(X=X, y=Y)
model = SoftmaxModel(dim)
learning_rate = 1e-3
batch_size = 5
cost = SupervisedDummyCost()
# We need to include this so the test actually stops running at some point
termination_criterion = EpochCounter(5)
algorithm = SGD(learning_rate, cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
def test_sgd_unsup():
# tests that we can run the sgd algorithm
# on an unsupervised cost.
# does not test for correctness at all, just
# that the algorithm runs without dying
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
dataset = DenseDesignMatrix(X=X)
m = 15
X = rng.randn(m, dim)
# Including a monitoring dataset lets us test that
# the monitor works with unsupervised data
monitoring_dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(dim)
learning_rate = 1e-3
batch_size = 5
cost = DummyCost()
# We need to include this so the test actually stops running at some point
termination_criterion = EpochCounter(5)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
def get_topological_dataset(rng, rows, cols, channels, m):
X = rng.randn(m, rows, cols, channels)
dim = rows * cols * channels
idx = rng.randint(0, dim, (m,))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
return DenseDesignMatrix(topo_view=X, y=Y)
def test_linear_decay():
# tests that the class LinearDecay in sgd.py
# gets the learning rate properly over the training batches
# it runs a small softmax and at the end checks the learning values.
# the learning rates are expected to start changing at batch 'start'
# by an amount of 'step' specified below.
# the decrease of the learning rate should continue linearly until
# we reach batch 'saturate' at which the learning rate equals
# 'learning_rate * decay_factor'
class LearningRateTracker(object):
def __init__(self):
self.lr_rates = []
def __call__(self, algorithm):
self.lr_rates.append(algorithm.learning_rate.get_value())
dim = 3
dataset_size = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(dataset_size, dim)
dataset = DenseDesignMatrix(X=X)
m = 15
X = rng.randn(m, dim)
# including a monitoring dataset lets us test that
# the monitor works with unsupervised data
monitoring_dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(dim)
learning_rate = 1e-1
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 15
termination_criterion = EpochCounter(epoch_num)
cost = DummyCost()
start = 5
saturate = 10
decay_factor = 0.1
linear_decay = LinearDecay(start=start, saturate=saturate,
decay_factor=decay_factor)
# including this extension for saving learning rate value after each batch
lr_tracker = LearningRateTracker()
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=[linear_decay, lr_tracker],
init_momentum=None,
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
step = (learning_rate - learning_rate*decay_factor)/(saturate - start + 1)
num_batches = np.ceil(dataset_size / float(batch_size)).astype(int)
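# Expected schedule: the rate stays at learning_rate until batch 'start', ramps down
# linearly by 'step' per batch, and settles at learning_rate * decay_factor once
# 'saturate' batches have been seen, which is what the checks below reproduce.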
for i in xrange(epoch_num * num_batches):
actual = lr_tracker.lr_rates[i]
batches_seen = i + 1
if batches_seen < start:
expected = learning_rate
elif batches_seen >= saturate:
expected = learning_rate*decay_factor
elif (start <= batches_seen) and (batches_seen < saturate):
expected = (decay_factor * learning_rate +
(saturate - batches_seen) * step)
if not np.allclose(actual, expected):
raise AssertionError("After %d batches, expected learning rate to "
"be %f, but it is %f." %
(batches_seen, expected, actual))
def test_annealed_learning_rate():
# tests that the class AnnealedLearningRate in sgd.py
# gets the learning rate properly over the training batches
# it runs a small softmax and at the end checks the learning values.
# the learning rates are expected to start changing at batch 'anneal_start'
# After batch anneal_start, the learning rate should be
# learning_rate * anneal_start/number of batches seen
class LearningRateTracker(object):
def __init__(self):
self.lr_rates = []
def __call__(self, algorithm):
self.lr_rates.append(algorithm.learning_rate.get_value())
dim = 3
dataset_size = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(dataset_size, dim)
dataset = DenseDesignMatrix(X=X)
m = 15
X = rng.randn(m, dim)
# including a monitoring dataset lets us test that
# the monitor works with unsupervised data
monitoring_dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(dim)
learning_rate = 1e-1
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 15
termination_criterion = EpochCounter(epoch_num)
cost = DummyCost()
anneal_start = 5
annealed_rate = AnnealedLearningRate(anneal_start=anneal_start)
# including this extension for saving learning rate value after each batch
lr_tracker = LearningRateTracker()
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=[annealed_rate, lr_tracker],
init_momentum=None,
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
num_batches = np.ceil(dataset_size / float(batch_size)).astype(int)
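# Expected schedule: the rate is constant at learning_rate for the first anneal_start
# batches, then decays as learning_rate * anneal_start / batches_seen.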
for i in xrange(epoch_num * num_batches):
actual = lr_tracker.lr_rates[i]
batches_seen = i + 1
expected = learning_rate*min(1, float(anneal_start)/batches_seen)
if not np.allclose(actual, expected):
raise AssertionError("After %d batches, expected learning rate to "
"be %f, but it is %f." %
(batches_seen, expected, actual))
def test_linear_decay_over_epoch():
# tests that the class LinearDecayOverEpoch in sgd.py
# gets the learning rate properly over the training epochs
# it runs a small softmax and at the end checks the learning values.
# the learning rates are expected to start changing at epoch 'start' by an
# amount of 'step' specified below.
# the decrease of the learning rate should continue linearly until we
# reach epoch 'saturate' at which the learning rate equals
# 'learning_rate * decay_factor'
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
dataset = DenseDesignMatrix(X=X)
m = 15
X = rng.randn(m, dim)
# including a monitoring dataset lets us test that
# the monitor works with unsupervised data
monitoring_dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(dim)
learning_rate = 1e-1
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 15
termination_criterion = EpochCounter(epoch_num)
cost = DummyCost()
algorithm = SGD(learning_rate, cost, batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
start = 5
saturate = 10
decay_factor = 0.1
linear_decay = LinearDecayOverEpoch(start=start,
saturate=saturate,
decay_factor=decay_factor)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=[linear_decay])
train.main_loop()
lr = model.monitor.channels['learning_rate']
step = (learning_rate - learning_rate*decay_factor)/(saturate - start + 1)
for i in xrange(epoch_num + 1):
actual = lr.val_record[i]
if i < start:
expected = learning_rate
elif i >= saturate:
expected = learning_rate*decay_factor
elif (start <= i) and (i < saturate):
expected = decay_factor * learning_rate + (saturate - i) * step
if not np.allclose(actual, expected):
raise AssertionError("After %d epochs, expected learning rate to "
"be %f, but it is %f." %
(i, expected, actual))
def test_monitor_based_lr():
# tests that the class MonitorBasedLRAdjuster in sgd.py
# gets the learning rate properly over the training epochs
# it runs a small softmax and at the end checks the learning values. It
# runs 2 loops. Each loop evaluates one of the if clauses when checking
# the observation channels. Otherwise, longer training epochs are needed
# to observe both if and elif cases.
high_trigger = 1.0
shrink_amt = 0.99
low_trigger = 0.99
grow_amt = 1.01
min_lr = 1e-7
max_lr = 1.
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
dataset = DenseDesignMatrix(X=X)
m = 15
X = rng.randn(m, dim)
learning_rate = 1e-2
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 5
# including a monitoring dataset lets us test that
# the monitor works with unsupervised data
monitoring_dataset = DenseDesignMatrix(X=X)
cost = DummyCost()
for i in xrange(2):
if i == 1:
high_trigger = 0.99
model = SoftmaxModel(dim)
termination_criterion = EpochCounter(epoch_num)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
monitor_lr = MonitorBasedLRAdjuster(high_trigger=high_trigger,
shrink_amt=shrink_amt,
low_trigger=low_trigger,
grow_amt=grow_amt,
min_lr=min_lr,
max_lr=max_lr)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=[monitor_lr])
train.main_loop()
v = model.monitor.channels['objective'].val_record
lr = model.monitor.channels['learning_rate'].val_record
lr_monitor = learning_rate
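# Replay the adjuster's expected behaviour: shrink the rate whenever the objective
# exceeds high_trigger times its previous value, grow it when it only exceeds
# low_trigger times the previous value, then clip to [min_lr, max_lr].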
for i in xrange(2, epoch_num + 1):
if v[i-1] > high_trigger * v[i-2]:
lr_monitor *= shrink_amt
elif v[i-1] > low_trigger * v[i-2]:
lr_monitor *= grow_amt
lr_monitor = max(min_lr, lr_monitor)
lr_monitor = min(max_lr, lr_monitor)
assert np.allclose(lr_monitor, lr[i])
def test_bad_monitoring_input_in_monitor_based_lr():
# tests that the class MonitorBasedLRAdjuster in sgd.py avoids wrong
# settings of channel_name or dataset_name in the constructor.
dim = 3
m = 10
rng = np.random.RandomState([06, 02, 2014])
X = rng.randn(m, dim)
learning_rate = 1e-2
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 2
dataset = DenseDesignMatrix(X=X)
# including a monitoring dataset lets us test that
# the monitor works with unsupervised data
monitoring_dataset = DenseDesignMatrix(X=X)
cost = DummyCost()
model = SoftmaxModel(dim)
termination_criterion = EpochCounter(epoch_num)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=2,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
# testing for bad dataset_name input
dummy = 'void'
monitor_lr = MonitorBasedLRAdjuster(dataset_name=dummy)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=[monitor_lr])
try:
train.main_loop()
except ValueError as e:
pass
except Exception:
reraise_as(AssertionError("MonitorBasedLRAdjuster takes dataset_name "
"that is invalid "))
# testing for bad channel_name input
monitor_lr2 = MonitorBasedLRAdjuster(channel_name=dummy)
model2 = SoftmaxModel(dim)
train2 = Train(dataset,
model2,
algorithm,
save_path=None,
save_freq=0,
extensions=[monitor_lr2])
try:
train2.main_loop()
except ValueError as e:
pass
except Exception:
reraise_as(AssertionError("MonitorBasedLRAdjuster takes channel_name "
"that is invalid "))
return
def testing_multiple_datasets_in_monitor_based_lr():
# tests that the class MonitorBasedLRAdjuster in sgd.py does not take
# multiple datasets in which multiple channels ending in '_objective'
# exist.
# This case happens when the user has not specified either channel_name or
# dataset_name in the constructor
dim = 3
m = 10
rng = np.random.RandomState([06, 02, 2014])
X = rng.randn(m, dim)
Y = rng.randn(m, dim)
learning_rate = 1e-2
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 1
# including a monitoring dataset lets us test that
# the monitor works with unsupervised data
monitoring_train = DenseDesignMatrix(X=X)
monitoring_test = DenseDesignMatrix(X=Y)
cost = DummyCost()
model = SoftmaxModel(dim)
dataset = DenseDesignMatrix(X=X)
termination_criterion = EpochCounter(epoch_num)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=2,
monitoring_dataset={'train': monitoring_train,
'test': monitoring_test},
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
monitor_lr = MonitorBasedLRAdjuster()
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=[monitor_lr])
try:
train.main_loop()
except ValueError:
return
raise AssertionError("MonitorBasedLRAdjuster takes multiple dataset names "
"in which more than one \"objective\" channel exist "
"and the user has not specified either channel_name "
"or database_name in the constructor to "
"disambiguate.")
def testing_multiple_datasets_with_specified_dataset_in_monitor_based_lr():
# tests that the class MonitorBasedLRAdjuster in sgd.py can properly use
# the specified dataset_name in the constructor when multiple datasets
# exist.
dim = 3
m = 10
rng = np.random.RandomState([06, 02, 2014])
X = rng.randn(m, dim)
Y = rng.randn(m, dim)
learning_rate = 1e-2
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 1
# including a monitoring dataset lets us test that
# the monitor works with unsupervised data
monitoring_train = DenseDesignMatrix(X=X)
monitoring_test = DenseDesignMatrix(X=Y)
cost = DummyCost()
model = SoftmaxModel(dim)
dataset = DenseDesignMatrix(X=X)
termination_criterion = EpochCounter(epoch_num)
monitoring_dataset = {'train': monitoring_train, 'test': monitoring_test}
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=2,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
dataset_name = monitoring_dataset.keys()[0]
monitor_lr = MonitorBasedLRAdjuster(dataset_name=dataset_name)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=[monitor_lr])
train.main_loop()
def test_sgd_topo():
# tests that we can run the sgd algorithm
# on data with topology
# does not test for correctness at all, just
# that the algorithm runs without dying
rows = 3
cols = 4
channels = 2
dim = rows * cols * channels
m = 10
rng = np.random.RandomState([25, 9, 2012])
dataset = get_topological_dataset(rng, rows, cols, channels, m)
# including a monitoring dataset lets us test that
# the monitor works with supervised data
m = 15
monitoring_dataset = get_topological_dataset(rng, rows, cols, channels, m)
model = TopoSoftmaxModel(rows, cols, channels)
learning_rate = 1e-3
batch_size = 5
cost = SupervisedDummyCost()
# We need to include this so the test actually stops running at some point
termination_criterion = EpochCounter(5)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
def test_sgd_no_mon():
# tests that we can run the sgd algorithm
# without a monitoring dataset
# does not test for correctness at all, just
# that the algorithm runs without dying
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
idx = rng.randint(0, dim, (m,))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
dataset = DenseDesignMatrix(X=X, y=Y)
m = 15
X = rng.randn(m, dim)
idx = rng.randint(0, dim, (m,))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
model = SoftmaxModel(dim)
learning_rate = 1e-3
batch_size = 5
cost = SupervisedDummyCost()
# We need to include this so the test actually stops running at some point
termination_criterion = EpochCounter(5)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_dataset=None,
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
def test_reject_mon_batch_without_mon():
# tests that setting up the sgd algorithm
# without a monitoring dataset
# but with monitoring_batches specified is an error
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
idx = rng.randint(0, dim, (m,))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
dataset = DenseDesignMatrix(X=X, y=Y)
m = 15
X = rng.randn(m, dim)
idx = rng.randint(0, dim, (m, ))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
model = SoftmaxModel(dim)
learning_rate = 1e-3
batch_size = 5
cost = SupervisedDummyCost()
try:
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=None,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
except ValueError:
return
assert False
def test_sgd_sequential():
# tests that requesting train_iteration_mode = 'sequential'
# works
dim = 1
batch_size = 3
m = 5 * batch_size
dataset = ArangeDataset(m)
model = SoftmaxModel(dim)
learning_rate = 1e-3
batch_size = 5
visited = [False] * m
def visit(X):
assert X.shape[1] == 1
assert np.all(X[1:] == X[0:-1]+1)
start = int(X[0, 0])
if start > 0:
assert visited[start - 1]
for i in xrange(batch_size):
assert not visited[start+i]
visited[start+i] = 1
data_specs = (model.get_input_space(), model.get_input_source())
cost = CallbackCost(visit, data_specs)
# We need to include this so the test actually stops running at some point
termination_criterion = EpochCounter(5)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
train_iteration_mode='sequential',
monitoring_dataset=None,
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
algorithm.setup(dataset=dataset, model=model)
algorithm.train(dataset)
assert all(visited)
def test_determinism():
# Verifies that running SGD twice results in the same examples getting
# visited in the same order
for mode in _iteration_schemes:
dim = 1
batch_size = 3
num_batches = 5
m = num_batches * batch_size
dataset = ArangeDataset(m)
model = SoftmaxModel(dim)
learning_rate = 1e-3
batch_size = 5
visited = [[-1] * m]
def visit(X):
mx = max(visited[0])
counter = mx + 1
for i in X[:, 0]:
i = int(i)
assert visited[0][i] == -1
visited[0][i] = counter
counter += 1
data_specs = (model.get_input_space(), model.get_input_source())
cost = CallbackCost(visit, data_specs)
# We need to include this so the test actually stops running at some
# point
termination_criterion = EpochCounter(5)
def run_algorithm():
unsupported_modes = ['random_slice', 'random_uniform']
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
train_iteration_mode=mode,
monitoring_dataset=None,
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
algorithm.setup(dataset=dataset, model=model)
raised = False
try:
algorithm.train(dataset)
except ValueError:
print mode
assert mode in unsupported_modes
raised = True
if mode in unsupported_modes:
assert raised
return True
return False
if run_algorithm():
continue
visited.insert(0, [-1] * m)
del model.monitor
run_algorithm()
for v in visited:
assert len(v) == m
for elem in range(m):
assert elem in v
assert len(visited) == 2
print visited[0]
print visited[1]
assert np.all(np.asarray(visited[0]) == np.asarray(visited[1]))
def test_determinism_2():
"""
A more aggressive determinism test. Tests that apply nodes are all passed
inputs with the same md5sums, apply nodes are run in same order, etc. Uses
disturb_mem to try to cause dictionaries to iterate in different orders,
etc.
"""
def run_sgd(mode):
# Must be seeded the same both times run_sgd is called
disturb_mem.disturb_mem()
rng = np.random.RandomState([2012, 11, 27])
batch_size = 5
train_batches = 3
valid_batches = 4
num_features = 2
# Synthesize dataset with a linear decision boundary
w = rng.randn(num_features)
def make_dataset(num_batches):
disturb_mem.disturb_mem()
m = num_batches*batch_size
X = rng.randn(m, num_features)
y = np.zeros((m, 1))
y[:, 0] = np.dot(X, w) > 0.
rval = DenseDesignMatrix(X=X, y=y)
rval.yaml_src = "" # suppress no yaml_src warning
X = rval.get_batch_design(batch_size)
assert X.shape == (batch_size, num_features)
return rval
train = make_dataset(train_batches)
valid = make_dataset(valid_batches)
num_chunks = 10
chunk_width = 2
class ManyParamsModel(Model):
"""
Make a model with lots of parameters, so that there are many
opportunities for their updates to get accidentally re-ordered
non-deterministically. This makes non-determinism bugs manifest
more frequently.
"""
def __init__(self):
super(ManyParamsModel, self).__init__()
self.W1 = [sharedX(rng.randn(num_features, chunk_width)) for i
in xrange(num_chunks)]
disturb_mem.disturb_mem()
self.W2 = [sharedX(rng.randn(chunk_width))
for i in xrange(num_chunks)]
self._params = safe_union(self.W1, self.W2)
self.input_space = VectorSpace(num_features)
self.output_space = VectorSpace(1)
disturb_mem.disturb_mem()
model = ManyParamsModel()
disturb_mem.disturb_mem()
class LotsOfSummingCost(Cost):
"""
Make a cost whose gradient on the parameters involves summing many
terms together, so that T.grad is more likely to sum things in a
random order.
"""
supervised = True
def expr(self, model, data, **kwargs):
self.get_data_specs(model)[0].validate(data)
X, Y = data
disturb_mem.disturb_mem()
def mlp_pred(non_linearity):
Z = [T.dot(X, W) for W in model.W1]
H = map(non_linearity, Z)
Z = [T.dot(h, W) for h, W in safe_izip(H, model.W2)]
pred = sum(Z)
return pred
nonlinearity_predictions = map(mlp_pred,
[T.nnet.sigmoid,
T.nnet.softplus,
T.sqr,
T.sin])
pred = sum(nonlinearity_predictions)
disturb_mem.disturb_mem()
return abs(pred-Y[:, 0]).sum()
def get_data_specs(self, model):
data = CompositeSpace((model.get_input_space(),
model.get_output_space()))
source = (model.get_input_source(), model.get_target_source())
return (data, source)
cost = LotsOfSummingCost()
disturb_mem.disturb_mem()
algorithm = SGD(cost=cost,
batch_size=batch_size,
learning_rule=Momentum(.5),
learning_rate=1e-3,
monitoring_dataset={'train': train, 'valid': valid},
update_callbacks=[ExponentialDecay(decay_factor=2.,
min_lr=.0001)],
termination_criterion=EpochCounter(max_epochs=5))
disturb_mem.disturb_mem()
train_object = Train(dataset=train,
model=model,
algorithm=algorithm,
extensions=[PolyakAveraging(start=0),
MomentumAdjustor(final_momentum=.9,
start=1,
saturate=5), ],
save_freq=0)
disturb_mem.disturb_mem()
train_object.main_loop()
output = cStringIO.StringIO()
record = Record(file_object=output, replay=False)
record_mode = RecordMode(record)
run_sgd(record_mode)
output = cStringIO.StringIO(output.getvalue())
playback = Record(file_object=output, replay=True)
playback_mode = RecordMode(playback)
run_sgd(playback_mode)
def test_lr_scalers():
"""
Tests that SGD respects Model.get_lr_scalers
"""
# We include a cost other than SumOfParams so that data is actually
# queried from the training set, and the expected number of updates
# are applied.
cost = SumOfCosts([SumOfParams(), (0., DummyCost())])
scales = [.01, .02, .05, 1., 5.]
shapes = [(1,), (9,), (8, 7), (6, 5, 4), (3, 2, 2, 2)]
learning_rate = .001
class ModelWithScalers(Model):
def __init__(self):
super(ModelWithScalers, self).__init__()
self._params = [sharedX(np.zeros(shape)) for shape in shapes]
self.input_space = VectorSpace(1)
def __call__(self, X):
# Implemented only so that DummyCost would work
return X
def get_lr_scalers(self):
return dict(zip(self._params, scales))
model = ModelWithScalers()
dataset = ArangeDataset(1)
sgd = SGD(cost=cost,
learning_rate=learning_rate,
learning_rule=Momentum(.0),
batch_size=1)
sgd.setup(model=model, dataset=dataset)
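# SumOfParams has gradient 1 with respect to every parameter (and the DummyCost term
# is weighted by 0.), so one SGD step should move each parameter by exactly
# -learning_rate * its lr scaler; 'manual' tracks that prediction below.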
manual = [param.get_value() for param in model.get_params()]
manual = [param - learning_rate * scale for param, scale in
zip(manual, scales)]
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param
in zip(manual, model.get_params()))
manual = [param - learning_rate * scale
for param, scale
in zip(manual, scales)]
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param
in zip(manual, model.get_params()))
def test_lr_scalers_momentum():
"""
Tests that SGD respects Model.get_lr_scalers when using
momentum.
"""
# We include a cost other than SumOfParams so that data is actually
# queried from the training set, and the expected number of updates
# are applied.
cost = SumOfCosts([SumOfParams(), (0., DummyCost())])
scales = [.01, .02, .05, 1., 5.]
shapes = [(1,), (9,), (8, 7), (6, 5, 4), (3, 2, 2, 2)]
model = DummyModel(shapes, lr_scalers=scales)
dataset = ArangeDataset(1)
learning_rate = .001
momentum = 0.5
sgd = SGD(cost=cost,
learning_rate=learning_rate,
learning_rule=Momentum(momentum),
batch_size=1)
sgd.setup(model=model, dataset=dataset)
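# With Momentum(0.5) the first update is just inc = -learning_rate * scale (the
# velocity starts at zero); the second update adds momentum * inc on top of a fresh
# -learning_rate * scale step, which is what 'manual' reproduces below.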
manual = [param.get_value() for param in model.get_params()]
inc = [-learning_rate * scale for param, scale in zip(manual, scales)]
manual = [param + i for param, i in zip(manual, inc)]
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param
in zip(manual, model.get_params()))
manual = [param - learning_rate * scale + i * momentum
for param, scale, i in
zip(manual, scales, inc)]
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param
in zip(manual, model.get_params()))
def test_batch_size_specialization():
# Tests that using a batch size of 1 for training and a batch size
# other than 1 for monitoring does not result in a crash.
# This catches a bug reported in the pylearn-dev mailing list
# e-mail "[pylearn-dev] monitor assertion error: channel_X.type != X.type"
# The training data was specialized to a row matrix (theano tensor with
# first dim broadcastable) and the monitor ended up with expressions
# mixing the specialized and non-specialized version of the expression.
m = 2
rng = np.random.RandomState([25, 9, 2012])
X = np.zeros((m, 1))
dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(1)
learning_rate = 1e-3
cost = DummyCost()
algorithm = SGD(learning_rate, cost,
batch_size=1,
monitoring_batches=1,
monitoring_dataset=dataset,
termination_criterion=EpochCounter(max_epochs=1),
update_callbacks=None,
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
def test_uneven_batch_size():
"""
Extensively tests SGD parametrisations for datasets whose number of
examples is not divisible by the batch size.
The tested settings are:
- Model with force_batch_size = True or False
- Training dataset with number of examples divisible or not by batch size
- Monitoring dataset with number of examples divisible or not by batch size
- Even or uneven iterators
2 tests out of 10 should raise ValueError
"""
learning_rate = 1e-3
batch_size = 5
dim = 3
m1, m2, m3 = 10, 15, 22
rng = np.random.RandomState([25, 9, 2012])
dataset1 = DenseDesignMatrix(X=rng.randn(m1, dim))
dataset2 = DenseDesignMatrix(X=rng.randn(m2, dim))
dataset3 = DenseDesignMatrix(X=rng.randn(m3, dim))
def train_with_monitoring_datasets(train_dataset,
monitoring_datasets,
model_force_batch_size,
train_iteration_mode,
monitor_iteration_mode):
model = SoftmaxModel(dim)
if model_force_batch_size:
model.force_batch_size = model_force_batch_size
cost = DummyCost()
algorithm = SGD(learning_rate, cost,
batch_size=batch_size,
train_iteration_mode=train_iteration_mode,
monitor_iteration_mode=monitor_iteration_mode,
monitoring_dataset=monitoring_datasets,
termination_criterion=EpochCounter(2))
train = Train(train_dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
no_monitoring_datasets = None
even_monitoring_datasets = {'valid': dataset2}
uneven_monitoring_datasets = {'valid': dataset2, 'test': dataset3}
# without monitoring datasets
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=no_monitoring_datasets,
model_force_batch_size=False,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=no_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
# with uneven training datasets
train_with_monitoring_datasets(
train_dataset=dataset3,
monitoring_datasets=no_monitoring_datasets,
model_force_batch_size=False,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
try:
train_with_monitoring_datasets(
train_dataset=dataset3,
monitoring_datasets=no_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
assert False
except ValueError:
pass
train_with_monitoring_datasets(
train_dataset=dataset3,
monitoring_datasets=no_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='even_sequential',
monitor_iteration_mode='sequential')
# with even monitoring datasets
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=even_monitoring_datasets,
model_force_batch_size=False,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=even_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
# with uneven monitoring datasets
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=uneven_monitoring_datasets,
model_force_batch_size=False,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
try:
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=uneven_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
assert False
except ValueError:
pass
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=uneven_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='sequential',
monitor_iteration_mode='even_sequential')
if __name__ == '__main__':
test_monitor_based_lr()
| kastnerkyle/pylearn2 | pylearn2/training_algorithms/tests/test_sgd.py | Python | bsd-3-clause | 48,393 | ["VisIt"] | 9739c7a2e9153bbee54fdc2cebefc9b798d5838b188313e8888faa0595ca20f8 |
# -*- coding: utf-8 -*-
# These tests don't work at the moment, due to the security_groups multi select not working
# in selenium (the group is selected then immediately reset)
import fauxfactory
import pytest
from riggerlib import recursive_update
from textwrap import dedent
from widgetastic.utils import partial_match
from widgetastic_patternfly import CheckableBootstrapTreeview as Check_tree
from cfme import test_requirements
from cfme.automate.explorer.domain import DomainCollection
from cfme.cloud.instance import Instance
from cfme.cloud.provider import CloudProvider
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.gce import GCEProvider
from cfme.cloud.provider.ec2 import EC2Provider
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.utils import error
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.conf import credentials
from cfme.utils.rest import assert_response
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.update import update
from cfme.utils.wait import wait_for, RefreshTimer
pytestmark = [
pytest.mark.meta(server_roles="+automate"),
test_requirements.provision, pytest.mark.tier(2),
pytest.mark.provider(
[CloudProvider], required_fields=[['provisioning', 'image']], scope="function"
)
]
@pytest.fixture()
def vm_name():
return random_vm_name(context='prov')
@pytest.fixture()
def testing_instance(request, setup_provider, provider, provisioning, vm_name, tag):
""" Fixture to prepare instance parameters for provisioning
"""
image = provisioning['image']['name']
note = ('Testing provisioning from image {} to vm {} on provider {}'.format(
image, vm_name, provider.key))
instance = Instance.factory(vm_name, provider, image)
inst_args = dict()
# Base instance info
inst_args['request'] = {
'email': '[email protected]',
'first_name': 'Image',
'last_name': 'Provisioner',
'notes': note,
}
# TODO Move this into helpers on the provider classes
recursive_update(inst_args, {'catalog': {'vm_name': vm_name}})
# Check whether auto-selection of environment is passed
auto = False # By default provisioning will be manual
try:
parameter = request.param
if parameter == 'tag':
inst_args['purpose'] = {
'apply_tags': Check_tree.CheckNode(
['{} *'.format(tag.category.display_name), tag.display_name])
}
else:
auto = parameter
except AttributeError:
# in case nothing was passed just skip
pass
recursive_update(inst_args, {
'environment': {
'availability_zone': provisioning.get('availability_zone', None),
'security_groups': [provisioning.get('security_group', None)],
'cloud_network': provisioning.get('cloud_network', None),
'cloud_subnet': provisioning.get('cloud_subnet', None),
'resource_groups': provisioning.get('resource_group', None)
},
'properties': {
'instance_type': partial_match(provisioning.get('instance_type', None)),
'guest_keypair': provisioning.get('guest_keypair', None)}
})
# GCE specific
if provider.one_of(GCEProvider):
recursive_update(inst_args, {
'properties': {
'boot_disk_size': provisioning['boot_disk_size'],
'is_preemptible': True}
})
# Azure specific
if provider.one_of(AzureProvider):
# Azure uses different provisioning keys for some reason
try:
template = provider.data.templates.small_template
vm_user = credentials[template.creds].username
vm_password = credentials[template.creds].password
except AttributeError:
pytest.skip('Could not find small_template or credentials for {}'.format(provider.name))
recursive_update(inst_args, {
'customize': {
'admin_username': vm_user,
'root_password': vm_password}})
if auto:
inst_args.update({'environment': {'automatic_placement': auto}})
yield instance, inst_args, image
logger.info('Fixture cleanup, deleting test instance: %s', instance.name)
try:
instance.delete_from_provider()
except Exception as ex:
logger.warning('Exception while deleting instance fixture, continuing: {}'
.format(ex.message))
@pytest.fixture(scope='function')
def provisioned_instance(provider, testing_instance, appliance):
""" Checks provisioning status for instance """
instance, inst_args, image = testing_instance
instance.create(**inst_args)
logger.info('Waiting for cfme provision request for vm %s', instance.name)
request_description = 'Provision from [{}] to [{}]'.format(image, instance.name)
provision_request = appliance.collections.requests.instantiate(request_description)
try:
provision_request.wait_for_request(method='ui')
except Exception as e:
logger.info(
"Provision failed {}: {}".format(e, provision_request.request_state))
raise e
assert provision_request.is_succeeded(method='ui'), (
"Provisioning failed with the message {}".format(
provision_request.row.last_message.text))
instance.wait_to_appear(timeout=800)
provider.refresh_provider_relationships()
logger.info("Refreshing provider relationships and power states")
refresh_timer = RefreshTimer(time_for_refresh=300)
wait_for(provider.is_refreshed,
[refresh_timer],
message="is_refreshed",
num_sec=1000,
delay=60,
handle_exception=True)
return instance
@pytest.mark.parametrize('testing_instance', [True, False], ids=["Auto", "Manual"], indirect=True)
def test_provision_from_template(provider, provisioned_instance):
""" Tests instance provision from template
Metadata:
test_flag: provision
"""
assert provisioned_instance.does_vm_exist_on_provider(), "Instance wasn't provisioned"
@pytest.mark.uncollectif(lambda provider: not provider.one_of(GCEProvider))
def test_gce_preemptible_provision(provider, testing_instance, soft_assert):
instance, inst_args, image = testing_instance
instance.create(**inst_args)
instance.wait_to_appear(timeout=800)
provider.refresh_provider_relationships()
logger.info("Refreshing provider relationships and power states")
refresh_timer = RefreshTimer(time_for_refresh=300)
wait_for(provider.is_refreshed,
[refresh_timer],
message="is_refreshed",
num_sec=1000,
delay=60,
handle_exception=True)
view = navigate_to(instance, "Details")
preemptible = view.entities.summary("Properties").get_text_of("Preemptible")
soft_assert('Yes' in preemptible, "GCE Instance isn't Preemptible")
soft_assert(instance.does_vm_exist_on_provider(), "Instance wasn't provisioned")
def test_provision_from_template_using_rest(
appliance, request, setup_provider, provider, vm_name, provisioning):
""" Tests provisioning from a template using the REST API.
Metadata:
test_flag: provision, rest
"""
if 'flavors' not in appliance.rest_api.collections.all_names:
pytest.skip("This appliance does not have `flavors` collection.")
image_guid = appliance.rest_api.collections.templates.find_by(
name=provisioning['image']['name'])[0].guid
if ':' in provisioning['instance_type'] and provider.one_of(EC2Provider, GCEProvider):
instance_type = provisioning['instance_type'].split(':')[0].strip()
elif provider.type == 'azure':
instance_type = provisioning['instance_type'].lower()
else:
instance_type = provisioning['instance_type']
flavors = appliance.rest_api.collections.flavors.find_by(name=instance_type)
assert flavors
# TODO: Multi search when it works
for flavor in flavors:
if flavor.ems.name == provider.name:
flavor_id = flavor.id
break
else:
pytest.fail(
"Cannot find flavour {} for provider {}".format(instance_type, provider.name))
provision_data = {
"version": "1.1",
"template_fields": {
"guid": image_guid,
},
"vm_fields": {
"vm_name": vm_name,
"instance_type": flavor_id,
"request_type": "template",
},
"requester": {
"user_name": "admin",
"owner_first_name": "Administrator",
"owner_last_name": "Administratorovich",
"owner_email": "[email protected]",
"auto_approve": True,
},
"tags": {
},
"additional_values": {
},
"ems_custom_attributes": {
},
"miq_custom_attributes": {
}
}
if not isinstance(provider, AzureProvider):
recursive_update(provision_data, {
'vm_fields': {
'availability_zone': provisioning['availability_zone'],
'security_groups': [provisioning['security_group']],
'guest_keypair': provisioning['guest_keypair']}})
if isinstance(provider, GCEProvider):
recursive_update(provision_data, {
'vm_fields': {
'cloud_network': provisioning['cloud_network'],
'boot_disk_size': provisioning['boot_disk_size'].replace(' ', '.'),
'zone': provisioning['availability_zone'],
'region': provider.data["region"]}})
elif isinstance(provider, AzureProvider):
try:
template = provider.data.templates.small_template
vm_user = credentials[template.creds].username
vm_password = credentials[template.creds].password
except AttributeError:
pytest.skip('Could not find small_template or credentials for {}'.format(provider.name))
# mapping: product/dialogs/miq_dialogs/miq_provision_azure_dialogs_template.yaml
recursive_update(provision_data, {
'vm_fields': {
'root_username': vm_user,
'root_password': vm_password}})
request.addfinalizer(
lambda: provider.mgmt.delete_vm(vm_name) if provider.mgmt.does_vm_exist(vm_name) else None)
response = appliance.rest_api.collections.provision_requests.action.create(**provision_data)[0]
assert_response(appliance)
provision_request = appliance.collections.requests.instantiate(description=response.description)
provision_request.wait_for_request()
assert provision_request.is_succeeded(), ("Provisioning failed with the message {}".format(
provision_request.rest.message))
wait_for(
lambda: provider.mgmt.does_vm_exist(vm_name),
num_sec=1000, delay=5, message="VM {} becomes visible".format(vm_name))
@pytest.mark.uncollectif(lambda provider: not provider.one_of(EC2Provider, OpenStackProvider))
def test_manual_placement_using_rest(
appliance, request, setup_provider, provider, vm_name, provisioning):
""" Tests provisioning cloud instance with manual placement using the REST API.
Metadata:
test_flag: provision, rest
"""
image_guid = appliance.rest_api.collections.templates.get(
name=provisioning['image']['name']).guid
provider_rest = appliance.rest_api.collections.providers.get(name=provider.name)
security_group_name = provisioning['security_group'].split(':')[0].strip()
if ':' in provisioning['instance_type'] and provider.one_of(EC2Provider):
instance_type = provisioning['instance_type'].split(':')[0].strip()
else:
instance_type = provisioning['instance_type']
flavors = appliance.rest_api.collections.flavors.find_by(name=instance_type)
assert flavors
flavor = None
for flavor in flavors:
if flavor.ems_id == provider_rest.id:
break
else:
pytest.fail("Cannot find flavour.")
provider_data = appliance.rest_api.get(provider_rest._href +
'?attributes=cloud_networks,cloud_subnets,security_groups,cloud_tenants')
# find out cloud network
assert provider_data['cloud_networks']
cloud_network_name = provisioning.get('cloud_network').strip()
if provider.one_of(EC2Provider):
cloud_network_name = cloud_network_name.split()[0]
cloud_network = None
for cloud_network in provider_data['cloud_networks']:
# If name of cloud network is available, find match.
# Otherwise just "enabled" is enough.
if cloud_network_name and cloud_network_name != cloud_network['name']:
continue
if cloud_network['enabled']:
break
else:
pytest.fail("Cannot find cloud network.")
# find out security group
assert provider_data['security_groups']
security_group = None
for group in provider_data['security_groups']:
if (group.get('cloud_network_id') == cloud_network['id'] and
group['name'] == security_group_name):
security_group = group
break
# OpenStack doesn't seem to have the "cloud_network_id" attribute.
# At least try to find the group where the group name matches.
elif not security_group and group['name'] == security_group_name:
security_group = group
if not security_group:
pytest.fail("Cannot find security group.")
# find out cloud subnet
assert provider_data['cloud_subnets']
cloud_subnet = None
for cloud_subnet in provider_data['cloud_subnets']:
if (cloud_subnet.get('cloud_network_id') == cloud_network['id'] and
cloud_subnet['status'] in ('available', 'active')):
break
else:
pytest.fail("Cannot find cloud subnet.")
def _find_availability_zone_id():
subnet_data = appliance.rest_api.get(provider_rest._href + '?attributes=cloud_subnets')
for subnet in subnet_data['cloud_subnets']:
if subnet['id'] == cloud_subnet['id'] and 'availability_zone_id' in subnet:
return subnet['availability_zone_id']
return False
# find out availability zone
availability_zone_id = None
if provisioning.get('availability_zone'):
availability_zone_entities = appliance.rest_api.collections.availability_zones.find_by(
name=provisioning['availability_zone'])
if availability_zone_entities and availability_zone_entities[0].ems_id == flavor.ems_id:
availability_zone_id = availability_zone_entities[0].id
if not availability_zone_id and 'availability_zone_id' in cloud_subnet:
availability_zone_id = cloud_subnet['availability_zone_id']
if not availability_zone_id:
availability_zone_id, _ = wait_for(
_find_availability_zone_id, num_sec=100, delay=5, message="availability_zone present")
# find out cloud tenant
cloud_tenant_id = None
tenant_name = provisioning.get('cloud_tenant')
if tenant_name:
for tenant in provider_data.get('cloud_tenants', []):
if (tenant['name'] == tenant_name and
tenant['enabled'] and
tenant['ems_id'] == flavor.ems_id):
cloud_tenant_id = tenant['id']
provision_data = {
"version": "1.1",
"template_fields": {
"guid": image_guid
},
"vm_fields": {
"vm_name": vm_name,
"instance_type": flavor.id,
"request_type": "template",
"placement_auto": False,
"cloud_network": cloud_network['id'],
"cloud_subnet": cloud_subnet['id'],
"placement_availability_zone": availability_zone_id,
"security_groups": security_group['id'],
"monitoring": "basic"
},
"requester": {
"user_name": "admin",
"owner_first_name": "Administrator",
"owner_last_name": "Administratorovich",
"owner_email": "[email protected]",
"auto_approve": True,
},
"tags": {
},
"additional_values": {
},
"ems_custom_attributes": {
},
"miq_custom_attributes": {
}
}
if cloud_tenant_id:
provision_data['vm_fields']['cloud_tenant'] = cloud_tenant_id
request.addfinalizer(
lambda: provider.mgmt.delete_vm(vm_name) if provider.mgmt.does_vm_exist(vm_name) else None)
response = appliance.rest_api.collections.provision_requests.action.create(**provision_data)[0]
assert_response(appliance)
provision_request = appliance.collections.requests.instantiate(description=response.description)
provision_request.wait_for_request()
assert provision_request.is_succeeded(), ("Provisioning failed with the message {}".format(
provision_request.rest.message))
wait_for(
lambda: provider.mgmt.does_vm_exist(vm_name),
num_sec=1000, delay=5, message="VM {} becomes visible".format(vm_name))
VOLUME_METHOD = ("""
prov = $evm.root["miq_provision"]
prov.set_option(
:clone_options,
{{ :block_device_mapping => [{}] }})
""")
ONE_FIELD = """{{:volume_id => "{}", :device_name => "{}"}}"""
@pytest.fixture(scope="module")
def domain(request, appliance):
domain = DomainCollection(appliance).create(name=fauxfactory.gen_alphanumeric(), enabled=True)
request.addfinalizer(domain.delete_if_exists)
return domain
@pytest.fixture(scope="module")
def original_request_class(appliance):
return DomainCollection(appliance).instantiate(name='ManageIQ')\
.namespaces.instantiate(name='Cloud')\
.namespaces.instantiate(name='VM')\
.namespaces.instantiate(name='Provisioning')\
.namespaces.instantiate(name='StateMachines')\
.classes.instantiate(name='Methods')
@pytest.fixture(scope="module")
def modified_request_class(request, domain, original_request_class):
with error.handler("error: Error during 'Automate Class copy'"):
# methods of this class might have been copied by other fixture, so this error can occur
original_request_class.copy_to(domain)
klass = domain\
.namespaces.instantiate(name='Cloud')\
.namespaces.instantiate(name='VM')\
.namespaces.instantiate(name='Provisioning')\
.namespaces.instantiate(name='StateMachines')\
.classes.instantiate(name='Methods')
request.addfinalizer(klass.delete_if_exists)
return klass
@pytest.fixture(scope="module")
def copy_domains(original_request_class, domain):
methods = ['openstack_PreProvision', 'openstack_CustomizeRequest']
for method in methods:
original_request_class.methods.instantiate(name=method).copy_to(domain)
# Not collected for EC2 in generate_tests above
@pytest.mark.parametrize("disks", [1, 2])
@pytest.mark.uncollectif(lambda provider: not provider.one_of(OpenStackProvider))
def test_provision_from_template_with_attached_disks(request, testing_instance, provider, disks,
soft_assert, domain, modified_request_class,
copy_domains, provisioning):
""" Tests provisioning from a template and attaching disks
Metadata:
test_flag: provision
"""
instance, inst_args, image = testing_instance
    # Modify availability_zone for Azure provider
    if provider.one_of(AzureProvider):
        recursive_update(inst_args, {'environment': {'availability_zone': provisioning.get('av_set')}})
device_name = "/dev/sd{}"
device_mapping = []
with provider.mgmt.with_volumes(1, n=disks) as volumes:
for i, volume in enumerate(volumes):
device_mapping.append((volume, device_name.format(chr(ord("b") + i))))
# Set up automate
method = modified_request_class.methods.instantiate(name="openstack_PreProvision")
with update(method):
disk_mapping = []
for mapping in device_mapping:
disk_mapping.append(ONE_FIELD.format(*mapping))
method.script = VOLUME_METHOD.format(", ".join(disk_mapping))
def _finish_method():
with update(method):
method.script = """prov = $evm.root["miq_provision"]"""
request.addfinalizer(_finish_method)
instance.create(**inst_args)
for volume_id in volumes:
            soft_assert(instance.name in provider.mgmt.volume_attachments(volume_id))
        for volume, device in device_mapping:
            soft_assert(provider.mgmt.volume_attachments(volume)[instance.name] == device)
instance.delete_from_provider() # To make it possible to delete the volume
wait_for(lambda: not instance.does_vm_exist_on_provider(), num_sec=180, delay=5)
# Not collected for EC2 in generate_tests above
@pytest.mark.uncollectif(lambda provider: not provider.one_of(OpenStackProvider))
def test_provision_with_boot_volume(request, testing_instance, provider, soft_assert,
modified_request_class, appliance, copy_domains):
""" Tests provisioning from a template and attaching one booting volume.
Metadata:
test_flag: provision, volumes
"""
instance, inst_args, image = testing_instance
with provider.mgmt.with_volume(1, imageRef=provider.mgmt.get_template_id(image)) as volume:
# Set up automate
method = modified_request_class.methods.instantiate(name="openstack_CustomizeRequest")
with update(method):
method.script = dedent('''\
$evm.root["miq_provision"].set_option(
:clone_options, {{
:image_ref => nil,
:block_device_mapping_v2 => [{{
:boot_index => 0,
:uuid => "{}",
:device_name => "vda",
:source_type => "volume",
:destination_type => "volume",
:volume_size => 1,
:delete_on_termination => false
}}]
}}
)
'''.format(volume))
@request.addfinalizer
def _finish_method():
with update(method):
method.script = """prov = $evm.root["miq_provision"]"""
instance.create(**inst_args)
request_description = 'Provision from [{}] to [{}]'.format(image,
instance.name)
provision_request = appliance.collections.requests.instantiate(request_description)
try:
provision_request.wait_for_request(method='ui')
except Exception as e:
logger.info(
"Provision failed {}: {}".format(e, provision_request.request_state))
raise e
msg = "Provisioning failed with the message {}".format(
provision_request.row.last_message.text)
assert provision_request.is_succeeded(method='ui'), msg
soft_assert(instance.name in provider.mgmt.volume_attachments(volume))
soft_assert(provider.mgmt.volume_attachments(volume)[instance.name] == "/dev/vda")
instance.delete_from_provider() # To make it possible to delete the volume
wait_for(lambda: not instance.does_vm_exist_on_provider(), num_sec=180, delay=5)
# Not collected for EC2 in generate_tests above
@pytest.mark.uncollectif(lambda provider: not provider.one_of(OpenStackProvider))
def test_provision_with_additional_volume(request, testing_instance, provider, small_template,
soft_assert, modified_request_class, appliance,
copy_domains):
""" Tests provisioning with setting specific image from AE and then also making it create and
attach an additional 3G volume.
Metadata:
test_flag: provision, volumes
"""
instance, inst_args, image = testing_instance
# Set up automate
method = modified_request_class.methods.instantiate(name="openstack_CustomizeRequest")
try:
image_id = provider.mgmt.get_template_id(small_template.name)
except KeyError:
        pytest.skip("No small_template in provider data!")
with update(method):
method.script = dedent('''\
$evm.root["miq_provision"].set_option(
:clone_options, {{
:image_ref => nil,
:block_device_mapping_v2 => [{{
:boot_index => 0,
:uuid => "{}",
:device_name => "vda",
:source_type => "image",
:destination_type => "volume",
:volume_size => 3,
:delete_on_termination => false
}}]
}}
)
'''.format(image_id))
def _finish_method():
with update(method):
method.script = """prov = $evm.root["miq_provision"]"""
request.addfinalizer(_finish_method)
instance.create(**inst_args)
request_description = 'Provision from [{}] to [{}]'.format(small_template.name, instance.name)
provision_request = appliance.collections.requests.instantiate(request_description)
try:
provision_request.wait_for_request(method='ui')
except Exception as e:
logger.info(
"Provision failed {}: {}".format(e, provision_request.request_state))
raise e
assert provision_request.is_succeeded(method='ui'), (
"Provisioning failed with the message {}".format(
provision_request.row.last_message.text))
prov_instance = provider.mgmt._find_instance_by_name(instance.name)
try:
assert hasattr(prov_instance, 'os-extended-volumes:volumes_attached')
volumes_attached = getattr(prov_instance, 'os-extended-volumes:volumes_attached')
assert len(volumes_attached) == 1
volume_id = volumes_attached[0]["id"]
assert provider.mgmt.volume_exists(volume_id)
volume = provider.mgmt.get_volume(volume_id)
assert volume.size == 3
finally:
instance.delete_from_provider()
wait_for(lambda: not instance.does_vm_exist_on_provider(), num_sec=180, delay=5)
if "volume_id" in locals(): # To handle the case of 1st or 2nd assert
if provider.mgmt.volume_exists(volume_id):
provider.mgmt.delete_volume(volume_id)
@pytest.mark.parametrize('testing_instance', ['tag'], indirect=True)
def test_cloud_provision_with_tag(provisioned_instance, tag):
""" Tests tagging instance using provisioning dialogs.
Steps:
* Open the provisioning dialog.
* Apart from the usual provisioning settings, pick a tag.
* Submit the provisioning request and wait for it to finish.
* Visit instance page, it should display the selected tags
Metadata:
test_flag: provision
"""
assert provisioned_instance.does_vm_exist_on_provider(), "Instance wasn't provisioned"
tags = provisioned_instance.get_tags()
assert any(
instance_tag.category.display_name == tag.category.display_name and
instance_tag.display_name == tag.display_name for instance_tag in tags), (
"{}: {} not in ({})".format(tag.category.display_name, tag.display_name, str(tags)))
|
akarol/cfme_tests
|
cfme/tests/cloud/test_provisioning.py
|
Python
|
gpl-2.0
| 27,785
|
[
"VisIt"
] |
6b2e8e4f18f97b5b6da2ced4418ec7917ad4673df9063c8ebf0a14ac093ed578
|
# sybase/base.py
# Copyright (C) 2010-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
# get_select_precolumns(), limit_clause() implementation
# copyright (C) 2007 Fisch Asset Management
# AG http://www.fam.ch, with coding by Alexander Houben
# [email protected]
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sybase
:name: Sybase
.. note::
The Sybase dialect functions on current SQLAlchemy versions
but is not regularly tested, and may have many issues and
caveats not currently handled.
"""
import operator
import re
from sqlalchemy.sql import compiler, expression, text, bindparam
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import types as sqltypes
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import schema as sa_schema
from sqlalchemy import util, sql, exc
from sqlalchemy.types import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\
TEXT, DATE, DATETIME, FLOAT, NUMERIC,\
BIGINT, INT, INTEGER, SMALLINT, BINARY,\
VARBINARY, DECIMAL, TIMESTAMP, Unicode,\
UnicodeText, REAL
RESERVED_WORDS = set([
"add", "all", "alter", "and",
"any", "as", "asc", "backup",
"begin", "between", "bigint", "binary",
"bit", "bottom", "break", "by",
"call", "capability", "cascade", "case",
"cast", "char", "char_convert", "character",
"check", "checkpoint", "close", "comment",
"commit", "connect", "constraint", "contains",
"continue", "convert", "create", "cross",
"cube", "current", "current_timestamp", "current_user",
"cursor", "date", "dbspace", "deallocate",
"dec", "decimal", "declare", "default",
"delete", "deleting", "desc", "distinct",
"do", "double", "drop", "dynamic",
"else", "elseif", "encrypted", "end",
"endif", "escape", "except", "exception",
"exec", "execute", "existing", "exists",
"externlogin", "fetch", "first", "float",
"for", "force", "foreign", "forward",
"from", "full", "goto", "grant",
"group", "having", "holdlock", "identified",
"if", "in", "index", "index_lparen",
"inner", "inout", "insensitive", "insert",
"inserting", "install", "instead", "int",
"integer", "integrated", "intersect", "into",
"iq", "is", "isolation", "join",
"key", "lateral", "left", "like",
"lock", "login", "long", "match",
"membership", "message", "mode", "modify",
"natural", "new", "no", "noholdlock",
"not", "notify", "null", "numeric",
"of", "off", "on", "open",
"option", "options", "or", "order",
"others", "out", "outer", "over",
"passthrough", "precision", "prepare", "primary",
"print", "privileges", "proc", "procedure",
"publication", "raiserror", "readtext", "real",
"reference", "references", "release", "remote",
"remove", "rename", "reorganize", "resource",
"restore", "restrict", "return", "revoke",
"right", "rollback", "rollup", "save",
"savepoint", "scroll", "select", "sensitive",
"session", "set", "setuser", "share",
"smallint", "some", "sqlcode", "sqlstate",
"start", "stop", "subtrans", "subtransaction",
"synchronize", "syntax_error", "table", "temporary",
"then", "time", "timestamp", "tinyint",
"to", "top", "tran", "trigger",
"truncate", "tsequal", "unbounded", "union",
"unique", "unknown", "unsigned", "update",
"updating", "user", "using", "validate",
"values", "varbinary", "varchar", "variable",
"varying", "view", "wait", "waitfor",
"when", "where", "while", "window",
"with", "with_cube", "with_lparen", "with_rollup",
"within", "work", "writetext",
])
class _SybaseUnitypeMixin(object):
"""these types appear to return a buffer object."""
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
return str(value) # decode("ucs-2")
else:
return None
return process
class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
__visit_name__ = 'UNICHAR'
class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
__visit_name__ = 'UNIVARCHAR'
class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText):
__visit_name__ = 'UNITEXT'
class TINYINT(sqltypes.Integer):
__visit_name__ = 'TINYINT'
class BIT(sqltypes.TypeEngine):
__visit_name__ = 'BIT'
class MONEY(sqltypes.TypeEngine):
__visit_name__ = "MONEY"
class SMALLMONEY(sqltypes.TypeEngine):
__visit_name__ = "SMALLMONEY"
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
__visit_name__ = "UNIQUEIDENTIFIER"
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = 'IMAGE'
class SybaseTypeCompiler(compiler.GenericTypeCompiler):
def visit_large_binary(self, type_, **kw):
return self.visit_IMAGE(type_)
def visit_boolean(self, type_, **kw):
return self.visit_BIT(type_)
def visit_unicode(self, type_, **kw):
return self.visit_NVARCHAR(type_)
def visit_UNICHAR(self, type_, **kw):
return "UNICHAR(%d)" % type_.length
def visit_UNIVARCHAR(self, type_, **kw):
return "UNIVARCHAR(%d)" % type_.length
def visit_UNITEXT(self, type_, **kw):
return "UNITEXT"
def visit_TINYINT(self, type_, **kw):
return "TINYINT"
def visit_IMAGE(self, type_, **kw):
return "IMAGE"
def visit_BIT(self, type_, **kw):
return "BIT"
def visit_MONEY(self, type_, **kw):
return "MONEY"
def visit_SMALLMONEY(self, type_, **kw):
return "SMALLMONEY"
def visit_UNIQUEIDENTIFIER(self, type_, **kw):
return "UNIQUEIDENTIFIER"
ischema_names = {
'bigint': BIGINT,
'int': INTEGER,
'integer': INTEGER,
'smallint': SMALLINT,
'tinyint': TINYINT,
'unsigned bigint': BIGINT, # TODO: unsigned flags
'unsigned int': INTEGER, # TODO: unsigned flags
'unsigned smallint': SMALLINT, # TODO: unsigned flags
'numeric': NUMERIC,
'decimal': DECIMAL,
'dec': DECIMAL,
'float': FLOAT,
'double': NUMERIC, # TODO
'double precision': NUMERIC, # TODO
'real': REAL,
'smallmoney': SMALLMONEY,
'money': MONEY,
'smalldatetime': DATETIME,
'datetime': DATETIME,
'date': DATE,
'time': TIME,
'char': CHAR,
'character': CHAR,
'varchar': VARCHAR,
'character varying': VARCHAR,
'char varying': VARCHAR,
'unichar': UNICHAR,
'unicode character': UNIVARCHAR,
'nchar': NCHAR,
'national char': NCHAR,
'national character': NCHAR,
'nvarchar': NVARCHAR,
'nchar varying': NVARCHAR,
'national char varying': NVARCHAR,
'national character varying': NVARCHAR,
'text': TEXT,
'unitext': UNITEXT,
'binary': BINARY,
'varbinary': VARBINARY,
'image': IMAGE,
'bit': BIT,
# not in documentation for ASE 15.7
'long varchar': TEXT, # TODO
'timestamp': TIMESTAMP,
'uniqueidentifier': UNIQUEIDENTIFIER,
}
class SybaseInspector(reflection.Inspector):
def __init__(self, conn):
reflection.Inspector.__init__(self, conn)
def get_table_id(self, table_name, schema=None):
"""Return the table id from `table_name` and `schema`."""
return self.dialect.get_table_id(self.bind, table_name, schema,
info_cache=self.info_cache)
class SybaseExecutionContext(default.DefaultExecutionContext):
_enable_identity_insert = False
def set_ddl_autocommit(self, connection, value):
"""Must be implemented by subclasses to accommodate DDL executions.
"connection" is the raw unwrapped DBAPI connection. "value"
is True or False. when True, the connection should be configured
such that a DDL can take place subsequently. when False,
a DDL has taken place and the connection should be resumed
into non-autocommit mode.
"""
raise NotImplementedError()
def pre_exec(self):
if self.isinsert:
tbl = self.compiled.statement.table
seq_column = tbl._autoincrement_column
insert_has_sequence = seq_column is not None
if insert_has_sequence:
self._enable_identity_insert = \
seq_column.key in self.compiled_parameters[0]
else:
self._enable_identity_insert = False
if self._enable_identity_insert:
self.cursor.execute(
"SET IDENTITY_INSERT %s ON" %
self.dialect.identifier_preparer.format_table(tbl))
if self.isddl:
# TODO: to enhance this, we can detect "ddl in tran" on the
# database settings. this error message should be improved to
# include a note about that.
if not self.should_autocommit:
raise exc.InvalidRequestError(
"The Sybase dialect only supports "
"DDL in 'autocommit' mode at this time.")
self.root_connection.engine.logger.info(
"AUTOCOMMIT (Assuming no Sybase 'ddl in tran')")
self.set_ddl_autocommit(
self.root_connection.connection.connection,
True)
def post_exec(self):
if self.isddl:
self.set_ddl_autocommit(self.root_connection, False)
if self._enable_identity_insert:
self.cursor.execute(
"SET IDENTITY_INSERT %s OFF" %
self.dialect.identifier_preparer.
format_table(self.compiled.statement.table)
)
def get_lastrowid(self):
cursor = self.create_cursor()
cursor.execute("SELECT @@identity AS lastrowid")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
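# A minimal sketch of a concrete execution context illustrating the
# set_ddl_autocommit() contract documented above. It assumes a hypothetical
# DBAPI connection that exposes an ``autocommit`` attribute; the real driver
# subclasses (e.g. for pyodbc) live in their own modules and may differ, and
# this example class is not wired into SybaseDialect below.
class _ExampleSybaseExecutionContext(SybaseExecutionContext):
    def set_ddl_autocommit(self, dbapi_connection, value):
        # Toggle driver-level autocommit so DDL can run outside a transaction.
        dbapi_connection.autocommit = value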
class SybaseSQLCompiler(compiler.SQLCompiler):
ansi_bind_rules = True
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
'doy': 'dayofyear',
'dow': 'weekday',
'milliseconds': 'millisecond'
})
def get_select_precolumns(self, select):
s = select._distinct and "DISTINCT " or ""
# TODO: don't think Sybase supports
# bind params for FIRST / TOP
limit = select._limit
if limit:
# if select._limit == 1:
# s += "FIRST "
# else:
# s += "TOP %s " % (select._limit,)
s += "TOP %s " % (limit,)
offset = select._offset
if offset:
if not limit:
# FIXME: sybase doesn't allow an offset without a limit
# so use a huge value for TOP here
s += "TOP 1000000 "
s += "START AT %s " % (offset + 1,)
return s
def get_from_hint_text(self, table, text):
return text
def limit_clause(self, select, **kw):
# Limit in sybase is after the select keyword
return ""
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
return 'DATEPART("%s", %s)' % (
field, self.process(extract.expr, **kw))
def visit_now_func(self, fn, **kw):
return "GETDATE()"
def for_update_clause(self, select):
# "FOR UPDATE" is only allowed on "DECLARE CURSOR"
# which SQLAlchemy doesn't use
return ''
def order_by_clause(self, select, **kw):
kw['literal_binds'] = True
order_by = self.process(select._order_by_clause, **kw)
# SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT
if order_by and (not self.is_subquery() or select._limit):
return " ORDER BY " + order_by
else:
return ""
class SybaseDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column) + " " + \
self.dialect.type_compiler.process(
column.type, type_expression=column)
if column.table is None:
raise exc.CompileError(
"The Sybase dialect requires Table-bound "
"columns in order to generate DDL")
seq_col = column.table._autoincrement_column
# install a IDENTITY Sequence if we have an implicit IDENTITY column
if seq_col is column:
sequence = isinstance(column.default, sa_schema.Sequence) \
and column.default
if sequence:
start, increment = sequence.start or 1, \
sequence.increment or 1
else:
start, increment = 1, 1
if (start, increment) == (1, 1):
colspec += " IDENTITY"
else:
# TODO: need correct syntax for this
colspec += " IDENTITY(%s,%s)" % (start, increment)
else:
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if column.nullable is not None:
if not column.nullable or column.primary_key:
colspec += " NOT NULL"
else:
colspec += " NULL"
return colspec
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX %s.%s" % (
self.preparer.quote_identifier(index.table.name),
self._prepared_index_name(drop.element,
include_schema=False)
)
class SybaseIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
class SybaseDialect(default.DefaultDialect):
name = 'sybase'
supports_unicode_statements = False
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
supports_native_boolean = False
supports_unicode_binds = False
postfetch_lastrowid = True
supports_simple_order_by_label = False
colspecs = {}
ischema_names = ischema_names
type_compiler = SybaseTypeCompiler
statement_compiler = SybaseSQLCompiler
ddl_compiler = SybaseDDLCompiler
preparer = SybaseIdentifierPreparer
inspector = SybaseInspector
construct_arguments = []
def _get_default_schema_name(self, connection):
return connection.scalar(
text("SELECT user_name() as user_name",
typemap={'user_name': Unicode})
)
def initialize(self, connection):
super(SybaseDialect, self).initialize(connection)
if self.server_version_info is not None and\
self.server_version_info < (15, ):
self.max_identifier_length = 30
else:
self.max_identifier_length = 255
def get_table_id(self, connection, table_name, schema=None, **kw):
"""Fetch the id for schema.table_name.
Several reflection methods require the table id. The idea for using
this method is that it can be fetched one time and cached for
subsequent calls.
"""
table_id = None
if schema is None:
schema = self.default_schema_name
TABLEID_SQL = text("""
SELECT o.id AS id
FROM sysobjects o JOIN sysusers u ON o.uid=u.uid
WHERE u.name = :schema_name
AND o.name = :table_name
AND o.type in ('U', 'V')
""")
if util.py2k:
if isinstance(schema, unicode):
schema = schema.encode("ascii")
if isinstance(table_name, unicode):
table_name = table_name.encode("ascii")
result = connection.execute(TABLEID_SQL,
schema_name=schema,
table_name=table_name)
table_id = result.scalar()
if table_id is None:
raise exc.NoSuchTableError(table_name)
return table_id
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
COLUMN_SQL = text("""
SELECT col.name AS name,
t.name AS type,
(col.status & 8) AS nullable,
(col.status & 128) AS autoincrement,
com.text AS 'default',
col.prec AS precision,
col.scale AS scale,
col.length AS length
FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON
col.cdefault = com.id
WHERE col.usertype = t.usertype
AND col.id = :table_id
ORDER BY col.colid
""")
results = connection.execute(COLUMN_SQL, table_id=table_id)
columns = []
for (name, type_, nullable, autoincrement, default, precision, scale,
length) in results:
col_info = self._get_column_info(name, type_, bool(nullable),
bool(autoincrement),
default, precision, scale,
length)
columns.append(col_info)
return columns
def _get_column_info(self, name, type_, nullable, autoincrement, default,
precision, scale, length):
coltype = self.ischema_names.get(type_, None)
kwargs = {}
if coltype in (NUMERIC, DECIMAL):
args = (precision, scale)
elif coltype == FLOAT:
args = (precision,)
elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
args = (length,)
else:
args = ()
if coltype:
coltype = coltype(*args, **kwargs)
# is this necessary
# if is_array:
# coltype = ARRAY(coltype)
else:
util.warn("Did not recognize type '%s' of column '%s'" %
(type_, name))
coltype = sqltypes.NULLTYPE
if default:
default = re.sub("DEFAULT", "", default).strip()
default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
else:
default = None
column_info = dict(name=name, type=coltype, nullable=nullable,
default=default, autoincrement=autoincrement)
return column_info
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
table_cache = {}
column_cache = {}
foreign_keys = []
table_cache[table_id] = {"name": table_name, "schema": schema}
COLUMN_SQL = text("""
SELECT c.colid AS id, c.name AS name
FROM syscolumns c
WHERE c.id = :table_id
""")
results = connection.execute(COLUMN_SQL, table_id=table_id)
columns = {}
for col in results:
columns[col["id"]] = col["name"]
column_cache[table_id] = columns
REFCONSTRAINT_SQL = text("""
SELECT o.name AS name, r.reftabid AS reftable_id,
r.keycnt AS 'count',
r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3,
r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6,
            r.fokey7 AS fokey7, r.fokey8 AS fokey8, r.fokey9 AS fokey9,
r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12,
r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS fokey15,
r.fokey16 AS fokey16,
r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3,
r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6,
            r.refkey7 AS refkey7, r.refkey8 AS refkey8, r.refkey9 AS refkey9,
r.refkey10 AS refkey10, r.refkey11 AS refkey11,
r.refkey12 AS refkey12, r.refkey13 AS refkey13,
r.refkey14 AS refkey14, r.refkey15 AS refkey15,
r.refkey16 AS refkey16
FROM sysreferences r JOIN sysobjects o on r.tableid = o.id
WHERE r.tableid = :table_id
""")
referential_constraints = connection.execute(REFCONSTRAINT_SQL,
table_id=table_id)
REFTABLE_SQL = text("""
SELECT o.name AS name, u.name AS 'schema'
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE o.id = :table_id
""")
for r in referential_constraints:
reftable_id = r["reftable_id"]
if reftable_id not in table_cache:
c = connection.execute(REFTABLE_SQL, table_id=reftable_id)
reftable = c.fetchone()
c.close()
table_info = {"name": reftable["name"], "schema": None}
if (schema is not None or
reftable["schema"] != self.default_schema_name):
table_info["schema"] = reftable["schema"]
table_cache[reftable_id] = table_info
results = connection.execute(COLUMN_SQL, table_id=reftable_id)
reftable_columns = {}
for col in results:
reftable_columns[col["id"]] = col["name"]
column_cache[reftable_id] = reftable_columns
reftable = table_cache[reftable_id]
reftable_columns = column_cache[reftable_id]
constrained_columns = []
referred_columns = []
for i in range(1, r["count"] + 1):
constrained_columns.append(columns[r["fokey%i" % i]])
referred_columns.append(reftable_columns[r["refkey%i" % i]])
fk_info = {
"constrained_columns": constrained_columns,
"referred_schema": reftable["schema"],
"referred_table": reftable["name"],
"referred_columns": referred_columns,
"name": r["name"]
}
foreign_keys.append(fk_info)
return foreign_keys
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
INDEX_SQL = text("""
SELECT object_name(i.id) AS table_name,
i.keycnt AS 'count',
i.name AS name,
(i.status & 0x2) AS 'unique',
index_col(object_name(i.id), i.indid, 1) AS col_1,
index_col(object_name(i.id), i.indid, 2) AS col_2,
index_col(object_name(i.id), i.indid, 3) AS col_3,
index_col(object_name(i.id), i.indid, 4) AS col_4,
index_col(object_name(i.id), i.indid, 5) AS col_5,
index_col(object_name(i.id), i.indid, 6) AS col_6,
index_col(object_name(i.id), i.indid, 7) AS col_7,
index_col(object_name(i.id), i.indid, 8) AS col_8,
index_col(object_name(i.id), i.indid, 9) AS col_9,
index_col(object_name(i.id), i.indid, 10) AS col_10,
index_col(object_name(i.id), i.indid, 11) AS col_11,
index_col(object_name(i.id), i.indid, 12) AS col_12,
index_col(object_name(i.id), i.indid, 13) AS col_13,
index_col(object_name(i.id), i.indid, 14) AS col_14,
index_col(object_name(i.id), i.indid, 15) AS col_15,
index_col(object_name(i.id), i.indid, 16) AS col_16
FROM sysindexes i, sysobjects o
WHERE o.id = i.id
AND o.id = :table_id
AND (i.status & 2048) = 0
AND i.indid BETWEEN 1 AND 254
""")
results = connection.execute(INDEX_SQL, table_id=table_id)
indexes = []
for r in results:
column_names = []
for i in range(1, r["count"]):
column_names.append(r["col_%i" % (i,)])
index_info = {"name": r["name"],
"unique": bool(r["unique"]),
"column_names": column_names}
indexes.append(index_info)
return indexes
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
PK_SQL = text("""
SELECT object_name(i.id) AS table_name,
i.keycnt AS 'count',
i.name AS name,
index_col(object_name(i.id), i.indid, 1) AS pk_1,
index_col(object_name(i.id), i.indid, 2) AS pk_2,
index_col(object_name(i.id), i.indid, 3) AS pk_3,
index_col(object_name(i.id), i.indid, 4) AS pk_4,
index_col(object_name(i.id), i.indid, 5) AS pk_5,
index_col(object_name(i.id), i.indid, 6) AS pk_6,
index_col(object_name(i.id), i.indid, 7) AS pk_7,
index_col(object_name(i.id), i.indid, 8) AS pk_8,
index_col(object_name(i.id), i.indid, 9) AS pk_9,
index_col(object_name(i.id), i.indid, 10) AS pk_10,
index_col(object_name(i.id), i.indid, 11) AS pk_11,
index_col(object_name(i.id), i.indid, 12) AS pk_12,
index_col(object_name(i.id), i.indid, 13) AS pk_13,
index_col(object_name(i.id), i.indid, 14) AS pk_14,
index_col(object_name(i.id), i.indid, 15) AS pk_15,
index_col(object_name(i.id), i.indid, 16) AS pk_16
FROM sysindexes i, sysobjects o
WHERE o.id = i.id
AND o.id = :table_id
AND (i.status & 2048) = 2048
AND i.indid BETWEEN 1 AND 254
""")
results = connection.execute(PK_SQL, table_id=table_id)
pks = results.fetchone()
results.close()
constrained_columns = []
for i in range(1, pks["count"] + 1):
constrained_columns.append(pks["pk_%i" % (i,)])
return {"constrained_columns": constrained_columns,
"name": pks["name"]}
@reflection.cache
def get_schema_names(self, connection, **kw):
SCHEMA_SQL = text("SELECT u.name AS name FROM sysusers u")
schemas = connection.execute(SCHEMA_SQL)
return [s["name"] for s in schemas]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
TABLE_SQL = text("""
SELECT o.name AS name
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE u.name = :schema_name
AND o.type = 'U'
""")
if util.py2k:
if isinstance(schema, unicode):
schema = schema.encode("ascii")
tables = connection.execute(TABLE_SQL, schema_name=schema)
return [t["name"] for t in tables]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
VIEW_DEF_SQL = text("""
SELECT c.text
FROM syscomments c JOIN sysobjects o ON c.id = o.id
WHERE o.name = :view_name
AND o.type = 'V'
""")
if util.py2k:
if isinstance(view_name, unicode):
view_name = view_name.encode("ascii")
view = connection.execute(VIEW_DEF_SQL, view_name=view_name)
return view.scalar()
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
VIEW_SQL = text("""
SELECT o.name AS name
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE u.name = :schema_name
AND o.type = 'V'
""")
if util.py2k:
if isinstance(schema, unicode):
schema = schema.encode("ascii")
views = connection.execute(VIEW_SQL, schema_name=schema)
return [v["name"] for v in views]
def has_table(self, connection, table_name, schema=None):
try:
self.get_table_id(connection, table_name, schema)
except exc.NoSuchTableError:
return False
else:
return True
|
bdh1011/cupeye
|
venv/lib/python2.7/site-packages/sqlalchemy/dialects/sybase/base.py
|
Python
|
bsd-3-clause
| 28,812
|
[
"ASE"
] |
6096c8dde9a2062f5379ba15a27c704359e7071b93d99e22f4d715a89aa74aba
|
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import re
"""
Guidelines for writing new hacking checks
- Use only for Cinder specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range N3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the N3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to
cinder/tests/test_hacking.py
"""
# NOTE(thangp): Ignore N323 pep8 error caused by importing cinder objects
UNDERSCORE_IMPORT_FILES = ['./cinder/objects/__init__.py']
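# A minimal sketch of a new logical-line check written to the guidelines
# above. The "N3xx" rule number and message are hypothetical and the function
# is intentionally not registered in factory() below.
def example_no_todo_without_owner(logical_line):
    # Flag bare TODO comments that do not follow the '# TODO(name)' style.
    if '# TODO' in logical_line and '(' not in logical_line:
        yield (0, "N3xx: example rule - TODO comments should name an owner, "
                  "e.g. '# TODO(username)'")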
translated_log = re.compile(
r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)"
"\(\s*_\(\s*('|\")")
string_translation = re.compile(r"(.)*_\(\s*('|\")")
vi_header_re = re.compile(r"^#\s+vim?:.+")
underscore_import_check = re.compile(r"(.)*i18n\s+import\s+_(.)*")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
no_audit_log = re.compile(r"(.)*LOG\.audit(.)*")
no_print_statements = re.compile(r"\s*print\s*\(.+\).*")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
# NOTE(jsbryant): When other oslo libraries switch over non-namespaced
# imports, we will need to add them to the regex below.
oslo_namespace_imports = re.compile(r"from[\s]*oslo[.](concurrency|db"
"|config|utils|serialization|log)")
no_contextlib_nested = re.compile(r"\s*with (contextlib\.)?nested\(")
log_translation_LI = re.compile(
r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
log_translation_LE = re.compile(
r"(.)*LOG\.(exception|error)\(\s*(_\(|'|\")")
log_translation_LW = re.compile(
r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")")
class BaseASTChecker(ast.NodeVisitor):
"""Provides a simple framework for writing AST-based checks.
Subclasses should implement visit_* methods like any other AST visitor
implementation. When they detect an error for a particular node the
method should call ``self.add_error(offending_node)``. Details about
where in the code the error occurred will be pulled from the node
object.
Subclasses should also provide a class variable named CHECK_DESC to
be used for the human readable error message.
"""
def __init__(self, tree, filename):
"""This object is created automatically by pep8.
:param tree: an AST tree
:param filename: name of the file being analyzed
(ignored by our checks)
"""
self._tree = tree
self._errors = []
def run(self):
"""Called automatically by pep8."""
self.visit(self._tree)
return self._errors
def add_error(self, node, message=None):
"""Add an error caused by a node to the list of errors for pep8."""
# Need to disable pylint check here as it doesn't catch CHECK_DESC
# being defined in the subclasses.
message = message or self.CHECK_DESC # pylint: disable=E1101
error = (node.lineno, node.col_offset, message, self.__class__)
self._errors.append(error)
def _check_call_names(self, call_node, names):
if isinstance(call_node, ast.Call):
if isinstance(call_node.func, ast.Name):
if call_node.func.id in names:
return True
return False
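# A minimal sketch of a checker built on BaseASTChecker, illustrating the
# visit_*/add_error pattern described in its docstring. The rule number and
# message are hypothetical and the class is not registered in factory() below.
class ExampleCheckForEval(BaseASTChecker):
    CHECK_DESC = 'N3xx example rule: eval() should not be used'
    def visit_Call(self, node):
        # Flag direct calls to the builtin eval() and keep walking the tree.
        if self._check_call_names(node, ['eval']):
            self.add_error(node)
        super(ExampleCheckForEval, self).generic_visit(node)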
def no_vi_headers(physical_line, line_number, lines):
"""Check for vi editor configuration in source files.
By default vi modelines can only appear in the first or
last 5 lines of a source file.
N314
"""
# NOTE(gilliard): line_number is 1-indexed
if line_number <= 5 or line_number > len(lines) - 5:
if vi_header_re.match(physical_line):
return 0, "N314: Don't put vi configuration in source files"
def no_translate_debug_logs(logical_line, filename):
"""Check for 'LOG.debug(_('
As per our translation policy,
https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
we shouldn't translate debug level logs.
* This check assumes that 'LOG' is a logger.
* Use filename so we can start enforcing this in specific folders instead
of needing to do so all at once.
N319
"""
if logical_line.startswith("LOG.debug(_("):
yield(0, "N319 Don't translate debug level logs")
def no_mutable_default_args(logical_line):
msg = "N322: Method's default argument shouldn't be mutable!"
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
if mutable_default_args.match(logical_line):
yield (0, msg)
def check_explicit_underscore_import(logical_line, filename):
"""Check for explicit import of the _ function
We need to ensure that any files that are using the _() function
to translate logs are explicitly importing the _ function. We
can't trust unit test to catch whether the import has been
added so we need to check for it here.
"""
# Build a list of the files that have _ imported. No further
# checking needed once it is found.
if filename in UNDERSCORE_IMPORT_FILES:
pass
elif (underscore_import_check.match(logical_line) or
custom_underscore_check.match(logical_line)):
UNDERSCORE_IMPORT_FILES.append(filename)
elif(translated_log.match(logical_line) or
string_translation.match(logical_line)):
yield(0, "N323: Found use of _() without explicit import of _ !")
class CheckForStrUnicodeExc(BaseASTChecker):
"""Checks for the use of str() or unicode() on an exception.
This currently only handles the case where str() or unicode()
is used in the scope of an exception handler. If the exception
is passed into a function, returned from an assertRaises, or
used on an exception created in the same scope, this does not
catch it.
"""
CHECK_DESC = ('N325 str() and unicode() cannot be used on an '
'exception. Remove or use six.text_type()')
def __init__(self, tree, filename):
super(CheckForStrUnicodeExc, self).__init__(tree, filename)
self.name = []
self.already_checked = []
def visit_TryExcept(self, node):
for handler in node.handlers:
if handler.name:
self.name.append(handler.name.id)
super(CheckForStrUnicodeExc, self).generic_visit(node)
self.name = self.name[:-1]
else:
super(CheckForStrUnicodeExc, self).generic_visit(node)
def visit_Call(self, node):
if self._check_call_names(node, ['str', 'unicode']):
if node not in self.already_checked:
self.already_checked.append(node)
if isinstance(node.args[0], ast.Name):
if node.args[0].id in self.name:
self.add_error(node.args[0])
super(CheckForStrUnicodeExc, self).generic_visit(node)
def check_assert_called_once(logical_line, filename):
msg = ("N327: assert_called_once is a no-op. please use assert_called_"
"once_with to test with explicit parameters or an assertEqual with"
" call_count.")
    if ('cinder/tests/functional' in filename or
            'cinder/tests/unit' in filename):
pos = logical_line.find('.assert_called_once(')
if pos != -1:
yield (pos, msg)
def validate_log_translations(logical_line, filename):
# Translations are not required in the test directory.
# This will not catch all instances of violations, just direct
# misuse of the form LOG.info('Message').
if "cinder/tests" in filename:
return
msg = "N328: LOG.info messages require translations `_LI()`!"
if log_translation_LI.match(logical_line):
yield (0, msg)
msg = ("N329: LOG.exception and LOG.error messages require "
"translations `_LE()`!")
if log_translation_LE.match(logical_line):
yield (0, msg)
msg = "N330: LOG.warning messages require translations `_LW()`!"
if log_translation_LW.match(logical_line):
yield (0, msg)
def check_oslo_namespace_imports(logical_line):
if re.match(oslo_namespace_imports, logical_line):
msg = ("N333: '%s' must be used instead of '%s'.") % (
logical_line.replace('oslo.', 'oslo_'),
logical_line)
yield(0, msg)
def check_datetime_now(logical_line, noqa):
if noqa:
return
msg = ("C301: Found datetime.now(). "
"Please use timeutils.utcnow() from oslo_utils.")
if 'datetime.now' in logical_line:
yield(0, msg)
def check_unicode_usage(logical_line, noqa):
if noqa:
return
msg = "C302: Found unicode() call. Please use six.text_type()."
if 'unicode(' in logical_line:
yield(0, msg)
def check_no_print_statements(logical_line, filename, noqa):
# The files in cinder/cmd do need to use 'print()' so
# we don't need to check those files. Other exemptions
# should use '# noqa' to avoid failing here.
if "cinder/cmd" not in filename and not noqa:
if re.match(no_print_statements, logical_line):
msg = ("C303: print() should not be used. "
"Please use LOG.[info|error|warning|exception|debug]. "
"If print() must be used, use '# noqa' to skip this check.")
yield(0, msg)
def check_no_log_audit(logical_line):
"""Ensure that we are not using LOG.audit messages
Plans are in place going forward as discussed in the following
spec (https://review.openstack.org/#/c/91446/) to take out
LOG.audit messages. Given that audit was a concept invented
for OpenStack we can enforce not using it.
"""
if no_audit_log.match(logical_line):
yield(0, "C304: Found LOG.audit. Use LOG.info instead.")
def check_no_contextlib_nested(logical_line):
msg = ("C305: contextlib.nested is deprecated. With Python 2.7 and later "
"the with-statement supports multiple nested objects. See https://"
"docs.python.org/2/library/contextlib.html#contextlib.nested "
"for more information.")
if no_contextlib_nested.match(logical_line):
yield(0, msg)
def check_timeutils_strtime(logical_line):
msg = ("C306: Found timeutils.strtime(). "
"Please use datetime.datetime.isoformat() or datetime.strftime()")
if 'timeutils.strtime' in logical_line:
yield(0, msg)
def no_log_warn(logical_line):
msg = "C307: LOG.warn is deprecated, please use LOG.warning!"
if "LOG.warn(" in logical_line:
yield (0, msg)
def dict_constructor_with_list_copy(logical_line):
msg = ("N336: Must use a dict comprehension instead of a dict constructor "
"with a sequence of key-value pairs.")
if dict_constructor_with_list_copy_re.match(logical_line):
yield (0, msg)
def check_timeutils_isotime(logical_line):
msg = ("C308: Found timeutils.isotime(). "
"Please use datetime.datetime.isoformat()")
if 'timeutils.isotime' in logical_line:
yield(0, msg)
def factory(register):
register(no_vi_headers)
register(no_translate_debug_logs)
register(no_mutable_default_args)
register(check_explicit_underscore_import)
register(CheckForStrUnicodeExc)
register(check_assert_called_once)
register(check_oslo_namespace_imports)
register(check_datetime_now)
register(check_timeutils_strtime)
register(check_timeutils_isotime)
register(validate_log_translations)
register(check_unicode_usage)
register(check_no_print_statements)
register(check_no_log_audit)
register(check_no_contextlib_nested)
register(no_log_warn)
register(dict_constructor_with_list_copy)
|
julianwang/cinder
|
cinder/hacking/checks.py
|
Python
|
apache-2.0
| 12,426
|
[
"VisIt"
] |
4618cfd94478b6ebbc3cb34218eb8ee8bc425395158e226a6c1d7db77732d7c4
|
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
======================================
fMRI: surface smooth - FreeSurfer, SPM
======================================
This tutorial illustrates how to perform surface-based smoothing of
cortical data using FreeSurfer_ and then perform firstlevel model and
contrast estimation using SPM_. A surface-based second level glm
illustrates the use of spherical registration and freesurfer's glm
functions.
Preparing environment
=====================
Step 0
------
In order to run this tutorial you need to have SPM_ and FreeSurfer_
tools installed and accessible from matlab/command line. Check by
calling mri_info from the command line.
Step 1
------
Link the *fsaverage* directory for your freesurfer distribution. To do
this type:
::
cd nipype-tutorial/fsdata
ln -s $FREESURFER_HOME/subjects/fsaverage
cd ..
Defining the workflow
=====================
"""
from __future__ import print_function
from builtins import range
import os # system functions
import nipype.algorithms.modelgen as model # model generation
import nipype.algorithms.rapidart as ra # artifact detection
import nipype.interfaces.freesurfer as fs # freesurfer
import nipype.interfaces.io as nio # i/o routines
import nipype.interfaces.matlab as mlab # how to run matlab
import nipype.interfaces.spm as spm # spm
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
"""
Preliminaries
-------------
Set any package specific configuration.
Setting the subjects directory and the appropriate matlab command to use. if
you want to use a different spm version/path, it should also be entered here.
These are currently being set at the class level, so every node will inherit
these settings. However, these can also be changed or set for an individual
node.
"""
# Tell freesurfer what subjects directory to use
subjects_dir = os.path.abspath('fsdata')
fs.FSCommand.set_default_subjects_dir(subjects_dir)
# Set the way matlab should be called
mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash")
# If SPM is not in your MATLAB path you should add it here
mlab.MatlabCommand.set_default_paths('/software/spm8')
"""
Setup preprocessing workflow
----------------------------
"""
preproc = pe.Workflow(name='preproc')
"""
Use :class:`nipype.interfaces.spm.Realign` for motion correction
and register all images to the mean image.
"""
realign = pe.Node(interface=spm.Realign(), name="realign")
realign.inputs.register_to_mean = True
"""
Use :class:`nipype.algorithms.rapidart` to determine which of the
images in the functional series are outliers based on deviations in
intensity or movement.
"""
art = pe.Node(interface=ra.ArtifactDetect(), name="art")
art.inputs.use_differences = [True, False]
art.inputs.use_norm = True
art.inputs.norm_threshold = 1
art.inputs.zintensity_threshold = 3
art.inputs.mask_type = 'file'
art.inputs.parameter_source = 'SPM'
"""
Use :class:`nipype.interfaces.freesurfer.BBRegister` to coregister the mean
functional image generated by realign to the subjects' surfaces.
"""
surfregister = pe.Node(interface=fs.BBRegister(), name='surfregister')
surfregister.inputs.init = 'fsl'
surfregister.inputs.contrast_type = 't2'
"""
Use :class:`nipype.interfaces.io.FreeSurferSource` to retrieve various image
files that are automatically generated by the recon-all process.
"""
FreeSurferSource = pe.Node(interface=nio.FreeSurferSource(), name='fssource')
"""
Use :class:`nipype.interfaces.freesurfer.ApplyVolTransform` to convert the
brainmask generated by freesurfer into the realigned functional space.
"""
ApplyVolTransform = pe.Node(interface=fs.ApplyVolTransform(),
name='applyreg')
ApplyVolTransform.inputs.inverse = True
"""
Use :class:`nipype.interfaces.freesurfer.Binarize` to extract a binary brain
mask.
"""
Threshold = pe.Node(interface=fs.Binarize(), name='threshold')
Threshold.inputs.min = 10
Threshold.inputs.out_type = 'nii'
"""
Two different types of functional data smoothing are performed in this
workflow. The volume smoothing option performs a standard SPM smoothing using
:class:`nipype.interfaces.spm.Smooth`. In addition, we use a smoothing routine
from freesurfer (:class:`nipype.interfaces.freesurfer.Smooth`) to project the
functional data from the volume to the subjects' surface, smooth it on the
surface and fit it back into the volume forming the cortical ribbon. The
projection uses the average value along a "cortical column". In addition to the
surface smoothing, the rest of the volume is smoothed with a 3d gaussian kernel.
.. note::
It is very important to note that the projection to the surface takes a 3d
manifold to a 2d manifold. Hence the reverse projection, simply fills the
thickness of cortex with the smoothed data. The smoothing is not performed
in a depth specific manner. The output of this branch should only be used
for surface-based analysis and visualization.
"""
volsmooth = pe.Node(interface=spm.Smooth(), name="volsmooth")
surfsmooth = pe.MapNode(interface=fs.Smooth(proj_frac_avg=(0, 1, 0.1)), name="surfsmooth",
iterfield=['in_file'])
"""
We connect up the different nodes to implement the preprocessing workflow.
"""
preproc.connect([(realign, surfregister, [('mean_image', 'source_file')]),
(FreeSurferSource, ApplyVolTransform, [('brainmask', 'target_file')]),
(surfregister, ApplyVolTransform, [('out_reg_file', 'reg_file')]),
(realign, ApplyVolTransform, [('mean_image', 'source_file')]),
(ApplyVolTransform, Threshold, [('transformed_file', 'in_file')]),
(realign, art, [('realignment_parameters', 'realignment_parameters'),
('realigned_files', 'realigned_files')]),
(Threshold, art, [('binary_file', 'mask_file')]),
(realign, volsmooth, [('realigned_files', 'in_files')]),
(realign, surfsmooth, [('realigned_files', 'in_file')]),
(surfregister, surfsmooth, [('out_reg_file', 'reg_file')]),
])
"""
Set up volume analysis workflow
-------------------------------
"""
volanalysis = pe.Workflow(name='volanalysis')
"""
Generate SPM-specific design information using
:class:`nipype.algorithms.modelgen.SpecifySPMModel`.
"""
modelspec = pe.Node(interface=model.SpecifySPMModel(), name="modelspec")
modelspec.inputs.concatenate_runs = True
"""
Generate a first level SPM.mat file for analysis using
:class:`nipype.interfaces.spm.Level1Design`.
"""
level1design = pe.Node(interface=spm.Level1Design(), name="level1design")
level1design.inputs.bases = {'hrf': {'derivs': [0, 0]}}
"""
Use :class:`nipype.interfaces.spm.EstimateModel` to determine the
parameters of the model.
"""
level1estimate = pe.Node(interface=spm.EstimateModel(), name="level1estimate")
level1estimate.inputs.estimation_method = {'Classical': 1}
"""
Use :class:`nipype.interfaces.spm.EstimateContrast` to estimate the
first level contrasts, which are defined later in this script and passed in
through the input node.
"""
contrastestimate = pe.Node(interface=spm.EstimateContrast(), name="contrastestimate")
volanalysis.connect([(modelspec, level1design, [('session_info', 'session_info')]),
(level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]),
(level1estimate, contrastestimate, [('spm_mat_file', 'spm_mat_file'),
('beta_images', 'beta_images'),
('residual_image', 'residual_image')]),
])
"""
Set up surface analysis workflow
--------------------------------
We simply clone the volume analysis workflow.
"""
surfanalysis = volanalysis.clone(name='surfanalysis')
"""
Set up volume normalization workflow
------------------------------------
The volume analysis is performed in individual space. Therefore, post analysis
we normalize the contrast images to MNI space.
"""
volnorm = pe.Workflow(name='volnormconimages')
"""
Use :class:`nipype.interfaces.freesurfer.MRIConvert` to convert the brainmask
(an mgz file) and the contrast images (nifti-1 img/hdr pairs) to single volume
nifti images.
"""
convert = pe.Node(interface=fs.MRIConvert(out_type='nii'), name='convert2nii')
convert2 = pe.MapNode(interface=fs.MRIConvert(out_type='nii'),
iterfield=['in_file'],
name='convertimg2nii')
"""
Use :class:`nipype.interfaces.spm.Segment` to segment the structural image and
generate the transformation file to MNI space.
.. note::
Segment takes longer than usual because the nose is wrapped behind
the head in the structural image.
"""
segment = pe.Node(interface=spm.Segment(), name='segment')
"""
Use :class:`nipype.interfaces.freesurfer.ApplyVolTransform` to convert contrast
images into freesurfer space.
"""
normwreg = pe.MapNode(interface=fs.ApplyVolTransform(),
iterfield=['source_file'],
name='applyreg2con')
"""
Use :class:`nipype.interfaces.spm.Normalize` to normalize the contrast images
to MNI space
"""
normalize = pe.Node(interface=spm.Normalize(jobtype='write'),
name='norm2mni')
"""
Connect up the volume normalization components
"""
volnorm.connect([(convert, segment, [('out_file', 'data')]),
(convert2, normwreg, [('out_file', 'source_file')]),
(segment, normalize, [('transformation_mat', 'parameter_file')]),
(normwreg, normalize, [('transformed_file', 'apply_to_files')]),
])
"""
Preproc + Analysis + VolumeNormalization workflow
-------------------------------------------------
Connect up the lower level workflows into an integrated analysis. In addition,
we add an input node that specifies all the inputs needed for this
workflow. Thus, one can import this workflow and connect it to their own data
sources. An example with the nifti-tutorial data is provided below.
For this workflow the only necessary inputs are the functional images, a
freesurfer subject id corresponding to recon-all processed data, the session
information for the functional runs and the contrasts to be evaluated.
"""
inputnode = pe.Node(interface=util.IdentityInterface(fields=['func',
'subject_id',
'session_info',
'contrasts']),
name='inputnode')
"""
Connect the components into an integrated workflow.
"""
l1pipeline = pe.Workflow(name='firstlevel')
l1pipeline.connect([(inputnode, preproc, [('func', 'realign.in_files'),
('subject_id', 'surfregister.subject_id'),
('subject_id', 'fssource.subject_id'),
]),
(inputnode, volanalysis, [('session_info', 'modelspec.subject_info'),
('contrasts', 'contrastestimate.contrasts')]),
(inputnode, surfanalysis, [('session_info', 'modelspec.subject_info'),
('contrasts', 'contrastestimate.contrasts')]),
])
# attach volume and surface model specification and estimation components
l1pipeline.connect([(preproc, volanalysis, [('realign.realignment_parameters',
'modelspec.realignment_parameters'),
('volsmooth.smoothed_files',
'modelspec.functional_runs'),
('art.outlier_files',
'modelspec.outlier_files'),
('threshold.binary_file',
'level1design.mask_image')]),
(preproc, surfanalysis, [('realign.realignment_parameters',
'modelspec.realignment_parameters'),
('surfsmooth.smoothed_file',
'modelspec.functional_runs'),
('art.outlier_files',
'modelspec.outlier_files'),
('threshold.binary_file',
'level1design.mask_image')])
])
# attach volume contrast normalization components
l1pipeline.connect([(preproc, volnorm, [('fssource.orig', 'convert2nii.in_file'),
('surfregister.out_reg_file', 'applyreg2con.reg_file'),
('fssource.orig', 'applyreg2con.target_file')]),
(volanalysis, volnorm, [('contrastestimate.con_images',
'convertimg2nii.in_file'),
])
])
"""
Data specific components
------------------------
The nipype tutorial contains data for two subjects. Subject data
is in two subdirectories, ``s1`` and ``s2``. Each subject directory
contains four functional volumes (f3.nii, f5.nii, f7.nii, f10.nii) and
one anatomical volume named struct.nii.
Below we set some variables to inform the ``datasource`` about the
layout of our data. We specify the location of the data, the subject
sub-directories and a dictionary that maps each run to a mnemonic (or
field) for the run type (``struct`` or ``func``). These fields become
the output fields of the ``datasource`` node in the pipeline.
In the example below, run 'f3' is of type 'func' and gets mapped to a
nifti filename through a template '%s.nii'. So 'f3' would become
'f3.nii'.
"""
# Specify the location of the data.
data_dir = os.path.abspath('data')
# Specify the subject directories
subject_list = ['s1', 's3']
# Map field names to individual subject runs.
info = dict(func=[['subject_id', ['f3', 'f5', 'f7', 'f10']]],
struct=[['subject_id', 'struct']])
infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource")
"""Here we set up iteration over all the subjects. The following line
is a particular example of the flexibility of the system. The
``infosource`` attribute ``iterables`` tells the pipeline engine that
it should repeat the analysis on each of the items in the
``subject_list``. In the current example, the entire first level
preprocessing and estimation will be repeated for each subject
contained in subject_list.
"""
infosource.iterables = ('subject_id', subject_list)
"""
Now we create a :class:`nipype.interfaces.io.DataGrabber` object and
fill in the information from above about the layout of our data. The
:class:`nipype.pipeline.engine.Node` class wraps the interface object
and provides additional housekeeping and pipeline specific
functionality.
"""
datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
outfields=['func', 'struct']),
name='datasource')
datasource.inputs.base_directory = data_dir
datasource.inputs.template = '%s/%s.nii'
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True
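# For illustration only (assuming the data layout described above): with
# subject id 's1', the template '%s/%s.nii' plus the template_args in ``info``
# resolve the 'func' field to data/s1/f3.nii, data/s1/f5.nii, data/s1/f7.nii
# and data/s1/f10.nii, and the 'struct' field to data/s1/struct.nii
# (all relative to the base_directory set above).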
"""
Set preprocessing parameters
----------------------------
"""
l1pipeline.inputs.preproc.fssource.subjects_dir = subjects_dir
l1pipeline.inputs.preproc.volsmooth.fwhm = 4
l1pipeline.inputs.preproc.surfsmooth.surface_fwhm = 5
l1pipeline.inputs.preproc.surfsmooth.vol_fwhm = 4
"""
Experimental paradigm specific components
-----------------------------------------
Here we create a function that returns subject-specific information
about the experimental paradigm. This is used by the
:class:`nipype.algorithms.modelgen.SpecifySPMModel` to create the information
necessary to generate an SPM design matrix. In this tutorial, the same
paradigm was used for every participant.
"""
def subjectinfo(subject_id):
from nipype.interfaces.base import Bunch
from copy import deepcopy
print("Subject ID: %s\n" % str(subject_id))
output = []
names = ['Task-Odd', 'Task-Even']
for r in range(4):
onsets = [list(range(15, 240, 60)), list(range(45, 240, 60))]
output.insert(r,
Bunch(conditions=names,
onsets=deepcopy(onsets),
durations=[[15] for s in names],
))
return output
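# For illustration: each of the four runs receives the same two conditions;
# range(15, 240, 60) gives onsets [15, 75, 135, 195] for 'Task-Odd' and
# range(45, 240, 60) gives [45, 105, 165, 225] for 'Task-Even', with every
# event lasting 15 seconds.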
"""Setup the contrast structure that needs to be evaluated. This is a
list of lists. The inner list specifies the contrasts and has the
following format: [Name, Stat, [list of condition names], [weights on
those conditions]]. The condition names must match the `names` listed
in the `subjectinfo` function described above.
"""
cont1 = ('Task>Baseline', 'T', ['Task-Odd', 'Task-Even'], [0.5, 0.5])
cont2 = ('Task-Odd>Task-Even', 'T', ['Task-Odd', 'Task-Even'], [1, -1])
contrasts = [cont1, cont2]
"""
Set up node specific inputs
---------------------------
We replicate the modelspec parameters separately for the surface- and
volume-based analysis.
"""
modelspecref = l1pipeline.inputs.volanalysis.modelspec
modelspecref.input_units = 'secs'
modelspecref.time_repetition = 3.
modelspecref.high_pass_filter_cutoff = 120
modelspecref = l1pipeline.inputs.surfanalysis.modelspec
modelspecref.input_units = 'secs'
modelspecref.time_repetition = 3.
modelspecref.high_pass_filter_cutoff = 120
l1designref = l1pipeline.inputs.volanalysis.level1design
l1designref.timing_units = modelspecref.output_units
l1designref.interscan_interval = modelspecref.time_repetition
l1designref = l1pipeline.inputs.surfanalysis.level1design
l1designref.timing_units = modelspecref.output_units
l1designref.interscan_interval = modelspecref.time_repetition
l1pipeline.inputs.inputnode.contrasts = contrasts
"""
Setup the pipeline
------------------
The nodes created above do not describe the flow of data. They merely
describe the parameters used for each function. In this section we
setup the connections between the nodes such that appropriate outputs
from nodes are piped into appropriate inputs of other nodes.
Use the :class:`nipype.pipeline.engine.Workflow` to create a
graph-based execution pipeline for first level analysis.
"""
level1 = pe.Workflow(name="level1")
level1.base_dir = os.path.abspath('volsurf_tutorial/workingdir')
level1.connect([(infosource, datasource, [('subject_id', 'subject_id')]),
(datasource, l1pipeline, [('func', 'inputnode.func')]),
(infosource, l1pipeline, [('subject_id', 'inputnode.subject_id'),
(('subject_id', subjectinfo),
'inputnode.session_info')]),
])
"""
Store the output
----------------
Create a datasink node to store the contrast images and registration info
"""
datasink = pe.Node(interface=nio.DataSink(), name="datasink")
datasink.inputs.base_directory = os.path.abspath('volsurf_tutorial/l1out')
datasink.inputs.substitutions = []
def getsubs(subject_id):
subs = [('_subject_id_%s/' % subject_id, '')]
return subs
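# For illustration (hypothetical output path): with subject 's1' the pair
# ('_subject_id_s1/', '') makes the datasink strip that folder level from the
# paths it writes, so results land directly inside the subject's container.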
# store relevant outputs from various stages of the 1st level analysis
level1.connect([(infosource, datasink, [('subject_id', 'container'),
(('subject_id', getsubs), 'substitutions')
]),
(l1pipeline, datasink, [('surfanalysis.contrastestimate.con_images', 'contrasts'),
('preproc.surfregister.out_reg_file', 'registrations'),
])
])
"""
Run the analysis pipeline and also create a dot+png (if graphviz is available)
that visually represents the workflow.
"""
if __name__ == '__main__':
level1.run()
level1.write_graph(graph2use='flat')
"""
Level2 surface-based pipeline
-----------------------------
Create a level2 workflow
"""
l2flow = pe.Workflow(name='l2out')
l2flow.base_dir = os.path.abspath('volsurf_tutorial')
"""
Setup a dummy node to iterate over contrasts and hemispheres
"""
l2inputnode = pe.Node(interface=util.IdentityInterface(fields=['contrasts',
'hemi']),
name='inputnode')
l2inputnode.iterables = [('contrasts', list(range(1, len(contrasts) + 1))),
('hemi', ['lh', 'rh'])]
"""
Use a datagrabber node to collect contrast images and registration files
"""
l2source = pe.Node(interface=nio.DataGrabber(infields=['con_id'],
outfields=['con', 'reg']),
name='l2source')
l2source.inputs.base_directory = os.path.abspath('volsurf_tutorial/l1out')
l2source.inputs.template = '*'
l2source.inputs.field_template = dict(con='*/contrasts/con_%04d.img',
reg='*/registrations/*.dat')
l2source.inputs.template_args = dict(con=[['con_id']], reg=[[]])
l2source.inputs.sort_filelist = True
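# For illustration: with con_id 1 the field_template above resolves 'con' to
# the glob */contrasts/con_0001.img (one contrast image per subject container
# written by the first level datasink) and 'reg' to */registrations/*.dat
# (one bbregister file per subject).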
l2flow.connect(l2inputnode, 'contrasts', l2source, 'con_id')
"""
Merge contrast images and registration files
"""
mergenode = pe.Node(interface=util.Merge(2, axis='hstack'),
name='merge')
def ordersubjects(files, subj_list):
outlist = []
for s in subj_list:
for f in files:
if '/%s/' % s in f:
outlist.append(f)
continue
print(outlist)
return outlist
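# For illustration (hypothetical paths): given
# files = ['.../s3/contrasts/con_0001.img', '.../s1/contrasts/con_0001.img']
# and subject_list = ['s1', 's3'], ordersubjects returns the list reordered as
# ['.../s1/...', '.../s3/...'], so the 'con' and 'reg' inputs of the merge node
# line up subject by subject (this assumes one matching file per subject).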
l2flow.connect(l2source, ('con', ordersubjects, subject_list), mergenode, 'in1')
l2flow.connect(l2source, ('reg', ordersubjects, subject_list), mergenode, 'in2')
"""
Concatenate contrast images projected to fsaverage
"""
l2concat = pe.Node(interface=fs.MRISPreproc(), name='concat')
l2concat.inputs.target = 'fsaverage'
l2concat.inputs.fwhm = 5
def list2tuple(listoflist):
return [tuple(x) for x in listoflist]
l2flow.connect(l2inputnode, 'hemi', l2concat, 'hemi')
l2flow.connect(mergenode, ('out', list2tuple), l2concat, 'vol_measure_file')
"""
Perform a one sample t-test
"""
l2ttest = pe.Node(interface=fs.OneSampleTTest(), name='onesample')
l2flow.connect(l2concat, 'out_file', l2ttest, 'in_file')
"""
Run the analysis pipeline and also create a dot+png (if graphviz is available)
that visually represents the workflow.
"""
if __name__ == '__main__':
l2flow.run()
l2flow.write_graph(graph2use='flat')
|
FCP-INDI/nipype
|
examples/fmri_freesurfer_smooth.py
|
Python
|
bsd-3-clause
| 22,931
|
[
"Gaussian"
] |
f7b83213cd936985e3ddf6e27dd203a9f08af7061105cda731df69aad00e90a4
|
#!/usr/bin/env python
#
# Robin Shields-Cutler
# August 2016
# Takes standard BLAST output TSV (outfmt 6 -- *.b6 or *.txt, etc.), stores entries in a dictionary,
# then writes them to a dataframe and exports as CSV (or HDF5, depending on the output extension).
usage = 'blastp_to_matrix.py -i BLASTOUT.b6 -s SCORE_METHOD -t THRESHOLD -o OUTFILE.csv'
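# Example invocation (hypothetical filenames):
# python blastp_to_matrix.py -i genes_vs_genes.b6 -s bitscore -t 50 -n -o scores_matrix.csv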
import argparse
import os
import csv
import pandas as pd
import numpy as np
import re
from collections import defaultdict
def make_arg_parser():
parser = argparse.ArgumentParser(description='Convert blastp output txt table to a scores matrix in csv format')
parser.add_argument('-i', '--input', help='The blast output file to process.', required=True, type=str)
parser.add_argument('-s', '--score', help='Which score to enter into matrix: "pident", "evalue", or "bitscore", or "justnorm" if using R to make the matrix', required=False, type=str, default='bitscore')
parser.add_argument('-t', '--threshold', help='The threshold (float) for entry into matrix.', required=False, type=float, default=1)
parser.add_argument('-o', '--output', help='Where to put the output (CSV or h5)', required=False, type=str, default='blastp_matrixform.csv')
parser.add_argument('-n', '--normalize', help='Normalize bitscore to score of self-self for each cluster (as 100).', action='store_true', required=False, default=False)
parser.add_argument('-r', '--spread', help='The spread matrix from R', required=False)
parser.add_argument('--genbank', help='If the result uses genbank IDs not refseq', action='store_true', required=False, default=False)
return parser
def ofu_tree_parsing(infile, s_method, t):
sparse_blast_id_dict = defaultdict(dict)
with open(infile) as blast_inf:
# next(blast_inf)
blast_tsv = csv.reader(blast_inf, delimiter='\t')
# line[0] query name, line[1] = reference name, line[2] = % match, line[10] = e-value, line[11] = bitscore
if s_method == 'justnorm':
self_match_dict = defaultdict(dict)
for line in blast_tsv:
m = line[0]
n = line[1]
# mref = m.group(1, 2)
# nref = n.group(1, 2)
# # print(mref)
bvalue = np.float(line[11])
if m == n:
self_match_dict[line[0]] = bvalue
elif s_method == 'bitscore':
self_match_dict = defaultdict(dict)
for line in blast_tsv:
m = line[0]
n = line[1]
# mref = m.group(1, 2)
# nref = n.group(1, 2)
# # print(mref)
bvalue = np.float(line[11])
if m == n:
self_match_dict[line[0]] = bvalue
if bvalue > t:
sparse_blast_id_dict[line[0]][line[1]] = bvalue
# TODO: use the evalue of perfect matches to normalize the data
elif s_method == 'evalue':
for line in blast_tsv:
# p = re.compile(r'(\w+_[\w+\d+]*\.\d)(_\w+\d\d\d)(_ctg\d_orf\d+)')
# m = p.search(line[0])
# n = p.search(line[1])
# mref = m.group(1)
# nref = n.group(1)
# cname = ''.join(m.group(1, 2, 3))
# rname = ''.join(n.group(1, 2, 3))
# if mref == nref:
# pass
# else:
evalue = np.float(line[10])
if evalue < t:
sparse_blast_id_dict[line[0]][line[1]] = evalue
elif s_method == 'pident':
for line in blast_tsv:
# p = re.compile(r'(\w+_[\w+\d+]*\.\d)(_\w+\d\d\d)(_ctg\d_orf\d+)')
# m = p.search(line[0])
# n = p.search(line[1])
# mref = m.group(1, 2)
# nref = n.group(1, 2)
# cname = ''.join(m.group(1, 2, 3))
# rname = ''.join(n.group(1, 2, 3))
# if mref == nref:
# pass
# else:
ivalue = np.float(line[2])
if ivalue > t:
sparse_blast_id_dict[line[0]][line[1]] = ivalue
return sparse_blast_id_dict
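# For illustration (hypothetical ORF ids): with -s bitscore the function above
# returns a nested dict such as {'orfA': {'orfA': 200.0, 'orfB': 151.0}, ...};
# pd.DataFrame.from_dict in main() then turns the outer keys (queries) into
# columns and the inner keys (references) into rows of the score matrix.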
def main():
parser = make_arg_parser()
args = parser.parse_args()
if args.genbank:
s_method = args.score
t = args.threshold
infile = args.input
sparse_blast_id_dict = ofu_tree_parsing(infile, s_method, t)
else:
sparse_blast_id_dict = defaultdict(dict)
with open(args.input) as blast_inf:
# next(blast_inf)
blast_tsv = csv.reader(blast_inf, delimiter='\t')
# line[0] query name, line[1] = reference name, line[2] = % match, line[10] = e-value, line[11] = bitscore
if args.score == 'justnorm':
self_match_dict = defaultdict(dict)
for line in blast_tsv:
p = re.compile(r'(\w+_[\w+\d+]*\.\d)(_\w+\d\d\d)(_ctg\d+_orf\d+)')
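				# For illustration (hypothetical ID): the pattern splits an ORF identifier like
				# 'NC_012345.1_cluster001_ctg1_orf00010' into the groups
				# ('NC_012345.1', '_cluster001', '_ctg1_orf00010'); cname/rname below re-join
				# all three groups, so only true self-hits are recorded as self matches.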
m = p.search(line[0])
n = p.search(line[1])
# mref = m.group(1, 2)
# nref = n.group(1, 2)
cname = ''.join(m.group(1, 2, 3))
rname = ''.join(n.group(1, 2, 3))
# # print(mref)
bvalue = np.float(line[11])
if cname == rname:
self_match_dict[line[0]] = bvalue
elif args.score == 'bitscore':
self_match_dict = defaultdict(dict)
for line in blast_tsv:
p = re.compile(r'(\w+_[\w+\d+]*\.\d)(_\w+\d\d\d)(_ctg\d+_orf\d+)')
m = p.search(line[0])
n = p.search(line[1])
# mref = m.group(1, 2)
# nref = n.group(1, 2)
cname = ''.join(m.group(1, 2, 3))
rname = ''.join(n.group(1, 2, 3))
# # print(mref)
bvalue = np.float(line[11])
if cname == rname:
self_match_dict[line[0]] = bvalue
if bvalue > args.threshold:
sparse_blast_id_dict[line[0]][line[1]] = bvalue
# TODO: use the evalue of perfect matches to normalize the data
elif args.score == 'evalue':
for line in blast_tsv:
# p = re.compile(r'(\w+_[\w+\d+]*\.\d)(_\w+\d\d\d)(_ctg\d_orf\d+)')
# m = p.search(line[0])
# n = p.search(line[1])
# mref = m.group(1)
# nref = n.group(1)
# cname = ''.join(m.group(1, 2, 3))
# rname = ''.join(n.group(1, 2, 3))
# if mref == nref:
# pass
# else:
evalue = np.float(line[10])
if evalue < args.threshold:
sparse_blast_id_dict[line[0]][line[1]] = evalue
elif args.score == 'pident':
for line in blast_tsv:
# p = re.compile(r'(\w+_[\w+\d+]*\.\d)(_\w+\d\d\d)(_ctg\d_orf\d+)')
# m = p.search(line[0])
# n = p.search(line[1])
# mref = m.group(1, 2)
# nref = n.group(1, 2)
# cname = ''.join(m.group(1, 2, 3))
# rname = ''.join(n.group(1, 2, 3))
# if mref == nref:
# pass
# else:
ivalue = np.float(line[2])
if ivalue > args.threshold:
sparse_blast_id_dict[line[0]][line[1]] = ivalue
if args.score == 'justnorm':
with open(args.spread, 'r') as inf:
df = pd.read_csv(inf, header=0, index_col=0, engine='c')
else:
df = pd.DataFrame.from_dict(sparse_blast_id_dict)
df.sort_index(axis=0, inplace=True)
df.sort_index(axis=1, inplace=True)
# print(df.shape[0])
if args.normalize:
vals = []
for cluster in list(df.columns):
vals.append(self_match_dict[cluster])
df = df / vals * 100
df = df.round(decimals=1)
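		# For illustration (hypothetical scores): if a cluster's self-self bitscore is
		# 200.0 and its hit against another cluster scores 150.0, the normalized entry
		# becomes 150.0 / 200.0 * 100 = 75.0.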
# print(len(vals))
# Check if a matrix is symmetric
# arr = df.values
# print((arr.transpose() == -arr).all())
if args.output.endswith('.csv'):
df.to_csv(args.output)
else:
df.to_hdf(args.output, 'table')
if __name__ == '__main__':
main()
|
RRShieldsCutler/clusterpluck
|
clusterpluck/scripts/blastp_to_matrix.py
|
Python
|
mit
| 6,813
|
[
"BLAST"
] |
73200aa65e0f0f7225b4bb2937262fbc54a4d54cdf53f016f769693fbfaa8046
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
'''
Bibauthorid_webapi
Point of access to the documents clustering facility.
Provides utilities to safely interact with stored data.
'''
import os
from itertools import chain
from copy import deepcopy
import invenio.bibauthorid_config as bconfig
import invenio.bibauthorid_frontinterface as dbapi
import invenio.bibauthorid_name_utils as nameapi
import invenio.webauthorprofile_interface as webauthorapi
from invenio.bibauthorid_general_utils import defaultdict
import invenio.search_engine as search_engine
from invenio.bibformat import format_record
from invenio.search_engine import perform_request_search
from cgi import escape
from invenio.dateutils import strftime
from time import time, gmtime, ctime
from invenio.access_control_admin import acc_find_user_role_actions
from invenio.webuser import collect_user_info, get_session, getUid, email_valid_p
from invenio.webuser import isUserSuperAdmin, get_nickname
from invenio.access_control_engine import acc_authorize_action
from invenio.access_control_admin import acc_get_role_id, acc_get_user_roles
from invenio.external_authentication_robot import ExternalAuthRobot
from invenio.external_authentication_robot import load_robot_keys
from invenio.config import CFG_INSPIRE_SITE, CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL, \
CFG_BIBAUTHORID_ENABLED_REMOTE_LOGIN_SYSTEMS, CFG_WEBAUTHORPROFILE_MAX_HEP_CHOICES, \
CFG_WEBAUTHORPROFILE_CFG_HEPNAMES_EMAIL, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, \
CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG
from invenio.config import CFG_SITE_URL
from invenio.mailutils import send_email
from invenio.bibauthorid_name_utils import most_relevant_name
from invenio.bibauthorid_general_utils import is_arxiv_id_or_doi
from invenio.shellutils import retry_mkstemp
from invenio.bibrecord import record_xml_output, record_add_field
from invenio.bibtask import task_low_level_submission
from invenio.bibauthorid_dbinterface import get_external_ids_of_author, add_arxiv_papers_to_author, get_arxiv_papers_of_author #pylint: disable-msg=W0614
############################################
# DB Data Accessors #
############################################
def is_profile_available(pid):
'''
Check whether the profile with the given pid is not claimed by any user
@param pid: person id
@type pid: int
@return: True if the profile is available (not claimed by a user), False otherwise
@rtype: boolean
'''
uid = get_uid_from_personid(pid)
if uid == -1:
return True
return False
def get_bibrec_from_bibrefrec(bibrefrec):
tmp_split_list = bibrefrec.split(':')
if len(tmp_split_list) == 1:
return -1
tmp_split_list = tmp_split_list[1].split(',')
if len(tmp_split_list) == 1:
return -1
return int(tmp_split_list[1])
def get_bibrefs_from_bibrecs(bibreclist):
'''
Retrieve all bibrefs for all the recids in the list
@param bibreclist: list of record IDs
@type bibreclist: list of int
@return: a list of record->bibrefs
@return: list of lists
'''
return [[bibrec, dbapi.get_matching_bibrefs_for_paper([''], bibrec, always_match=True)]
for bibrec in bibreclist]
def get_canonical_id_from_person_id(person_id):
'''
Finds the person canonical name from personid (e.g. 1)
@param person_id: person id
@type person_id: int
@return: result from the request or person_id on failure
@rtype: int
'''
if not person_id:
return None
canonical_name = person_id
try:
canonical_name = dbapi.get_canonical_name_of_author(person_id)[0][0]
except IndexError:
pass
return canonical_name
def get_external_ids_from_person_id(pid):
'''
Finds the person external ids (doi, arxivids, ..) from personid (e.g. 1)
@param person_id: person id
@type person_id: int
@return: dictionary of external ids
@rtype: dict()
'''
if not pid or not (isinstance(pid, str) or isinstance(pid, (int, long))):
return dict()
if isinstance(pid, str):
return None
external_ids = dbapi.get_external_ids_of_author(pid)
return external_ids
def get_internal_user_id_from_person_id(pid):
'''
Finds the internal user id of the person from personid (e.g. 1)
@param pid: person id
@type pid: int
@return: the internal user id associated with the author
'''
if not pid or not (isinstance(pid, str) or isinstance(pid, (int, long))):
return dict()
if isinstance(pid, str):
return None
return dbapi.get_internal_user_id_of_author(pid)
def get_longest_name_from_pid(person_id= -1):
'''
Finds the longest name of a person to be representative for this person.
@param person_id: the person ID to look at
@type person_id: int
@return: returns the longest normalized name of a person
@rtype: string
'''
if (not person_id > -1) or (not isinstance(person_id, (int, long))):
return "This doesn't look like a person ID!"
longest_name = ""
for name in dbapi.get_names_count_of_author(person_id):
if name and len(name[0]) > len(longest_name):
longest_name = name[0]
if longest_name:
return longest_name
else:
return "This person does not seem to have a name!"
def get_most_frequent_name_from_pid(person_id= -1, allow_none=False):
'''
Finds the most frequent name of a person to be
representative for this person.
@param person_id: the person ID to look at
@type person_id: int
@return: returns the most frequent normalized name of a person
@rtype: string
'''
pid = wash_integer_id(person_id)
if (not pid > -1) or (not isinstance(pid, int)):
if allow_none:
return None
else:
return "'%s' doesn't look like a person ID!" % person_id
person_id = pid
mf_name = ""
try:
nn = dbapi.get_names_count_of_author(person_id)
mf_name = sorted(nn, key=lambda k:k[1], reverse=True)[0][0]
except IndexError:
pass
if mf_name:
return mf_name
else:
if allow_none:
return None
else:
return "This person does not seem to have a name!"
def get_papers_by_person_id(person_id= -1, rec_status= -2, ext_out=False):
'''
Returns all the papers written by the person
@param person_id: identifier of the person to retrieve papers from
@type person_id: int
@param rec_status: minimal flag status a record must have to be displayed
@type rec_status: int
@param ext_out: Extended output (w/ author aff and date)
@type ext_out: boolean
@return: list of record info
@rtype: list of lists of info
'''
if not isinstance(person_id, (int, long)):
try:
person_id = int(person_id)
except (ValueError, TypeError):
return []
if person_id < 0:
return []
if not isinstance(rec_status, int):
return []
records = []
db_data = dbapi.get_papers_info_of_author(person_id,
rec_status,
show_author_name=True,
show_title=False,
show_rt_status=True,
show_affiliations=ext_out,
show_date=ext_out,
show_experiment=ext_out)
if not ext_out:
records = [[int(row["data"].split(",")[1]), row["data"], row["flag"],
row["authorname"]] for row in db_data]
else:
for row in db_data:
recid = row["data"].split(",")[1]
bibref = row["data"]
flag = row["flag"]
authorname = row["authorname"]
rt_status = row['rt_status']
authoraff = ", ".join(row['affiliation'])
try:
date = sorted(row['date'], key=len)[0]
except IndexError:
date = "Not available"
exp = ", ".join(row['experiment'])
# date = ""
records.append([int(recid), bibref, flag, authorname,
authoraff, date, rt_status, exp])
return records
def get_papers_cluster(bibref):
'''
Returns the cluster of documents connected with this one
@param bibref: the table:bibref,bibrec pair to look for
@type bibref: str
@return: a list of record IDs
@rtype: list of int
'''
papers = []
person_id = get_person_id_from_paper(bibref)
if person_id > -1:
papers = get_papers_by_person_id(person_id)
return papers
def get_paper_status(bibref):
'''
Finds and returns the status of a bibrec to person assignment
@param bibref: the bibref-bibrec pair that unambiguously identifies a paper
@type bibref: string
'''
db_data = dbapi.get_author_and_status_of_signature(bibref)
# data,PersonID,flag
status = None
try:
status = db_data[0][2]
except IndexError:
status = -10
status = wash_integer_id(status)
return status
def get_person_redirect_link(pid):
'''
Returns the canonical name of a pid if found, the pid itself otherwise
@param pid: int
'''
cname = dbapi.get_canonical_name_of_author(pid)
if len(cname) > 0:
return str(cname[0][0])
else:
return str(pid)
def get_person_id_from_canonical_id(canonical_id):
'''
Finds the person id from a canonical name (e.g. Ellis_J_R_1)
@param canonical_id: the canonical ID
@type canonical_id: string
@return: result from the request or -1 on failure
@rtype: int
'''
if not canonical_id or not isinstance(canonical_id, str):
return -1
pid = -1
try:
pid = dbapi.get_author_by_canonical_name(canonical_id)[0][0]
except IndexError:
pass
return pid
def get_person_id_from_paper(bibref=None):
'''
Returns the id of the person who wrote the paper
@param bibref: the bibref,bibrec pair that identifies the person
@type bibref: str
@return: the person id
@rtype: int
'''
if not is_valid_bibref(bibref):
return -1
person_id = -1
db_data = dbapi.get_author_and_status_of_signature(bibref)
try:
person_id = db_data[0][1]
except (IndexError):
pass
return person_id
def get_person_comments(person_id):
'''
Get all comments from a person
@param person_id: person id to get the comments from
@type person_id: int
@return the message incl. the metadata if everything was fine, False on err
@rtype: string or boolean
'''
pid = -1
comments = []
try:
pid = int(person_id)
except (ValueError, TypeError):
return False
for row in dbapi.get_persons_data([pid], "comment"):
comments.append(row[1])
return comments
def get_person_db_names_from_id(person_id= -1):
'''
Finds and returns the names associated with this person as stored in the
meta data of the underlying data set along with the
frequency of occurrence (i.e. the number of papers)
@param person_id: an id to find the names for
@type person_id: int
@return: name and number of occurrences of the name
@rtype: tuple of tuple
'''
##retrieve all rows for the person
if (not person_id > -1) or (not isinstance(person_id, (int, long))):
return []
return dbapi.get_names_of_author(person_id)
def get_person_names_from_id(person_id= -1):
'''
Finds and returns the names associated with this person along with the
frequency of occurrence (i.e. the number of papers)
@param person_id: an id to find the names for
@type person_id: int
@return: name and number of occurrences of the name
@rtype: tuple of tuple
'''
##retrieve all rows for the person
if (not person_id > -1) or (not isinstance(person_id, (int, long))):
return []
return dbapi.get_names_count_of_author(person_id)
def get_person_request_ticket(pid=-1, tid=None):
'''
Returns the list of request tickets associated to a person.
@param pid: person id
@param tid: ticket id, to select if want to retrieve only a particular one
@return: tickets [[],[]]
'''
if pid < 0:
return list()
request_tickets = list()
r_tickets = dbapi.get_validated_request_tickets_for_author(pid, tid)
for r_ticket in r_tickets:
tid = None
request_ticket = list()
for tag, value in r_ticket.iteritems():
if tag == 'operations':
request_ticket += value
elif tag == 'tid':
tid = value
else:
request_ticket.append((tag, value))
request_tickets.append([request_ticket, tid])
return request_tickets
def get_persons_with_open_tickets_list():
'''
Finds all the persons with open tickets and returns pids and count of tickets
@return: [[pid,ticket_count]]
'''
return dbapi.get_authors_with_open_tickets()
def get_pid_from_uid(uid):
'''
Return the PID associated with the uid
@param uid: the internal ID of a user
@type uid: int
@return: the Person ID attached to the user or -1 if none found
'''
if isinstance(uid, tuple):
uid = uid[0][0]
assert False, ("AAAAARGH problem in get_pid_from_uid webapi. Got uid as a tuple instead of int.Uid = %s" % str(uid))
pid = dbapi.get_author_by_uid(uid)
if not pid:
return -1
return pid
def get_possible_bibrefs_from_pid_bibrec(pid, bibreclist, always_match=False, additional_names=None):
'''
Returns for each bibrec a list of bibrefs for which the surname matches.
@param pid: person id to gather the names strings from
@param bibreclist: list of bibrecs on which to search
@param always_match: match all bibrefs no matter the name
@param additional_names: [n1,...,nn] names to match other than the one from personid
'''
pid = wash_integer_id(pid)
pid_names = dbapi.get_author_names_from_db(pid)
if additional_names:
pid_names += zip(additional_names)
lists = []
for bibrec in bibreclist:
lists.append([bibrec, dbapi.get_matching_bibrefs_for_paper([n[0] for n in pid_names], bibrec,
always_match)])
return lists
def get_processed_external_recids(pid):
'''
Get list of records that have been processed from external identifiers
@param pid: Person ID to look up the info for
@type pid: int
@return: list of record IDs
@rtype: list of strings
'''
list_str = dbapi.get_processed_external_recids(pid)
return list_str.split(";")
def get_review_needing_records(pid):
'''
Returns list of records associated with pid which are in need of review
(only the bibrec is known, no bibref selected yet)
@param pid: pid
'''
pid = wash_integer_id(pid)
db_data = dbapi.get_person_papers_to_be_manually_reviewed(pid)
return [int(row[0][1]) for row in db_data if row[0][1]]
def get_uid_from_personid(pid):
'''
Return the uid associated with the pid
@param pid: the person id
@type uid: int
@return: the internal ID of a user or -1 if none found
'''
result = dbapi.get_uid_of_author(pid)
if not result:
return -1
return result
def get_user_level(uid):
'''
Finds and returns the aid-universe-internal numeric user level
@param uid: the user's id
@type uid: int
@return: A numerical representation of the maximum access level of a user
@rtype: int
'''
actions = [row[1] for row in acc_find_user_role_actions({'uid': uid})]
return max([dbapi.get_paper_access_right(acc) for acc in actions])
def search_person_ids_by_name(namequery, limit_to_recid=None):
'''
Searches the database for person IDs matching the given name query
@param namequery: the search query the user entered
@type namequery: string
@return: information about the result w/ probability and occurrence
@rtype: tuple of tuple
'''
query = ""
escaped_query = ""
try:
query = str(namequery)
except (ValueError, TypeError):
return list()
if query:
escaped_query = escape(query, quote=True)
else:
return list()
results = dbapi.find_personIDs_by_name_string(escaped_query)
if not limit_to_recid:
return results
else:
limit_to_persons = set([x[0] for x in dbapi.get_author_to_papers_mapping([limit_to_recid])])
return filter(lambda x: x[0] in limit_to_persons, results)
############################################
# DB Data Mutators #
############################################
def add_person_comment(person_id, message):
'''
Adds a comment to a person after enriching it with meta-data (date+time)
@param person_id: person id to assign the comment to
@type person_id: int
@param message: defines the comment to set
@type message: string
@return the message incl. the metadata if everything was fine, False on err
@rtype: string or boolean
'''
msg = ""
pid = -1
try:
msg = str(message)
pid = int(person_id)
except (ValueError, TypeError):
return False
strtimestamp = strftime("%Y-%m-%d %H:%M:%S", gmtime())
msg = escape(msg, quote=True)
dbmsg = "%s;;;%s" % (strtimestamp, msg)
dbapi.set_person_data(pid, "comment", dbmsg)
return dbmsg
def add_person_external_id(person_id, ext_sys, ext_id, userinfo=''):
'''
Adds an external id for the person
@param person_id: person id
@type person_id: int
@param ext_sys: external system
@type ext_sys: str
@param ext_id: external id
@type ext_id: str
'''
if userinfo.count('||'):
uid = userinfo.split('||')[0]
else:
uid = ''
tag = 'extid:%s' % ext_sys
dbapi.set_person_data(person_id, tag, ext_id)
webauthorapi.expire_all_cache_for_personid(person_id)
log_value = '%s %s %s' % (person_id, tag, ext_id)
dbapi.insert_user_log(userinfo, person_id, 'data_insertion', 'CMPUI_addexternalid', log_value, 'External id manually added.', userid=uid)
def set_person_uid(person_id, dest_uid, userinfo=''):
'''
Associates an invenio user id (uid) with the person
@param person_id: person id
@type person_id: int
@param dest_uid: user id to associate with the person
@type dest_uid: int
'''
if userinfo.count('||'):
uid = userinfo.split('||')[0]
else:
uid = ''
dbapi.add_userid_to_author(person_id, int(dest_uid))
log_value = '%s %s %s' % (person_id, 'uid', int(dest_uid))
dbapi.insert_user_log(userinfo, person_id, 'data_insertion', 'CMPUI_set_uid', log_value, 'UID manually set to person.', userid=uid)
def add_review_needing_record(pid, bibrec_id):
'''
Add record in need of review to a person
@param pid: pid
@param bibrec_id: bibrec
'''
pid = wash_integer_id(pid)
bibrec_id = wash_integer_id(bibrec_id)
dbapi.add_person_paper_needs_manual_review(pid, bibrec_id)
def delete_person_external_ids(person_id, existing_ext_ids, userinfo=''):
'''
Deletes external ids of the person
@param person_id: person id
@type person_id: int
@param existing_ext_ids: external ids to delete
@type existing_ext_ids: list
'''
if userinfo.count('||'):
uid = userinfo.split('||')[0]
else:
uid = ''
deleted_ids = []
for el in existing_ext_ids:
if el.count('||'):
ext_sys = el.split('||')[0]
ext_id = el.split('||')[1]
else:
continue
tag = 'extid:%s' % ext_sys
dbapi.del_person_data(tag, person_id, ext_id)
deleted_ids.append((person_id, tag, ext_id))
dbapi.insert_user_log(userinfo, person_id, 'data_deletion', 'CMPUI_deleteextid', '', 'External ids manually deleted: ' + str(deleted_ids), userid=uid)
def del_review_needing_record(pid, bibrec_id):
'''
Removes a record in need of review from a person
@param pid: personid
@param bibrec_id: bibrec
'''
pid = wash_integer_id(pid)
bibrec_id = wash_integer_id(bibrec_id)
dbapi.del_person_papers_needs_manual_review(pid, bibrec_id)
def insert_log(userinfo, personid, action, tag, value, comment='', transactionid=0):
'''
Log an action performed by a user
Examples (in the DB):
1 2010-09-30 19:30 admin||10.0.0.1 1 assign paper 1133:4442 'from 23'
1 2010-09-30 19:30 admin||10.0.0.1 1 assign paper 8147:4442
2 2010-09-30 19:35 admin||10.0.0.1 1 reject paper 72:4442
@param userinfo: information about the user [UID|IP]
@type userinfo: string
@param personid: ID of the person this action is targeting
@type personid: int
@param action: intended action
@type action: string
@param tag: A tag to describe the data entered
@type tag: string
@param value: The value of the action described by the tag
@type value: string
@param comment: Optional comment to describe the transaction
@type comment: string
@param transactionid: May group bulk operations together
@type transactionid: int
@return: Returns the current transactionid
@rtype: int
'''
userinfo = escape(str(userinfo))
action = escape(str(action))
tag = escape(str(tag))
value = escape(str(value))
comment = escape(str(comment))
if not isinstance(personid, int):
try:
personid = int(personid)
except (ValueError, TypeError):
return -1
if not isinstance(transactionid, int):
try:
transactionid = int(transactionid)
except (ValueError, TypeError):
return -1
if userinfo.count('||'):
uid = userinfo.split('||')[0]
else:
uid = ''
return dbapi.insert_user_log(userinfo, personid, action, tag,
value, comment, transactionid, userid=uid)
def move_internal_id(person_id_of_owner, person_id_of_receiver):
'''
Assign an existing uid to another profile while keeping it in the old profile under the tag 'uid-old'
@param person_id_of_owner: Person ID of the profile that currently has the internal id
@type person_id_of_owner: int
@param person_id_of_receiver: Person ID of the profile that will be assigned the internal id
@type person_id_of_receiver: int
'''
internal_id = dbapi.get_uid_of_author(person_id_of_owner)
if not internal_id:
return False
dbapi.mark_internal_id_as_old(person_id_of_owner, internal_id)
dbapi.add_author_data(person_id_of_receiver, 'uid', internal_id)
return True
def move_external_ids(person_id_of_owner, person_id_of_receiver):
'''
Assign existing external ids to another profile
@param person_id_of_owner: Person ID of the profile that currently has the external ids
@type person_id_of_owner: int
@param person_id_of_receiver: Person ID of the profile that will be assigned the external ids
@type person_id_of_receiver: int
'''
pass
def set_processed_external_recids(pid, recid_list):
'''
Set list of records that have been processed from external identifiers
@param pid: Person ID to set the info for
@type pid: int
@param recid_list: list of recids
@type recid_list: list of int
'''
if isinstance(recid_list, list):
recid_list_str = ";".join(recid_list)
dbapi.set_processed_external_recids(pid, recid_list_str)
def swap_person_canonical_name(person_id, desired_cname, userinfo=''):
'''
Swaps the canonical names of person_id and the person who currently holds the desired canonical name.
@param person_id: int
@param desired_cname: string
'''
personid_with_desired_cname = get_person_id_from_canonical_id(desired_cname)
if personid_with_desired_cname == person_id:
return
if userinfo.count('||'):
uid = userinfo.split('||')[0]
else:
uid = ''
current_cname = get_canonical_id_from_person_id(person_id)
create_log_personid_with_desired_cname = False
# nobody holds the desired canonical name
if personid_with_desired_cname == -1:
dbapi.modify_canonical_name_of_authors([(person_id, desired_cname)])
# person_id doesn't own a canonical name
elif not isinstance(current_cname, str):
dbapi.modify_canonical_name_of_authors([(person_id, desired_cname)])
dbapi.update_canonical_names_of_authors([personid_with_desired_cname], overwrite=True)
create_log_personid_with_desired_cname = True
# both person_id and personid_with_desired_cname own a canonical name
else:
dbapi.modify_canonical_name_of_authors([(person_id, desired_cname), (personid_with_desired_cname, current_cname)])
create_log_personid_with_desired_cname = True
dbapi.insert_user_log(userinfo, person_id, 'data_update', 'CMPUI_changecanonicalname', '', 'Canonical name manually updated.', userid=uid)
if create_log_personid_with_desired_cname:
dbapi.insert_user_log(userinfo, personid_with_desired_cname, 'data_update', 'CMPUI_changecanonicalname', '', 'Canonical name manually updated.', userid=uid)
def update_person_canonical_name(person_id, canonical_name, userinfo=''):
'''
Updates a person's canonical name
@param person_id: person id
@param canonical_name: string
'''
if userinfo.count('||'):
uid = userinfo.split('||')[0]
else:
uid = ''
dbapi.update_canonical_names_of_authors([person_id], overwrite=True, suggested=canonical_name)
dbapi.insert_user_log(userinfo, person_id, 'data_update', 'CMPUI_changecanonicalname', '', 'Canonical name manually updated.', userid=uid)
############################################
# NOT TAGGED YET #
############################################
def wash_integer_id(param_id):
'''
Creates an int out of either int or string
@param param_id: the number to be washed
@type param_id: int or string
@return: The int representation of the param or -1
@rtype: int
'''
pid = -1
try:
pid = int(param_id)
except (ValueError, TypeError):
return (-1)
return pid
def is_valid_bibref(bibref):
'''
Determines if the provided string is a valid bibref-bibrec pair
@param bibref: the bibref-bibrec pair that unambiguously identifies a paper
@type bibref: string
@return: True if it is a bibref-bibrec pair and False if it's not
@rtype: boolean
'''
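    # Illustrative, hypothetical values: '100:1234,5678' is accepted (table 100,
    # bibref 1234, bibrec 5678), while '100:1234' or '1234,5678' are rejected
    # because the ',' or the ':' separator is missing.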
if (not isinstance(bibref, str)) or (not bibref):
return False
if not bibref.count(":"):
return False
if not bibref.count(","):
return False
try:
table = bibref.split(":")[0]
ref = bibref.split(":")[1].split(",")[0]
bibrec = bibref.split(":")[1].split(",")[1]
except IndexError:
return False
try:
table = int(table)
ref = int(ref)
bibrec = int(bibrec)
except (ValueError, TypeError):
return False
return True
def is_valid_canonical_id(cid):
'''
Checks if presented canonical ID is valid in structure
Must be of structure: ([Initial|Name]\.)*Lastname\.Number
Example of valid cid: J.Ellis.1
@param cid: The canonical ID to check
@type cid: string
@return: Is it valid?
@rtype: boolean
'''
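    # Illustrative values: 'J.Ellis.1' is valid (see the docstring above), while
    # 'J.Ellis' is rejected because its last dot-separated part is not a number,
    # and 'Ellis' is rejected because it contains no '.' at all.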
if not cid.count("."):
return False
xcheck = -1
sp = cid.split(".")
if not (len(sp) > 1 and sp[-1]):
return False
try:
xcheck = int(sp[-1])
except (ValueError, TypeError, IndexError):
return False
if xcheck and xcheck > -1:
return True
else:
return False
def author_has_papers(pid):
'''
Checks if the given author identifier has papers.
@param pid: author identifier
@type pid: int
@return: author has papers
@rtype: bool
'''
try:
pid = int(pid)
except ValueError:
return False
papers = dbapi.get_papers_of_author(pid)
if papers:
return True
return False
def user_can_modify_data(uid, pid):
'''
Determines if a user may modify the data of a person
@param uid: the id of a user (invenio user id)
@type uid: int
@param pid: the id of a person
@type pid: int
@return: True if the user may modify data, False if not
@rtype: boolean
@raise ValueError: if the supplied parameters are invalid
'''
if not isinstance(uid, int):
try:
uid = int(uid)
except (ValueError, TypeError):
raise ValueError("User ID has to be a number!")
if not isinstance(pid, int):
try:
pid = int(pid)
except (ValueError, TypeError):
raise ValueError("Person ID has to be a number!")
return dbapi.user_can_modify_data_of_author(uid, pid)
def user_can_modify_paper(uid, paper):
'''
Determines if a user may modify the record assignments of a person
@param uid: the id of a user (invenio user id)
@type uid: int
@param paper: the bibref-bibrec pair identifying the paper
@type paper: string
@return: True if the user may modify data, False if not
@rtype: boolean
@raise ValueError: if the supplied parameters are invalid
'''
if not isinstance(uid, int):
try:
uid = int(uid)
except (ValueError, TypeError):
raise ValueError("User ID has to be a number!")
if not paper:
raise ValueError("A bibref is expected!")
return dbapi.user_can_modify_paper(uid, paper)
def person_bibref_is_touched_old(pid, bibref):
'''
Determines if an assignment has been touched by a user (i.e. check for
the flag of an assignment being 2 or -2)
@param pid: the id of the person to check against
@type pid: int
@param bibref: the bibref-bibrec pair that unambiguously identifies a paper
@type bibref: string
@raise ValueError: if the supplied parameters are invalid
'''
if not isinstance(pid, int):
try:
pid = int(pid)
except (ValueError, TypeError):
raise ValueError("Person ID has to be a number!")
if not bibref:
raise ValueError("A bibref is expected!")
return dbapi.paper_affirmed_from_user_input(pid, bibref)
#def is_logged_in_through_arxiv(req):
# '''
# Checks if the user is logged in through the arXiv.
#
# @param req: Apache request object
# @type req: Apache request object
# '''
# session = get_session(req)
# #THOMAS: ask samK about this variables: probably it would be better to rename them in the session as arxiv_sso_blabla
# #THOMAS: ask samK if this is correct, what other way there is to discover is we are SSOed through arxiv?
# #user_info = collect_user_info(req)
# #isGuestUser(req)
# # TO DO THIS SHOULD BE CHANGED
# if 'user_info' in session.keys() and 'email' in session['user_info'].keys() and session['user_info']['email']:
# return True
# return False
#
#def is_logged_in_through_orcid(req):
# '''
# Checks if the user is logged in through the orcid.
#
# @param req: Apache request object
# @type req: Apache request object
# '''
# session_bareinit(req)
# session = get_session(req)
# pinfo = session['personinfo']
#
# if 'orcid' in pinfo and pinfo['orcid']['id'] and pinfo['orcid']['access_token']:
# return True
#
# return False
def get_user_role(req):
'''
Determines whether a user is guest, user or admin
'''
minrole = 'guest'
role = 'guest'
if not req:
return minrole
uid = getUid(req)
if not isinstance(uid, int):
return minrole
admin_role_id = acc_get_role_id(bconfig.CLAIMPAPER_ADMIN_ROLE)
user_role_id = acc_get_role_id(bconfig.CLAIMPAPER_USER_ROLE)
user_roles = acc_get_user_roles(uid)
if admin_role_id in user_roles:
role = 'admin'
elif user_role_id in user_roles:
role = 'user'
if role == 'guest' and is_external_user(uid):
role = 'user'
return role
def get_hepnames(person_id, bibauthorid_data=None):
'''
Returns hepnames data.
@param bibauthorid_data: dict with 'is_baid':bool, 'cid':canonicalID, 'pid':personid
@return: [data, bool]
'''
def get_bibauthorid_data(person_id):
bibauthorid_data = {"is_baid": True, "pid": person_id, "cid": person_id}
cname = get_person_redirect_link(person_id)
if is_valid_canonical_id(cname):
bibauthorid_data['cid'] = cname
return bibauthorid_data
if bibauthorid_data is None:
bibauthorid_data = get_bibauthorid_data(person_id)
searchid = '035:"%s"' % bibauthorid_data['cid']
hepRecord = perform_request_search(rg=0, cc='HepNames', p=' %s ' % searchid)[:CFG_WEBAUTHORPROFILE_MAX_HEP_CHOICES]
hepnames_data = {}
hepnames_data['cid'] = bibauthorid_data['cid']
hepnames_data['pid'] = person_id
if not hepRecord or len(hepRecord) > 1:
# present choice dialog with alternatives?
dbnames = [name for name, count in dbapi.get_names_of_author(person_id)]
query = ' or '.join(['author:"%s"' % str(n) for n in dbnames])
additional_records = perform_request_search(rg=0, cc='HepNames', p=query)[:CFG_WEBAUTHORPROFILE_MAX_HEP_CHOICES]
hepRecord += additional_records
hepnames_data['HaveHep'] = False
hepnames_data['HaveChoices'] = bool(hepRecord)
# limit possible choices!
hepnames_data['HepChoices'] = [(format_record(x, 'hb'), x) for x in hepRecord ]
hepnames_data['heprecord'] = hepRecord
hepnames_data['bd'] = bibauthorid_data
else:
# show the heprecord we just found.
hepnames_data['HaveHep'] = True
hepnames_data['HaveChoices'] = False
hepnames_data['heprecord'] = format_record(hepRecord[0], 'hd')
hepnames_data['bd'] = bibauthorid_data
return hepnames_data
def _update_ulevel(req, pinfo):
if 'ulevel' not in pinfo:
uid = getUid(req)
ulevel = get_user_role(req)
if isUserSuperAdmin({'uid': uid}):
ulevel = 'admin'
pinfo['ulevel'] = ulevel
def _update_uid(req, pinfo):
if 'uid' not in pinfo:
pinfo['uid'] = int(getUid(req))
def _update_pid(req, pinfo):
if 'pid' not in pinfo:
pinfo['pid'] = int(get_pid_from_uid(getUid(req)))
def _initialize_should_check_to_autoclaim(pinfo):
if 'should_check_to_autoclaim' not in pinfo:
pinfo['should_check_to_autoclaim'] = False
def _initialize_login_info_message(pinfo):
if 'login_info_message' not in pinfo:
pinfo["login_info_message"] = None
def _initialize_merge_info_message(pinfo):
if 'merge_info_message' not in pinfo:
pinfo["merge_info_message"] = None
def _initialize_claimpaper_admin_last_viewed_pid(pinfo):
if "claimpaper_admin_last_viewed_pid" not in pinfo:
pinfo["claimpaper_admin_last_viewed_pid"] = -2
def _initialize_ln(pinfo):
if 'ln' not in pinfo:
pinfo["ln"] = 'en'
def _initialize_merge_primary_profile(pinfo):
if 'merge_primary_profile' not in pinfo:
pinfo["merge_primary_profile"] = None
def _initialize_merge_profiles(pinfo):
if 'merge_profiles' not in pinfo:
pinfo["merge_profiles"] = list()
def _initialize_orcid(pinfo):
if 'orcid' not in pinfo:
pinfo['orcid'] = {'imported_pubs': list(), 'import_pubs': False, 'has_orcid_id': False}
def _initialize_arxiv_status(pinfo):
if 'arxiv_status' not in pinfo:
pinfo['arxiv_status'] = False
def _initialize_autoclaim(pinfo):
if not 'autoclaim' in pinfo:
pinfo['autoclaim'] = dict()
pinfo['autoclaim']['ticket'] = list()
pinfo['autoclaim']['external_pubs_association'] = dict()
pinfo['autoclaim']['res'] = None
def _initialize_marked_visit(pinfo):
if 'marked_visit' not in pinfo:
pinfo['marked_visit'] = None
def _initialize_visit_diary(pinfo):
if 'visit_diary' not in pinfo:
pinfo['visit_diary'] = defaultdict(list)
def _initialize_diary_size_per_category(pinfo):
if 'diary_size_per_category' not in pinfo:
pinfo['diary_size_per_category'] = 5
def _initialize_most_compatible_person(pinfo):
if 'most_compatible_person' not in pinfo:
pinfo['most_compatible_person'] = None
def _initialize_profile_suggestion_info(pinfo):
if 'profile_suggestion_info' not in pinfo:
pinfo["profile_suggestion_info"] = None
def _initialize_ticket(pinfo):
if 'ticket' not in pinfo:
pinfo["ticket"] = list()
def _initialize_users_open_tickets_storage(pinfo):
if 'users_open_tickets_storage' not in pinfo:
pinfo["users_open_tickets_storage"] = list()
def _initialize_claim_in_process(pinfo):
if 'claim_in_process' not in pinfo:
pinfo['claim_in_process'] = False
def _initialize_incomplete_autoclaimed_tickets_storage(pinfo):
if 'incomplete_autoclaimed_tickets_storage' not in pinfo:
pinfo["incomplete_autoclaimed_tickets_storage"] = list()
def _initialize_remote_login_system(pinfo):
if 'remote_login_system' not in pinfo:
pinfo["remote_login_system"] = dict()
for system in CFG_BIBAUTHORID_ENABLED_REMOTE_LOGIN_SYSTEMS:
if system not in pinfo["remote_login_system"]:
pinfo['remote_login_system'][system] = {'name': None, 'email': None}
def session_bareinit(req):
'''
Initializes session personinfo entry if none exists
@param req: Apache Request Object
@type req: Apache Request Object
'''
session = get_session(req)
if 'personinfo' not in session:
session['personinfo'] = dict()
pinfo = session['personinfo']
_update_ulevel(req, pinfo)
_update_uid(req, pinfo)
_update_pid(req, pinfo)
_initialize_should_check_to_autoclaim(pinfo)
_initialize_login_info_message(pinfo)
_initialize_merge_info_message(pinfo)
_initialize_claimpaper_admin_last_viewed_pid(pinfo)
_initialize_ln(pinfo)
_initialize_merge_primary_profile(pinfo)
_initialize_merge_profiles(pinfo)
_initialize_orcid(pinfo)
_initialize_arxiv_status(pinfo)
_initialize_autoclaim(pinfo)
_initialize_marked_visit(pinfo)
_initialize_visit_diary(pinfo)
_initialize_diary_size_per_category(pinfo)
_initialize_most_compatible_person(pinfo)
_initialize_profile_suggestion_info(pinfo)
_initialize_ticket(pinfo)
_initialize_users_open_tickets_storage(pinfo)
_initialize_claim_in_process(pinfo)
_initialize_incomplete_autoclaimed_tickets_storage(pinfo)
_initialize_remote_login_system(pinfo)
session.dirty = True
# all the get_info methods should standardize the content:
def get_arxiv_info(req, uinfo):
session_bareinit(req)
session = get_session(req)
arXiv_info = dict()
try:
name = uinfo['external_firstname']
except KeyError:
name = ''
try:
surname = uinfo['external_familyname']
except KeyError:
surname = ''
if surname:
session['personinfo']['remote_login_system']['arXiv']['name'] = nameapi.create_normalized_name(
nameapi.split_name_parts(surname + ', ' + name))
else:
session['personinfo']['remote_login_system']['arXiv']['name'] = ''
session['personinfo']['remote_login_system']['arXiv']['email'] = uinfo['email']
arXiv_info['name'] = session['personinfo']['remote_login_system']['arXiv']['name']
arXiv_info['email'] = uinfo['email']
session.dirty = True
return arXiv_info
# {the dictionary we define in _webinterface}
# all the get_info methods should standardize the content:
def get_orcid_info(req, uinfo):
return dict()
# {the dictionary we define in _webinterface}
def get_remote_login_systems_info(req, remote_logged_in_systems):
'''
For every remote login system the user is logged in through, get all of its info (except records) and store it in a session dictionary
@param req: Apache request object
@type req: Apache request object
@param remote_logged_in_systems: contains all remote login systems that the user is logged in through
@type remote_logged_in_systems: dict
'''
session_bareinit(req)
user_remote_logged_in_systems_info = dict()
uinfo = collect_user_info(req)
for system in remote_logged_in_systems:
user_remote_logged_in_systems_info[system] = REMOTE_LOGIN_SYSTEMS_FUNCTIONS[system](req, uinfo)
return user_remote_logged_in_systems_info
def get_ids_from_arxiv(req):
'''
Collects the external ids that the user has in arXiv.
@param req: Apache request object
@type req: Apache request object
@return: external ids
@rtype: list
'''
uinfo = collect_user_info(req)
current_external_ids = []
if 'external_arxivids' in uinfo.keys() and uinfo['external_arxivids']:
current_external_ids = uinfo['external_arxivids'].split(';')
return current_external_ids
def get_ids_from_orcid(req):
'''
Collects the external ids that the user has in orcid.
@param req: Apache request object
@type req: Apache request object
@return: external ids
@rtype: list
'''
session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
dois = list()
if 'imported_pubs' in pinfo['orcid']:
for doi in pinfo['orcid']['imported_pubs']:
dois.append(doi)
return dois
def get_external_ids_type(external_id):
pass
def get_external_ids_to_recids_association(req, external_ids):
'''
Associates the external ids of remote login systems to inspire recids
@param req: Apache request object
@type req: Apache request object
@param external_ids: external ids
@type external_ids: list
@return: recids
@rtype: list
'''
session = get_session(req)
pinfo = session['personinfo']
recids_from_external_system = []
# stored so far association in the session
cached_ids_association = pinfo['autoclaim']['external_pubs_association']
for external_id in external_ids:
id_type = is_arxiv_id_or_doi(external_id)
if (id_type, external_id) in cached_ids_association:
recid = cached_ids_association[(id_type, external_id)]
recids_from_external_system.append(recid)
else:
# recid_list = perform_request_search(p=bconfig.CFG_BIBAUTHORID_REMOTE_LOGIN_SYSTEMS_IDENTIFIERS['arXiv'] + str(arxivid), of='id', rg=0)
recid_list = perform_request_search(p=external_id, f=bconfig.CFG_BIBAUTHORID_REMOTE_LOGIN_SYSTEMS_IDENTIFIERS[id_type], m1='e', cc='HEP')
if len(recid_list) == 1:
recid = recid_list[0]
recids_from_external_system.append(recid)
cached_ids_association[(id_type, external_id)] = recid
pinfo['autoclaim']['external_pubs_association'] = cached_ids_association
session.dirty = True
return recids_from_external_system
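# Illustrative sketch (not executed anywhere): the identifiers below are
# hypothetical and only show the expected shapes and the caching behaviour.
#
#   external_ids = ['1234.5678', '10.1000/xyz123']   # one arXiv id, one DOI
#   recids = get_external_ids_to_recids_association(req, external_ids)
#   # The first call searches for each id; the resulting
#   # (id_type, external_id) -> recid pairs are cached in
#   # session['personinfo']['autoclaim']['external_pubs_association'],
#   # so later calls for the same ids skip the search entirely.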
def get_remote_login_systems_recids(req, remote_logged_in_systems):
'''
    Collects the equivalent INSPIRE record ids for the external ids of every remote login system that the user is logged in through.
@param req: Apache request object
@type req: Apache request object
@param remote_logged_in_systems: the remote logged in systems
@type remote_logged_in_systems: list
@return: recids
@rtype: list
'''
session_bareinit(req)
remote_login_systems_recids = []
for system in remote_logged_in_systems:
# collect system's external ids
external_ids = REMOTE_LOGIN_SYSTEMS_GET_RECIDS_FUNCTIONS[system](req)
# associate the external ids to recids
system_recids = get_external_ids_to_recids_association(req, external_ids)
remote_login_systems_recids += system_recids
# mocking
#remote_login_systems_recids = [14, 18, 8, 11]
return list(set(remote_login_systems_recids))
def get_cached_id_association(req):
'''
get external ids to recid association saved in the session so far
@param req: Apache request object
@type req: Apache request object
    @return: the association in the following form: {(system1, external_id1):recid1, (system1, external_id2):recid2, (system2, external_id3):recid3...}
@rtype: dict
'''
session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
return pinfo['autoclaim']['external_pubs_association']
def get_user_pid(uid):
'''
find user's pid by his uid
@param uid: the user ID to check permissions for
@type uid: int
@return: user's person id or -1 if has none
@rtype: int
'''
pid = dbapi.get_author_by_uid(uid)
if not pid:
return -1
return pid
def merge_is_allowed(primary_pid, pids_to_merge, is_admin):
'''
    Check if merging is allowed by counting how many of the profiles are connected to a user. Merging can be performed
    only if at most one profile is connected to a user. Only admins can merge profiles when 2 or more of them have claimed papers.
    @param primary_pid: person id of the primary profile
    @type primary_pid: int
    @param pids_to_merge: person ids of the profiles to merge into the primary one
    @type pids_to_merge: list
    @param is_admin: whether the current user is an admin
    @type is_admin: boolean
    @return: whether merge is allowed and, if not, the person id blocking it
    @rtype: tuple(boolean, int or None)
'''
try:
primary_orcid = dbapi.get_orcid_id_of_author(primary_pid)[0][0]
except IndexError:
primary_orcid = None
for pid in pids_to_merge:
has_uid = bool(dbapi.get_uid_of_author(pid))
if has_uid:
return False, pid
if primary_orcid:
try:
orcid = dbapi.get_orcid_id_of_author(pid)[0][0]
except IndexError:
orcid = None
if orcid and primary_orcid != orcid:
return False, pid
if not is_admin:
has_claimed_papers = bool(dbapi.get_claimed_papers_of_author(pid))
if has_claimed_papers:
return False, pid
return True, None
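# Usage sketch (hypothetical person ids): merging profiles 2 and 3 into
# profile 1 is refused as soon as one of them is attached to a uid, carries
# an ORCID different from the primary one, or (for non-admins) has claimed
# papers; the blocking pid is returned so the caller can report it.
#
#   allowed, blocking_pid = merge_is_allowed(1, [2, 3], is_admin=False)
#   if not allowed:
#       report_merge_conflict(blocking_pid)   # hypothetical error handler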
#def open_ticket_for_papers_of_merged_profiles(req, primary_profile, profiles):
# '''
# instead of actually merging the papers it opens a ticket for them to be merged
# '''
# records = dbapi.defaultdict(list)
#
# profiles.append(primary_profile)
# for pid in profiles:
# papers = get_papers_by_person_id(pid)
# if papers:
# for rec_info in papers:
# records[rec_info[0]] += [rec_info[1]]
#
# recs_to_merge = []
# for recid in records.keys():
# # if more than one with the same recid we append only the recid and we let the user to solve tha problem in ticket_review
# if len(records[recid]) > 1:
# recs_to_merge.append(recid)
# else:
# recs_to_merge.append(records[recid][0])
#
# add_tickets(req, primary_profile, recs_to_merge, 'assign')
def get_papers_of_merged_profiles(primary_profile, profiles):
'''
Get the papers of the merged profiles that can be merged
@param primary_profile: the Person id of the primary profile
@type primary_profile: int
@param profiles: a list of person ids
@type profiles: list
@return: bibrecrefs
@rtype: list
'''
records = dict()
    # firstly the papers of the primary profile should be added as they should
    # be preferred over similar papers of other profiles with the same level of claim
for pid in [primary_profile] + profiles:
papers = get_papers_by_person_id(pid)
for paper in papers:
# if paper is rejected skip
if paper[2] == -2:
continue
# if there is already a paper with the same record
# and the new one is claimed while the existing one is not
# keep only the claimed one
            if paper[0] not in records:
                records[paper[0]] = paper
            elif records[paper[0]] and records[paper[0]][2] == 0 and paper[2] == 2:
                records[paper[0]] = paper
return [records[recid] for recid in records.keys()]
def get_uid_for_merged_profiles(persons_data):
'''
    Get the uid of the merged profiles. There should be at most one.
@param persons_data: data of the profiles
@type persons_data: dict
@return: uid tuple
@rtype: tuple
'''
for pid in persons_data.keys():
for data in persons_data[pid]:
if data[-1] == 'uid':
return data
return None
def get_data_union_for_merged_profiles(persons_data, new_profile_bibrecrefs):
'''
    Get the union of all the data that exist in the given profiles, except for the papers, the uid, the canonical ids and the rt repeal tickets
@param persons_data: data of the profiles
@type persons_data: dict
@param new_profile_bibrecrefs: the bibrecrefs of the new profile
@type new_profile_bibrecrefs: list
@return: union of persons' data
@rtype: list
'''
new_profile_data = list()
# rt_new_counter will deal with the enumeration of rt_ticket in the merged profile
rt_new_counter = 1
rt_old_counter = -1
for pid in persons_data.keys():
for data in persons_data[pid]:
if data[-1].startswith("rt_repeal") and not data[0] in new_profile_bibrecrefs:
continue
elif data[-1] == 'uid':
continue
elif data[-1] == 'canonical_name':
continue
elif data[-1].startswith("rt_"):
if rt_old_counter != data[1]:
rt_old_counter = data[1]
rt_new_counter += 1
data = (data[0],rt_new_counter,data[2],data[3],data[4])
new_profile_data.append(data)
return list(set(new_profile_data))
def merge_profiles(primary_pid, pids_to_merge):
def merge_papers():
primary_recs = [rec[0] for rec in dbapi.get_papers_of_author(primary_pid)]
for pid in pids_to_merge:
papers_data = list(dbapi.get_all_paper_data_of_author(pid))
for paper_data in list(papers_data):
rec = paper_data[3]
if rec in primary_recs:
papers_data.remove(paper_data)
dbapi.transfer_papers_to_author(papers_data, primary_pid)
def merge_data():
primary_request_tickets = dbapi.get_request_tickets_for_author(primary_pid)
for pid in pids_to_merge:
author_data = list(dbapi.get_all_author_data_of_author(pid))
for data in list(author_data):
tag = data[1]
if tag in ['canonical_name', 'arxiv_papers']:
author_data.remove(data)
elif tag in ['request_tickets']:
author_data.remove(data)
request_tickets = dbapi.get_request_tickets_for_author(pid)
dbapi.remove_request_ticket_for_author(pid)
primary_request_tickets += request_tickets
dbapi.transfer_data_to_author(author_data, primary_pid)
dbapi.remove_request_ticket_for_author(primary_pid)
for request_ticket in primary_request_tickets:
try:
del request_ticket['tid']
except KeyError:
pass
dbapi.update_request_ticket_for_author(primary_pid, request_ticket)
merge_papers()
merge_data()
dbapi.remove_empty_authors()
def auto_claim_papers(req, pid, recids):
'''
    Finds the unclaimed recids and adds them to the ticket
@param req: Apache request object
@type req: Apache request object
@param pid: the Person id
@type pid: int
@param recids: the records that need to be autoclaimed
@type recids: list
'''
session_bareinit(req)
# retrieve users existing papers
pid_bibrecs = set([i[0] for i in dbapi.get_all_personids_recs(pid, claimed_only=True)])
# retrieve the papers that need to be imported
missing_bibrecs = list(set(recids) - pid_bibrecs)
# store any users open ticket elsewhere until we have processed the autoclaimed tickets
store_users_open_tickets(req)
# add autoclaimed tickets to the session
add_tickets(req, pid, missing_bibrecs, 'assign')
def get_name_variants_list_from_remote_systems_names(remote_login_systems_info):
'''
    Return the names that a user has in the external systems
    @param remote_login_systems_info: info of all remote login systems that the user is logged in through
    @type remote_login_systems_info: dict
@return: name variants
@rtype: list
'''
name_variants = []
for system in remote_login_systems_info.keys():
try:
name = remote_login_systems_info[system]['name']
name_variants.append(name)
except KeyError:
pass
return list(set(name_variants))
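# Expected input shape, as a sketch (keys depend on the enabled remote
# systems; all values are illustrative):
#
#   remote_login_systems_info = {
#       'arXiv': {'name': 'Doe, John', 'email': 'j.doe@example.org'},
#       'orcid': {},   # entries without a 'name' key are simply skipped
#   }
#   get_name_variants_list_from_remote_systems_names(remote_login_systems_info)
#   # -> ['Doe, John']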
def match_profile(req, recids, remote_login_systems_info):
'''
    Find whether a profile in INSPIRE matches the profile that the user has in arXiv
    (judging from the papers, the name, etc.)
@param req: Apache request object
@type req: Apache request object
@param recids: arXiv record ids
@type recids: list
    @param remote_login_systems_info: info of all remote login systems that the user is logged in through
    @type remote_login_systems_info: dict
@return: person id of the most compatible person
@rtype: int
'''
session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
most_compatible_person = pinfo['most_compatible_person']
    if most_compatible_person is not None:
return most_compatible_person
name_variants = get_name_variants_list_from_remote_systems_names(remote_login_systems_info)
most_compatible_person = dbapi.find_most_compatible_person(recids, name_variants)
pinfo['most_compatible_person'] = most_compatible_person
return most_compatible_person
def get_profile_suggestion_info(req, pid, recids_in_arXiv):
'''
get info on the profile that we are suggesting to the user coming from an external system to login
@param req: Apache request object
@type req: Apache request object
@param pid: the profile's id
@type pid: int
@param recids_in_arXiv: recids from arxiv
@type recids_in_arXiv: list
    @return: info on the suggested profile (name, canonical id, paper overlap)
@rtype: dict
'''
session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
profile_suggestion_info = pinfo['profile_suggestion_info']
    if profile_suggestion_info is not None and pid == profile_suggestion_info['pid']:
return profile_suggestion_info
profile_suggestion_info = dict()
profile_suggestion_info['canonical_id'] = dbapi.get_canonical_name_of_author(pid)
name_variants = [element[0] for element in get_person_names_from_id(pid)]
name = most_relevant_name(name_variants)
profile_suggestion_info['name_string'] = "[No name available] "
profile_suggestion_info['num_of_arXiv_papers'] = len(recids_in_arXiv)
# find the number of papers that are both in recids and probable person's papers
profile_suggestion_info['num_of_recids_intersection'] = len(set(recids_in_arXiv) & set([bibrecref[0] for bibrecref in get_papers_by_person_id(pid)]))
    if name is not None:
profile_suggestion_info['name_string'] = name
if len(profile_suggestion_info['canonical_id']) > 0:
profile_suggestion_info['canonical_name_string'] = "(" + profile_suggestion_info['canonical_id'][0][0] + ")"
profile_suggestion_info['canonical_id'] = str(profile_suggestion_info['canonical_id'][0][0])
else:
profile_suggestion_info['canonical_name_string'] = "(" + str(pid) + ")"
profile_suggestion_info['canonical_id'] = str(pid)
profile_suggestion_info['pid'] = pid
pinfo['profile_suggestion_info'] = profile_suggestion_info
return profile_suggestion_info
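# Sketch of the dictionary cached in pinfo['profile_suggestion_info']
# (all values are illustrative):
#
#   {'pid': 42,
#    'name_string': 'Doe, John',
#    'canonical_id': 'J.Doe.1',
#    'canonical_name_string': '(J.Doe.1)',
#    'num_of_arXiv_papers': 10,
#    'num_of_recids_intersection': 7}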
def claim_profile(uid, pid):
'''
Try to claim the profile pid for the user uid
@param uid: the user ID
@type uid: int
@param pid: the profile's id
@type pid: int
    @return: the pid, if the claim was successful
@rtype: int, boolean
'''
return dbapi.assign_person_to_uid(uid, pid)
def external_user_can_perform_action(uid):
'''
Check for SSO user and if external claims will affect the
    decision whether or not the user may use the Invenio claiming platform
@param uid: the user ID to check permissions for
@type uid: int
@return: is user allowed to perform actions?
@rtype: boolean
'''
# If no EXTERNAL_CLAIMED_RECORDS_KEY we bypass this check
if not bconfig.EXTERNAL_CLAIMED_RECORDS_KEY:
return True
uinfo = collect_user_info(uid)
keys = []
for k in bconfig.EXTERNAL_CLAIMED_RECORDS_KEY:
if k in uinfo:
keys.append(k)
full_key = False
for k in keys:
if uinfo[k]:
full_key = True
break
return full_key
def is_external_user(uid):
'''
Check for SSO user and if external claims will affect the
    decision whether or not the user may use the Invenio claiming platform
@param uid: the user ID to check permissions for
@type uid: int
@return: is user allowed to perform actions?
@rtype: boolean
'''
# If no EXTERNAL_CLAIMED_RECORDS_KEY we bypass this check
if not bconfig.EXTERNAL_CLAIMED_RECORDS_KEY:
return False
uinfo = collect_user_info(uid)
keys = []
for k in bconfig.EXTERNAL_CLAIMED_RECORDS_KEY:
if k in uinfo:
keys.append(k)
full_key = False
for k in keys:
if uinfo[k]:
full_key = True
break
return full_key
def check_transaction_permissions(uid, bibref, pid, action):
'''
Check if the user can perform the given action on the given pid,bibrefrec pair.
return in: granted, denied, warning_granted, warning_denied
@param uid: The internal ID of a user
@type uid: int
@param bibref: the bibref pair to check permissions for
@type bibref: string
@param pid: the Person ID to check on
@type pid: int
@param action: the action that is to be performed
@type action: string
@return: granted, denied, warning_granted xor warning_denied
@rtype: string
'''
c_own = True
c_override = False
is_superadmin = isUserSuperAdmin({'uid': uid})
access_right = _resolve_maximum_acces_rights(uid)
bibref_status = dbapi.get_status_of_signature(bibref)
old_flag = bibref_status[0]
if old_flag == 2 or old_flag == -2:
if action in ['assign']:
new_flag = 2
elif action in ['reject']:
new_flag = -2
elif action in ['reset']:
new_flag = 0
c_override = True
if dbapi.get_author_by_uid(uid) != int(pid):
c_own = False
# if we cannot override an already touched bibref, no need to go on checking
if c_override:
if is_superadmin:
return 'warning_granted'
if access_right[1] < bibref_status[1]:
return "denied"
else:
if is_superadmin:
return 'granted'
# let's check if invenio is allowing us the action we want to perform
if c_own:
action = bconfig.CLAIMPAPER_CLAIM_OWN_PAPERS
else:
action = bconfig.CLAIMPAPER_CLAIM_OTHERS_PAPERS
auth = acc_authorize_action(uid, action)
if auth[0] != 0:
return "denied"
    # now that we know whether we are claiming for ourselves, we can check the external permissions
if c_own:
action = 'claim_own_paper'
else:
action = 'claim_other_paper'
ext_permission = external_user_can_perform_action(uid)
    # if we are here, Invenio allows the action and we are not overriding a
    # user with higher privileges; if the external checks are ok we go on
if ext_permission:
if not c_override:
return "granted"
else:
return "warning_granted"
return "denied"
def delete_request_ticket(pid, tid):
'''
Delete a request ticket associated to a person
@param pid: pid (int)
@param ticket: ticket id (int)
'''
dbapi.remove_request_ticket_for_author(pid, tid)
def delete_transaction_from_request_ticket(pid, tid, action, bibrefrec):
'''
Deletes a transaction from a ticket. If ticket empty, deletes it.
@param pid: pid
@param tid: ticket id
@param action: action
@param bibref: bibref
'''
try:
request_ticket = dbapi.get_validated_request_tickets_for_author(pid, tid)[0]
except IndexError:
return
for operation in list(request_ticket['operations']):
op_action, op_bibrefrec = operation
if op_action == action and op_bibrefrec == bibrefrec:
request_ticket['operations'].remove(operation)
if not request_ticket['operations']:
dbapi.remove_request_ticket_for_author(pid, tid)
else:
dbapi.update_request_ticket_for_author(pid, request_ticket, tid)
def create_request_ticket(userinfo, ticket):
'''
Creates a request ticket and sends an email to RT.
    @param userinfo: dictionary of info about user
@param ticket: dictionary ticket
'''
udata = list()
mailcontent = list()
m = mailcontent.append
m("A user sent a change request through the web interface.")
m("User Information:")
for k, v in userinfo.iteritems():
udata.append([k, v])
if v:
m(" %s: %s" % (k, v))
m("\nOperations:")
tic = dict()
for op in ticket:
bibrefrec = op['bibref'] + ',' + str(op['rec'])
if not op['action'] in ['assign', 'reject', 'reset']:
return False
elif op['pid'] < 0:
return False
elif not is_valid_bibref(bibrefrec):
return False
# ignore reset operations
if op['action'] == 'reset':
continue
cname = get_person_redirect_link(op['pid'])
try:
tic[(op['pid'], cname)].append((op['action'], bibrefrec))
except KeyError:
tic[(op['pid'], cname)] = [(op['action'], bibrefrec),]
preposition = 'to' if op['action'] == 'assign' else 'from'
m(" %s %s %s %s" % (op['action'].title(), bibrefrec, preposition, cname))
m("\nLinks to all issued Person-based requests:\n")
for pid, cname in tic:
data = list()
for i in udata:
data.append(i)
data.append(['date', ctime()])
data.append(['operations', tic[(pid, cname)]])
dbapi.update_request_ticket_for_author(pid, dict(data))
m("%s/author/claim/%s?open_claim=True#tabTickets" % (CFG_SITE_URL, cname))
m("\nPlease remember that you have to be logged in "
"in order to see the ticket of a person.\n")
if ticket and tic and mailcontent:
sender = CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL
if bconfig.TICKET_SENDING_FROM_USER_EMAIL and userinfo['email']:
sender = userinfo['email']
send_email(sender,
CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL,
subject="[Author] Change Request",
content="\n".join(mailcontent))
return True
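# Sketch of the expected arguments (illustrative values only):
#
#   userinfo = {'uid-ip': '123||127.0.0.1', 'firstname': 'John',
#               'lastname': 'Doe', 'email': 'j.doe@example.org',
#               'comments': ''}
#   ticket = [{'pid': 42, 'action': 'assign',
#              'bibref': '100:4321', 'rec': 98765}]
#   create_request_ticket(userinfo, ticket)
#   # -> stores a request ticket for pid 42 and mails a summary to RT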
def create_request_message(userinfo, subj = None):
'''
Creates a request message
@param userinfo: dictionary of info about user
@type: dict
@param subj: the subject of the message
@param subj: string
'''
mailcontent = []
for info_type in userinfo:
mailcontent.append(info_type + ': ')
mailcontent.append(str(userinfo[info_type]) + '\n')
sender = CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL
if bconfig.TICKET_SENDING_FROM_USER_EMAIL and userinfo['email']:
sender = userinfo['email']
if not subj:
subj = "[Author] Help Request"
send_email(sender,
CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL,
subject=subj,
content="\n".join(mailcontent))
def send_user_commit_notification_email(userinfo, ticket):
'''
Sends commit notification email to RT system
'''
# send eMail to RT
mailcontent = []
m = mailcontent.append
m("A user committed a change through the web interface.")
m("User Information:")
for k, v in userinfo.iteritems():
if v:
m(" %s: %s" % (k, v))
m("\nChanges:\n")
for t in ticket:
m(" --- <start> --- \n")
for k, v in t.iteritems():
m(" %s: %s \n" % (str(k), str(v)))
if k == 'bibref':
try:
br = int(v.split(',')[1])
m(" Title: %s\n" % search_engine.get_fieldvalues(br, "245__a"))
except (TypeError, ValueError, IndexError):
pass
m(" --- <end> --- \n")
if ticket and mailcontent:
sender = CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL
send_email(sender,
CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL,
subject="[Author] NO ACTIONS NEEDED. Changes performed by SSO user.",
content="\n".join(mailcontent))
return True
def user_can_view_CMP(uid):
action = bconfig.CLAIMPAPER_VIEW_PID_UNIVERSE
auth = acc_authorize_action(uid, action)
if auth[0] == 0:
return True
else:
return False
def _resolve_maximum_acces_rights(uid):
'''
returns [max_role, lcul] to use in execute_action and check_transaction_permissions.
Defaults to ['guest',0] if user has no roles assigned.
Always returns the maximum privilege.
'''
roles = {bconfig.CLAIMPAPER_ADMIN_ROLE: acc_get_role_id(bconfig.CLAIMPAPER_ADMIN_ROLE),
bconfig.CLAIMPAPER_USER_ROLE: acc_get_role_id(bconfig.CLAIMPAPER_USER_ROLE)}
uroles = acc_get_user_roles(uid)
max_role = ['guest', 0]
for r in roles:
if roles[r] in uroles:
rright = bconfig.CMPROLESLCUL[r]
if rright >= max_role[1]:
max_role = [r, rright]
return max_role
def create_new_person(uid, uid_is_owner=False):
'''
Create a new person.
@param uid: User ID to attach to the person
@type uid: int
@param uid_is_owner: Is the uid provided owner of the new person?
@type uid_is_owner: bool
@return: the resulting person ID of the new person
@rtype: int
'''
pid = dbapi.create_new_author_by_uid(uid, uid_is_owner=uid_is_owner)
return pid
def execute_action(action, pid, bibref, uid, userinfo='', comment=''):
'''
Executes the action, setting the last user right according to uid
@param action: the action to perform
@type action: string
@param pid: the Person ID to perform the action on
@type pid: int
@param bibref: the bibref pair to perform the action for
@type bibref: string
@param uid: the internal user ID of the currently logged in user
@type uid: int
@return: list of a tuple: [(status, message), ] or None if something went wrong
@rtype: [(bool, str), ]
'''
pid = wash_integer_id(pid)
if not action in ['assign', 'reject', 'reset']:
return None
elif pid == bconfig.CREATE_NEW_PERSON:
pid = create_new_person(uid, uid_is_owner=False)
elif pid < 0:
return None
elif not is_valid_bibref(bibref):
return None
if userinfo.count('||'):
uid = userinfo.split('||')[0]
else:
uid = ''
user_level = _resolve_maximum_acces_rights(uid)[1]
res = None
if action in ['assign']:
dbapi.insert_user_log(userinfo, pid, 'assign', 'CMPUI_ticketcommit', bibref, comment, userid=uid)
res = dbapi.confirm_papers_to_author(pid, [bibref], user_level)
elif action in ['reject']:
dbapi.insert_user_log(userinfo, pid, 'reject', 'CMPUI_ticketcommit', bibref, comment, userid=uid)
res = dbapi.reject_papers_from_author(pid, [bibref], user_level)
elif action in ['reset']:
dbapi.insert_user_log(userinfo, pid, 'reset', 'CMPUI_ticketcommit', bibref, comment, userid=uid)
res = dbapi.reset_papers_of_author(pid, [bibref])
# This is the only point which modifies a person, so this can trigger the
# deletion of a cached page
webauthorapi.expire_all_cache_for_personid(pid)
return res
def sign_assertion(robotname, assertion):
'''
Sign an assertion for the export of IDs
@param robotname: name of the robot. E.g. 'arxivz'
@type robotname: string
@param assertion: JSONized object to sign
@type assertion: string
@return: The signature
@rtype: string
'''
robotname = ""
secr = ""
if not robotname:
return ""
robot = ExternalAuthRobot()
keys = load_robot_keys()
try:
secr = keys["Robot"][robotname]
except:
secr = ""
return robot.sign(secr, assertion)
def get_orcids_by_pid(pid):
orcids = dbapi.get_orcid_id_of_author(pid)
return tuple(str(x[0]) for x in orcids)
def add_orcid_to_pid(pid, orcid):
if orcid in get_orcids_by_pid(pid):
return
dbapi.add_orcid_id_to_author(pid, orcid)
webauthorapi.expire_all_cache_for_personid(pid)
def get_person_info_by_pid(pid):
'''
Collect person's info such as name variants, name and canonical_id
by his person id
@param uid: the person id of the user
@type uid: int
@return: person's info
@rtype: dict
'''
person_info = dict()
person_info['pid'] = pid
name_variants = [x for (x,y) in get_person_db_names_from_id(pid)]
person_info['name'] = most_relevant_name(name_variants)
person_info['canonical_name'] = get_canonical_id_from_person_id(pid)
return person_info
############################################
# Ticket Functions #
############################################
def add_tickets(req, pid, bibrefs, action):
'''
    Adds the missing bibrecs to the ticket
@param req: Apache request object
@type req: Apache request object
@param pid: the Person id
@type pid: int
@param bibrefs: the missing records that need to be autoclaimed
@type bibrefs: list
@param action: the action that is required to be performed on the tickets
@type action: string
'''
session = get_session(req)
pinfo = session["personinfo"]
ticket = pinfo["ticket"]
# the user wanted to create a new person to resolve the tickets to it
if pid == bconfig.CREATE_NEW_PERSON:
uid = getUid(req)
pid = create_new_person(uid)
tempticket = []
for bibref in bibrefs:
tempticket.append({'pid': pid, 'bibref': bibref, 'action': action})
# check if ticket targets (bibref for pid) are already in ticket
for t in tempticket:
tempticket_is_valid_bibref = is_valid_bibref(t['bibref'])
should_append = True
for e in list(ticket):
ticket_is_valid_bibref = is_valid_bibref(e['bibref'])
# if they are the same leave ticket as it is and continue to the next tempticket
if e['bibref'] == t['bibref'] and e['pid'] == t['pid']:
ticket.remove(e)
break
# if we are comparing two different bibrefrecs with the same recids we remove the current bibrefrec and we add their recid
elif e['pid'] == t['pid'] and tempticket_is_valid_bibref and ticket_is_valid_bibref and t['bibref'].split(',')[1] == e['bibref'].split(',')[1]:
ticket.remove(e)
ticket.append({'pid': pid, 'bibref': t['bibref'].split(',')[1], 'action': action})
should_append = False
break
elif e['pid'] == t['pid'] and is_valid_bibref(e['bibref']) and str(t['bibref']) == e['bibref'].split(',')[1]:
should_append = False
break
elif e['pid'] == t['pid'] and is_valid_bibref(t['bibref']) and str(e['bibref']) == t['bibref'].split(',')[1]:
ticket.remove(e)
break
if should_append:
ticket.append(t)
session.dirty = True
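# Sketch of the resulting session entries (values are hypothetical): each
# transaction is a dict, and 'bibref' may be either a full bibrefrec
# ('100:4321,98765') or a bare recid that still has to be resolved later by
# guess_signature/ticket_review.
#
#   add_tickets(req, 42, ['100:4321,98765', 12345], 'assign')
#   # session['personinfo']['ticket'] then contains e.g.
#   # [{'pid': 42, 'bibref': '100:4321,98765', 'action': 'assign'},
#   #  {'pid': 42, 'bibref': 12345, 'action': 'assign'}]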
#def manage_tickets(req, autoclaim_show_review, autoclaim):
# '''
# managing the tickets. This involves reviewing them, try to guess the correct one if a ticket is incomplete,
# give them to the user for a review, handle the results of that review, check if the permissions to commit a ticket are granted, commit the ticket if possible
#
# @param req: Apache request object
# @type req: Apache request object
#
# @param autoclaim_show_review: shows if the user pressed the button review auto assigned in his manage profile page
# @type autoclaim_show_review: boolean
#
# @param bibrefs: shows if we are autoassigning papers or not
# @type bibrefs: list
#
# '''
# session = get_session(req)
# pinfo = session["personinfo"]
# ticket = pinfo["ticket"]
#
# page_info = dict()
# # check if there is user review that needs to be handled
# reviews_to_handle = is_ticket_review_handling_required(req)
#
# if not reviews_to_handle:
# # check if the tickets need review
# is_required, incomplete_tickets = is_ticket_review_required(req)
#
# if is_required:
# # if review is required and we are not in the workflow that builds the autoassigned papers box of the manage profile page
# # then it returns to the user for review
# if not autoclaim or autoclaim_show_review:
# bibrefs_auto_assigned, bibrefs_to_confirm = ticket_review(req, incomplete_tickets)
# page_info['type'] = 'Submit Attribution'
# page_info['title'] = 'Submit Attribution Information'
# page_info['body_params'] = [bibrefs_auto_assigned, bibrefs_to_confirm]
# return page_info
# else:
# # tries to guess the incomplete tickets, move the still incomplete to their storage, and user can review them by clicking the button
# # of the autoassigned papers box in the manage profile page
# guess_signature(req, incomplete_tickets)
# failed_to_autoclaim_tickets = []
# for t in list(ticket):
# if 'incomplete' in t:
# failed_to_autoclaim_tickets.append(t)
# ticket.remove(t)
# store_incomplete_autoclaim_tickets(req, failed_to_autoclaim_tickets)
# session.dirty = True
# else:
# handle_ticket_review_results(req, autoclaim_show_review)
#
# for t in ticket:
# if 'incomplete' in t:
# assert False, "Wtf one ticket is incomplete " + str(pinfo)
# if ',' not in str(t['bibref']) or ':' not in str(t['bibref']):
# assert False, "Wtf one ticket is invalid " + str(pinfo)
# uid = getUid(req)
#
# for t in ticket:
# # TODO be carefull if an admin connects through arxiv
# t['status'] = check_transaction_permissions(uid,
# t['bibref'],
# t['pid'],
# t['action'])
# failed_to_autoclaim_tickets = []
# if autoclaim and not autoclaim_show_review:
# for t in ticket:
# if 'status' not in t or t['status'] != 'granted':
# failed_to_autoclaim_tickets.append(t)
# ticket.remove(t)
# store_incomplete_autoclaim_tickets(req, failed_to_autoclaim_tickets)
#
# session.dirty = True
#
# add_user_data_to_ticket(req)
#
# if not can_commit_ticket(req):
# mark_yours, mark_not_yours, mark_theirs, mark_not_theirs = confirm_valid_ticket(req)
# page_info['type'] = 'review actions'
# page_info['title'] = 'Please review your actions'
# page_info['body_params'] = [mark_yours, mark_not_yours, mark_theirs, mark_not_theirs]
# return page_info
#
# ticket_commit(req)
# page_info['type'] = 'dispatch end'
# return page_info
def confirm_valid_ticket(req):
'''
    shows the user what can/cannot finally be done
'''
session = get_session(req)
pinfo = session["personinfo"]
ticket = pinfo["ticket"]
ticket = [row for row in ticket if not "execution_result" in row]
upid = pinfo["upid"]
for tt in list(ticket):
        if 'bibref' not in tt or 'pid' not in tt:
            # malformed transaction: ticket is a list, so remove the element by value
            ticket.remove(tt)
continue
tt['authorname_rec'] = dbapi.get_bibrefrec_name_string(tt['bibref'])
tt['person_name'] = get_most_frequent_name_from_pid(tt['pid'])
mark_yours = []
mark_not_yours = []
if upid >= 0:
mark_yours = [row for row in ticket
if (str(row["pid"]) == str(upid) and
row["action"] in ["to_other_person", "assign"])]
mark_not_yours = [row for row in ticket
if (str(row["pid"]) == str(upid) and
row["action"] in ["reject", "reset"])]
mark_theirs = [row for row in ticket
if ((not str(row["pid"]) == str(upid)) and
row["action"] in ["to_other_person", "assign"])]
mark_not_theirs = [row for row in ticket
if ((not str(row["pid"]) == str(upid)) and
row["action"] in ["reject", "reset"])]
session.dirty = True
return mark_yours, mark_not_yours, mark_theirs, mark_not_theirs
def guess_signature(req, incomplete_tickets):
'''
Tries to guess a bibrecref based on a recid and names of the person. It writes the fix directly in the session
@param req: apache request object
@type req: apache request object
@param incomplete_tickets: list of incomplete tickets
@type incomplete_tickets: list
'''
session = get_session(req)
pinfo = session["personinfo"]
tickets = pinfo["ticket"]
if 'arxiv_name' in pinfo:
arxiv_name = [pinfo['arxiv_name']]
else:
arxiv_name = None
for incomplete_ticket in incomplete_tickets:
# convert recid from string to int
recid = wash_integer_id(incomplete_ticket['bibref'])
if recid < 0:
# this doesn't look like a recid--discard!
tickets.remove(incomplete_ticket)
else:
pid = incomplete_ticket['pid']
possible_signatures_per_rec = get_possible_bibrefs_from_pid_bibrec(pid, [recid], additional_names=arxiv_name)
for [rec, possible_signatures] in possible_signatures_per_rec:
# if there is only one bibreceref candidate for the given recid
if len(possible_signatures) == 1:
# fix the incomplete ticket with the retrieved bibrecref
for ticket in list(tickets):
if incomplete_ticket['bibref'] == ticket['bibref'] and incomplete_ticket['pid'] == ticket['pid']:
ticket['bibref'] = possible_signatures[0][0]+','+str(rec)
ticket.pop('incomplete', True)
break
session.dirty = True
def ticket_review(req, needs_review):
'''
Tries to guess the full ticket if incomplete and when finished it shows all the tickets to the user to review them
@param req: apache request object
@type req: apache request object
@param needs_review: list of incomplete tickets
@type needs_review: list
'''
session = get_session(req)
pinfo = session["personinfo"]
tickets = pinfo["ticket"]
if 'arxiv_name' in pinfo:
arxiv_name = [pinfo['arxiv_name']]
else:
arxiv_name = None
bibrefs_auto_assigned = {}
bibrefs_to_confirm = {}
guess_signature(req, needs_review)
for ticket in list(tickets):
pid = ticket['pid']
person_name = get_most_frequent_name_from_pid(pid, allow_none=True)
if not person_name:
if arxiv_name:
person_name = ''.join(arxiv_name)
else:
person_name = " "
if 'incomplete' not in ticket:
recid = get_bibrec_from_bibrefrec(ticket['bibref'])
if recid == -1:
# No bibrefs on record--discard
tickets.remove(ticket)
continue
bibrefs_per_recid = get_bibrefs_from_bibrecs([recid])
for bibref in bibrefs_per_recid[0][1]:
if bibref[0] == ticket['bibref'].split(",")[0]:
most_possible_bibref = bibref
bibrefs_per_recid[0][1].remove(bibref)
sorted_bibrefs = most_possible_bibref + sorted(bibrefs_per_recid[0][1], key=lambda x: x[1])
if not pid in bibrefs_to_confirm:
bibrefs_to_confirm[pid] = {
'person_name': person_name,
'canonical_id': "TBA",
'bibrecs': {recid: sorted_bibrefs}}
else:
bibrefs_to_confirm[pid]['bibrecs'][recid] = sorted_bibrefs
else:
# convert recid from string to int
recid = wash_integer_id(ticket['bibref'])
bibrefs_per_recid = get_bibrefs_from_bibrecs([recid])
try:
name = bibrefs_per_recid[0][1]
sorted_bibrefs = sorted(name, key=lambda x: x[1])
except IndexError:
# No bibrefs on record--discard
tickets.remove(ticket)
continue
# and add it to bibrefs_to_confirm list
if not pid in bibrefs_to_confirm:
bibrefs_to_confirm[pid] = {
'person_name': person_name,
'canonical_id': "TBA",
'bibrecs': {recid: sorted_bibrefs}}
else:
bibrefs_to_confirm[pid]['bibrecs'][recid] = sorted_bibrefs
if bibrefs_to_confirm or bibrefs_auto_assigned:
pinfo["bibref_check_required"] = True
baa = deepcopy(bibrefs_auto_assigned)
btc = deepcopy(bibrefs_to_confirm)
for pid in baa:
for rid in baa[pid]['bibrecs']:
baa[pid]['bibrecs'][rid] = []
for pid in btc:
for rid in btc[pid]['bibrecs']:
btc[pid]['bibrecs'][rid] = []
pinfo["bibrefs_auto_assigned"] = baa
pinfo["bibrefs_to_confirm"] = btc
else:
pinfo["bibref_check_required"] = False
session.dirty = True
return bibrefs_auto_assigned, bibrefs_to_confirm
def add_user_data_to_ticket(req):
session = get_session(req)
uid = getUid(req)
userinfo = collect_user_info(uid)
pinfo = session["personinfo"]
upid = -1
user_first_name = ""
user_first_name_sys = False
user_last_name = ""
user_last_name_sys = False
user_email = ""
user_email_sys = False
if ("external_firstname" in userinfo
and userinfo["external_firstname"]):
user_first_name = userinfo["external_firstname"]
user_first_name_sys = True
elif "user_first_name" in pinfo and pinfo["user_first_name"]:
user_first_name = pinfo["user_first_name"]
if ("external_familyname" in userinfo
and userinfo["external_familyname"]):
user_last_name = userinfo["external_familyname"]
user_last_name_sys = True
elif "user_last_name" in pinfo and pinfo["user_last_name"]:
user_last_name = pinfo["user_last_name"]
if ("email" in userinfo
and not userinfo["email"] == "guest"):
user_email = userinfo["email"]
user_email_sys = True
elif "user_email" in pinfo and pinfo["user_email"]:
user_email = pinfo["user_email"]
pinfo["user_first_name"] = user_first_name
pinfo["user_first_name_sys"] = user_first_name_sys
pinfo["user_last_name"] = user_last_name
pinfo["user_last_name_sys"] = user_last_name_sys
pinfo["user_email"] = user_email
pinfo["user_email_sys"] = user_email_sys
# get pid by user id
if "upid" in pinfo and pinfo["upid"]:
upid = pinfo["upid"]
else:
upid = get_pid_from_uid(uid)
pinfo["upid"] = upid
session.dirty = True
def can_commit_ticket(req):
'''
    checks if the tickets can be committed
@param req: apache request object
@type req: apache request object
'''
session = get_session(req)
pinfo = session["personinfo"]
ticket = pinfo["ticket"]
ticket = [row for row in ticket if not "execution_result" in row]
skip_checkout_page = True
skip_checkout_page2 = True
if not (pinfo["user_first_name"] or pinfo["user_last_name"] or pinfo["user_email"]):
skip_checkout_page = False
if [row for row in ticket
if row["status"] in ["denied", "warning_granted",
"warning_denied"]]:
skip_checkout_page2 = False
if (not ticket or skip_checkout_page2
or ("checkout_confirmed" in pinfo
and pinfo["checkout_confirmed"]
and "checkout_faulty_fields" in pinfo
and not pinfo["checkout_faulty_fields"]
and skip_checkout_page)):
return True
return False
#def clean_ticket(req):
# '''
# Removes from a ticket the transactions with an execution_result flag
# '''
# session = get_session(req)
# pinfo = session["personinfo"]
# ticket = pinfo["ticket"]
# for t in list(ticket):
# if 'execution_result' in t:
# ticket.remove(t)
# session.dirty = True
def is_ticket_review_handling_required(req):
'''
checks if the results of ticket reviewing should be handled
@param req: Apache request object
@type req: Apache request object
'''
session = get_session(req)
pinfo = session["personinfo"]
# if check is needed
if ("bibref_check_required" in pinfo and pinfo["bibref_check_required"]
and "bibref_check_reviewed_bibrefs" in pinfo):
return True
return False
def handle_ticket_review_results(req, autoclaim):
'''
handle the results of ticket reviewing by either fixing tickets or removing them based on the review performed
@param req: Apache request object
@type req: Apache request object
'''
session = get_session(req)
pinfo = session["personinfo"]
ticket = pinfo["ticket"]
# for every bibref in need of review
for rbibreft in pinfo["bibref_check_reviewed_bibrefs"]:
        # if it's not in proper form skip it (the || delimiter is added in the bibauthorid_templates:tmpl_bibref_check function, the comma delimiter
        # is added in the bibauthorid_webinterface:action function)
# rbibreft ex: 'pid||bibrecref','8||100:4,45'
if not rbibreft.count("||") or not rbibreft.count(","):
continue
# get pid and bibrecref
rpid, rbibref = rbibreft.split("||")
# get recid out of bibrecref
rrecid = rbibref.split(",")[1]
# convert string pid to int
rpid = wash_integer_id(rpid)
# updating ticket status with fixed bibrefs
# and removing them from incomplete
for ticket_update in [row for row in ticket
if (str(row['bibref']) == str(rrecid) and
str(row['pid']) == str(rpid))]:
ticket_update["bibref"] = rbibref
if "incomplete" in ticket_update:
del(ticket_update["incomplete"])
session.dirty = True
    # tickets that couldn't be fixed will be removed or, if they were to be autoclaimed, they will be stored elsewhere
if autoclaim:
failed_to_autoclaim_tickets = []
for ticket_remove in [row for row in ticket
if ('incomplete' in row)]:
failed_to_autoclaim_tickets.append(ticket_remove)
ticket.remove(ticket_remove)
if failed_to_autoclaim_tickets:
store_incomplete_autoclaim_tickets(req, failed_to_autoclaim_tickets)
else:
for ticket_remove in [row for row in ticket
if ('incomplete' in row)]:
ticket.remove(ticket_remove)
    # delete also all bibrefs_auto_assigned, bibrefs_to_confirm and bibref_check_reviewed_bibrefs since they have been handled
if ("bibrefs_auto_assigned" in pinfo):
del(pinfo["bibrefs_auto_assigned"])
if ("bibrefs_to_confirm" in pinfo):
del(pinfo["bibrefs_to_confirm"])
del(pinfo["bibref_check_reviewed_bibrefs"])
# now there is no check required
pinfo["bibref_check_required"] = False
session.dirty = True
def is_ticket_review_required(req):
'''
    checks if there are transactions inside the ticket in need of review
@param req: Apache request object
@type req: Apache request object
@return: returns if review is required plus the list of the tickets to be reviewed
@rtype: tuple(boolean, list)
'''
session = get_session(req)
pinfo = session["personinfo"]
ticket = pinfo["ticket"]
needs_review = []
    # for every transaction in the ticket check if there are transactions that require review
for transaction in ticket:
if not is_valid_bibref(transaction['bibref']):
transaction['incomplete'] = True
needs_review.append(transaction)
session.dirty = True
if not needs_review:
return (False, [])
return (True, needs_review)
def restore_users_open_tickets(req):
'''
    restores any of the user's open tickets from storage back into the session, as autoclaiming has finished
@param req: Apache request object
@type req: Apache request object
'''
session_bareinit(req)
session = get_session(req)
ticket = session['personinfo']['ticket']
temp_storage = session['personinfo']['users_open_tickets_storage']
for t in list(temp_storage):
ticket.append(t)
temp_storage.remove(t)
temp_storage = []
def store_users_open_tickets(req):
'''
    stores any of the user's open tickets elsewhere until we have processed the autoclaimed tickets
@param req: Apache request object
@type req: Apache request object
'''
session_bareinit(req)
session = get_session(req)
ticket = session['personinfo']['ticket']
temp_storage = session['personinfo']['users_open_tickets_storage']
for t in list(ticket):
temp_storage.append(t)
ticket.remove(t)
def store_incomplete_autoclaim_tickets(req, failed_to_autoclaim_tickets):
'''
    stores incomplete autoclaim tickets elsewhere, waiting for user intervention, in order not to mess with new tickets
@param req: Apache request object
@type req: Apache request object
'''
session_bareinit(req)
session = get_session(req)
temp_storage = session['personinfo']['incomplete_autoclaimed_tickets_storage']
for incomplete_ticket in failed_to_autoclaim_tickets:
if incomplete_ticket not in temp_storage:
temp_storage.append(incomplete_ticket)
def restore_incomplete_autoclaim_tickets(req):
'''
    restores any incomplete autoclaimed tickets from storage back into the session ticket
@param req: Apache request object
@type req: Apache request object
'''
session_bareinit(req)
session = get_session(req)
ticket = session['personinfo']['ticket']
temp_storage = session['personinfo']['incomplete_autoclaimed_tickets_storage']
for t in list(temp_storage):
ticket.append(t)
temp_storage.remove(t)
def get_stored_incomplete_autoclaim_tickets(req):
'''
    gets the tickets whose autoclaim to the user profile was unsuccessful
@param req: Apache request object
@type req: Apache request object
'''
session_bareinit(req)
session = get_session(req)
temp_storage = session['personinfo']['incomplete_autoclaimed_tickets_storage']
return temp_storage
def add_cname_to_hepname_record(cname, recid, uid=None):
"""
    Schedule a BibUpload that will append the given canonical name (BAI) to the specified record.
"""
rec = {}
record_add_field(rec, '001', controlfield_value=str(recid))
record_add_field(rec,
tag=CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3],
ind1=CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3:4],
ind2=CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4:5],
subfields=[
(CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[5:6], str(cname)),
(CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG[5:6], 'BAI')])
tmp_file_fd, tmp_file_name = retry_mkstemp(suffix='.xml', prefix="bibauthorid-%s" % recid)
tmp_file = os.fdopen(tmp_file_fd, "w")
tmp_file.write(record_xml_output(rec))
tmp_file.close()
task_low_level_submission('bibupload', get_nickname(uid) or "", "-a", tmp_file_name, "-P5", "-N", "bibauthorid")
def connect_author_with_hepname(cname, hepname):
subject = "HepNames record match: %s %s" % (cname, hepname)
content = "Hello! Please connect the author profile %s " \
"with the HepNames record %s. Best regards" % (cname, hepname)
send_email(CFG_WEBAUTHORPROFILE_CFG_HEPNAMES_EMAIL,
CFG_WEBAUTHORPROFILE_CFG_HEPNAMES_EMAIL,
subject=subject,
content=content)
def connect_author_with_orcid(cname, orcid):
subject = "ORCiD record match: %s %s" % (cname, orcid)
content = "Hello! Please connect the author profile %s " \
"with the HepNames record %s. Best regards" % (cname, orcid)
send_email(CFG_WEBAUTHORPROFILE_CFG_HEPNAMES_EMAIL,
CFG_WEBAUTHORPROFILE_CFG_HEPNAMES_EMAIL,
subject=subject,
content=content)
############################################
# Exposed Ticket Functions #
############################################
def construct_operation(operation_parts, pinfo, uid, should_have_bibref=False):
pid = operation_parts['pid']
if pid == bconfig.CREATE_NEW_PERSON:
pid = create_new_person(uid)
action = operation_parts['action']
bibref, rec = split_bibrefrec(operation_parts['bibrefrec'])
bibrefs = None
if rec < 0 or pid < 0 or action not in ['assign', 'reject', 'reset']:
return None
if bibref is None:
bibref = _guess_bibref(pid, rec, pinfo)
if bibref is None:
bibrefs = dbapi.get_all_signatures_of_paper(rec)
# No bibref specified and no bibref candidates to select from.
if not bibref and not bibrefs:
send_email(CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL,
CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL,
subject="[Author] No authors on record: %s" % rec,
content="No authors seem to exist on record %s" % rec)
return None
if should_have_bibref and not bibref:
return None
operation = {'pid': pid,
'action': action,
'rec': rec,
'bibref': bibref,
'has_bibref': bibref is not None,
'bibrefs': bibrefs,
'has_all_metadata': False }
return operation
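# Usage sketch (hypothetical identifiers): building an operation from the
# parameters posted by the web interface and queueing it on the ticket.
#
#   op = construct_operation({'pid': 42, 'action': 'assign',
#                             'bibrefrec': '100:4321,98765'}, pinfo, uid)
#   if op is not None:
#       add_operation_to_ticket(op, pinfo['ticket'])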
def fill_out_userinfo(additional_info, uid, ip, ulevel, strict_check=True):
if strict_check:
if not additional_info['first_name'] or not additional_info['last_name'] or not email_valid_p(additional_info['email']):
return None
userinfo = {'uid-ip': '%s||%s' % (uid, ip),
'comments': additional_info['comments'],
'firstname': additional_info['first_name'],
'lastname': additional_info['last_name'],
'email': additional_info['email']}
if ulevel in ['guest', 'user'] and not userinfo['comments']:
userinfo['comments'] = 'No comments submitted.'
return userinfo
def get_ticket_status(ticket):
for op in ticket:
_fill_out_operation(op)
return ticket
def update_ticket_status(ticket):
clean_ticket(ticket)
def add_operation_to_ticket(op, ticket):
not_commited_operations = list(ticket)
clean_ticket(not_commited_operations)
for existing_op in not_commited_operations:
if existing_op['pid'] == op['pid'] and existing_op['rec'] == op['rec']:
# if the operation already exists don't do anything
if existing_op['bibref'] == op['bibref'] and existing_op['action'] == op['action']:
return False
            # if an existing operation differs in the bibref or the action, replace the old one with the new one
ticket.remove(existing_op)
ticket.append(op)
return True
# the operation doesn't exist in the ticket so it is added
ticket.append(op)
return True
def modify_operation_from_ticket(updated_op, ticket):
not_commited_operations = list(ticket)
clean_ticket(not_commited_operations)
for existing_op in not_commited_operations:
if existing_op['pid'] == updated_op['pid'] and existing_op['rec'] == updated_op['rec']:
# Preserve bibrefs
updated_op['bibrefs'] = existing_op['bibrefs']
            # if an existing operation differs in the bibref or the action, replace the old one with the new one
ticket.remove(existing_op)
ticket.append(updated_op)
return True
# the operation doesn't exist in the ticket
return False
def remove_operation_from_ticket(op, ticket):
not_commited_operations = list(ticket)
clean_ticket(not_commited_operations)
for existing_op in not_commited_operations:
if existing_op['pid'] == op['pid'] and existing_op['rec'] == op['rec']:
# if an existing operation differs in the bibref or the action, delete it
ticket.remove(existing_op)
return True
# the operation doesn't exist in the ticket
return False
def commit_operations_from_ticket(ticket, userinfo, uid, ulevel):
incomplete_operations = list()
for op in list(ticket):
if not op['has_bibref']:
ticket.remove(op)
incomplete_operations.append(op)
for op in ticket:
bibrefrec = op['bibref'] + ',' + str(op['rec'])
op['status'] = _check_operation_permission(uid, bibrefrec, op['pid'], op['action'])
_commit_ticket(ticket, userinfo, uid, ulevel)
ticket += incomplete_operations
def abort_ticket(ticket, delete_ticket=True):
if delete_ticket:
for op in list(ticket):
ticket.remove(op)
def clean_ticket(ticket):
for op in list(ticket):
if 'execution_result' in op:
ticket.remove(op)
############################################
# Not Exposed Ticket Functions #
############################################
def split_bibrefrec(bibrefrec):
if is_valid_bibref(bibrefrec):
bibref, rec = bibrefrec.split(',')
rec = int(rec)
else:
bibref = None
rec = int(bibrefrec)
return bibref, rec
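# Example of the two accepted input formats (values are illustrative):
#
#   split_bibrefrec('100:4321,98765')  # -> ('100:4321', 98765)
#   split_bibrefrec('98765')           # -> (None, 98765)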
def _guess_bibref(pid, rec, pinfo):
try:
arxiv_names = [pinfo['arxiv_name']]
except KeyError:
arxiv_names = list()
_, possible_signatures = get_possible_bibrefs_from_pid_bibrec(pid, [rec], additional_names=arxiv_names)[0]
if len(possible_signatures) == 1:
return possible_signatures[0][0]
return None
def _fill_out_operation(op):
if not op['has_all_metadata'] and not 'execution_result' in op:
op['rec_title'] = dbapi.get_title_of_paper(op['rec'])
try:
op['cname'] = dbapi.get_canonical_name_of_author(op['pid'])[0][0]
except IndexError:
op['cname'] = None
op['has_all_metadata'] = True
def _check_operation_permission(uid, bibrefrec, pid, action):
# user is superadmin so transaction permission is granted
is_superadmin = isUserSuperAdmin({'uid': uid})
if is_superadmin:
return 'granted'
owner_of_paper = False
if pid == dbapi.get_author_by_uid(uid):
owner_of_paper = True
if owner_of_paper:
action = bconfig.CLAIMPAPER_CLAIM_OWN_PAPERS
else:
action = bconfig.CLAIMPAPER_CLAIM_OTHERS_PAPERS
auth, _ = acc_authorize_action(uid, action)
if auth != 0:
return 'denied'
old_flag, old_lcul = dbapi.get_status_of_signature(bibrefrec)
override_claim = False
if old_flag in [2, -2]:
override_claim = True
if not override_claim:
return 'granted'
access_right = _resolve_maximum_acces_rights(uid)
if override_claim and access_right[1] >= old_lcul:
return 'granted'
return 'denied'
def _commit_ticket(ticket, userinfo, uid, ulevel):
def commit_ticket_guest(ticket, userinfo, uid, modified_pids):
create_request_ticket(userinfo, ticket)
for op in ticket:
op['execution_result'] = {'success': True, 'operation': 'ticketized'}
def commit_ticket_user(ticket, userinfo, uid, modified_pids):
ok_ops = list()
for op in list(ticket):
if op['status'] == 'granted':
bibrefrec = op['bibref'] + ',' + str(op['rec'])
op['execution_result'] = _execute_operation(op['action'], op['pid'], bibrefrec, uid, userinfo['uid-ip'], str(userinfo))
# This is the only point which modifies a person,
# so this can trigger the deletion of a cached page.
modified_pids.add(op['pid'])
ok_ops.append(op)
ticket.remove(op)
if ticket:
create_request_ticket(userinfo, ticket)
if CFG_INSPIRE_SITE and ok_ops:
send_user_commit_notification_email(userinfo, ok_ops)
for op in ticket:
op['execution_result'] = {'success': True, 'operation': 'ticketized'}
ticket += ok_ops
def commit_ticket_admin(ticket, userinfo, uid, modified_pids):
for op in ticket:
bibrefrec = op['bibref'] + ',' + str(op['rec'])
op['execution_result'] = _execute_operation(op['action'], op['pid'], bibrefrec, uid, userinfo['uid-ip'], str(userinfo))
# This is the only point which modifies a person,
# so this can trigger the deletion of a cached page.
modified_pids.add(op['pid'])
commit = {'guest': commit_ticket_guest,
'user': commit_ticket_user,
'admin': commit_ticket_admin}
modified_pids = set()
not_already_executed_ops = [t for t in ticket if 'execution_result' not in t]
commit[ulevel](not_already_executed_ops, userinfo, uid, modified_pids)
for pid in modified_pids:
webauthorapi.expire_all_cache_for_personid(pid)
def _execute_operation(action, pid, bibrefrec, uid, userinfo='', comment=''):
res = None
user_level = _resolve_maximum_acces_rights(uid)[1]
if action == 'assign':
dbapi.insert_user_log(userinfo, pid, 'assign', 'CMPUI_ticketcommit', bibrefrec, comment, userid=uid)
res = dbapi.confirm_papers_to_author(pid, [bibrefrec], user_level)[0]
elif action == 'reject':
dbapi.insert_user_log(userinfo, pid, 'reject', 'CMPUI_ticketcommit', bibrefrec, comment, userid=uid)
res = dbapi.reject_papers_from_author(pid, [bibrefrec], user_level)[0]
elif action == 'reset':
dbapi.insert_user_log(userinfo, pid, 'reset', 'CMPUI_ticketcommit', bibrefrec, comment, userid=uid)
res = dbapi.confirm_papers_to_author(pid, [bibrefrec], user_level)[0]
res = dbapi.reset_papers_of_author(pid, [bibrefrec])[0]
return res
############################################
# Exposed Autoclaim-relevant Functions #
############################################
def get_login_info(uid, params):
login_info = {'logged_in_to_remote_systems': list(),
'uid': uid,
'logged_in': uid != 0}
for system in CFG_BIBAUTHORID_ENABLED_REMOTE_LOGIN_SYSTEMS:
if IS_LOGGED_IN_THROUGH[system](params[system]):
login_info['logged_in_to_remote_systems'].append(system)
return login_info
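# Sketch of the expected params argument: one entry per enabled remote
# system, holding whatever its _is_logged_in_through_* helper inspects
# (the user_info dict for arXiv, the session orcid dict for orcid).
# The keys shown below are assumptions based on those helpers.
#
#   params = {'arXiv': collect_user_info(req),
#             'orcid': session['personinfo']['orcid']}
#   login_info = get_login_info(uid, params)
#   # -> {'uid': uid, 'logged_in': True,
#   #     'logged_in_to_remote_systems': ['arXiv']}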
def get_papers_from_remote_systems(remote_systems, params, external_pubs_association):
pubs = list()
for system in remote_systems:
pubs += GET_PUBS_FROM_REMOTE_SYSTEM[system](params[system])
papers_from_remote_systems = _get_current_system_related_papers(set(pubs), external_pubs_association)
return papers_from_remote_systems
################################################
# Not Exposed Autoclaim-relevant Functions #
################################################
def _is_logged_in_through_arxiv(user_info):
    #TODO: ask Kaplun for a more accurate way to discover if we are SSOed through arXiv
    #WARNING: this assumes that any logged-in user who has an email was logged in through arXiv
if user_info and 'email' in user_info and user_info['email']:
return True
return False
def _is_logged_in_through_orcid(orcid_info):
return orcid_info['has_orcid_id'] and orcid_info['import_pubs']
def _get_pubs_from_arxiv(user_info):
pubs_from_arxiv = list()
if 'external_arxivids' in user_info and user_info['external_arxivids']:
pubs_from_arxiv = user_info['external_arxivids'].split(';')
return pubs_from_arxiv
def _get_pubs_from_orcid(orcid_info):
pubs_from_orcid = list()
if 'imported_pubs' in orcid_info and orcid_info['imported_pubs']:
for doi in orcid_info['imported_pubs']:
pubs_from_orcid.append(doi)
return pubs_from_orcid
def _get_current_system_related_papers(pubs, external_pubs_association):
papers = set()
for pub in pubs:
id_type = is_arxiv_id_or_doi(pub)
try:
recid = external_pubs_association[(id_type, pub)]
papers.add(recid)
except KeyError:
recids = perform_request_search(p=pub, f=bconfig.CFG_BIBAUTHORID_REMOTE_LOGIN_SYSTEMS_IDENTIFIERS[id_type], m1='e', cc='HEP')
if len(recids) == 1:
recid = recids[0]
papers.add(recid)
external_pubs_association[(id_type, pub)] = recid
return papers
IS_LOGGED_IN_THROUGH = {'arXiv': _is_logged_in_through_arxiv, 'orcid': _is_logged_in_through_orcid}
GET_PUBS_FROM_REMOTE_SYSTEM = {'arXiv': _get_pubs_from_arxiv, 'orcid': _get_pubs_from_orcid}
############################################
# Visit diary Functions #
############################################
def history_log_visit(req, page, pid=None, params=None):
"""
    Logs in the session the page that a user visited, to be used later when a redirect is needed
    @param page: string (claim, manage_profile, profile, search)
    @param params: string (?param1_name=param1&param2_name=param2)
"""
session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
my_diary = pinfo['visit_diary']
my_diary[page].append({'page':page, 'pid':pid, 'params':params, 'timestamp':time()})
if len(my_diary[page]) > pinfo['diary_size_per_category']:
my_diary[page].pop(0)
session.dirty = True
def _get_sorted_history(visit_diary, limit_to_page=None):
history = list()
    if not limit_to_page:
        # flatten the per-page visit lists into a single list
        history = list(chain(*visit_diary.values()))
    else:
        for page in limit_to_page:
            history += visit_diary[page]
history = sorted(history, key=lambda x: x['timestamp'], reverse=True)
return history
def history_get_last_visited_url(visit_diary, limit_to_page=None, just_page=False):
'''
getting a redirect link according to the last page visit of the user.
    The limit_to_page argument shortens the list of page candidates
@param req: Apache request object
@type req: Apache request object
    @param limit_to_page: by giving the subset of pages of interest for redirecting, it shortens the list of page candidates
@type limit_to_page: list of strings
@return: redirect link
@rtype: string
'''
history = _get_sorted_history(visit_diary, limit_to_page)
try:
history = history[0]
except IndexError:
return ''
if just_page:
return history['page']
link = [CFG_SITE_URL+'/author/', history['page']]
if history['pid']:
link.append('/'+str(get_canonical_id_from_person_id(history['pid'])))
if history['params']:
link.append(history['params'])
return ''.join(link)
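# Illustrative call (hypothetical diary content): with a most recent entry
# {'page': 'manage_profile', 'pid': 42, 'params': None, ...} the returned
# link is CFG_SITE_URL + '/author/manage_profile/' + the canonical id of pid 42.
#
#   history_get_last_visited_url(pinfo['visit_diary'],
#                                limit_to_page=['manage_profile', 'claim'])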
def history_get_last_visited_pid(visit_diary, limit_to_page=None):
history = _get_sorted_history(visit_diary, limit_to_page)
for visit in history:
if visit['pid']:
return visit['pid']
def set_marked_visit_link(req, page, pid = None, params = None):
'''
store a marked redirect link for redirect purpose.
@param req: Apache request object
@type req: Apache request object
@param page: the page to redirect
@type page: string
@param pid: person id
@type pid: int
    @param params: url parameters, if any, of the following format: (?param1_name=param1&param2_name=param2)
@type: string
'''
session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
if not page:
pinfo['marked_visit'] = None
else:
link = [CFG_SITE_URL+'/author/', page]
if pid:
link.append('/'+str(get_canonical_id_from_person_id(pid)))
if params:
link.append(params)
pinfo['marked_visit'] = ''.join(link)
session.dirty = True
def get_marked_visit_link(req):
'''
    Gets the marked redirect link stored in the session, if any.
@param req: Apache request object
@type req: Apache request object
@return: redirect link
@rtype: string
'''
session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
return pinfo['marked_visit']
def reset_marked_visit_link(req):
'''
empty the marked redirect link.
@param req: Apache request object
@type req: Apache request object
'''
set_marked_visit_link(req, None)
def get_fallback_redirect_link(req):
'''
    Gets a redirect link when no other information is available.
    Links to the manage_profile page of the user if logged in,
    to the homepage otherwise.
@param req: Apache request object
@type req: Apache request object
@return: redirect link
@rtype: string
'''
uid = getUid(req)
pid = get_pid_from_uid(uid)
if uid <= 0 and pid < 0:
return '%s' % (CFG_SITE_URL,)
return '%s/author/manage_profile/%s' % (CFG_SITE_URL, get_canonical_id_from_person_id(pid))
REMOTE_LOGIN_SYSTEMS_FUNCTIONS = {'arXiv': get_arxiv_info, 'orcid': get_orcid_info}
REMOTE_LOGIN_SYSTEMS_GET_RECIDS_FUNCTIONS = {'arXiv': get_ids_from_arxiv, 'orcid': get_ids_from_orcid }
|
kaplun/ops
|
modules/bibauthorid/lib/bibauthorid_webapi.py
|
Python
|
gpl-2.0
| 111,884
|
[
"VisIt"
] |
8665d2a71498e2a169709127f3307d69d6bb1e9ede0698d0ac00421261982244
|
# -*- encoding: utf-8
from sqlalchemy import Column
from sqlalchemy import engine_from_config
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy.dialects.mssql import adodbapi
from sqlalchemy.dialects.mssql import base
from sqlalchemy.dialects.mssql import pymssql
from sqlalchemy.dialects.mssql import pyodbc
from sqlalchemy.engine import url
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import assert_warnings
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.mock import Mock
class ParseConnectTest(fixtures.TestBase):
def test_pyodbc_connect_dsn_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url("mssql://mydsn")
connection = dialect.create_connect_args(u)
eq_([["dsn=mydsn;Trusted_Connection=Yes"], {}], connection)
def test_pyodbc_connect_old_style_dsn_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url("mssql:///?dsn=mydsn")
connection = dialect.create_connect_args(u)
eq_([["dsn=mydsn;Trusted_Connection=Yes"], {}], connection)
def test_pyodbc_connect_dsn_non_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url("mssql://username:password@mydsn")
connection = dialect.create_connect_args(u)
eq_([["dsn=mydsn;UID=username;PWD=password"], {}], connection)
def test_pyodbc_connect_dsn_extra(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql://username:password@mydsn/?LANGUAGE=us_" "english&foo=bar"
)
connection = dialect.create_connect_args(u)
dsn_string = connection[0][0]
assert ";LANGUAGE=us_english" in dsn_string
assert ";foo=bar" in dsn_string
def test_pyodbc_hostname(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql://username:password@hostspec/database?driver=SQL+Server"
)
connection = dialect.create_connect_args(u)
eq_(
[
[
"DRIVER={SQL Server};Server=hostspec;Database=database;UI"
"D=username;PWD=password"
],
{},
],
connection,
)
def test_pyodbc_host_no_driver(self):
dialect = pyodbc.dialect()
u = url.make_url("mssql://username:password@hostspec/database")
def go():
return dialect.create_connect_args(u)
connection = assert_warnings(
go,
[
"No driver name specified; this is expected by "
"PyODBC when using DSN-less connections"
],
)
eq_(
[
[
"Server=hostspec;Database=database;UI"
"D=username;PWD=password"
],
{},
],
connection,
)
def test_pyodbc_connect_comma_port(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql://username:password@hostspec:12345/data"
"base?driver=SQL Server"
)
connection = dialect.create_connect_args(u)
eq_(
[
[
"DRIVER={SQL Server};Server=hostspec,12345;Database=datab"
"ase;UID=username;PWD=password"
],
{},
],
connection,
)
def test_pyodbc_connect_config_port(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql://username:password@hostspec/database?p"
"ort=12345&driver=SQL+Server"
)
connection = dialect.create_connect_args(u)
eq_(
[
[
"DRIVER={SQL Server};Server=hostspec;Database=database;UI"
"D=username;PWD=password;port=12345"
],
{},
],
connection,
)
def test_pyodbc_extra_connect(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql://username:password@hostspec/database?L"
"ANGUAGE=us_english&foo=bar&driver=SQL+Server"
)
connection = dialect.create_connect_args(u)
eq_(connection[1], {})
eq_(
connection[0][0]
in (
"DRIVER={SQL Server};Server=hostspec;Database=database;"
"UID=username;PWD=password;foo=bar;LANGUAGE=us_english",
"DRIVER={SQL Server};Server=hostspec;Database=database;UID="
"username;PWD=password;LANGUAGE=us_english;foo=bar",
),
True,
)
def test_pyodbc_odbc_connect(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql:///?odbc_connect=DRIVER%3D%7BSQL+Server"
"%7D%3BServer%3Dhostspec%3BDatabase%3Ddatabase"
"%3BUID%3Dusername%3BPWD%3Dpassword"
)
connection = dialect.create_connect_args(u)
eq_(
[
[
"DRIVER={SQL Server};Server=hostspec;Database=database;UI"
"D=username;PWD=password"
],
{},
],
connection,
)
def test_pyodbc_odbc_connect_with_dsn(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql:///?odbc_connect=dsn%3Dmydsn%3BDatabase"
"%3Ddatabase%3BUID%3Dusername%3BPWD%3Dpassword"
)
connection = dialect.create_connect_args(u)
eq_(
[["dsn=mydsn;Database=database;UID=username;PWD=password"], {}],
connection,
)
def test_pyodbc_odbc_connect_ignores_other_values(self):
dialect = pyodbc.dialect()
u = url.make_url(
"mssql://userdiff:passdiff@localhost/dbdiff?od"
"bc_connect=DRIVER%3D%7BSQL+Server%7D%3BServer"
"%3Dhostspec%3BDatabase%3Ddatabase%3BUID%3Duse"
"rname%3BPWD%3Dpassword"
)
connection = dialect.create_connect_args(u)
eq_(
[
[
"DRIVER={SQL Server};Server=hostspec;Database=database;UI"
"D=username;PWD=password"
],
{},
],
connection,
)
def test_pyodbc_token_injection(self):
token1 = "someuser%3BPORT%3D50001"
token2 = "somepw%3BPORT%3D50001"
token3 = "somehost%3BPORT%3D50001"
token4 = "somedb%3BPORT%3D50001"
u = url.make_url(
"mssql+pyodbc://%s:%s@%s/%s?driver=foob"
% (token1, token2, token3, token4)
)
dialect = pyodbc.dialect()
connection = dialect.create_connect_args(u)
eq_(
[
[
"DRIVER={foob};Server=somehost%3BPORT%3D50001;"
"Database=somedb%3BPORT%3D50001;UID='someuser;PORT=50001';"
"PWD='somepw;PORT=50001'"
],
{},
],
connection,
)
def test_adodbapi_token_injection(self):
token1 = "someuser%3BPORT%3D50001"
token2 = "somepw%3BPORT%3D50001"
token3 = "somehost%3BPORT%3D50001"
token4 = "someport%3BPORT%3D50001"
# this URL format is all wrong
u = url.make_url(
"mssql+adodbapi://@/?user=%s&password=%s&host=%s&port=%s"
% (token1, token2, token3, token4)
)
dialect = adodbapi.dialect()
connection = dialect.create_connect_args(u)
eq_(
[
[
"Provider=SQLOLEDB;"
"Data Source='somehost;PORT=50001', 'someport;PORT=50001';"
"Initial Catalog=None;User Id='someuser;PORT=50001';"
"Password='somepw;PORT=50001'"
],
{},
],
connection,
)
def test_pymssql_port_setting(self):
dialect = pymssql.dialect()
u = url.make_url("mssql+pymssql://scott:tiger@somehost/test")
connection = dialect.create_connect_args(u)
eq_(
[
[],
{
"host": "somehost",
"password": "tiger",
"user": "scott",
"database": "test",
},
],
connection,
)
u = url.make_url("mssql+pymssql://scott:tiger@somehost:5000/test")
connection = dialect.create_connect_args(u)
eq_(
[
[],
{
"host": "somehost:5000",
"password": "tiger",
"user": "scott",
"database": "test",
},
],
connection,
)
def test_pymssql_disconnect(self):
dialect = pymssql.dialect()
for error in [
"Adaptive Server connection timed out",
"Net-Lib error during Connection reset by peer",
"message 20003",
"Error 10054",
"Not connected to any MS SQL server",
"Connection is closed",
"message 20006", # Write to the server failed
"message 20017", # Unexpected EOF from the server
"message 20047", # DBPROCESS is dead or not enabled
]:
eq_(dialect.is_disconnect(error, None, None), True)
eq_(dialect.is_disconnect("not an error", None, None), False)
def test_pyodbc_disconnect(self):
dialect = pyodbc.dialect()
class MockDBAPIError(Exception):
pass
class MockProgrammingError(MockDBAPIError):
pass
dialect.dbapi = Mock(
Error=MockDBAPIError, ProgrammingError=MockProgrammingError
)
for error in [
MockDBAPIError("[%s] some pyodbc message" % code)
for code in [
"08S01",
"01002",
"08003",
"08007",
"08S02",
"08001",
"HYT00",
"HY010",
]
] + [
MockProgrammingError(message)
for message in [
"(some pyodbc stuff) The cursor's connection has been closed.",
"(some pyodbc stuff) Attempt to use a closed connection.",
]
]:
eq_(dialect.is_disconnect(error, None, None), True)
eq_(
dialect.is_disconnect(
MockProgrammingError("not an error"), None, None
),
False,
)
@testing.requires.mssql_freetds
def test_bad_freetds_warning(self):
engine = engines.testing_engine()
def _bad_version(connection):
return 95, 10, 255
engine.dialect._get_server_version_info = _bad_version
assert_raises_message(
exc.SAWarning, "Unrecognized server version info", engine.connect
)
class EngineFromConfigTest(fixtures.TestBase):
def test_legacy_schema_flag(self):
cfg = {
"sqlalchemy.url": "mssql://foodsn",
"sqlalchemy.legacy_schema_aliasing": "false",
}
e = engine_from_config(
cfg, module=Mock(version="MS SQL Server 11.0.92")
)
eq_(e.dialect.legacy_schema_aliasing, False)
class FastExecutemanyTest(fixtures.TestBase):
__only_on__ = "mssql"
__backend__ = True
__requires__ = ("pyodbc_fast_executemany",)
@testing.provide_metadata
def test_flag_on(self):
t = Table(
"t",
self.metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
)
t.create()
eng = engines.testing_engine(options={"fast_executemany": True})
@event.listens_for(eng, "after_cursor_execute")
def after_cursor_execute(
conn, cursor, statement, parameters, context, executemany
):
if executemany:
assert cursor.fast_executemany
with eng.connect() as conn:
conn.execute(
t.insert(),
[{"id": i, "data": "data_%d" % i} for i in range(100)],
)
conn.execute(t.insert(), {"id": 200, "data": "data_200"})
class VersionDetectionTest(fixtures.TestBase):
@testing.fixture
def mock_conn_scalar(self):
return lambda text: Mock(
exec_driver_sql=Mock(
return_value=Mock(scalar=Mock(return_value=text))
)
)
def test_pymssql_version(self, mock_conn_scalar):
dialect = pymssql.MSDialect_pymssql()
for vers in [
"Microsoft SQL Server Blah - 11.0.9216.62",
"Microsoft SQL Server (XYZ) - 11.0.9216.62 \n"
"Jul 18 2014 22:00:21 \nCopyright (c) Microsoft Corporation",
"Microsoft SQL Azure (RTM) - 11.0.9216.62 \n"
"Jul 18 2014 22:00:21 \nCopyright (c) Microsoft Corporation",
]:
conn = mock_conn_scalar(vers)
eq_(dialect._get_server_version_info(conn), (11, 0, 9216, 62))
def test_pyodbc_version_productversion(self, mock_conn_scalar):
dialect = pyodbc.MSDialect_pyodbc()
conn = mock_conn_scalar("11.0.9216.62")
eq_(dialect._get_server_version_info(conn), (11, 0, 9216, 62))
def test_pyodbc_version_fallback(self):
dialect = pyodbc.MSDialect_pyodbc()
dialect.dbapi = Mock()
for vers, expected in [
("11.0.9216.62", (11, 0, 9216, 62)),
("notsqlserver.11.foo.0.9216.BAR.62", (11, 0, 9216, 62)),
("Not SQL Server Version 10.5", (5,)),
]:
conn = Mock(
exec_driver_sql=Mock(
return_value=Mock(
scalar=Mock(
side_effect=exc.DBAPIError("stmt", "params", None)
)
)
),
connection=Mock(getinfo=Mock(return_value=vers)),
)
eq_(dialect._get_server_version_info(conn), expected)
class RealIsolationLevelTest(fixtures.TestBase):
__only_on__ = "mssql"
__backend__ = True
@testing.provide_metadata
def test_isolation_level(self):
Table("test", self.metadata, Column("id", Integer)).create(
checkfirst=True
)
with testing.db.connect() as c:
default = testing.db.dialect.get_isolation_level(c.connection)
values = [
"READ UNCOMMITTED",
"READ COMMITTED",
"REPEATABLE READ",
"SERIALIZABLE",
"SNAPSHOT",
]
for value in values:
with testing.db.connect() as c:
c.execution_options(isolation_level=value)
c.exec_driver_sql("SELECT TOP 10 * FROM test")
eq_(
testing.db.dialect.get_isolation_level(c.connection), value
)
with testing.db.connect() as c:
eq_(testing.db.dialect.get_isolation_level(c.connection), default)
class IsolationLevelDetectTest(fixtures.TestBase):
def _fixture(self, view):
class Error(Exception):
pass
dialect = pyodbc.MSDialect_pyodbc()
dialect.dbapi = Mock(Error=Error)
dialect.server_version_info = base.MS_2012_VERSION
result = []
def fail_on_exec(stmt,):
if view is not None and view in stmt:
result.append(("SERIALIZABLE",))
else:
raise Error("that didn't work")
connection = Mock(
cursor=Mock(
return_value=Mock(
execute=fail_on_exec, fetchone=lambda: result[0]
)
)
)
return dialect, connection
def test_dm_pdw_nodes(self):
dialect, connection = self._fixture("dm_pdw_nodes_exec_sessions")
eq_(dialect.get_isolation_level(connection), "SERIALIZABLE")
def test_exec_sessions(self):
dialect, connection = self._fixture("exec_sessions")
eq_(dialect.get_isolation_level(connection), "SERIALIZABLE")
def test_not_supported(self):
dialect, connection = self._fixture(None)
with expect_warnings("Could not fetch transaction isolation level"):
assert_raises_message(
NotImplementedError,
"Can't fetch isolation",
dialect.get_isolation_level,
connection,
)
|
graingert/sqlalchemy
|
test/dialect/mssql/test_engine.py
|
Python
|
mit
| 16,955
|
[
"ASE"
] |
8e88e252b1658a8c0e2b1f4717625fa2451bc0152944655d68790275c1faf243
|
# (c) 2017, Brian Coca
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
cache: pickle
short_description: Pickle formatted files.
description:
- This cache uses Python's pickle serialization format, in per host files, saved to the filesystem.
version_added: "2.3"
author: Brian Coca (@bcoca)
options:
_uri:
required: True
description:
- Path in which the cache plugin will save the files
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
ini:
- key: fact_caching_connection
section: defaults
_prefix:
description: User defined prefix to use when creating the files
env:
- name: ANSIBLE_CACHE_PLUGIN_PREFIX
ini:
- key: fact_caching_prefix
section: defaults
_timeout:
default: 86400
description: Expiration timeout for the cache plugin data
env:
- name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
ini:
- key: fact_caching_timeout
section: defaults
'''
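# Illustrative configuration (not part of this plugin): the options documented
# above are typically set in ansible.cfg, for example
#
#   [defaults]
#   fact_caching = pickle
#   fact_caching_connection = /tmp/ansible_fact_cache
#   fact_caching_prefix = ansible_facts_
#   fact_caching_timeout = 86400
#
# or through the corresponding ANSIBLE_CACHE_PLUGIN_* environment variables.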
try:
import cPickle as pickle
except ImportError:
import pickle
from ansible.module_utils.six import PY3
from ansible.plugins.cache import BaseFileCacheModule
class CacheModule(BaseFileCacheModule):
"""
A caching module backed by pickle files.
"""
def _load(self, filepath):
# Pickle is a binary format
with open(filepath, 'rb') as f:
if PY3:
return pickle.load(f, encoding='bytes')
else:
return pickle.load(f)
def _dump(self, value, filepath):
with open(filepath, 'wb') as f:
# Use pickle protocol 2 which is compatible with Python 2.3+.
pickle.dump(value, f, protocol=2)
|
alxgu/ansible
|
lib/ansible/plugins/cache/pickle.py
|
Python
|
gpl-3.0
| 1,997
|
[
"Brian"
] |
dde13d46f249b7bdbc8e34e1eaceebb9d095c16906c4e09b08ac39a2b7668363
|
#!/usr/bin/env python
import radical.utils as ru
#import radical.analytics as ra
import radical.entk as re
from radical.entk import Pipeline, Stage, Task, AppManager
import os
import tarfile
import writeInputs
import time
import git
#os.environ['RADICAL_SAGA_VERBOSE'] = 'INFO'
os.environ['RADICAL_ENTK_VERBOSE'] = 'INFO'
os.environ['RP_ENABLE_OLD_DEFINES'] = 'True'
os.environ['SAGA_PTY_SSH_TIMEOUT'] = '2000'
#os.environ['RADICAL_VERBOSE'] = 'INFO'
replicas = 4
replica_cores = 1
min_temp = 100
max_temp = 200
timesteps = 500
basename = 'ace-ala'
cycle = 0
md_executable = '/home/scm177/mantel/AMBER/amber14/bin/sander'
SYNCHRONICITY = 0.5
wait_ratio = 0
max_waiting_list = 2
global waiting_replicas
waiting_replicas = []
min_completed_cycles = 3
replica_cycles = [0]*replicas
wait_count = 0
def setup_replicas(replicas, min_temp, max_temp, timesteps, basename):
writeInputs.writeInputs(max_temp=max_temp, min_temp=min_temp, replicas=replicas, timesteps=timesteps, basename=basename)
tar = tarfile.open("input_files.tar", "w")
for name in [basename + ".prmtop", basename + ".inpcrd", basename + ".mdin"]:
tar.add(name)
for r in range(replicas):
tar.add('mdin-{replica}-{cycle}'.format(replica=r, cycle=0))
tar.close()
for r in range(replicas):
os.remove('mdin-{replica}-{cycle}'.format(replica=r, cycle=0))
setup_p = Pipeline()
setup_p.name = 'untarPipe'
repo = git.Repo('.', search_parent_directories=True)
aux_function_path = repo.working_tree_dir
untar_stg = Stage()
untar_stg.name = 'untarStg'
#Untar Task
untar_tsk = Task()
untar_tsk.name = 'untarTsk'
untar_tsk.executable = ['python']
untar_tsk.upload_input_data = ['untar_input_files.py', 'input_files.tar']
untar_tsk.arguments = ['untar_input_files.py', 'input_files.tar']
untar_tsk.cpu_reqs = 1
untar_tsk.post_exec = []
untar_stg.add_tasks(untar_tsk)
setup_p.add_stages(untar_stg)
global replica_sandbox
replica_sandbox='$Pipeline_%s_Stage_%s_Task_%s'%(setup_p.name, untar_stg.name, untar_tsk.name)
print replica_sandbox
return setup_p
####_----------------------------------------------------------init replicas
class Replica(object):
def __init__(self):
self.state_history = []
self.cycle = 0 #initial cycle
def replica_pipeline(self, rid, cycle, replica_cores, md_executable, timesteps, replica_sandbox):
def add_md_stg(rid,cycle):
#md stg here
print self.cycle
md_tsk = Task()
md_stg = Stage()
md_tsk.name = 'mdtsk-{replica}-{cycle}'.format(replica=rid, cycle=self.cycle)
md_tsk.link_input_data += ['%s/inpcrd > inpcrd-{replica}-{cycle}'.format(replica=rid, cycle=self.cycle) %replica_sandbox,
'%s/prmtop' %replica_sandbox,
'%s/mdin-{replica}-{cycle} > mdin'.format(replica=rid, cycle=self.cycle) %replica_sandbox ]
md_tsk.arguments = ['-O',
'-i', 'mdin',
'-p', 'prmtop',
'-c', 'inpcrd-{replica}-{cycle}'.format(replica=rid, cycle=self.cycle),
'-o', 'out',
'-x', 'mdcrd',
'-r', '%s/inpcrd-{replica}-{cycle}'.format(replica=rid, cycle=self.cycle+1) %replica_sandbox,
'-inf', '%s/mdinfo-{replica}-{cycle}'.format(replica=rid, cycle=self.cycle) %replica_sandbox]
md_tsk.executable = ['/home/scm177/mantel/AMBER/amber14/bin/sander']
md_tsk.cpu_reqs = {
'processes': replica_cores,
'process_type': '',
'threads_per_process': 1,
'thread_type': None
}
md_tsk.pre_exec = ['export dummy_variable=19', 'echo $SHARED']
md_stg.add_tasks(md_tsk)
md_stg.post_exec = {
'condition': post_md,
'on_true': start_ex,
'on_false': suspend_replica
}
return md_stg
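        # Adaptive control flow: after an MD stage finishes, post_md() reports
        # whether enough replicas are waiting for an exchange; if so,
        # start_ex() appends an exchange stage to this pipeline, otherwise
        # suspend_replica() suspends it until another replica's continue_md()
        # resumes it with a fresh MD stage.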
def add_ex_stg(rid, cycle):
#ex stg here
ex_tsk = Task()
ex_stg = Stage()
ex_tsk.name = 'extsk-{replica}-{cycle}'.format(replica=rid, cycle=cycle)
for rid in range(len(waiting_replicas)):
ex_tsk.link_input_data += ['%s/mdinfo-{replica}-{cycle}'.format(replica=rid, cycle=self.cycle)%replica_sandbox]
ex_tsk.arguments = ['t_ex_gibbs.py', len(waiting_replicas)] #This needs to be fixed
ex_tsk.executable = ['python']
ex_tsk.cpu_reqs = {
'processes': 1,
'process_type': '',
'threads_per_process': 1,
'thread_type': None
}
ex_tsk.pre_exec = ['export dummy_variable=19']
ex_stg.add_tasks(ex_tsk)
ex_stg.post_exec = {
'condition': post_ex,
'on_true': terminate_replicas,
'on_false': continue_md
}
return ex_stg
def post_md():
global replica_cycles, ex_pipeline, max_waiting_list, min_completed_cycles
print replica_cycles, rid
self.cycle += 1
replica_cycles[rid] += 1
print replica_cycles
waiting_replicas.append(rid)
if len(waiting_replicas) < max_waiting_list:
return False
return True
def suspend_replica():
p_replica.suspend()
def start_ex():
ex_stg = add_ex_stg(rid, cycle=self.cycle)
p_replica.add_stages(ex_stg)
def post_ex():
if cycle > min_completed_cycles:
return True
return False
def terminate_replicas():
#Resume all replicas in list without adding stages
for rid in waiting_replicas:
replica_pipelines[rid].resume()
print "DONE"
def continue_md():
# This needs to resume replica_pipelines[rid] for all rid's in wait list
print "continuing replicas"
global waiting_replicas
for rid in waiting_replicas:
try:
md_stg = add_md_stg(rid, cycle)
replica_pipelines[rid].add_stages(md_stg)
if replica_pipelines[rid] is rid:
pass
else:
replica_pipelines[rid].resume() # This is throwing an error: cannot resume itself since it is not suspended.
# Since the pipeline that is triggering this choice is NOT suspended,
# pipeline.resume() fails. This seems to be happening on ALL pipelines somehow.
except:
print "replica is not suspended, cannot resume"
waiting_replicas = []
p_replica = Pipeline()
p_replica.name = 'p_{rid}'.format(rid=rid)
md_stg = add_md_stg(rid, cycle)
p_replica.add_stages(md_stg)
return p_replica
system = setup_replicas(replicas, min_temp, max_temp, timesteps, basename)
replica=[]
replica_pipelines = []
for rid in range(replicas):
print rid
replica = Replica()
r_pipeline = replica.replica_pipeline(rid, cycle, replica_cores, md_executable, timesteps, replica_sandbox)
replica_pipelines.append(r_pipeline)
os.environ['RADICAL_PILOT_DBURL'] = "mongodb://smush:[email protected]:47361/db_repex_4"
res_dict ={
"resource" : 'local.localhost',
"walltime" : 30,
"cpus" : 4,
}
appman = AppManager(autoterminate=False, port=32769)
appman.resource_desc = res_dict
appman.workflow = set([system])
appman.run()
appman.workflow = set(replica_pipelines)
appman.run()
appman.resource_terminate()
|
radical-cybertools/radical.repex
|
old/examples/experimental_async.py
|
Python
|
mit
| 8,631
|
[
"Amber"
] |
4422d70c52fdfcb4971f9da4677eafd3d22775ac720ee22387aecda99b28b1c4
|
# 460. LFU Cache
# Design and implement a data structure for Least Frequently Used (LFU) cache. It should support the following operations: get and put.
#
# get(key) - Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1.
# put(key, value) - Set or insert the value if the key is not already present. When the cache reaches its capacity, it should invalidate the least frequently used item before inserting a new item. For the purpose of this problem, when there is a tie (i.e., two or more keys that have the same frequency), the least recently used key would be evicted.
#
# Follow up:
# Could you do both operations in O(1) time complexity?
#
# Example:
#
# LFUCache cache = new LFUCache( 2 /* capacity */ );
#
# cache.put(1, 1);
# cache.put(2, 2);
# cache.get(1); // returns 1
# cache.put(3, 3); // evicts key 2
# cache.get(2); // returns -1 (not found)
# cache.get(3); // returns 3.
# cache.put(4, 4); // evicts key 1.
# cache.get(1); // returns -1 (not found)
# cache.get(3); // returns 3
# cache.get(4); // returns 4
#
class ListNode:
def __init__(self, key, val):
self.prev = None
self.next = None
self.key = key
self.val = val
def connect(self, nextNode):
self.next = nextNode
nextNode.prev = self
class LFUCache:
def __init__(self, capacity):
"""
:type capacity: int
"""
self.cap = capacity
self.head = ListNode(None, None)
self.tail = ListNode(None, None)
self.head.connect(self.tail)
        # maps each visit count to the first (most recently used) ListNode of that count group
        self.cnt = {0: self.tail}
        # maps key -> [ListNode, visit count]
        self.kv = {None: [self.tail, 0]}
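        # Invariants: nodes form a doubly linked list ordered by visit count,
        # with higher counts toward the head; within a count group the most
        # recently used node comes first, so self.tail.prev is always the
        # least frequently (and, on ties, least recently) used entry and is
        # the one evicted by put() when the capacity is exceeded.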
def moveforward(self, key):
node, cnt = self.kv[key]
self.add('tmp', node.val, cnt + 1)
self.remove(key)
self.kv[key] = self.kv['tmp']
self.kv[key][0].key = key
del self.kv['tmp']
def get(self, key):
"""
:type key: int
:rtype: int
"""
if key not in self.kv:
return -1
self.moveforward(key)
return self.kv[key][0].val
def put(self, key, value):
"""
:type key: int
:type value: int
:rtype: void
"""
if self.cap == 0:
return
if key in self.kv:
self.kv[key][0].val = value
self.moveforward(key)
return
if len(self.kv) > self.cap:
self.remove(self.tail.prev.key)
self.add(key, value, 0)
def remove(self, key):
node, cnt = self.kv[key]
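        # Unlinking `node` has three cases: it is not the head of its count
        # group (just unlink it); it is the head and the next node shares the
        # same count (unlink it and advance the group pointer); or it is the
        # only member of its group (unlink it and drop the group entry).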
if self.cnt[cnt] != node:
node.prev.connect(node.next)
elif self.kv[node.next.key][1] == cnt:
node.prev.connect(node.next)
self.cnt[cnt] = self.cnt[cnt].next
else:
node.prev.connect(node.next)
del self.cnt[cnt]
del self.kv[key]
def add(self, key, value, cnt):
if cnt in self.cnt:
loc = self.cnt[cnt]
else:
            loc = self.cnt[cnt - 1]
node = ListNode(key, value)
loc.prev.connect(node)
node.connect(loc)
self.cnt[cnt] = node
self.kv[key] = [node, cnt]
cache = LFUCache(2)
cache.put(1, 1)
cache.put(2, 2)
print(cache.get(1))
cache.put(3, 3)
print(cache.get(2))
print(cache.get(3))
cache.put(4, 4)
print(cache.get(1))
print(cache.get(3))
print(cache.get(4))
# Your LFUCache object will be instantiated and called as such:
# obj = LFUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
|
gengwg/leetcode
|
460_lfu_cache.py
|
Python
|
apache-2.0
| 3,669
|
[
"VisIt"
] |
8fa0e0f94afd8b5adaf3e7d9ceca217c06672ed772b8a89defc31e782ff273f8
|
#
# Copyright 2015 The AMP HTML Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the license.
#
"""Generates validator-generated.js.
This script reads validator.protoascii and reflects over its contents
to generate Javascript. This Javascript consists of Closure-style
classes and enums, as well as a createRules function which
instantiates the data structures specified in validator.protoascii -
the validator rules.
From a Javascript perspective, this approach looks elaborate - you may
wonder why we're not just writing Javascript directly, or why we're
not encoding our rules in JSON or YAML or even, gasp, XML? Besides the
additional type safety that we gain from our approach, it allows us to
share the rule specifications, error codes, etc. between multiple
validator implementations, including an implementation in C++. This
makes it much easier to keep otherwise likely divergent behavior in
sync.
"""
import copy
import hashlib
import json
import os
def UnderscoreToCamelCase(under_score):
"""Helper function which converts under_score names to camelCase.
In proto buffers, fields have under_scores. In Javascript, fields
have camelCase.
Args:
under_score: A name, segmented by under_scores.
Returns:
A name, segmented as camelCase.
"""
segments = under_score.split('_')
return '%s%s' % (segments[0], ''.join([s.title() for s in segments[1:]]))
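# Illustrative examples: UnderscoreToCamelCase('also_requires_attr') returns
# 'alsoRequiresAttr', and UnderscoreToCamelCase('tag_name') returns 'tagName'.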
def FindDescriptors(validator_pb2, msg_desc_by_name, enum_desc_by_name):
"""Finds the message and enum descriptors in the file.
This method finds the message and enum descriptors from a file descriptor;
it will visit the top-level messages, and within those the enums.
Args:
validator_pb2: The proto2 Python module generated from validator.proto.
msg_desc_by_name: A map of message descriptors, keyed by full_name.
enum_desc_by_name: A map of enum descriptors, keyed by full name.
"""
for msg_type in list(validator_pb2.DESCRIPTOR.message_types_by_name.values()):
msg_desc_by_name[msg_type.full_name] = msg_type
for enum_type in msg_type.enum_types:
enum_desc_by_name[enum_type.full_name] = enum_type
class OutputFormatter(object):
"""Helper class for indenting lines."""
def __init__(self, lines):
"""Initializes the indenter with indent 0."""
self.lines = lines
self.indent_by_ = [0]
def PushIndent(self, indent):
"""Pushes a particular indent onto the stack."""
self.indent_by_.append(self.indent_by_[-1] + indent)
def PopIndent(self):
"""Pops a particular indent from the stack, reverting to the previous."""
self.indent_by_.pop()
def Line(self, line):
"""Adds a line to self.lines, applying the indent."""
self.lines.append('%s%s' % (' ' * self.indent_by_[-1], line))
class MessageKey(object):
"""A hashable key for a proto message capturing its type and content.
Messages of the same type (we use the short type name here, e.g. AttrSpec)
that serialize to the same byte string are considered the same.
"""
def __init__(self, proto_message):
self.type_name = proto_message.DESCRIPTOR.name
# While it's not strictly necessary to use a digest here, we do so
# to avoid carrying around the whole serialized string all the time.
self.digest = hashlib.sha1(proto_message.SerializeToString()).hexdigest()
def __hash__(self):
return hash((self.type_name, self.digest))
def __eq__(self, other):
return (self.type_name, self.digest) == (other.type_name, other.digest)
def __ne__(self, other):
return not self == other
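# Illustrative sketch (not part of the generator): two messages with identical
# content compare equal as registry keys, e.g.
#   a = validator_pb2.AttrSpec(name='src')
#   b = validator_pb2.AttrSpec(name='src')
#   assert MessageKey(a) == MessageKey(b)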
class MessageRegistry(object):
"""Maps from messages to ids, used for de-duplication."""
def __init__(self):
# We maintain separate message ids for each type name, e.g. for AttrList,
# TagSpec, AttrSpec, etc., there are ids 0 - # unique message instances.
self.next_message_id_by_type_name_ = {}
# The key for this map is an instance of MessageKey.
self.message_id_by_message_key_ = {}
    # A bit that keeps track of whether a message has been emitted or
    # not. Strictly speaking, this bit gets flipped when the message
    # is about to be printed - once it is true, the message will not
    # be printed a second time.
self.is_printed_by_message_key_ = {}
# References between tag specs in the .protoascii are expressed as
# tag spec names (see also TagSpecName), so we maintain this special
# case mapping to resolve them to message ids.
self.message_id_by_tag_spec_name_ = {}
# References from tag specs to attr specs in the .protoascii are expressed
# as attr list names, so we maintain this mapping to resolve them to
# message ids for the generated Javascript.
self.message_id_by_attr_list_name_ = {}
# Interned strings have negative IDs, starting from -1. This makes it
# easy to distinguish them from other message ids. In the interned_strings_
# array, they can be found by calculating their index -1 - <string_id>.
self.interned_strings_ = []
self.string_id_by_interned_string_ = {}
def InternString(self, a_string):
"""Interns strings to eliminate duplicates and to refer to them as numbers.
Args:
a_string: the string to be interned
Returns:
The string id, a negative number -1 to -MAXINT.
"""
string_id = self.string_id_by_interned_string_.get(a_string, 0)
if string_id != 0:
return string_id
self.interned_strings_.append(a_string)
string_id = -len(self.interned_strings_)
self.string_id_by_interned_string_[a_string] = string_id
return string_id
def InternedStrings(self):
"""The interned strings which will be emitted into validator-generated.js.
Returns:
A list of strings.
"""
return self.interned_strings_
def MessageIdForKey(self, message_key):
"""Yields the message id for a key, registering a new one if needed.
Args:
message_key: an instance of MessageKey
Returns:
The message id - a number.
"""
message_id = self.message_id_by_message_key_.get(message_key, -1)
if message_id != -1:
return message_id
message_id = self.next_message_id_by_type_name_.get(message_key.type_name,
0)
self.next_message_id_by_type_name_[message_key.type_name] = message_id + 1
self.message_id_by_message_key_[message_key] = message_id
return message_id
def MessageReferenceForKey(self, message_key):
"""A message reference is the variable name used in validator-generated.js.
Args:
message_key: an instance of MessageKey
Returns:
The message reference - a string.
"""
return '%s_%d' % (message_key.type_name.lower(),
self.MessageIdForKey(message_key))
def MarkPrinted(self, message_key):
"""Marks a message as printed.
Args:
message_key: an instance of MessageKey to identify the message
"""
self.is_printed_by_message_key_[message_key] = True
def IsPrinted(self, message_key):
"""Whether a message was printed.
Args:
message_key: an instance of MessageKey to identify the message.
Returns:
a boolean indicating whether the message was printed.
"""
return message_key in self.is_printed_by_message_key_
def RegisterTagSpec(self, tag_spec):
"""Registers a tag spec, including for lookups by TagSpecName.
Args:
tag_spec: an instance of validator_pb2.TagSpec
"""
message_id = self.MessageIdForKey(MessageKey(tag_spec))
self.message_id_by_tag_spec_name_[TagSpecName(tag_spec)] = message_id
def MessageIdForTagSpecName(self, tag_spec_name):
"""Looks up a message id for a tag spec by TagSpecName.
Args:
tag_spec_name: a string - see TagSpecName for computing it.
Returns:
The message id - a number.
"""
return self.message_id_by_tag_spec_name_[tag_spec_name]
def RegisterAttrList(self, attr_list):
"""Registers an attr list, including for lookups by name.
Args:
attr_list: an instance of validator_pb2.AttrList
"""
message_id = self.MessageIdForKey(MessageKey(attr_list))
self.message_id_by_attr_list_name_[attr_list.name] = message_id
def MessageIdForAttrListName(self, attr_list_name):
"""Looks up a message id for a tag spec by AttrListName.
Args:
attr_list_name: a string - the AttrList::name field.
Returns:
The message id - a number.
"""
return self.message_id_by_attr_list_name_[attr_list_name]
def ElementTypeFor(descriptor, field_desc):
"""Returns the element Javascript type for a given field descriptor.
Args:
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
field_desc: A field descriptor for a particular field in a message.
Returns:
A string; either the type of a field descriptor or iff the
field descriptor is a repeated field, it's the element type.
"""
# If the field is a reference to a tagspec name (string) or if it's
# holding a message that we're deduplicating and replacing with a
# synthetic reference field, make it a number instead as we'll be
# replacing this with the message id.
if (field_desc.full_name in TAG_SPEC_NAME_REFERENCE_FIELD) or (
field_desc.full_name in SYNTHETIC_REFERENCE_FIELD) or (
field_desc.full_name in ATTR_LIST_NAME_REFERENCE_FIELD):
return 'number'
return {
descriptor.FieldDescriptor.TYPE_DOUBLE:
lambda: 'number',
descriptor.FieldDescriptor.TYPE_INT32:
lambda: 'number',
descriptor.FieldDescriptor.TYPE_BOOL:
lambda: 'boolean',
descriptor.FieldDescriptor.TYPE_STRING:
lambda: 'string',
descriptor.FieldDescriptor.TYPE_ENUM: (
lambda: field_desc.enum_type.full_name),
descriptor.FieldDescriptor.TYPE_MESSAGE: (
lambda: field_desc.message_type.full_name)
}[field_desc.type]()
def FieldTypeFor(descriptor, field_desc, nullable):
"""Returns the Javascript type for a given field descriptor.
Args:
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
field_desc: A field descriptor for a particular field in a message.
nullable: Whether or not the value may be null.
Returns:
The Javascript type for the given field descriptor.
"""
element_type = ElementTypeFor(descriptor, field_desc)
if field_desc.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if nullable:
return 'Array<!%s>' % element_type
return '!Array<!%s>' % element_type
if nullable:
return '?%s' % element_type
return '%s' % element_type
def ValueToString(descriptor, field_desc, value):
"""For a non-repeated field, renders the value as a Javascript literal.
Args:
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
field_desc: The type descriptor for the field value to be rendered.
value: The value of the non-repeated field to be rendered.
Returns:
A Javascript literal for the provided non-repeated value.
"""
if field_desc.type == descriptor.FieldDescriptor.TYPE_STRING:
escaped = ('' + value).encode('unicode-escape')
return "'%s'" % escaped.decode().replace("'", "\\'")
if field_desc.type == descriptor.FieldDescriptor.TYPE_BOOL:
if value:
return 'true'
return 'false'
if field_desc.type == descriptor.FieldDescriptor.TYPE_ENUM:
enum_value_name = field_desc.enum_type.values_by_number[value].name
return '%s.%s' % (field_desc.enum_type.full_name, enum_value_name)
if value is None:
return 'null'
return str(value)
CONSTRUCTOR_ARG_FIELDS = [
'amp.validator.AmpLayout.supported_layouts',
'amp.validator.AtRuleSpec.name',
'amp.validator.AtRuleSpec.type',
'amp.validator.AttrSpec.name',
'amp.validator.AttrTriggerSpec.also_requires_attr',
'amp.validator.DenyListedCDataRegex.error_message',
'amp.validator.DenyListedCDataRegex.regex',
'amp.validator.ErrorFormat.code',
'amp.validator.ErrorFormat.format',
'amp.validator.PropertySpec.name',
'amp.validator.PropertySpecList.properties',
'amp.validator.TagSpec.tag_name',
'amp.validator.UrlSpec.allowed_protocol',
'amp.validator.ValidatorRules.tags',
]
# In the .protoascii, some fields reference other tags by tag spec name.
# See TagSpecName for how it's computed. This is a string, and this
# code generator replaces these fields with tag ids, which are numbers.
TAG_SPEC_NAME_REFERENCE_FIELD = [
'amp.validator.ExtensionSpec.deprecated_recommends_usage_of_tag',
'amp.validator.ReferencePoint.tag_spec_name',
'amp.validator.TagSpec.also_requires_tag_warning',
'amp.validator.TagSpec.extension_unused_unless_tag_present',
]
# In the .protoascii, some fields reference other tags by attr list name.
# This is a string, and this code generator replaces these fields with attr
# list ids, which are numbers.
ATTR_LIST_NAME_REFERENCE_FIELD = ['amp.validator.TagSpec.attr_lists']
# These fields contain messages in the .protoascii, but we replace
# them with message ids, which are numbers.
SYNTHETIC_REFERENCE_FIELD = [
'amp.validator.AttrList.attrs',
'amp.validator.AttrSpec.disallowed_value_regex',
'amp.validator.AttrSpec.mandatory_anyof',
'amp.validator.AttrSpec.mandatory_oneof',
'amp.validator.AttrSpec.value_regex',
'amp.validator.AttrSpec.value_regex_casei',
'amp.validator.AttrTriggerSpec.if_value_regex',
'amp.validator.CdataSpec.cdata_regex',
'amp.validator.CssDeclaration.value_regex_casei',
'amp.validator.TagSpec.attrs',
'amp.validator.TagSpec.mandatory_alternatives',
'amp.validator.TagSpec.requires',
'amp.validator.TagSpec.satisfies',
'amp.validator.TagSpec.excludes',
]
def PrintClassFor(descriptor, msg_desc, out):
"""Prints a Javascript class for the given proto message.
This method emits a Javascript class (Closure-style) for the given
proto message to sys.stdout.
Args:
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
msg_desc: The descriptor for a particular message type.
out: a list of lines to output (without the newline characters) wrapped as
an OutputFormatter instance, to which this function will append.
"""
constructor_arg_fields = []
constructor_arg_field_names = {}
for field in msg_desc.fields:
if field.full_name in CONSTRUCTOR_ARG_FIELDS:
constructor_arg_fields.append(field)
constructor_arg_field_names[field.name] = 1
out.Line('/**')
for field in constructor_arg_fields:
out.Line(' * @param {%s} %s' %
(LocalModuleName(FieldTypeFor(descriptor, field, nullable=False)),
UnderscoreToCamelCase(field.name)))
out.Line(' * @constructor')
out.Line(' * @struct')
out.Line(' */')
arguments = ','.join(
[UnderscoreToCamelCase(f.name) for f in constructor_arg_fields])
out.Line('const %s = function(%s) {' %
(LocalModuleName(msg_desc.full_name), arguments))
out.PushIndent(2)
for field in msg_desc.fields:
# We generate ValidatorRules.directAttrLists, ValidatorRules.globalAttrs,
# and validator.ampLayoutAttrs instead.
if field.full_name == 'amp.validator.ValidatorRules.attr_lists':
continue
assigned_value = 'null'
if field.name in constructor_arg_field_names:
# field.name is also the parameter name.
assigned_value = UnderscoreToCamelCase(field.name)
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
# ValidationResult instances may be mutated by validator.js,
# so we can't share the empty arrays. But for all other
# instances, we do share.
if msg_desc.full_name == 'amp.validator.ValidationResult':
assigned_value = '[]'
else:
assigned_value = 'EMPTY_%s_ARRAY' % (
ElementTypeFor(descriptor, field).replace('.', '_'))
elif field.type == descriptor.FieldDescriptor.TYPE_BOOL:
assigned_value = str(field.default_value).lower()
elif field.type == descriptor.FieldDescriptor.TYPE_INT32:
assigned_value = str(field.default_value)
# TODO(johannes): Increase coverage for default values, e.g. enums.
type_name = FieldTypeFor(
descriptor, field, nullable=assigned_value == 'null')
out.Line('/**@export @type {%s} */' % LocalModuleName(type_name))
out.Line(
'this.%s = %s;' % (UnderscoreToCamelCase(field.name), assigned_value))
if msg_desc.full_name == 'amp.validator.CdataSpec':
out.Line('/** @type {?number} */')
out.Line('this.combinedDenyListedCdataRegex = null;')
if msg_desc.full_name == 'amp.validator.ValidatorRules':
out.Line('/** @type {!Array<!string>} */')
out.Line('this.internedStrings = [];')
out.Line('/** @type {!Array<!AttrSpec>} */')
out.Line('this.attrs = [];')
out.Line('/** @type {!Array<!Array<number>>} */')
out.Line('this.directAttrLists = [];')
out.Line('/** @type {!Array<number>} */')
out.Line('this.globalAttrs = [];')
out.Line('/** @type {!Array<number>} */')
out.Line('this.ampLayoutAttrs = [];')
out.PopIndent()
out.Line('};')
out.Line('exports.%s = %s;' % (LocalModuleName(
msg_desc.full_name), LocalModuleName(msg_desc.full_name)))
def PrintEnumFor(enum_desc, out):
"""Prints a Javascript enum for the given enum descriptor.
Args:
enum_desc: The descriptor for a particular enum type.
out: a list of lines to output (without the newline characters) wrapped as
an OutputFormatter instance, to which this function will append.
"""
out.Line('/**')
out.Line(' * @enum {string}')
out.Line(' */')
out.Line('%s = {' % LocalModuleName(enum_desc.full_name))
out.PushIndent(2)
names = []
for v in enum_desc.values:
names.append('%s' % v.name)
out.Line("%s: '%s'," % (v.name, v.name))
out.PopIndent()
out.Line('};')
out.Line('exports.%s = %s;' % (LocalModuleName(
enum_desc.full_name), LocalModuleName(enum_desc.full_name)))
out.Line('/** @type {!Array<string>} */')
out.Line('%s_NamesByIndex = ["%s"];' %
(LocalModuleName(enum_desc.full_name), '","'.join(names)))
out.Line('/** @type {!Array<!%s>} */' % LocalModuleName(enum_desc.full_name))
out.Line(
'%s_ValuesByIndex = [%s];' %
(LocalModuleName(enum_desc.full_name), ','.join([
'%s.%s' % (LocalModuleName(enum_desc.full_name), n) for n in names
])))
def TagSpecName(tag_spec):
"""Generates a name for a given TagSpec. This should be unique.
Same logic as getTagSpecName(tagSpec) in javascript. We choose the spec_name
if one is set, otherwise use the lower cased version of the tagname
Args:
tag_spec: A TagSpec protocol message instance.
Returns:
This TagSpec's name (string).
"""
if tag_spec.HasField('spec_name'):
return tag_spec.spec_name
return tag_spec.tag_name.lower()
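# Illustrative example (not taken from the spec file): a TagSpec with
# spec_name unset and tag_name 'AMP-IMG' is registered under 'amp-img',
# while a TagSpec with spec_name set is registered under that name verbatim.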
def MaybePrintMessageValue(descriptor, field_val, registry, out):
"""Print field_val if necessary, and return its message reference.
Args:
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
field_val: The value of a field, a proto message.
registry: an instance of MessageRegistry, used for mapping from
messages to message keys.
out: a list of lines to output (without the newline characters) wrapped as
an OutputFormatter instance, to which this function will append.
Returns:
This object's message reference, e.g. typically the variable name in
validator-generated.js.
"""
message_key = MessageKey(field_val)
if not registry.IsPrinted(message_key):
PrintObject(descriptor, field_val, registry, out)
return registry.MessageReferenceForKey(message_key)
def IsTrivialAttrSpec(attr):
"""Determines whether a given attr only has its name field set.
Args:
attr: an AttrSpec instance.
Returns:
true iff the only field that is set is the name field.
"""
return (attr.DESCRIPTOR.full_name == 'amp.validator.AttrSpec' and
attr.HasField('name') and len(attr.ListFields()) == 1)
def AssignedValueFor(descriptor, field_desc, field_val, registry, out):
"""Helper function for PrintObject: computes / assigns a value for a field.
Note that if the field is a complex field (a message), this function
may print the message and then reference it via a variable name.
Args:
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
field_desc: The descriptor for a particular field.
field_val: The value for a particular field.
registry: an instance of MessageRegistry, used for mapping from
messages to message keys.
out: a list of lines to output (without the newline characters) wrapped as
an OutputFormatter instance, to which this function will append.
Returns:
The rendered field value to assign.
"""
# First we establish how an individual value for this field is going
# to be rendered, that is, converted into a string.
render_value = lambda: None
if field_desc.full_name in TAG_SPEC_NAME_REFERENCE_FIELD:
render_value = lambda v: str(registry.MessageIdForTagSpecName(v))
elif field_desc.full_name in ATTR_LIST_NAME_REFERENCE_FIELD:
render_value = lambda v: str(registry.MessageIdForAttrListName(v))
elif field_desc.full_name in SYNTHETIC_REFERENCE_FIELD:
def InternOrReference(value):
if field_desc.type == descriptor.FieldDescriptor.TYPE_STRING:
return str(registry.InternString(value))
if IsTrivialAttrSpec(value):
return str(registry.InternString(value.name))
return str(registry.MessageIdForKey(MessageKey(value)))
render_value = InternOrReference
elif field_desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE:
render_value = (
lambda v: MaybePrintMessageValue(descriptor, v, registry, out))
else:
render_value = (
lambda v: ImportModuleName(ValueToString(descriptor, field_desc, v))) # pylint: disable=cell-var-from-loop
# Then we iterate over the field if it's repeated, or else just
# call the render function once.
if field_desc.label == descriptor.FieldDescriptor.LABEL_REPEATED:
elements = [render_value(v) for v in field_val]
return '[%s]' % ','.join(elements)
return render_value(field_val)
def PrintObject(descriptor, msg, registry, out):
"""Prints an object, by recursively constructing it.
This routine emits Javascript which will construct an object modeling
the provided message (in practice the ValidatorRules message).
  It references the classes and enums emitted by PrintClassFor and PrintEnumFor.
Args:
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
msg: A protocol message instance.
registry: an instance of MessageRegistry, used for mapping from
messages to message keys.
out: a list of lines to output (without the newline characters) wrapped as
an OutputFormatter instance, to which this function will append.
"""
this_message_key = MessageKey(msg)
registry.MarkPrinted(this_message_key)
field_and_assigned_values = []
for (field_desc, field_val) in msg.ListFields():
# We generate ValidatorRules.directAttrLists, ValidatorRules.globalAttrs,
# and validator.ampLayoutAttrs instead.
if field_desc.full_name == 'amp.validator.ValidatorRules.attr_lists':
continue
field_and_assigned_values.append(
(field_desc,
AssignedValueFor(descriptor, field_desc, field_val, registry, out)))
# Constructor with the appropriate arguments.
constructor_arg_values = [
value for (field, value) in field_and_assigned_values
if field.full_name in CONSTRUCTOR_ARG_FIELDS
]
this_message_reference = registry.MessageReferenceForKey(this_message_key)
# Construct object field values.
fields = []
fields_string = ''
for (field, value) in field_and_assigned_values:
if field.full_name in CONSTRUCTOR_ARG_FIELDS:
continue
fields.append('%s : %s' % (UnderscoreToCamelCase(field.name), value))
  # Construct the object with object literal field assignment. Rather than
  # assignment via dot notation, this is more concise and helps reduce the size
  # of the binary. We also use Object.assign so as not to blow away fields that
  # are set during constructor instantiation.
if fields:
fields_string = '{' + ','.join(fields) + '}'
out.Line(
'let %s = /** @type {!%s} */ (oa(new %s(%s), %s));' %
(this_message_reference, ImportModuleName(msg.DESCRIPTOR.full_name),
ImportModuleName(msg.DESCRIPTOR.full_name),
','.join(constructor_arg_values), fields_string))
else:
out.Line('let %s = new %s(%s);' %
(this_message_reference, ImportModuleName(
msg.DESCRIPTOR.full_name), ','.join(constructor_arg_values)))
if (msg.DESCRIPTOR.full_name == 'amp.validator.CdataSpec' and
msg.disallowed_cdata_regex):
combined_disallowed_cdata_regex = '(%s)' % '|'.join(
[r.regex for r in msg.disallowed_cdata_regex])
out.Line('%s.%s = %d;' %
(this_message_reference, 'combinedDenyListedCdataRegex',
registry.InternString(combined_disallowed_cdata_regex)))
# When importing the protocol buffer javascript module, we do so as
# protoGenerated.foo();
def ImportModuleName(module_name):
return module_name.replace('amp.validator', 'protoGenerated')
# When naming the protocol buffer javascript exports locally, we do with no
# namespace.
def LocalModuleName(module_name):
return module_name.replace('amp.validator.', '')
def GenerateValidatorGeneratedJs(specfile, validator_pb2, generate_proto_only,
generate_spec_only, text_format, html_format,
descriptor, out):
"""Main method for the code generator.
This method reads the specfile and emits Javascript to sys.stdout.
Args:
specfile: Path to validator.protoascii, the specfile to generate
Javascript from.
validator_pb2: The proto2 Python module generated from validator.proto.
generate_proto_only: If true, then only generate proto definition.
generate_spec_only: If true, then only generate spec.
text_format: The text_format module from the protobuf package, e.g.
google.protobuf.text_format.
html_format: Either a TagSpec.HtmlFormat enum value indicating which
HTML format the generated validator code should support,
or None indicating that all formats should be supported.
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
out: a list of lines to output (without the newline characters), to
which this function will append.
"""
# Only one of these flags should be true.
assert generate_proto_only is not generate_spec_only
if generate_spec_only:
assert specfile is not None
# First, find the descriptors and enums and generate Javascript
# classes and enums.
msg_desc_by_name = {}
enum_desc_by_name = {}
FindDescriptors(validator_pb2, msg_desc_by_name, enum_desc_by_name)
rules_obj = '%s.RULES' % validator_pb2.DESCRIPTOR.package
all_names = [rules_obj] + list(msg_desc_by_name.keys()) + list(enum_desc_by_name.keys())
all_names.sort()
out = OutputFormatter(out)
out.Line('//')
out.Line('// Generated by %s - do not edit.' % os.path.basename(__file__))
out.Line('//')
out.Line('')
if generate_proto_only:
out.Line("goog.module('amp.validator.protogenerated');")
if generate_spec_only:
out.Line("goog.module('amp.validator.createRules');")
out.Line(
"const protoGenerated = goog.require('amp.validator.protogenerated');")
out.Line('')
if generate_proto_only:
# We share the empty arrays between all specification object instances; this
# works because these arrays are never mutated. To make the Closure compiler
# happy, we use one empty array per element type.
# PS: It may also help execution performance in V8 to keep the element types
# separate but we did not verify that.
all_type_names = ['string', 'number', 'boolean'] + [
n for n in all_names if n in msg_desc_by_name
] + [n for n in all_names if n in enum_desc_by_name]
for name in all_type_names:
out.Line('/** @type {!Array<!%s>} */' % LocalModuleName(name))
out.Line('var EMPTY_%s_ARRAY = [];' % name.replace('.', '_'))
out.Line('')
for name in all_names:
if name in msg_desc_by_name:
PrintClassFor(descriptor, msg_desc_by_name[name], out)
elif name in enum_desc_by_name:
PrintEnumFor(enum_desc_by_name[name], out)
if generate_spec_only:
# Read the rules file, validator.protoascii by parsing it as a text
# message of type ValidatorRules.
rules = validator_pb2.ValidatorRules()
text_format.Merge(open(specfile).read(), rules)
# If html_format is set, only keep the tags which are relevant to it.
if html_format is not None:
filtered_rules = [
t for t in rules.tags
if not t.html_format or html_format in t.html_format
]
del rules.tags[:]
rules.tags.extend(filtered_rules)
# Add module/nomodule tagspecs for AMP ExtensionSpec tagspecs.
additional_tagspecs = []
for t in rules.tags:
if t.extension_spec and t.extension_spec.name:
if validator_pb2.HtmlFormat.Code.Value('AMP') in t.html_format:
tagspec = copy.deepcopy(t)
# Reset html_format to AMP
del tagspec.html_format[:]
tagspec.html_format.extend(
[validator_pb2.HtmlFormat.Code.Value('AMP')])
# Reset enabled_by to transformed
del tagspec.enabled_by[:]
tagspec.enabled_by.extend(['transformed'])
# Generate needed attr specs
crossorigin_attrspec = validator_pb2.AttrSpec()
crossorigin_attrspec.name = 'crossorigin'
crossorigin_attrspec.value.extend(['anonymous'])
crossorigin_attrspec.mandatory = True
nomodule_attrspec = validator_pb2.AttrSpec()
nomodule_attrspec.name = 'nomodule'
nomodule_attrspec.value.extend([''])
nomodule_attrspec.mandatory = True
type_attrspec = validator_pb2.AttrSpec()
type_attrspec.name = 'type'
type_attrspec.value.extend(['module'])
type_attrspec.mandatory = True
type_attrspec.dispatch_key = type_attrspec.NAME_VALUE_DISPATCH
# Create module and nomodule extension tagspecs with spec_names
module_tagspec = copy.deepcopy(tagspec)
base_spec_name = module_tagspec.extension_spec.name
if module_tagspec.extension_spec.version_name:
base_spec_name = (
module_tagspec.extension_spec.name + ' ' +
module_tagspec.extension_spec.version_name)
module_tagspec.spec_name = base_spec_name + (' module extension '
'script')
nomodule_tagspec = copy.deepcopy(tagspec)
nomodule_tagspec.spec_name = base_spec_name + (' nomodule extension'
' script')
# Module extension specifics
# Add requires/satisfies pair for module/nomodule
module_tagspec.requires.extend([nomodule_tagspec.spec_name])
module_tagspec.satisfies.extend([module_tagspec.spec_name])
# Add attr specs
module_tagspec.attrs.extend([crossorigin_attrspec, type_attrspec])
# Nomodule extension specifics
# Add requires/satisfies pair for module/nomodule
nomodule_tagspec.requires.extend([module_tagspec.spec_name])
nomodule_tagspec.satisfies.extend([nomodule_tagspec.spec_name])
# Add attr specs
nomodule_tagspec.attrs.extend([nomodule_attrspec])
# Add tag specs
additional_tagspecs.extend([module_tagspec, nomodule_tagspec])
rules.tags.extend(additional_tagspecs)
registry = MessageRegistry()
# Register the tagspecs so they have ids 0 - rules.tags.length. This means
# that rules.tags[tagspec_id] works.
for t in rules.tags:
registry.RegisterTagSpec(t)
# Register the attrlists so they have ids 0 - rules.attr_lists.length.
# This means that rules.attr_lists[attr_list_id] works.
for a in rules.attr_lists:
registry.RegisterAttrList(a)
out.Line('/**')
out.Line(' * @return {!%s}' % ImportModuleName(rules.DESCRIPTOR.full_name))
out.Line(' */')
out.Line('const createRules = function() {')
# Shorthand object.assign to reduce the binary size of the validator rules
# generated.
out.Line('const oa = Object.assign;')
out.PushIndent(2)
PrintObject(descriptor, rules, registry, out)
# We use this below to reference the variable holding the rules instance.
rules_reference = registry.MessageReferenceForKey(MessageKey(rules))
# Create a mapping from attr spec ids to AttrSpec instances, deduping the
# AttrSpecs. Then sort by these ids, so now we get a dense array starting
# with the attr that has attr spec id 0 - number of attr specs.
attrs_by_id = {}
for attr_container in list(rules.attr_lists) + list(rules.tags):
for attr in attr_container.attrs:
if not IsTrivialAttrSpec(attr):
attrs_by_id[registry.MessageIdForKey(MessageKey(attr))] = attr
sorted_attrs = [attr for (_, attr) in sorted(attrs_by_id.items())]
# Emit the attr specs, then assign a list of references to them to
# Rules.attrs.
for attr in sorted_attrs:
PrintObject(descriptor, attr, registry, out)
out.Line('%s.attrs = [%s];' % (rules_reference, ','.join([
registry.MessageReferenceForKey(MessageKey(a)) for a in sorted_attrs
])))
    # We emit the attr lists as arrays of arrays of numbers (which are
    # the attr ids), and treat the globalAttrs and the ampLayoutAttrs
    # separately for fast access.
direct_attr_lists = []
global_attrs = []
amp_layout_attrs = []
unique_attr_list_names = set()
for attr_list in rules.attr_lists:
assert attr_list.name not in unique_attr_list_names, attr_list.name
unique_attr_list_names.add(attr_list.name)
assert attr_list.attrs
attr_id_list = []
for attr in attr_list.attrs:
if IsTrivialAttrSpec(attr):
attr_id_list.append(registry.InternString(attr.name))
else:
attr_id_list.append(registry.MessageIdForKey(MessageKey(attr)))
if attr_list.name == '$GLOBAL_ATTRS':
global_attrs = attr_id_list
direct_attr_lists.append([])
elif attr_list.name == '$AMP_LAYOUT_ATTRS':
amp_layout_attrs = attr_id_list
direct_attr_lists.append([])
else:
direct_attr_lists.append(attr_id_list)
out.Line('%s.directAttrLists = %s;' % (rules_reference,
json.dumps(direct_attr_lists)))
out.Line('%s.globalAttrs = %s;' % (rules_reference,
json.dumps(global_attrs)))
out.Line('%s.ampLayoutAttrs = %s;' % (rules_reference,
json.dumps(amp_layout_attrs)))
# We emit these after the last call to registry.InternString.
out.Line('%s.internedStrings = %s;' %
(rules_reference, json.dumps(registry.InternedStrings())))
out.Line('return %s;' % rules_reference)
out.PopIndent()
out.Line('}')
out.Line('exports.createRules = createRules;')
out.Line('')
def GenerateValidatorGeneratedJson(specfile, validator_pb2, text_format,
json_format, out):
"""Generates a JSON file with definitions from validator.protoascii.
This method reads the specfile and emits JSON to out.
Args:
specfile: Path to validator.protoascii, the specfile to generate
      JSON from.
validator_pb2: The proto2 Python module generated from validator.proto.
text_format: The text_format module from the protobuf package, e.g.
google.protobuf.text_format.
json_format: The json_format module from the protobuf package, e.g.
google.protobuf.json_format.
out: a list of lines to output (without the newline characters), to
which this function will append.
"""
rules = validator_pb2.ValidatorRules()
text_format.Merge(open(specfile).read(), rules)
out.append(json_format.MessageToJson(rules))
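# Illustrative usage sketch (the generated proto module name and the output
# path below are assumptions, not part of this file's API):
#
#   from google.protobuf import text_format, json_format
#   import validator_pb2
#
#   lines = []
#   GenerateValidatorGeneratedJson('validator.protoascii', validator_pb2,
#                                  text_format, json_format, lines)
#   with open('validator_generated.json', 'w') as f:
#       f.write('\n'.join(lines))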
|
ampproject/amppackager
|
vendor/github.com/ampproject/amphtml/validator/validator_gen_js.py
|
Python
|
apache-2.0
| 37,258
|
[
"VisIt"
] |
cf925bc9772b04de218b0cc06307556fdfa4c113c0aa93d6a4e738e72f371855
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Kyle A. Beauchamp
# Contributors: Robert McGibbon
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#############################################################################
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, absolute_import
import os
import sys
from distutils.spawn import find_executable as _find_executable
import numpy as np
from mdtraj.utils import import_
from mdtraj.utils import enter_temp_directory
##############################################################################
# Globals
##############################################################################
# Possible names for the external commands -- these are expected
# to be found in the PATH.
SHIFTX2 = ['shiftx2.py']
SPARTA_PLUS = ['sparta+', 'SPARTA+', 'SPARTA+.linux']
PPM = ['ppm_linux_64.exe']
__all__ = ['chemical_shifts_shiftx2', 'chemical_shifts_ppm', 'chemical_shifts_spartaplus', "reindex_dataframe_by_atoms"]
def find_executable(names):
for possible in names:
result = _find_executable(possible)
if result is not None:
return result
return None
##############################################################################
# Code
##############################################################################
def compute_chemical_shifts(trj, model="shiftx2", **kwargs):
"""Predict chemical shifts of a trajectory using ShiftX2.
Parameters
----------
trj : Trajectory
Trajectory to predict shifts for.
model : str, optional, default="shiftx2"
The program to use for calculating chemical shifts. Must be one
of shiftx2, ppm, or sparta+
Returns
-------
results : pandas DataFrame
Dataframe containing results, with index consisting of
(resSeq, atom_name) pairs and columns for each frame in trj.
Notes
-----
    You must have the appropriate chemical shift prediction software installed
and in your executable path.
Chemical shift prediction is for PROTEIN atoms; trajectory objects
with ligands, solvent, ions, or other non-protein components may give
UNKNOWN RESULTS.
Please cite the appropriate reference, see docstrings for chemical_shifts_*
for the various possible models.
"""
if model == "shiftx2":
return chemical_shifts_shiftx2(trj, **kwargs)
elif model == "ppm":
return chemical_shifts_ppm(trj, **kwargs)
elif model == "sparta+":
return chemical_shifts_spartaplus(trj, **kwargs)
else:
raise(ValueError("model must be one of shiftx2, ppm, or sparta+"))
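# Usage sketch (illustrative only; requires one of the external predictors,
# e.g. SHIFTX2 or SPARTA+, installed and on the PATH, and a real input file):
#
#   import mdtraj as md
#   trj = md.load('protein.pdb')
#   shifts = compute_chemical_shifts(trj, model='sparta+')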
def chemical_shifts_shiftx2(trj, pH=5.0, temperature=298.00):
"""Predict chemical shifts of a trajectory using ShiftX2.
Parameters
----------
trj : Trajectory
Trajectory to predict shifts for.
pH : float, optional, default=5.0
pH value which gets passed to the ShiftX2 predictor.
temperature : float, optional, default=298.00
Temperature which gets passed to the ShiftX2 predictor.
Returns
-------
results : pandas DataFrame
Dataframe containing results, with index consisting of
(resSeq, atom_name) pairs and columns for each frame in trj.
Notes
-----
You must have ShiftX2 available on your path; see (http://www.shiftx2.ca/).
Chemical shift prediction is for PROTEIN atoms; trajectory objects
with ligands, solvent, ions, or other non-protein components may give
UNKNOWN RESULTS.
Please cite the appropriate reference below.
References
----------
.. [1] Beomsoo Han, Yifeng Liu, Simon Ginzinger, and David Wishart.
"SHIFTX2: significantly improved protein chemical shift
prediction." J. Biomol. NMR, 50, 1 43-57 (2011)
"""
pd = import_('pandas')
binary = find_executable(SHIFTX2)
if binary is None:
raise OSError('External command not found. Looked for %s in PATH. `chemical_shifts_shiftx2` requires the external program SHIFTX2, available at http://www.shiftx2.ca/' % ', '.join(SHIFTX2))
results = []
with enter_temp_directory():
for i in range(trj.n_frames):
fn = './trj%d.pdb' % i
trj[i].save(fn)
cmd = "%s -b %s -p %.1f -t %.2f" % (binary, fn, pH, temperature)
return_flag = os.system(cmd)
if return_flag != 0:
raise(IOError("Could not successfully execute command '%s', check your ShiftX2 installation or your input trajectory." % cmd))
for i in range(trj.n_frames):
try:
d = pd.read_csv("./trj%d.pdb.cs" % i)
except IOError:
print(os.listdir('.'), file=sys.stderr)
raise
d.rename(columns={"NUM": "resSeq", "RES": "resName", "ATOMNAME": "name"}, inplace=True)
d["frame"] = i
results.append(d)
results = pd.concat(results)
results = results.pivot_table(rows=["resSeq", "name"], cols="frame", values="SHIFT")
return results
def chemical_shifts_ppm(trj):
"""Predict chemical shifts of a trajectory using ppm.
Parameters
----------
trj : Trajectory
Trajectory to predict shifts for.
Returns
-------
results : pandas.DataFrame
Dataframe containing results, with index consisting of
(resSeq, atom_name) pairs and columns for each frame in trj.
Notes
-----
You must have ppm available on your path; see
(http://spin.ccic.ohio-state.edu/index.php/download/index).
Chemical shift prediction is for PROTEIN atoms; trajectory objects
with ligands, solvent, ions, or other non-protein components may give
UNKNOWN RESULTS.
Please cite the appropriate reference below.
References
----------
.. [1] Li, DW, and Bruschweiler, R. "PPM: a side-chain and backbone chemical
shift predictor for the assessment of protein conformational ensembles."
J Biomol NMR. 2012 Nov;54(3):257-65.
"""
pd = import_('pandas')
binary = find_executable(PPM)
first_resSeq = trj.top.residue(0).resSeq
if binary is None:
raise OSError('External command not found. Looked for %s in PATH. `chemical_shifts_ppm` requires the external program PPM, available at http://spin.ccic.ohio-state.edu/index.php/download/index' % ', '.join(PPM))
with enter_temp_directory():
trj.save("./trj.pdb")
cmd = "%s -pdb trj.pdb -mode detail" % binary
return_flag = os.system(cmd)
if return_flag != 0:
raise(IOError("Could not successfully execute command '%s', check your PPM installation or your input trajectory." % cmd))
d = pd.read_table("./bb_details.dat", index_col=False, header=None, sep="\s*").drop([3], axis=1)
d = d.rename(columns={0: "resSeq", 1: "resName", 2: "name"})
d["resSeq"] += first_resSeq - 1 # Fix bug in PPM that reindexes to 1
d = d.drop("resName", axis=1)
d = d.set_index(["resSeq", "name"])
d.columns = np.arange(trj.n_frames)
d.columns.name = "frame"
return d
def _get_lines_to_skip(filename):
"""Determine the number of comment lines in a SPARTA+ output file."""
format_string = """FORMAT %4d %4s %4s %9.3f %9.3f %9.3f %9.3f %9.3f %9.3f"""
handle = open(filename)
for i, line in enumerate(handle):
if line.find(format_string) != -1:
return i + 2
raise(Exception("No format string found in SPARTA+ file!"))
def chemical_shifts_spartaplus(trj, rename_HN=True):
"""Predict chemical shifts of a trajectory using SPARTA+.
Parameters
----------
trj : Trajectory
Trajectory to predict shifts for.
rename_HN : bool, optional, default=True
SPARTA+ calls the amide proton "HN" instead of the standard "H".
When True, this option renames the output as "H" to match the PDB
and BMRB nomenclature.
Returns
-------
results : pandas.DataFrame
Dataframe containing results, with index consisting of
(resSeq, atom_name) pairs and columns for each frame in trj.
Notes
-----
You must have SPARTA+ available on your path; see
(http://spin.niddk.nih.gov/bax/software/SPARTA+/). Also, the SPARTAP_DIR
environment variable must be set so that SPARTA+ knows where to find
its database files.
Chemical shift prediction is for PROTEIN atoms; trajectory objects
with ligands, solvent, ions, or other non-protein components may give
UNKNOWN RESULTS.
Please cite the appropriate reference below.
References
----------
.. [1] Shen, Y., and Bax, Ad. "SPARTA+: a modest improvement in empirical
NMR chemical shift prediction by means of an artificial neural network."
J. Biomol. NMR, 48, 13-22 (2010)
"""
pd = import_('pandas')
binary = find_executable(SPARTA_PLUS)
if binary is None:
raise OSError('External command not found. Looked for %s in PATH. `chemical_shifts_spartaplus` requires the external program SPARTA+, available at http://spin.niddk.nih.gov/bax/software/SPARTA+/' % ', '.join(SPARTA_PLUS))
names = ["resSeq", "resName", "name", "SS_SHIFT", "SHIFT", "RC_SHIFT", "HM_SHIFT", "EF_SHIFT", "SIGMA"]
with enter_temp_directory():
for i in range(trj.n_frames):
trj[i].save("./trj%d.pdb" % i)
cmd = "%s -in %s" % (binary, ' '.join("trj%d.pdb" % i for i in range(trj.n_frames)))
return_flag = os.system(cmd)
if return_flag != 0:
raise(IOError("Could not successfully execute command '%s', check your SPARTA+ installation or your input trajectory." % cmd))
lines_to_skip = _get_lines_to_skip("trj0_pred.tab")
results = []
for i in range(trj.n_frames):
d = pd.read_table("./trj%d_pred.tab" % i, names=names, header=None, sep="\s*", skiprows=lines_to_skip)
d["frame"] = i
results.append(d)
results = pd.concat(results)
if rename_HN:
results.name[results.name == "HN"] = "H"
results = results.pivot_table(rows=["resSeq", "name"], cols="frame", values="SHIFT")
return results
def reindex_dataframe_by_atoms(trj, frame):
"""Reindex chemical shift output to use atom number (serial) indexing.
Parameters
----------
trj : Trajectory
Trajectory to predict shifts for.
frame : pandas.DataFrame
Dataframe containing results, with index consisting of
(resSeq, atom_name) pairs and columns for each frame in trj.
Returns
-------
new_frame : pandas.DataFrame
Dataframe containing results, with index consisting of atom
indices (AKA the 'serial' entry in a PDB). Columns correspond to
each frame in trj.
Notes
-----
Be aware that this function may DROP predictions if the atom naming
is different between the input trajectory and the output of various
chemical shift prediction tools.
"""
top, bonds = trj.top.to_dataframe()
top["serial"] = top.index
top = top.set_index(["resSeq", "name"])
new_frame = frame.copy()
new_frame["serial"] = top.ix[new_frame.index].serial
new_frame = new_frame.dropna().reset_index().set_index("serial").drop(["resSeq", "name"], axis=1)
return new_frame
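# Sketch of reindexing predicted shifts by atom serial (illustrative only;
# `shifts` is assumed to come from one of the predictors above):
#
#   by_serial = reindex_dataframe_by_atoms(trj, shifts)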
|
kyleabeauchamp/mdtraj
|
mdtraj/nmr/shift_wrappers.py
|
Python
|
lgpl-2.1
| 12,264
|
[
"MDTraj"
] |
f7412dc25e2c7d426b69ff74009864e6ee6bfbb458d57550fc7ebf8a732416cc
|
# -*- coding: utf-8 -*-
"""
"""
from __future__ import absolute_import
from ._utils import _cd
from ..unitquantity import UnitConstant
N_A = L = Avogadro_constant = UnitConstant(
'Avogadro_constant',
_cd('Avogadro constant'),
symbol='N_A'
)
n_0 = Loschmidt_constant = UnitConstant(
'Loschmidt_constant',
_cd('Loschmidt constant (273.15 K, 101.325 kPa)'),
symbol='n_0',
u_symbol='n₀'
)
R = molar_gas_constant = UnitConstant(
'molar_gas_constant',
_cd('molar gas constant'),
symbol='R'
)
k = Boltzmann_constant = UnitConstant(
'Boltzmann_constant',
_cd('Boltzmann constant'),
symbol='k'
)
Boltzmann_constant_in_eV_per_K = UnitConstant(
'Boltzmann_constant_in_eV_per_K',
_cd('Boltzmann constant in eV/K')
)
Boltzmann_constant_in_Hz_per_K = UnitConstant(
'Boltzmann_constant_in_Hz_per_K',
_cd('Boltzmann constant in Hz/K')
)
Boltzmann_constant_in_inverse_meters_per_kelvin = UnitConstant(
'Boltzmann_constant_in_inverse_meters_per_kelvin',
_cd('Boltzmann constant in inverse meters per kelvin')
)
M_u = molar_mass_constant = UnitConstant(
'molar_mass_constant',
_cd('molar mass constant'),
symbol='M_u',
u_symbol='Mᵤ'
)
molar_volume_of_ideal_gas_ST_100kPa = UnitConstant(
'molar_volume_of_ideal_gas_ST_100kPa',
_cd('molar volume of ideal gas (273.15 K, 100 kPa)')
)
molar_volume_of_ideal_gas_STP = UnitConstant(
'molar_volume_of_ideal_gas_STP',
_cd('molar volume of ideal gas (273.15 K, 101.325 kPa)')
)
molar_volume_of_silicon = UnitConstant(
'molar_volume_of_silicon',
_cd('molar volume of silicon')
)
del UnitConstant, _cd
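# Illustrative sketch (commented out; assumes the parent `quantities` package
# is importable and that UnitConstant behaves like a Quantity):
#
#   import quantities as pq
#   from quantities.constants.statisticalmechanics import Boltzmann_constant
#   # thermal energy k*T at T = 298.15 K, expressed in joules
#   E = (Boltzmann_constant * 298.15 * pq.K).rescale(pq.J)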
|
AdaptiveApplications/carnegie
|
tarc_bus_locator_client/quantities-0.10.1/quantities/constants/statisticalmechanics.py
|
Python
|
mit
| 1,648
|
[
"Avogadro"
] |
f2df8b6e65a3aab739ec39295f5815b614b0b4c2f68bd5226a573aa81fbb03b8
|
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from os.path import join
from . import mp_cent_dens
from . import add_version_plot
from . import prep_plots
from . prep_plots import grid_x, grid_y, figsize_x, figsize_y
def main(npd, cld_i, pd, clp):
"""
Make A2 block plots.
"""
fig = plt.figure(figsize=(figsize_x, figsize_y))
gs = gridspec.GridSpec(grid_y, grid_x)
add_version_plot.main()
# Obtain plotting parameters and data.
x_min, x_max, y_min, y_max = prep_plots.frame_max_min(
cld_i['x'], cld_i['y'])
asp_ratio = prep_plots.aspect_ratio(x_min, x_max, y_min, y_max)
coord, x_name, y_name = "deg", "ra", "dec"
st_sizes_arr = prep_plots.star_size(cld_i['mags'][0])
_, y_ax = prep_plots.ax_names(pd['colors'][0], pd['filters'][0], 'mag')
# Structure plots.
arglist = [
# pl_full_frame: x,y finding chart of full frame.
[gs, fig, pd['project'], clp['x_offset'], clp['y_offset'], x_name,
y_name, coord, x_min, x_max, y_min, y_max, asp_ratio, clp['kde_cent'],
cld_i['x'], cld_i['y'], st_sizes_arr, clp['clust_rad']],
# pl_densmap: 2D Gaussian convolved histogram.
[gs, fig, asp_ratio, x_name, y_name, coord, clp['bw_list'],
clp['kde_cent'], clp['frame_kde_cent'], clp['fr_dens'],
clp['clust_rad']],
# pl_knn_dens
[gs, fig, pd['plot_style'], asp_ratio, x_min, x_max, y_min, y_max,
x_name, y_name, coord, clp['NN_dd'], clp['xy_filtered'],
clp['fr_dens'], clp['NN_dist'], pd['project'], clp['x_offset'],
clp['y_offset'], clp['kde_cent'], clp['clust_rad']],
# pl_field_dens
[gs, pd['plot_style'], coord, pd['fdens_method'], clp['xy_cent_dist'],
clp['fr_dens'], clp['fdens_min_d'], clp['fdens_lst'],
clp['fdens_std_lst'], clp['field_dens_d'], clp['field_dens'],
clp['field_dens_std']],
# pl_centdist_vs_mag
[gs, fig, pd['plot_style'], y_ax, coord, cld_i['x'], cld_i['y'],
cld_i['mags'][0], clp['kde_cent'], clp['clust_rad'],
clp['integ_dists'], clp['integ_mags']]
]
for n, args in enumerate(arglist):
mp_cent_dens.plot(n, *args)
fig.tight_layout()
fname = join(npd['output_subdir'], npd['clust_name'] + '_A2' + npd['ext'])
plt.savefig(fname)
# Close to release memory.
plt.clf()
plt.close("all")
|
asteca/ASteCA
|
packages/out/make_A2_plot.py
|
Python
|
gpl-3.0
| 2,412
|
[
"Gaussian"
] |
145c12c57aceee5b1e6386ab816e7e2f4a2ef0cad37812c2be6a6e6d60bd54d4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#This module calculates statistics and saves it to a file
import numpy as np
import stat_functions as stat
from scipy import stats as scstats
import functions as fu
import loglikelihood as logl
from tkinter import font as tkfont
import tkinter as tk
STANDARD_LENGTH=8
class statistics:
def __init__(self,results,correl_vars=None,descriptives_vars=None,simple_statistics=False,name=None):
"""This class calculates, stores and prints statistics and statistics"""
self.G=results.direction.G
self.H=results.direction.H
self.ll=results.ll
self.panel=results.panel
self.ll.standardize()
self.Rsq, self.Rsqadj, self.LL_ratio,self.LL_ratio_OLS=stat.goodness_of_fit(self.ll,True)
self.LL_restricted=logl.LL(self.panel.args.args_restricted, self.panel).LL
self.LL_OLS=logl.LL(self.panel.args.args_OLS, self.panel).LL
self.name=name
self.no_ac_prob,rhos,RSqAC=stat.breusch_godfrey_test(self.panel,self.ll,10)
self.norm_prob=stat.JB_normality_test(self.ll.e_norm,self.panel)
self.multicollinearity_check(self.G)
self.data_correlations,self.data_statistics=self.correl_and_statistics(correl_vars,descriptives_vars)
self.adf_test=stat.adf_test(self.panel,self.ll,10)
#self.save_stats()
def correl_and_statistics(self,correl_vars,descriptives_vars):
panel=self.panel
X_names=[]
X=[]
correl_X,correl_names=get_variables(panel, correl_vars)
descr_X,descr_names=get_variables(panel, descriptives_vars)
c=stat.correl(correl_X)
c=np.concatenate((correl_names,c),0)
n=descr_X.shape[1]
vstat=np.concatenate((np.mean(descr_X,0).reshape((n,1)),
np.std(descr_X,0).reshape((n,1)),
np.min(descr_X,0).reshape((n,1)),
np.max(descr_X,0).reshape((n,1))),1)
vstat=np.concatenate((descr_names.T,vstat),1)
vstat=np.concatenate(([['','Mean','SD','min','max']],vstat),0)
correl_names=np.append([['']],correl_names,1).T
c=np.concatenate((correl_names,c),1)
return c,vstat
def multicollinearity_check(self,G):
"Returns a variance decompostition matrix with headings"
panel=self.panel
vNames=['Max(var_proportion)','CI:']+panel.args.names_v
k=len(vNames)-1
matr=stat.var_decomposition(X=G,concat=True)
matr=np.round(matr,3)
maxp=np.max(matr[:,1:],1).reshape((matr.shape[0],1))
matr=np.concatenate((maxp,matr),1)
matr=np.concatenate(([vNames],matr))
self.MultiColl=matr
def save_stats(self):
"""Saves the various statistics assigned to self"""
ll=self.ll
panel=self.panel
N,T,k=panel.X.shape
output=dict()
name_list=[]
add_output(output,name_list,'Information',[
['Description:',panel.input.descr],
['LL:',ll.LL],
['Number of IDs:',N],
['Maximum number of dates:',T],
['A) Total number of observations:',panel.NT_before_loss],
['B) Observations lost to GARCH/ARIMA',panel.tot_lost_obs],
[' Total after loss of observations (A-B):',panel.NT],
['C) Number of Random/Fixed Effects coefficients:',N],
['D) Number of Random/Fixed Effects coefficients in the variance process:',N],
['E) Number of coefficients:',panel.args.n_args],
['DF (A-B-C-D-E):',panel.df],
['RSq:',self.Rsq],
['RSq Adj:',self.Rsqadj],
['LL-ratio:',self.LL_ratio],
['no ac_prob:',self.no_ac_prob],
['norm prob:',self.norm_prob],
['ADF (dicky fuller):',self.adf_test, "1% and 5 % lower limit of confidence intervals, respectively"],
['Dependent:',panel.input.Y_names]
])
add_output(output,name_list,'Regression',self.reg_output)
add_output(output,name_list,'Multicollinearity',self.MultiColl)
add_output(output,name_list,'Descriptive statistics',self.data_statistics)
add_output(output,name_list,'Correlation Matrix',self.data_correlations)
add_output(output,name_list,'Number of dates in each ID',panel.T_arr.reshape((N,1)))
output_table=[['']]
output_positions=['']
for i in name_list:
if i!='Statistics':
output_table.extend([[''],['']])
pos=len(output_table)+1
output_table.extend([[i+':']])
output_table.extend(output[i])
output_positions.append('%s~%s~%s~%s' %(i,pos,len(output[i]),len(output[i][0])))
output_table[0]=output_positions
if self.name is None:
fname=panel.input.descr.replace('\n','').replace('\r', '')
else:
fname=self.name
if len(fname)>65:
fname=fname[:30]+'...'+fname[-30:]
fu.savevar(output_table,fname+'.csv')
self.output_dict=output
def add_variable(name,panel,names,variables):
if name in panel.dataframe.keys():
d=dict(panel.dataframe[[name]])
if type(d)==np.ndarray:
names.append(name)
variables.append(d)
def get_variables(panel,input_str):
v=fu.split_input(input_str)
names=[]
variables=[]
if not v is None:
for i in v:
add_variable(i, panel, names, variables)
if v is None or len(names)==0:
for i in panel.dataframe.keys():
add_variable(i, panel, names, variables)
n=len(names)
X=np.concatenate(variables,1)
names=np.array(names).reshape((1,n))
return X,names
def add_output(output_dict,name_list,name,table):
if type(table)==np.ndarray:
table=np.concatenate(([[''] for i in range(len(table))],table),1)
output_dict[name]=table
name_list.append(name)
|
espensirnes/paneltime
|
build/lib.win-amd64-3.7/paneltime/stat_object.py
|
Python
|
gpl-3.0
| 5,294
|
[
"ADF"
] |
db93ded9862e796782301579c88d67a1651423ecffe400bdbe9b1f3d5fdbb30e
|
#
# A number of functions which can be used to generate different types of noise,
# which can then be added to model output to simulate experimental data.
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
def independent(sigma, shape):
r"""
Generates independent Gaussian noise iid :math:`\mathcal{N}(0,\sigma)`.
Returns an array of shape ``shape`` containing the generated noise.
Parameters
----------
sigma
The standard deviation of the noise. Must be zero or greater.
shape
A tuple (or sequence) defining the shape of the generated noise array.
Example
-------
::
values = model.simulate(parameters, times)
noisy_values = values + noise.independent(5, values.shape)
"""
import numpy as np
# Don't test sigma/shape: handled by numpy for higher-dimensions etc.!
return np.random.normal(0, sigma, shape)
def ar1(rho, sigma, n):
r"""
Generates first-order autoregressive (AR1) noise that can be added to a
vector of simulated data.
The generated noise follows the distribution
.. math::
e(t) = \rho e(t - 1) + v(t),
where :math:`v(t) \stackrel{\text{iid}}{\sim }\mathcal{N}(0, \sigma
\sqrt{1 - \rho ^2})`.
Returns an array of length ``n`` containing the generated noise.
Parameters
----------
rho
Determines the magnitude of the noise :math:`\rho` (see above). Must
be less than 1.
sigma
The marginal standard deviation :math:`\sigma` of ``e(t)`` (see above).
Must be greater than zero.
n
The length of the signal. (Only single time-series are supported.)
Example
-------
::
values = model.simulate(parameters, times)
noisy_values = values + noise.ar1(0.9, 5, len(values))
"""
import numpy as np
if abs(rho) >= 1:
raise ValueError(
'Magnitude of rho must be less than 1 (otherwise the process'
' is non-stationary).')
if sigma <= 0:
raise ValueError('Standard deviation must be positive.')
n = int(n)
if n < 1:
raise ValueError('Number of values to generate must be at least one.')
# Generate noise
s = sigma * np.sqrt(1 - rho**2)
v = np.random.normal(0, s, n)
v[0] = np.random.rand()
for t in range(1, n):
v[t] += rho * v[t - 1]
return v
def arma11(rho, theta, sigma, n):
r"""
Generates an ARMA(1,1) error process of the form:
.. math::
e(t) = (1 - \rho) + \rho * e(t - 1) + v(t) + \theta * v(t-1),
where :math:`v(t) \stackrel{\text{iid}}{\sim }\mathcal{N}(0, \sigma ')`,
and
.. math::
\sigma ' = \sigma \sqrt{\frac{1 - \rho ^ 2}{1 + 2 \theta \rho +
\theta ^ 2}}.
"""
import numpy as np
if abs(rho) >= 1:
raise ValueError(
'Magnitude of rho must be less than 1 (otherwise the process'
' is non-stationary).')
if abs(theta) >= 1.0:
raise ValueError('Absolute value of theta must be less than 1 ' +
'so that the process is invertible.')
if sigma <= 0:
raise ValueError('Standard deviation must be positive.')
n = int(n)
if n < 1:
raise ValueError('Number of values to generate must be at least one.')
# Generate noise
s = sigma * np.sqrt((1 - rho**2) / (1 + 2 * theta * rho + theta**2))
v = np.random.normal(0, s, n)
e = np.zeros(n)
e[0] = v[0]
for i in range(1, n):
e[i] = rho * e[i - 1] + v[i] + theta * v[i - 1]
return e
def ar1_unity(rho, sigma, n):
"""
Generates noise following an autoregressive order 1 process of mean 1, that
a vector of simulated data can be multiplied with.
Returns an array of length ``n`` containing the generated noise.
Parameters
----------
rho
Determines the magnitude of the noise (see :meth:`ar1`). Must be less
than or equal to 1.
sigma
        The marginal standard deviation of ``e(t)`` (see :meth:`ar1`).
Must be greater than 0.
n : int
The length of the signal. (Only single time-series are supported.)
Example
-------
::
values = model.simulate(parameters, times)
noisy_values = values * noise.ar1_unity(0.5, 0.8, len(values))
"""
import numpy as np
if abs(rho) >= 1:
raise ValueError(
'Magnitude of rho must be less than 1 (otherwise the process is'
' non-stationary).')
if sigma <= 0:
raise ValueError('Standard deviation must be positive.')
n = int(n)
if n < 1:
raise ValueError('Number of values to generate must be at least one.')
# Generate noise
v = np.random.normal(0, sigma * np.sqrt(1 - rho**2), n + 1)
v[0] = 1
for t in range(1, n + 1):
v[t] += (1 - rho) + rho * v[t - 1]
return v[1:]
def arma11_unity(rho, theta, sigma, n):
"""
Generates an ARMA(1,1) error process of the form:
``e(t) = (1 - rho) + rho * e(t - 1) + v(t) + theta * v[t-1]``,
where ``v(t) ~ iid N(0, sigma')``,
and
``sigma' = sigma * sqrt((1 - rho^2) / (1 + 2 * theta * rho + theta^2))``.
Returns an array of length ``n`` containing the generated noise.
Parameters
----------
rho
Determines the long-run persistence of the noise (see :meth:`ar1`).
Must be less than 1.
theta
Contributes to first order autocorrelation of noise. Must be less
than 1.
sigma
        The marginal standard deviation of ``e(t)`` (see :meth:`ar1`).
Must be greater than 0.
n : int
The length of the signal. (Only single time-series are supported.)
Example
-------
::
values = model.simulate(parameters, times)
        noisy_values = values * noise.arma11_unity(0.5, 0.2, 0.8, len(values))
"""
import numpy as np
if abs(rho) >= 1:
raise ValueError(
'Magnitude of rho must be less than 1 (otherwise the process is'
' explosive).')
if abs(theta) >= 1.0:
raise ValueError('Absolute value of theta must be less than 1 ' +
'so that the process is invertible.')
if sigma <= 0:
raise ValueError('Standard deviation must be positive.')
n = int(n)
if n < 1:
raise ValueError('Number of values to generate must be at least one.')
# Generate noise
s = sigma * np.sqrt((1 - rho**2) / (1 + 2 * theta * rho + theta**2))
v = np.random.normal(0, s, n + 1)
e = np.zeros(n)
e[0] = v[1]
for i in range(1, n):
e[i] = (1 - rho) + rho * e[i - 1] + v[i] + theta * v[i - 1]
return e
def multiplicative_gaussian(eta, sigma, f):
r"""
Generates multiplicative Gaussian noise for a single output.
With multiplicative noise, the measurement error scales with the magnitude
of the output. Given a model taking the form,
.. math::
X(t) = f(t; \theta) + \epsilon(t)
multiplicative Gaussian noise models the noise term as:
.. math::
\epsilon(t) = f(t; \theta)^\eta v(t)
where v(t) is iid Gaussian:
.. math::
v(t) \stackrel{\text{ iid }}{\sim} \mathcal{N}(0, \sigma)
The output magnitudes ``f`` are required as an input to this function. The
noise terms are returned in an array of the same shape as ``f``.
Parameters
----------
``eta``
The exponential power controlling the rate at which the noise scales
with the output. The argument must be either a float (for single-output
or multi-output noise) or an array_like of floats (for multi-output
noise only, with one value for each output).
``sigma``
The baseline standard deviation of the noise (must be greater than
zero). The argument must be either a float (for single-output
or multi-output noise) or an array_like of floats (for multi-output
noise only, with one value for each output).
``f``
A NumPy array giving the time-series for the output over time. For
multiple outputs, the array should have shape ``(n_outputs, n_times)``.
"""
import numpy as np
f = np.array(f)
# Check the dimensions of the inputs
if f.ndim > 2:
        raise ValueError('f must be of shape (n_outputs, n_times).')
if f.ndim == 2:
n_outputs = f.shape[0]
else:
n_outputs = 1
if not np.isscalar(eta):
eta = np.array(eta)
if eta.ndim > 1 or (eta.shape[0] != 1 and eta.shape[0] != n_outputs):
raise ValueError('eta must be a scalar or of shape (n_outputs,).')
# Reshape eta so that it broadcasts to f correctly
eta = eta[:, np.newaxis]
if not np.isscalar(sigma):
sigma = np.array(sigma)
if sigma.ndim > 1 or (sigma.shape[0] != 1 and
sigma.shape[0] != n_outputs):
raise ValueError('sigma must be a scalar or of shape '
'(n_outputs,).')
# Reshape sigma so that it broadcasts to f correctly
sigma = sigma[:, np.newaxis]
# Check the values of the inputs
if np.isscalar(sigma):
if sigma <= 0:
raise ValueError('Standard deviation must be greater than zero.')
else:
if np.any(sigma <= 0):
raise ValueError('Standard deviation must be greater than zero.')
e = np.random.normal(0, sigma, f.shape)
return f ** eta * e
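if __name__ == '__main__':
    # Minimal demonstration sketch (illustrative only; uses just numpy).
    import numpy as np
    # AR(1) noise: the sample standard deviation should be close to the
    # requested marginal sigma of 5.
    e = ar1(rho=0.9, sigma=5, n=10000)
    print('sample std of AR(1) noise:', np.std(e))
    # Multiplicative Gaussian noise: the error magnitude scales with the
    # synthetic output values in f.
    f = np.linspace(1.0, 10.0, 100)
    eps = multiplicative_gaussian(eta=1.0, sigma=0.1, f=f)
    print('noise std, small f vs large f:', np.std(eps[:10]), np.std(eps[-10:]))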
|
martinjrobins/hobo
|
pints/noise.py
|
Python
|
bsd-3-clause
| 9,614
|
[
"Gaussian"
] |
c2d87aa104a0897771d1b1e5508b99ec52e550397e55f3741f6624ff8181b63d
|
import fileinput
import math
import collections
import time
import numpy as np
from pylab import *
from matplotlib import pyplot as plt
import matplotlib.mlab as mlab
#file_path = '/media/ABB4-4F3A/DATALOG.TXT'
file_path = 'DATALOG.TXT'
def split_in_blocks(txt_file, pattern):
'''
    Find the lines of text that indicate a new flight and split the log into the blocks generated by the rocket.
Return: A list that contains all the different blocks of data and a list containing the header.
'''
num_times_find_pattern = []
for num_line, line in enumerate(fileinput.input(txt_file)):
if pattern in line:
num_times_find_pattern.append(num_line)
if num_line == 0:
header = list(line.strip().split(","))
#print header
blocks_of_data = []
with open(txt_file) as f:
lines = f.readlines()
for num_header_line in num_times_find_pattern:
if num_header_line == 0:
num_header_line_prev = num_header_line
else:
block_lines = lines[num_header_line_prev + 1 : num_header_line - 1]
blocks_of_data.append(block_lines)
num_header_line_prev = num_header_line
block_lines = lines[num_header_line_prev + 1 : num_line + 1]
blocks_of_data.append(block_lines)
return blocks_of_data, header
def manage_data_from_blocks(blocks, header):
'''
    Divide all the text into blocks tagged with their type of data (acceleration, temperature, ...) followed by a block number.
    Return: A dict that contains all the different types of data differentiated and numbered.
'''
# TODO: Automatize this function to accept more headers!!
blocks_dict = collections.OrderedDict()
for block_number, block in enumerate(blocks):
for item in header:
blocks_dict['%s%s' % (item,block_number)] = []
for line in block:
line_list = line.strip().split(",")
blocks_dict['f%s' % block_number].append(int(line_list[0]))
blocks_dict['ax%s' % block_number].append(float(line_list[1]))
blocks_dict['ay%s' % block_number].append(float(line_list[2]))
blocks_dict['az%s' % block_number].append(float(line_list[3]))
blocks_dict['gx%s' % block_number].append(float(line_list[4]))
blocks_dict['gy%s' % block_number].append(float(line_list[5]))
blocks_dict['gz%s' % block_number].append(float(line_list[6]))
blocks_dict['mx%s' % block_number].append(float(line_list[7]))
blocks_dict['my%s' % block_number].append(float(line_list[8]))
blocks_dict['mz%s' % block_number].append(float(line_list[9]))
blocks_dict['t%s' % block_number].append(float(line_list[10]))
blocks_dict['p%s' % block_number].append(int(line_list[11]))
blocks_dict['h%s' % block_number].append(float(line_list[12]))
return blocks_dict
def process_data(blocks_dict, header):
block_list_header_based = []
for num, item in enumerate(header):
block_list_header_based.append([])
for block in blocks_dict:
if block.startswith(header[num]):
block_list_header_based[num].append(block)
# DEBUG! print "%s: %s" % (block, blocks_dict[block])
    print(block_list_header_based)
#fingerprint_basic_info = basic_process_only_for_fingerprints(block_list_header_based[0])
temp_basic_info = basic_process_data(block_list_header_based[12])
#height_basic_info = basic_process_data(block_list_header_based[12])
print_basic_histograms(block_list_header_based[12])
print_basic_scatters(block_list_header_based[12])
print_basic_evolution_2_axis(block_list_header_based[0], block_list_header_based[12])
def basic_process_only_for_fingerprints(fingerprints):
fingerprint_basic_info = collections.OrderedDict()
fingerprint_list = []
for num, fingerprint_block in enumerate(fingerprints):
millis_interval_list = []
for position, millis in enumerate(blocks_dict[fingerprint_block]):
if position != 0:
millis_interval = millis - millis_prev
millis_interval_list.append(millis_interval)
millis_prev = millis
blocks_dict["fp%s" % (num)] = millis_interval_list
fingerprint_list.append("fp%s" % (num))
fingerprint_basic_info = basic_process_data(fingerprint_list)
return fingerprint_basic_info
def basic_process_data(data_list):
data_basic_info = collections.OrderedDict()
for data_block in data_list:
data_basic_info[data_block] = {}
data_avg_mean = np.mean(blocks_dict[data_block]) # Average
data_avg_weighted = np.average(blocks_dict[data_block]) # Average weighted
data_amax = np.amax(blocks_dict[data_block]) # MAX
data_amin = np.amin(blocks_dict[data_block]) # MIN
data_med = np.median(blocks_dict[data_block]) # Median
data_std = np.std(blocks_dict[data_block]) # Standard deviation
data_ptp = np.ptp(blocks_dict[data_block]) # Distance MAX to MIN
data_var = np.var(blocks_dict[data_block]) # Variance
data_basic_info[data_block] = {"AVM" : "%.3f" % data_avg_mean, "AVW" : "%.3f" % data_avg_weighted, "MAX" : "%.3f" % data_amax,
"MIN" : "%.3f" % data_amin, "MED" : "%.3f" % data_med, "STD" : "%.3f" % data_std,
"PTP" : "%.3f" % data_ptp, "VAR" : "%.3f" % data_var}
    # PLOT NORMAL PDF FROM THE DATA
#sigma = sqrt(data_var)
#x = np.linspace(data_amin,data_amax)
#plt.plot(x,mlab.normpdf(x,data_avg_mean,sigma))
plt.show()
for key in data_basic_info:
        print(data_basic_info[key])
return data_basic_info
def print_basic_histograms(data_list):
#plt.ion()
plt.figure(1)
for num, data in enumerate(data_list):
nrows = int(math.ceil(float(len(data_list) / 3.0)))
ncols = 3
subplot_index = "%s%s%s" % (nrows, ncols, num + 1)
plt.subplot(subplot_index)
plt.hist(blocks_dict[data], bins=20)
#data_new = np.histogramdd(blocks_dict[data])
#plt.hist(data_new, bins=20)
plt.xlabel("Value", fontsize=8)
plt.ylabel("Frequency", fontsize=8)
plt.suptitle("Gaussian Histogram", fontsize=12)
plt.show()
#plt.show(block=True)
def print_basic_scatters(data_list):
#plt.ion()
plt.figure(1)
for num, data in enumerate(data_list):
nrows = int(math.ceil(float(len(data_list) / 3.0)))
ncols = 3
subplot_index = "%s%s%s" % (nrows, ncols, num + 1)
plt.subplot(subplot_index)
plt.scatter(np.random.randn(1000), np.random.randn(1000))
plt.suptitle("Gaussian Histogram", fontsize=12)
plt.show()
#plt.show(block=True)
def print_basic_evolution_2_axis(x_axis_data_list, y_axis_data_list):
plt.figure(1)
for num in range(len(x_axis_data_list)):
x = blocks_dict[x_axis_data_list[num]]
y = blocks_dict[y_axis_data_list[num]]
#subplot(nrows, ncols, plot_number)
nrows = int(math.ceil(float(len(x_axis_data_list) / 3.0)))
ncols = 3
subplot_index = "%s%s%s" % (nrows, ncols, num + 1)
plt.subplot(subplot_index)
plt.plot(x, y, linewidth=1.0, color="green")
xlabel('time (milliseconds)', fontsize = 8)
#ylabel('temperature (C)', fontsize = 8)
#title('', fontsize=10)
grid(True)
plt.xticks(blocks_dict[x_axis_data_list[num]][::len(blocks_dict[x_axis_data_list[num]])/10], rotation=30, fontsize=8)
#plt.annotate('Despegue', xy=(2200, 34.82), xytext=(2300, 34.88),
# bbox=dict(boxstyle="round", fc="0.8"),
# arrowprops=dict(facecolor='black', shrink=0.05),
# )
#plt.annotate('Paracaidas', xy=(7200, 34.82), xytext=(6300, 34.88),
# arrowprops=dict(facecolor='black', shrink=0.05),
# )
#axvline(x=2200)
#axhspan(34.80, 34.82, facecolor='0.5', alpha=0.5, color="red")
plt.ylim(min(blocks_dict[y_axis_data_list[num]]) - 0.02, max(blocks_dict[y_axis_data_list[num]]) + 0.02)
plt.yticks(fontsize=8)
#plt.suptitle('temperatures in data', fontsize=12)
plt.show()
#start = time.time()
blocks, header = split_in_blocks(file_path, "m")
blocks_dict = manage_data_from_blocks(blocks, header)
process_data(blocks_dict, header)
#stop = time.time()
#total_time = stop -start
#print total_time
|
gmartinvela/OpenRocket
|
generate_statistics_from_SD.py
|
Python
|
mit
| 7,682
|
[
"Gaussian"
] |
cc6c40a69c1b27edce41269b3e96addafa07b803055dcc8addfa3ca70b466128
|
"""
BigchainDB: A Scalable Blockchain Database
For full docs visit https://bigchaindb.readthedocs.org
"""
from setuptools import setup, find_packages
tests_require = [
'pytest',
'coverage',
'pep8',
'pyflakes',
'pylint',
'pytest-cov',
'pytest-xdist',
'pytest-flask',
]
dev_require = [
'ipdb',
'ipython',
]
docs_require = [
'recommonmark>=0.4.0',
'Sphinx>=1.3.5',
'sphinxcontrib-napoleon>=0.4.4',
'sphinx-rtd-theme>=0.1.9',
]
setup(
name='BigchainDB-Examples',
version='0.1.0',
description='Example usages for BigchainDB',
long_description=__doc__,
url='https://github.com/BigchainDB/bigchaindb-examples/',
author='BigchainDB Contributors',
author_email='[email protected]',
license='AGPLv3',
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Database',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Software Development',
'Natural Language :: English',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
],
packages=find_packages(exclude=['tests*']),
entry_points={
'console_scripts': [
'bigchaindb-examples=commands.bigchaindb_examples:main'
]
},
install_requires=[
"rethinkdb==2.3.0",
"BigchainDB==0.5.0",
"decorator==4.0.9",
"flask==0.10.1",
"flask-cors==2.1.2",
"tornado"
],
setup_requires=['pytest-runner'],
tests_require=tests_require,
extras_require={
'test': tests_require,
'dev': dev_require + tests_require + docs_require,
'docs': docs_require,
},
)
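# Installation sketch (illustrative; run from a local checkout of this repo):
#
#   pip install -e .              # core package and the console script
#   pip install -e ".[test]"      # additionally install the test extras
#   pip install -e ".[dev]"       # dev extras (pulls in test and docs too)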
|
bigchaindb/bigchaindb-examples
|
setup.py
|
Python
|
apache-2.0
| 2,007
|
[
"VisIt"
] |
f6729395bb52362ec14fec492655f15b2f37a11f2c923f9689d45b181d71bbc5
|
"""
This object is a small tool to allow the user to quickly
determine the variance in q from the instrumental parameters.
"""
import sys
from math import pi, sqrt
import math
import logging
import numpy as np
from .instrument import Sample
from .instrument import Detector
from .instrument import TOF as Neutron
from .instrument import Aperture
logger = logging.getLogger(__name__)
#Planck's constant in cgs unit
_PLANK_H = 6.62606896E-27
#Gravitational acc. in cgs unit
_GRAVITY = 981.0
class ResolutionCalculator(object):
"""
compute resolution in 2D
"""
def __init__(self):
# wavelength
self.wave = Neutron()
# sample
self.sample = Sample()
# aperture
self.aperture = Aperture()
# detector
self.detector = Detector()
# 2d image of the resolution
self.image = []
self.image_lam = []
# resolutions
# lamda in r-direction
self.sigma_lamd = 0
# x-dir (no lamda)
self.sigma_1 = 0
#y-dir (no lamda)
self.sigma_2 = 0
# 1D total
self.sigma_1d = 0
self.gravity_phi = None
# q min and max
self.qx_min = -0.3
self.qx_max = 0.3
self.qy_min = -0.3
self.qy_max = 0.3
# q min and max of the detector
self.detector_qx_min = -0.3
self.detector_qx_max = 0.3
self.detector_qy_min = -0.3
self.detector_qy_max = 0.3
# possible max qrange
self.qxmin_limit = 0
self.qxmax_limit = 0
self.qymin_limit = 0
self.qymax_limit = 0
# plots
self.plot = None
# instrumental params defaults
self.mass = 0
self.intensity = 0
self.wavelength = 0
self.wavelength_spread = 0
self.source_aperture_size = []
self.source2sample_distance = []
self.sample2sample_distance = []
self.sample_aperture_size = []
self.sample2detector_distance = []
self.detector_pix_size = []
self.detector_size = []
self.get_all_instrument_params()
# max q range for all lambdas
self.qxrange = []
self.qyrange = []
def compute_and_plot(self, qx_value, qy_value, qx_min, qx_max,
qy_min, qy_max, coord='cartesian'):
"""
Compute the resolution
: qx_value: x component of q
: qy_value: y component of q
"""
# make sure to update all the variables need.
# except lambda, dlambda, and intensity
self.get_all_instrument_params()
# wavelength etc.
lamda_list, dlamb_list = self.get_wave_list()
intens_list = []
sig1_list = []
sig2_list = []
sigr_list = []
sigma1d_list = []
num_lamda = len(lamda_list)
for num in range(num_lamda):
lam = lamda_list[num]
# wavelength spread
dlam = dlamb_list[num]
intens = self.setup_tof(lam, dlam)
intens_list.append(intens)
            # check if tof
if num_lamda > 1:
tof = True
else:
tof = False
# compute 2d resolution
_, _, sigma_1, sigma_2, sigma_r, sigma1d = \
self.compute(lam, dlam, qx_value, qy_value, coord, tof)
# make image
image = self.get_image(qx_value, qy_value, sigma_1, sigma_2,
sigma_r, qx_min, qx_max, qy_min, qy_max,
coord, False)
if qx_min > self.qx_min:
qx_min = self.qx_min
if qx_max < self.qx_max:
qx_max = self.qx_max
if qy_min > self.qy_min:
qy_min = self.qy_min
if qy_max < self.qy_max:
qy_max = self.qy_max
# set max qranges
self.qxrange = [qx_min, qx_max]
self.qyrange = [qy_min, qy_max]
sig1_list.append(sigma_1)
sig2_list.append(sigma_2)
sigr_list.append(sigma_r)
sigma1d_list.append(sigma1d)
# redraw image in global 2d q-space.
self.image_lam = []
total_intensity = 0
sigma_1 = 0
sigma_r = 0
sigma_2 = 0
sigma1d = 0
for ind in range(num_lamda):
lam = lamda_list[ind]
dlam = dlamb_list[ind]
intens = self.setup_tof(lam, dlam)
out = self.get_image(qx_value, qy_value, sig1_list[ind],
sig2_list[ind], sigr_list[ind],
qx_min, qx_max, qy_min, qy_max, coord)
# this is the case of q being outside the detector
#if numpy.all(out==0.0):
# continue
image = out
# set variance as sigmas
sigma_1 += sig1_list[ind] * sig1_list[ind] * self.intensity
sigma_r += sigr_list[ind] * sigr_list[ind] * self.intensity
sigma_2 += sig2_list[ind] * sig2_list[ind] * self.intensity
sigma1d += sigma1d_list[ind] * sigma1d_list[ind] * self.intensity
total_intensity += self.intensity
if total_intensity != 0:
# average variance
image_out = image / total_intensity
sigma_1 = sigma_1 / total_intensity
sigma_r = sigma_r / total_intensity
sigma_2 = sigma_2 / total_intensity
sigma1d = sigma1d / total_intensity
# set sigmas
self.sigma_1 = sqrt(sigma_1)
self.sigma_lamd = sqrt(sigma_r)
self.sigma_2 = sqrt(sigma_2)
self.sigma_1d = sqrt(sigma1d)
# rescale
max_im_val = 1
if max_im_val > 0:
image_out /= max_im_val
else:
image_out = image * 0.0
# Don't calculate sigmas nor set self.sigmas!
sigma_1 = 0
sigma_r = 0
sigma_2 = 0
sigma1d = 0
if len(self.image) > 0:
self.image += image_out
else:
self.image = image_out
# plot image
return self.plot_image(self.image)
def setup_tof(self, wavelength, wavelength_spread):
"""
Setup all parameters in instrument
: param ind: index of lambda, etc
"""
# set wave.wavelength
self.set_wavelength(wavelength)
self.set_wavelength_spread(wavelength_spread)
self.intensity = self.wave.get_intensity()
if wavelength == 0:
msg = "Can't compute the resolution: the wavelength is zero..."
raise RuntimeError(msg)
return self.intensity
def compute(self, wavelength, wavelength_spread, qx_value, qy_value,
coord='cartesian', tof=False):
"""
        Compute the Q resolution in the parallel (||) and perpendicular (+) directions in 2D
: qx_value: x component of q
: qy_value: y component of q
"""
coord = 'cartesian'
lamb = wavelength
lamb_spread = wavelength_spread
# the shape of wavelength distribution
if tof:
# rectangular
tof_factor = 2
else:
# triangular
tof_factor = 1
# Find polar values
qr_value, phi = self._get_polar_value(qx_value, qy_value)
# vacuum wave transfer
knot = 2*pi/lamb
# scattering angle theta; always true for plane detector
# aligned vertically to the ko direction
if qr_value > knot:
theta = pi/2
else:
theta = math.asin(qr_value/knot)
# source aperture size
rone = self.source_aperture_size
# sample aperture size
rtwo = self.sample_aperture_size
# detector pixel size
rthree = self.detector_pix_size
# source to sample(aperture) distance
l_ssa = self.source2sample_distance[0]
# sample(aperture) to detector distance
l_sad = self.sample2detector_distance[0]
# sample (aperture) to sample distance
l_sas = self.sample2sample_distance[0]
# source to sample distance
l_one = l_ssa + l_sas
# sample to detector distance
l_two = l_sad - l_sas
# Sample offset correction for l_one and Lp on variance calculation
l1_cor = (l_ssa * l_two) / (l_sas + l_two)
lp_cor = (l_ssa * l_two) / (l_one + l_two)
# the radial distance to the pixel from the center of the detector
radius = math.tan(theta) * l_two
#Lp = l_one*l_two/(l_one+l_two)
# default polar coordinate
comp1 = 'radial'
comp2 = 'phi'
# in the case of the cartesian coordinate
if coord == 'cartesian':
comp1 = 'x'
comp2 = 'y'
# sigma in the radial/x direction
# for source aperture
sigma_1 = self.get_variance(rone, l1_cor, phi, comp1)
        # for sample aperture
sigma_1 += self.get_variance(rtwo, lp_cor, phi, comp1)
# for detector pix
sigma_1 += self.get_variance(rthree, l_two, phi, comp1)
# for gravity term for 1d
sigma_1grav1d = self.get_variance_gravity(l_ssa, l_sad, lamb,
lamb_spread, phi, comp1, 'on') / tof_factor
# for wavelength spread
# reserve for 1d calculation
A_value = self._cal_A_value(lamb, l_ssa, l_sad)
sigma_wave_1, sigma_wave_1_1d = self.get_variance_wave(A_value,
radius, l_two, lamb_spread,
phi, 'radial', 'on')
sigma_wave_1 /= tof_factor
sigma_wave_1_1d /= tof_factor
# for 1d
variance_1d_1 = (sigma_1 + sigma_1grav1d) / 2 + sigma_wave_1_1d
# normalize
variance_1d_1 = knot * knot * variance_1d_1 / 12
# for 2d
#sigma_1 += sigma_wave_1
# normalize
sigma_1 = knot * sqrt(sigma_1 / 12)
sigma_r = knot * sqrt(sigma_wave_1 / (tof_factor *12))
# sigma in the phi/y direction
        # for source aperture
sigma_2 = self.get_variance(rone, l1_cor, phi, comp2)
        # for sample aperture
sigma_2 += self.get_variance(rtwo, lp_cor, phi, comp2)
# for detector pix
sigma_2 += self.get_variance(rthree, l_two, phi, comp2)
# for gravity term for 1d
sigma_2grav1d = self.get_variance_gravity(l_ssa, l_sad, lamb,
lamb_spread, phi, comp2, 'on') / tof_factor
# for wavelength spread
# reserve for 1d calculation
sigma_wave_2, sigma_wave_2_1d = self.get_variance_wave(A_value,
radius, l_two, lamb_spread,
phi, 'phi', 'on')
sigma_wave_2 /= tof_factor
sigma_wave_2_1d /= tof_factor
# for 1d
variance_1d_2 = (sigma_2 + sigma_2grav1d) / 2 + sigma_wave_2_1d
# normalize
variance_1d_2 = knot * knot * variance_1d_2 / 12
# for 2d
#sigma_2 = knot*sqrt(sigma_2/12)
#sigma_2 += sigma_wave_2
# normalize
sigma_2 = knot * sqrt(sigma_2 / 12)
sigma1d = sqrt(variance_1d_1 + variance_1d_2)
# set sigmas
self.sigma_1 = sigma_1
self.sigma_lamd = sigma_r
self.sigma_2 = sigma_2
self.sigma_1d = sigma1d
return qr_value, phi, sigma_1, sigma_2, sigma_r, sigma1d
def _within_detector_range(self, qx_value, qy_value):
"""
check if qvalues are within detector range
"""
# detector range
detector_qx_min = self.detector_qx_min
detector_qx_max = self.detector_qx_max
detector_qy_min = self.detector_qy_min
detector_qy_max = self.detector_qy_max
if self.qxmin_limit > detector_qx_min:
self.qxmin_limit = detector_qx_min
if self.qxmax_limit < detector_qx_max:
self.qxmax_limit = detector_qx_max
if self.qymin_limit > detector_qy_min:
self.qymin_limit = detector_qy_min
if self.qymax_limit < detector_qy_max:
self.qymax_limit = detector_qy_max
if qx_value < detector_qx_min or qx_value > detector_qx_max:
return False
if qy_value < detector_qy_min or qy_value > detector_qy_max:
return False
return True
def get_image(self, qx_value, qy_value, sigma_1, sigma_2, sigma_r,
qx_min, qx_max, qy_min, qy_max,
coord='cartesian', full_cal=True):
"""
        Get the resolution image in the requested coordinate system, ready to plot
: qx_value: qx_value value
: qy_value: qy_value value
: sigma_1: variance in r direction
: sigma_2: variance in phi direction
: coord: coordinate system of image, 'polar' or 'cartesian'
"""
# Get qx_max and qy_max...
self._get_detector_qxqy_pixels()
qr_value, phi = self._get_polar_value(qx_value, qy_value)
# Check whether the q value is within the detector range
if qx_min < self.qx_min:
self.qx_min = qx_min
#raise ValueError(msg)
if qx_max > self.qx_max:
self.qx_max = qx_max
#raise ValueError(msg)
if qy_min < self.qy_min:
self.qy_min = qy_min
#raise ValueError(msg)
if qy_max > self.qy_max:
self.qy_max = qy_max
#raise ValueError(msg)
if not full_cal:
return None
# Make an empty graph in the detector scale
dx_size = (self.qx_max - self.qx_min) / (1000 - 1)
dy_size = (self.qy_max - self.qy_min) / (1000 - 1)
x_val = np.arange(self.qx_min, self.qx_max, dx_size)
y_val = np.arange(self.qy_max, self.qy_min, -dy_size)
q_1, q_2 = np.meshgrid(x_val, y_val)
#q_phi = numpy.arctan(q_1,q_2)
# check whether polar or cartesian
if coord == 'polar':
# Find polar values
qr_value, phi = self._get_polar_value(qx_value, qy_value)
q_1, q_2 = self._rotate_z(q_1, q_2, phi)
qc_1 = qr_value
qc_2 = 0.0
# Calculate the 2D Gaussian distribution image
image = self._gaussian2d_polar(q_1, q_2, qc_1, qc_2,
sigma_1, sigma_2, sigma_r)
else:
            # cartesian coordinate
# qx_center
qc_1 = qx_value
# qy_center
qc_2 = qy_value
# Calculate the 2D Gaussian distribution image
image = self._gaussian2d(q_1, q_2, qc_1, qc_2,
sigma_1, sigma_2, sigma_r)
# out side of detector
if not self._within_detector_range(qx_value, qy_value):
image *= 0.0
self.intensity = 0.0
#return self.image
# Add it if there are more than one inputs.
if len(self.image_lam) > 0:
self.image_lam += image * self.intensity
else:
self.image_lam = image * self.intensity
return self.image_lam
def plot_image(self, image):
"""
Plot image using pyplot
: image: 2d resolution image
: return plt: pylab object
"""
import matplotlib.pyplot as plt
self.plot = plt
plt.xlabel('$\\rm{Q}_{x} [A^{-1}]$')
plt.ylabel('$\\rm{Q}_{y} [A^{-1}]$')
# Max value of the image
# max = numpy.max(image)
qx_min, qx_max, qy_min, qy_max = self.get_detector_qrange()
# Image
im = plt.imshow(image,
extent=[qx_min, qx_max, qy_min, qy_max])
# bilinear interpolation to make it smoother
im.set_interpolation('bilinear')
return plt
def reset_image(self):
"""
Reset image to default (=[])
"""
self.image = []
def get_variance(self, size=[], distance=0, phi=0, comp='radial'):
"""
Get the variance when the slit/pinhole size is given
        : size: list that can be one (diameter for circular) or two components (lengths for rectangular)
: distance: [z, x] where z along the incident beam, x // qx_value
: comp: direction of the sigma; can be 'phi', 'y', 'x', and 'radial'
: return variance: sigma^2
"""
# check the length of size (list)
len_size = len(size)
# define sigma component direction
if comp == 'radial':
phi_x = math.cos(phi)
phi_y = math.sin(phi)
elif comp == 'phi':
phi_x = math.sin(phi)
phi_y = math.cos(phi)
elif comp == 'x':
phi_x = 1
phi_y = 0
elif comp == 'y':
phi_x = 0
phi_y = 1
else:
phi_x = 0
phi_y = 0
# calculate each component
# for pinhole w/ radius = size[0]/2
if len_size == 1:
x_comp = (0.5 * size[0]) * sqrt(3)
y_comp = 0
# for rectangular slit
elif len_size == 2:
x_comp = size[0] * phi_x
y_comp = size[1] * phi_y
# otherwise
else:
raise ValueError(" Improper input...")
# get them squared
sigma = x_comp * x_comp
sigma += y_comp * y_comp
# normalize by distance
sigma /= (distance * distance)
return sigma
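    # Worked example of the formula above (illustrative sketch): for a circular
    # pinhole of diameter D at distance L this method returns
    # (sqrt(3) * D / 2)**2 / L**2 = 3 * (D / 2)**2 / L**2, e.g. 3e-6 for
    # D = 2 cm and L = 1000 cm; compute() later sums such terms, divides by 12,
    # takes the square root and multiplies by the wave number to get sigma_q.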
def get_variance_wave(self, A_value, radius, distance, spread, phi,
comp='radial', switch='on'):
"""
Get the variance when the wavelength spread is given
: radius: the radial distance from the beam center to the pix of q
: distance: sample to detector distance
: spread: wavelength spread (ratio)
: comp: direction of the sigma; can be 'phi', 'y', 'x', and 'radial'
: return variance: sigma^2 for 2d, sigma^2 for 1d [tuple]
"""
if switch.lower() == 'off':
return 0, 0
# check the singular point
if distance == 0 or comp == 'phi':
return 0, 0
else:
# calculate sigma^2 for 1d
sigma1d = 2 * math.pow(radius/distance*spread, 2)
if comp == 'x':
sigma1d *= (math.cos(phi)*math.cos(phi))
elif comp == 'y':
sigma1d *= (math.sin(phi)*math.sin(phi))
else:
sigma1d *= 1
# sigma^2 for 2d
# shift the coordinate due to the gravitational shift
rad_x = radius * math.cos(phi)
rad_y = A_value - radius * math.sin(phi)
radius = math.sqrt(rad_x * rad_x + rad_y * rad_y)
# new phi
phi = math.atan2(-rad_y, rad_x)
self.gravity_phi = phi
# calculate sigma^2
sigma = 2 * math.pow(radius/distance*spread, 2)
if comp == 'x':
sigma *= (math.cos(phi)*math.cos(phi))
elif comp == 'y':
sigma *= (math.sin(phi)*math.sin(phi))
else:
sigma *= 1
return sigma, sigma1d
def get_variance_gravity(self, s_distance, d_distance, wavelength, spread,
phi, comp='radial', switch='on'):
"""
Get the variance from gravity when the wavelength spread is given
: s_distance: source to sample distance
: d_distance: sample to detector distance
: wavelength: wavelength
: spread: wavelength spread (ratio)
: comp: direction of the sigma; can be 'phi', 'y', 'x', and 'radial'
: return variance: sigma^2
"""
if switch.lower() == 'off':
return 0
if self.mass == 0.0:
return 0
# check the singular point
if d_distance == 0 or comp == 'x':
return 0
else:
a_value = self._cal_A_value(None, s_distance, d_distance)
# calculate sigma^2
sigma = math.pow(a_value / d_distance, 2)
sigma *= math.pow(wavelength, 4)
sigma *= math.pow(spread, 2)
sigma *= 8
return sigma
def _cal_A_value(self, lamda, s_distance, d_distance):
"""
Calculate A value for gravity
: s_distance: source to sample distance
: d_distance: sample to detector distance
"""
# neutron mass in cgs unit
self.mass = self.get_neutron_mass()
        # Planck constant in cgs unit
h_constant = _PLANK_H
# gravity in cgs unit
gravy = _GRAVITY
# m/h
m_over_h = self.mass / h_constant
# A value
a_value = d_distance * (s_distance + d_distance)
a_value *= math.pow(m_over_h / 2, 2)
a_value *= gravy
# unit correction (1/cm to 1/A) for A and d_distance below
a_value *= 1.0E-16
        # if lamda is given (broad meaning of A), return 4 * lamda^2 * A
if lamda is not None:
a_value *= (4 * lamda * lamda)
return a_value
def get_intensity(self):
"""
Get intensity
"""
return self.wave.intensity
def get_wavelength(self):
"""
Get wavelength
"""
return self.wave.wavelength
def get_default_spectrum(self):
"""
Get default_spectrum
"""
return self.wave.get_default_spectrum()
def get_spectrum(self):
"""
Get _spectrum
"""
return self.wave.get_spectrum()
def get_wavelength_spread(self):
"""
Get wavelength spread
"""
return self.wave.wavelength_spread
def get_neutron_mass(self):
"""
Get Neutron mass
"""
return self.wave.mass
def get_source_aperture_size(self):
"""
Get source aperture size
"""
return self.aperture.source_size
def get_sample_aperture_size(self):
"""
Get sample aperture size
"""
return self.aperture.sample_size
def get_detector_pix_size(self):
"""
Get detector pixel size
"""
return self.detector.pix_size
def get_detector_size(self):
"""
Get detector size
"""
return self.detector.size
def get_source2sample_distance(self):
"""
Get detector source2sample_distance
"""
return self.aperture.sample_distance
def get_sample2sample_distance(self):
"""
        Get detector sample_slit2sample_distance
"""
return self.sample.distance
def get_sample2detector_distance(self):
"""
Get detector sample2detector_distance
"""
return self.detector.distance
def set_intensity(self, intensity):
"""
Set intensity
"""
self.wave.set_intensity(intensity)
def set_wave(self, wavelength):
"""
Set wavelength list or wavelength
"""
if wavelength.__class__.__name__ == 'list':
self.wave.set_wave_list(wavelength)
elif wavelength.__class__.__name__ == 'float':
self.wave.set_wave_list([wavelength])
#self.set_wavelength(wavelength)
else:
raise TypeError("invalid wavlength---should be list or float")
def set_wave_spread(self, wavelength_spread):
"""
Set wavelength spread or wavelength spread
"""
if wavelength_spread.__class__.__name__ == 'list':
self.wave.set_wave_spread_list(wavelength_spread)
elif wavelength_spread.__class__.__name__ == 'float':
self.wave.set_wave_spread_list([wavelength_spread])
else:
raise TypeError("invalid wavelength spread---should be list or float")
def set_wavelength(self, wavelength):
"""
Set wavelength
"""
self.wavelength = wavelength
self.wave.set_wavelength(wavelength)
def set_spectrum(self, spectrum):
"""
Set spectrum
"""
self.spectrum = spectrum
self.wave.set_spectrum(spectrum)
def set_wavelength_spread(self, wavelength_spread):
"""
Set wavelength spread
"""
self.wavelength_spread = wavelength_spread
self.wave.set_wavelength_spread(wavelength_spread)
def set_wave_list(self, wavelength_list, wavelengthspread_list):
"""
Set wavelength and its spread list
"""
self.wave.set_wave_list(wavelength_list)
self.wave.set_wave_spread_list(wavelengthspread_list)
def get_wave_list(self):
"""
Get wavelength and wavelength spread lists
"""
return self.wave.get_wave_list()
def get_intensity_list(self):
"""
Get intensity list
"""
return self.wave.get_intensity_list()
def set_source_aperture_size(self, size):
"""
Set source aperture size
: param size: [dia_value] or [x_value, y_value]
"""
if len(size) < 1 or len(size) > 2:
raise RuntimeError("The length of the size must be one or two.")
self.aperture.set_source_size(size)
def set_neutron_mass(self, mass):
"""
Set Neutron mass
"""
self.wave.set_mass(mass)
self.mass = mass
def set_sample_aperture_size(self, size):
"""
Set sample aperture size
: param size: [dia_value] or [xheight_value, yheight_value]
"""
if len(size) < 1 or len(size) > 2:
raise RuntimeError("The length of the size must be one or two.")
self.aperture.set_sample_size(size)
def set_detector_pix_size(self, size):
"""
Set detector pixel size
"""
self.detector.set_pix_size(size)
def set_detector_size(self, size):
"""
Set detector size in number of pixels
: param size: [pixel_nums] or [x_pix_num, y_pix_num]
"""
self.detector.set_size(size)
def set_source2sample_distance(self, distance):
"""
Set detector source2sample_distance
: param distance: [distance, x_offset]
"""
if len(distance) < 1 or len(distance) > 2:
raise RuntimeError("The length of the size must be one or two.")
self.aperture.set_sample_distance(distance)
def set_sample2sample_distance(self, distance):
"""
Set sample slit to sample distance
: param distance: [distance, x_offset]
"""
if len(distance) < 1 or len(distance) > 2:
raise RuntimeError("The length of the size must be one or two.")
self.sample.set_distance(distance)
def set_sample2detector_distance(self, distance):
"""
Set detector sample2detector_distance
: param distance: [distance, x_offset]
"""
if len(distance) < 1 or len(distance) > 2:
raise RuntimeError("The length of the size must be one or two.")
self.detector.set_distance(distance)
def get_all_instrument_params(self):
"""
Get all instrumental parameters
"""
self.mass = self.get_neutron_mass()
self.spectrum = self.get_spectrum()
self.source_aperture_size = self.get_source_aperture_size()
self.sample_aperture_size = self.get_sample_aperture_size()
self.detector_pix_size = self.get_detector_pix_size()
self.detector_size = self.get_detector_size()
self.source2sample_distance = self.get_source2sample_distance()
self.sample2sample_distance = self.get_sample2sample_distance()
self.sample2detector_distance = self.get_sample2detector_distance()
def get_detector_qrange(self):
"""
get max detector q ranges
: return: qx_min, qx_max, qy_min, qy_max tuple
"""
if len(self.qxrange) != 2 or len(self.qyrange) != 2:
return None
qx_min = self.qxrange[0]
qx_max = self.qxrange[1]
qy_min = self.qyrange[0]
qy_max = self.qyrange[1]
return qx_min, qx_max, qy_min, qy_max
def _rotate_z(self, x_value, y_value, theta=0.0):
"""
Rotate x-y coordinates around the z-axis by theta
: x_value: numpy array of x values
: y_value: numpy array of y values
: theta: angle to rotate by in rad
:return: x_prime, y_prime
"""
# rotate by theta
x_prime = x_value * math.cos(theta) + y_value * math.sin(theta)
y_prime = -x_value * math.sin(theta) + y_value * math.cos(theta)
return x_prime, y_prime
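# For example, with theta = pi/2 the point (1, 0) maps to (0, -1): this is a
# passive rotation of the axes by +theta, so the point itself turns clockwise.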
def _gaussian2d(self, x_val, y_val, x0_val, y0_val,
sigma_x, sigma_y, sigma_r):
"""
Calculate 2D Gaussian distribution
: x_val: x value
: y_val: y value
: x0_val: mean value in x-axis
: y0_val: mean value in y-axis
: sigma_x: variance in x-direction
: sigma_y: variance in y-direction
: return: gaussian (value)
"""
# phi values at each point (not at the center)
x_value = x_val - x0_val
y_value = y_val - y0_val
phi_i = np.arctan2(y_val, x_val)
# phi correction due to the gravity shift (in phi)
phi_0 = math.atan2(y0_val, x0_val)
phi_i = phi_i - phi_0 + self.gravity_phi
sin_phi = np.sin(self.gravity_phi)
cos_phi = np.cos(self.gravity_phi)
x_p = x_value * cos_phi + y_value * sin_phi
y_p = -x_value * sin_phi + y_value * cos_phi
new_sig_x = sqrt(sigma_r * sigma_r / (sigma_x * sigma_x) + 1)
new_sig_y = sqrt(sigma_r * sigma_r / (sigma_y * sigma_y) + 1)
new_x = x_p * cos_phi / new_sig_x - y_p * sin_phi
new_x /= sigma_x
new_y = x_p * sin_phi / new_sig_y + y_p * cos_phi
new_y /= sigma_y
nu_value = -0.5 * (new_x * new_x + new_y * new_y)
gaussian = np.exp(nu_value)
# normalizing factor correction
gaussian /= gaussian.sum()
return gaussian
def _gaussian2d_polar(self, x_val, y_val, x0_val, y0_val,
sigma_x, sigma_y, sigma_r):
"""
Calculate 2D Gaussian distribution for polar coordinates
: x_val: x value
: y_val: y value
: x0_val: mean value in x-axis
: y0_val: mean value in y-axis
: sigma_x: variance in r-direction
: sigma_y: variance in phi-direction
: sigma_r: wavelength variance in r-direction
: return: gaussian (value)
"""
sigma_x = sqrt(sigma_x * sigma_x + sigma_r * sigma_r)
# call gaussian1d
gaussian = self._gaussian1d(x_val, x0_val, sigma_x)
gaussian *= self._gaussian1d(y_val, y0_val, sigma_y)
# normalizing factor correction
if sigma_x != 0 and sigma_y != 0:
gaussian *= sqrt(2 * pi)
return gaussian
def _gaussian1d(self, value, mean, sigma):
"""
Calculate 1D Gaussian distribution
: value: value
: mean: mean value
: sigma: variance
: return: gaussian (value)
"""
# default
gaussian = 1.0
if sigma != 0:
# get exponent
nu_value = (value - mean) / sigma
nu_value *= nu_value
nu_value *= -0.5
gaussian *= np.exp(nu_value)
gaussian /= sigma
# normalize
gaussian /= sqrt(2 * pi)
return gaussian
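# For example, at value == mean the density above reduces to
#     1 / (sigma * sqrt(2 * pi))
# which is about 0.798 for sigma = 0.5.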
def _atan_phi(self, qy_value, qx_value):
"""
Find the angle phi of q on the detector plane for qx_value, qy_value given
: qx_value: x component of q
: qy_value: y component of q
: return phi: the azimuthal angle of q on x-y plane
"""
phi = math.atan2(qy_value, qx_value)
return phi
def _get_detector_qxqy_pixels(self):
"""
Get the pixel positions of the detector in the qx_value-qy_value space
"""
# update all param values
self.get_all_instrument_params()
# wavelength
wavelength = self.wave.wavelength
# Gravity correction
delta_y = self._get_beamcenter_drop() # in cm
# detector_pix size
detector_pix_size = self.detector_pix_size
# Square or circular pixel
if len(detector_pix_size) == 1:
pix_x_size = detector_pix_size[0]
pix_y_size = detector_pix_size[0]
# rectangular pixel
elif len(detector_pix_size) == 2:
pix_x_size = detector_pix_size[0]
pix_y_size = detector_pix_size[1]
else:
raise ValueError(" Input value format error...")
# Sample to detector distance = sample slit to detector
# minus sample offset
sample2detector_distance = self.sample2detector_distance[0] - \
self.sample2sample_distance[0]
# detector offset in x-direction
detector_offset = 0
try:
detector_offset = self.sample2detector_distance[1]
except Exception as exc:
logger.error(exc)
# detector size in [no of pix_x,no of pix_y]
detector_pix_nums_x = self.detector_size[0]
# get pix_y if it exists, otherwise take it from [0]
try:
detector_pix_nums_y = self.detector_size[1]
except IndexError:
detector_pix_nums_y = self.detector_size[0]
# detector offset in pix number
offset_x = detector_offset / pix_x_size
offset_y = delta_y / pix_y_size
# beam center position in pix number (start from 0)
center_x, center_y = self._get_beamcenter_position(detector_pix_nums_x,
detector_pix_nums_y,
offset_x, offset_y)
# distance [cm] from the beam center on detector plane
detector_ind_x = np.arange(detector_pix_nums_x)
detector_ind_y = np.arange(detector_pix_nums_y)
# shift 0.5 pixel so that pix position is at the center of the pixel
detector_ind_x = detector_ind_x + 0.5
detector_ind_y = detector_ind_y + 0.5
# the relative position from the beam center
detector_ind_x = detector_ind_x - center_x
detector_ind_y = detector_ind_y - center_y
# unit correction in cm
detector_ind_x = detector_ind_x * pix_x_size
detector_ind_y = detector_ind_y * pix_y_size
qx_value = np.zeros(len(detector_ind_x))
qy_value = np.zeros(len(detector_ind_y))
i = 0
for indx in detector_ind_x:
qx_value[i] = self._get_qx(indx, sample2detector_distance, wavelength)
i += 1
i = 0
for indy in detector_ind_y:
qy_value[i] = self._get_qx(indy, sample2detector_distance, wavelength)
i += 1
# qx_value and qy_value values in array
qx_value = qx_value.repeat(detector_pix_nums_y)
qx_value = qx_value.reshape(detector_pix_nums_x, detector_pix_nums_y)
qy_value = qy_value.repeat(detector_pix_nums_x)
qy_value = qy_value.reshape(detector_pix_nums_y, detector_pix_nums_x)
qy_value = qy_value.transpose()
# q min and max values among the centers of the pixels
self.qx_min = np.min(qx_value)
self.qx_max = np.max(qx_value)
self.qy_min = np.min(qy_value)
self.qy_max = np.max(qy_value)
# Appr. min and max values of the detector display limits
# i.e., edges of the last pixels.
self.qy_min += self._get_qx(-0.5 * pix_y_size,
sample2detector_distance, wavelength)
self.qy_max += self._get_qx(0.5 * pix_y_size,
sample2detector_distance, wavelength)
#if self.qx_min == self.qx_max:
self.qx_min += self._get_qx(-0.5 * pix_x_size,
sample2detector_distance, wavelength)
self.qx_max += self._get_qx(0.5 * pix_x_size,
sample2detector_distance, wavelength)
# min and max values of the detector
self.detector_qx_min = self.qx_min
self.detector_qx_max = self.qx_max
self.detector_qy_min = self.qy_min
self.detector_qy_max = self.qy_max
# try to set it as a Data2D, otherwise return None (not required for now)
output = None
try:
from sas.sascalc.dataloader.data_info import Data2D
output = Data2D()
inten = np.zeros_like(qx_value)
output.data = inten
output.qx_data = qx_value
output.qy_data = qy_value
except Exception as exc:
logger.error(exc)
return output
def _get_qx(self, dx_size, det_dist, wavelength):
"""
:param dx_size: x-distance from beam center [cm]
:param det_dist: sample to detector distance [cm]
:param wavelength: wavelength [A]
:return: q-value at the given position
"""
# Distance from beam center in the plane of detector
plane_dist = dx_size
# full scattering angle on the x-axis
theta = np.arctan(plane_dist / det_dist)
qx_value = (2.0 * pi / wavelength) * np.sin(theta)
return qx_value
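# For example, dx_size = 10 cm, det_dist = 1300 cm and a 6 A wavelength give
# theta = arctan(10 / 1300) ~ 7.7e-3 rad and qx_value ~ 8.1e-3 1/A.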
def _get_polar_value(self, qx_value, qy_value):
"""
Find qr_value and phi from qx_value and qy_value values
: return qr_value, phi
"""
# find |q| on detector plane
qr_value = sqrt(qx_value*qx_value + qy_value*qy_value)
# find angle phi
phi = self._atan_phi(qy_value, qx_value)
return qr_value, phi
def _get_beamcenter_position(self, num_x, num_y, offset_x, offset_y):
"""
:param num_x: number of pixels in x-direction
:param num_y: number of pixels in y-direction
:param offset_x: detector offset in x-direction in pix number
:param offset_y: beam-drop offset in y-direction in pix number
:return: pix number; pos_x, pos_y in pix index
"""
# beam center position
pos_x = num_x / 2
pos_y = num_y / 2
# correction for offset
pos_x += offset_x
# correction for gravity that is always negative
pos_y -= offset_y
return pos_x, pos_y
def _get_beamcenter_drop(self):
"""
Get the beam center drop (delta y) in y direction due to gravity
:return delta y: the beam center drop in cm
"""
# Check if mass == 0 (X-ray).
if self.mass == 0:
return 0
# Convert unit from A to cm
unit_cm = 1e-08
# Velocity of neutron in horizontal direction (~ actual velocity)
velocity = _PLANK_H / (self.mass * self.wave.wavelength * unit_cm)
# Compute delta y
delta_y = 0.5
delta_y *= _GRAVITY
sampletodetector = self.sample2detector_distance[0] - \
self.sample2sample_distance[0]
delta_y *= sampletodetector
delta_y *= (self.source2sample_distance[0] + self.sample2detector_distance[0])
delta_y /= (velocity * velocity)
return delta_y
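if __name__ == "__main__":
    # Minimal numerical sketch (not part of SasView): reproduce the gravity
    # beam-center drop of _get_beamcenter_drop() with explicit cgs numbers,
    # ignoring the sample-offset correction. The instrument values below are
    # assumptions chosen only for illustration.
    h_cgs = 6.626e-27        # Planck constant [erg*s]
    g_cgs = 981.0            # gravitational acceleration [cm/s^2]
    m_neutron = 1.675e-24    # neutron mass [g]
    demo_wavelength = 6.0    # [A]
    l1 = 1600.0              # source-to-sample distance [cm]
    l2 = 1300.0              # sample-to-detector distance [cm]
    # horizontal neutron velocity [cm/s]; the 1e-8 factor converts A to cm
    demo_velocity = h_cgs / (m_neutron * demo_wavelength * 1.0e-8)
    demo_drop = 0.5 * g_cgs * l2 * (l1 + l2) / (demo_velocity * demo_velocity)
    print("beam-center drop: %.3f cm" % demo_drop)   # ~0.43 cm for these values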
|
SasView/sasview
|
src/sas/sascalc/calculator/resolution_calculator.py
|
Python
|
bsd-3-clause
| 39,431
|
[
"Gaussian"
] |
a5f2c46d9d565457e9f084ac1c5fa8a7f363102b283dca0b8c1969dd16c4fc7d
|
"""Protocol and threaded interface for data acquisition."""
import time
import numpy
from PyQt5 import QtCore
from axopy.messaging import Transmitter
from axopy.gui.main import get_qtapp, qt_key_map
from axopy.pipeline import Filter
class DaqStream(QtCore.QThread):
"""Asynchronous interface to an input device.
Runs a persistent while loop wherein the device is repeatedly polled for
data. When the data becomes available, it is emitted and the loop
continues.
There are effectively two methods of this class: start and stop. These
methods do as their names suggest -- they start and stop the underlying
device from sampling new data.
The device used to create the DaqStream is also accessible via the
``device`` attribute so you can change settings on the underlying device
any time (e.g. sampling rate, number of samples per update, etc.).
Parameters
----------
device : daq
Any object implementing the AxoPy data acquisition interface. See
:class:`NoiseGenerator` for an example.
Attributes
----------
updated : Transmitter
Transmitted when the latest chunk of data is available. The data type
depends on the underlying input device, but it is often a numpy
ndarray.
disconnected : Transmitter
Transmitted if the device cannot be read from (it has disconnected
somehow).
finished : Transmitter
Transmitted when the device has stopped and sampling is finished.
"""
updated = Transmitter(object)
disconnected = Transmitter()
finished = Transmitter()
def __init__(self, device):
super(DaqStream, self).__init__()
self.device = device
self._running = False
@property
def running(self):
"""Boolean value indicating whether or not the stream is running."""
return self._running
def start(self):
"""Start the device and begin reading from it."""
super(DaqStream, self).start()
def run(self):
"""Implementation for the underlying QThread.
Don't call this method directly -- use :meth:`start` instead.
"""
self._running = True
self.device.start()
while True:
if not self._running:
break
try:
d = self.device.read()
except IOError:
self.disconnected.emit()
return
if self._running:
self.updated.emit(d)
self.device.stop()
self.finished.emit()
def stop(self, wait=True):
"""Stop the stream.
Parameters
----------
wait : bool, optional
Whether or not to wait for the underlying device to stop before
returning.
"""
self._running = False
if wait:
self.wait()
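# Illustrative usage sketch (comments only; Transmitter's connect() call is
# assumed to follow the Qt-style API used throughout axopy):
#
#     stream = DaqStream(NoiseGenerator(num_channels=2))
#     stream.updated.connect(on_new_data)   # on_new_data receives each chunk
#     stream.start()
#     ...
#     stream.stop()
#
# A running Qt application (see axopy.gui.main.get_qtapp) is required for the
# transmitted data to be delivered.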
class NoiseGenerator(object):
"""An emulated data acquisition device which generates random data.
Each sample of the generated data is sampled from a zero-mean Gaussian
distribution with variance determined by the amplitude specified, which
corresponds to three standard deviations. That is, approximately 99.7% of
the samples should be within the desired peak amplitude.
:class:`NoiseGenerator` is meant to emulate data acquisition devices that
block on each request for data until the data is available. See
:meth:`read` for details.
Parameters
----------
rate : int, optional
Sample rate in Hz. Default is 1000.
num_channels : int, optional
Number of "channels" to generate. Default is 1.
amplitude : float, optional
Approximate peak amplitude of the signal to generate. Specifically, the
amplitude represents three standard deviations for generating the
Gaussian distributed data. Default is 1.
read_size : int, optional
Number of samples to generate per :meth:`read()` call. Default is 100.
"""
def __init__(self, rate=1000, num_channels=1, amplitude=1.0,
read_size=100):
self.rate = rate
self.num_channels = num_channels
self.amplitude = amplitude
self.read_size = read_size
self._sigma = amplitude / 3
self.sleeper = _Sleeper(float(self.read_size/self.rate))
def start(self):
"""Does nothing for this device. Implemented to follow device API."""
pass
def read(self):
"""
Generates zero-mean Gaussian data.
This method blocks (calls ``time.sleep()``) to emulate other data
acquisition units which wait for the requested number of samples to be
read. The amount of time to block is calculated such that consecutive
calls will always return with constant frequency, assuming the calls
occur faster than required (i.e. processing doesn't fall behind).
Returns
-------
data : ndarray, shape (num_channels, read_size)
The generated data.
"""
self.sleeper.sleep()
data = self._sigma * numpy.random.randn(self.num_channels,
self.read_size)
return data
def stop(self):
"""Does nothing for this device. Implemented to follow device API."""
pass
def reset(self):
"""Reset the device back to its initialized state."""
self.sleeper.reset()
class Keyboard(QtCore.QObject):
"""Keyboard input device.
The keyboard device works by periodically sampling (with the rate
specified) whether or not the watched keys have been pressed since the last
sampling event. The output is a numpy array of shape ``(n_keys, 1)``, where
the numerical values are booleans indicating whether or not the
corresponding keys have been pressed.
Parameters
----------
rate : int, optional
Sampling rate, in Hz.
keys : container of str, optional
Keys to watch and use as input signals. The keys used here should not
conflict with the key used by the ``Experiment`` to start the next
task.
Notes
-----
There are a couple reasonable alternatives to the way the keyboard device
is currently implemented. One way to do it might be sampling the key states
at a given rate and producing segments of sampled key state data, much like
a regular data acquisition device. One issue is that actual key state
(whether the key is being physically pressed or not) doesn't seem to be
feasible to find out with Qt. You can hook into key press and key release
events, but these are subject to repeat delay and repeat rate.
Another possible keyboard device would be responsive to key press events
themselves rather than an input sampling event. While Qt enables
event-based keyboard handling, the method used here fits the input device
model, making it easily swappable with other input devices.
"""
def __init__(self, rate=10, keys=None):
super(Keyboard, self).__init__()
self.rate = rate
if keys is None:
keys = list('wasd')
self.keys = keys
self._qkeys = [qt_key_map[k] for k in keys]
self._sleeper = _Sleeper(1.0/rate)
self._data = numpy.zeros((len(self.keys), 1))
def start(self):
"""Start the keyboard input device."""
# install event filter to capture keyboard input events
get_qtapp().installEventFilter(self)
def read(self):
"""Read which keys have just been pressed.
Returns
-------
data : ndarray, shape (n_keys, 1)
A boolean array with a 1 indicating the corresponding key has been
pressed and a 0 indicating it has not.
"""
self._sleeper.sleep()
out = self._data.copy()
self._data *= 0
return out
def stop(self):
"""Stop the keyboard input device.
You may need to stop the device in case you want to be able to use the
keys watched by the device for another purpose.
"""
# remove event filter so captured keys propagate when daq isn't used
get_qtapp().removeEventFilter(self)
def reset(self):
"""Reset the input device."""
self._sleeper.reset()
def eventFilter(self, obj, event):
evtype = event.type()
if evtype == QtCore.QEvent.KeyPress and event.key() in self._qkeys:
self._data[self._qkeys.index(event.key())] = 1
return True
return False
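# Illustrative read sketch (comments only): with the default keys ['w', 'a',
# 's', 'd'], a read() call returns a (4, 1) array such as [[0], [1], [0], [0]]
# if only 'a' was pressed since the previous read.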
class Mouse(QtCore.QObject):
"""Mouse input device.
The mouse device works by periodically sampling (with the rate specified)
the mouse position within the AxoPy experiment window. The output is in the
form of a numpy array of shape ``(2, 1)``, representing either the change
in position (default) or the absolute position in the window.
Parameters
----------
rate : int, optional
Sampling rate, in Hz.
position : bool, optional
Whether or not to return the mouse's position (instead of the position
difference from the previous sample).
Notes
-----
In Qt's coordinate system, the positive y direction is *downward*. Here,
this is inverted as a convenience (upward movement of the mouse produces a
positive "velocity").
Mouse events are intercepted here but they are not *consumed*, meaning you
can still use the mouse to manipulate widgets in the experiment window.
"""
def __init__(self, rate=10, position=False):
super(Mouse, self).__init__()
self.rate = rate
self._sleeper = _Sleeper(1.0/rate)
if position:
b = 1
else:
b = (1, -1)
self._filter = Filter(b)
self.reset()
def start(self):
"""Start sampling mouse movements."""
get_qtapp().installEventFilter(self)
def read(self):
"""Read the last-updated mouse position.
Returns
-------
data : ndarray, shape (2, 1)
The mouse "velocity" or position (x, y).
"""
self._sleeper.sleep()
return self._filter.process(self._data.copy())
def stop(self):
"""Stop sampling mouse movements."""
get_qtapp().removeEventFilter(self)
def reset(self):
"""Clear the input device."""
self._data = numpy.zeros((2, 1), dtype=float)
self._filter.clear()
self._sleeper.reset()
def eventFilter(self, obj, event):
evtype = event.type()
if evtype == QtCore.QEvent.MouseMove:
self._data[0] = event.x()
self._data[1] = -event.y()
return False
class _Sleeper(object):
def __init__(self, read_time):
self.read_time = read_time
self.last_read_time = None
def sleep(self):
t = time.time()
if self.last_read_time is None:
time.sleep(self.read_time)
else:
try:
time.sleep(self.read_time - (t - self.last_read_time))
except ValueError:
# if we're not meeting real-time requirement, don't wait
pass
self.last_read_time = time.time()
def reset(self):
self.last_read_time = None
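if __name__ == "__main__":
    # Minimal sketch (not part of the axopy API): exercise NoiseGenerator
    # directly to show the blocking read() behaviour documented above. Ten
    # reads at rate=1000 with read_size=100 should take about one second, and
    # each chunk has shape (num_channels, read_size).
    gen = NoiseGenerator(rate=1000, num_channels=2, amplitude=1.0, read_size=100)
    start = time.time()
    for _ in range(10):
        chunk = gen.read()                      # blocks to hold a constant update rate
        print(chunk.shape, float(chunk.std()))  # (2, 100), std is roughly amplitude / 3
    print("elapsed: %.2f s" % (time.time() - start))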
|
ucdrascal/axopy
|
axopy/daq.py
|
Python
|
mit
| 11,328
|
[
"Gaussian"
] |
c9f9727b5784d3f8e41b529307be74355a0e69a6686e66db0e2316821ba5a855
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
from collections.abc import Iterable
import json
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests import Request, PreparedRequest
from requests.sessions import Session
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.region_disks import RegionDisksClient
from google.cloud.compute_v1.services.region_disks import pagers
from google.cloud.compute_v1.services.region_disks import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
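# For example, with DEFAULT_ENDPOINT == "compute.googleapis.com" the helper
# returns the endpoint unchanged; only a localhost default is rewritten to
# "foo.googleapis.com" so the mTLS tests below can still derive a distinct
# mtls endpoint.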
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert RegionDisksClient._get_default_mtls_endpoint(None) is None
assert (
RegionDisksClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
)
assert (
RegionDisksClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
RegionDisksClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
RegionDisksClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert RegionDisksClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class,transport_name", [(RegionDisksClient, "rest"),])
def test_region_disks_client_from_service_account_info(client_class, transport_name):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info, transport=transport_name)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == (
"compute.googleapis.com{}".format(":443")
if transport_name in ["grpc", "grpc_asyncio"]
else "https://{}".format("compute.googleapis.com")
)
@pytest.mark.parametrize(
"transport_class,transport_name", [(transports.RegionDisksRestTransport, "rest"),]
)
def test_region_disks_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class,transport_name", [(RegionDisksClient, "rest"),])
def test_region_disks_client_from_service_account_file(client_class, transport_name):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file(
"dummy/file/path.json", transport=transport_name
)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json(
"dummy/file/path.json", transport=transport_name
)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == (
"compute.googleapis.com{}".format(":443")
if transport_name in ["grpc", "grpc_asyncio"]
else "https://{}".format("compute.googleapis.com")
)
def test_region_disks_client_get_transport_class():
transport = RegionDisksClient.get_transport_class()
available_transports = [
transports.RegionDisksRestTransport,
]
assert transport in available_transports
transport = RegionDisksClient.get_transport_class("rest")
assert transport == transports.RegionDisksRestTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(RegionDisksClient, transports.RegionDisksRestTransport, "rest"),],
)
@mock.patch.object(
RegionDisksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionDisksClient)
)
def test_region_disks_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(RegionDisksClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(RegionDisksClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(RegionDisksClient, transports.RegionDisksRestTransport, "rest", "true"),
(RegionDisksClient, transports.RegionDisksRestTransport, "rest", "false"),
],
)
@mock.patch.object(
RegionDisksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionDisksClient)
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_region_disks_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [RegionDisksClient])
@mock.patch.object(
RegionDisksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionDisksClient)
)
def test_region_disks_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(RegionDisksClient, transports.RegionDisksRestTransport, "rest"),],
)
def test_region_disks_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[(RegionDisksClient, transports.RegionDisksRestTransport, "rest", None),],
)
def test_region_disks_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"request_type", [compute.AddResourcePoliciesRegionDiskRequest, dict,]
)
def test_add_resource_policies_unary_rest(request_type):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"}
request_init["region_disks_add_resource_policies_request_resource"] = {
"resource_policies": ["resource_policies_value_1", "resource_policies_value_2"]
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.add_resource_policies_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_add_resource_policies_unary_rest_required_fields(
request_type=compute.AddResourcePoliciesRegionDiskRequest,
):
transport_class = transports.RegionDisksRestTransport
request_init = {}
request_init["disk"] = ""
request_init["project"] = ""
request_init["region"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).add_resource_policies._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["disk"] = "disk_value"
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).add_resource_policies._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "disk" in jsonified_request
assert jsonified_request["disk"] == "disk_value"
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.add_resource_policies_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_add_resource_policies_unary_rest_unset_required_fields():
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
unset_fields = transport.add_resource_policies._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",))
& set(
(
"disk",
"project",
"region",
"regionDisksAddResourcePoliciesRequestResource",
)
)
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_add_resource_policies_unary_rest_interceptors(null_interceptor):
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionDisksRestInterceptor(),
)
client = RegionDisksClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionDisksRestInterceptor, "post_add_resource_policies"
) as post, mock.patch.object(
transports.RegionDisksRestInterceptor, "pre_add_resource_policies"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.AddResourcePoliciesRegionDiskRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Operation
client.add_resource_policies_unary(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_add_resource_policies_unary_rest_bad_request(
transport: str = "rest", request_type=compute.AddResourcePoliciesRegionDiskRequest
):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"}
request_init["region_disks_add_resource_policies_request_resource"] = {
"resource_policies": ["resource_policies_value_1", "resource_policies_value_2"]
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.add_resource_policies_unary(request)
def test_add_resource_policies_unary_rest_flattened():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "region": "sample2", "disk": "sample3"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
disk="disk_value",
region_disks_add_resource_policies_request_resource=compute.RegionDisksAddResourcePoliciesRequest(
resource_policies=["resource_policies_value"]
),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.add_resource_policies_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/disks/{disk}/addResourcePolicies"
% client.transport._host,
args[1],
)
def test_add_resource_policies_unary_rest_flattened_error(transport: str = "rest"):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.add_resource_policies_unary(
compute.AddResourcePoliciesRegionDiskRequest(),
project="project_value",
region="region_value",
disk="disk_value",
region_disks_add_resource_policies_request_resource=compute.RegionDisksAddResourcePoliciesRequest(
resource_policies=["resource_policies_value"]
),
)
def test_add_resource_policies_unary_rest_error():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize(
"request_type", [compute.CreateSnapshotRegionDiskRequest, dict,]
)
def test_create_snapshot_unary_rest(request_type):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"}
request_init["snapshot_resource"] = {
"auto_created": True,
"chain_name": "chain_name_value",
"creation_timestamp": "creation_timestamp_value",
"description": "description_value",
"disk_size_gb": 1261,
"download_bytes": 1502,
"id": 205,
"kind": "kind_value",
"label_fingerprint": "label_fingerprint_value",
"labels": {},
"license_codes": [1361, 1362],
"licenses": ["licenses_value_1", "licenses_value_2"],
"location_hint": "location_hint_value",
"name": "name_value",
"satisfies_pzs": True,
"self_link": "self_link_value",
"snapshot_encryption_key": {
"kms_key_name": "kms_key_name_value",
"kms_key_service_account": "kms_key_service_account_value",
"raw_key": "raw_key_value",
"rsa_encrypted_key": "rsa_encrypted_key_value",
"sha256": "sha256_value",
},
"source_disk": "source_disk_value",
"source_disk_encryption_key": {},
"source_disk_id": "source_disk_id_value",
"status": "status_value",
"storage_bytes": 1403,
"storage_bytes_status": "storage_bytes_status_value",
"storage_locations": ["storage_locations_value_1", "storage_locations_value_2"],
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.create_snapshot_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_create_snapshot_unary_rest_required_fields(
request_type=compute.CreateSnapshotRegionDiskRequest,
):
transport_class = transports.RegionDisksRestTransport
request_init = {}
request_init["disk"] = ""
request_init["project"] = ""
request_init["region"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).create_snapshot._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["disk"] = "disk_value"
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).create_snapshot._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "disk" in jsonified_request
assert jsonified_request["disk"] == "disk_value"
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.create_snapshot_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_create_snapshot_unary_rest_unset_required_fields():
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
unset_fields = transport.create_snapshot._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",)) & set(("disk", "project", "region", "snapshotResource",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_create_snapshot_unary_rest_interceptors(null_interceptor):
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionDisksRestInterceptor(),
)
client = RegionDisksClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionDisksRestInterceptor, "post_create_snapshot"
) as post, mock.patch.object(
transports.RegionDisksRestInterceptor, "pre_create_snapshot"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.CreateSnapshotRegionDiskRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Operation
client.create_snapshot_unary(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_create_snapshot_unary_rest_bad_request(
transport: str = "rest", request_type=compute.CreateSnapshotRegionDiskRequest
):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"}
request_init["snapshot_resource"] = {
"auto_created": True,
"chain_name": "chain_name_value",
"creation_timestamp": "creation_timestamp_value",
"description": "description_value",
"disk_size_gb": 1261,
"download_bytes": 1502,
"id": 205,
"kind": "kind_value",
"label_fingerprint": "label_fingerprint_value",
"labels": {},
"license_codes": [1361, 1362],
"licenses": ["licenses_value_1", "licenses_value_2"],
"location_hint": "location_hint_value",
"name": "name_value",
"satisfies_pzs": True,
"self_link": "self_link_value",
"snapshot_encryption_key": {
"kms_key_name": "kms_key_name_value",
"kms_key_service_account": "kms_key_service_account_value",
"raw_key": "raw_key_value",
"rsa_encrypted_key": "rsa_encrypted_key_value",
"sha256": "sha256_value",
},
"source_disk": "source_disk_value",
"source_disk_encryption_key": {},
"source_disk_id": "source_disk_id_value",
"status": "status_value",
"storage_bytes": 1403,
"storage_bytes_status": "storage_bytes_status_value",
"storage_locations": ["storage_locations_value_1", "storage_locations_value_2"],
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.create_snapshot_unary(request)
def test_create_snapshot_unary_rest_flattened():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "region": "sample2", "disk": "sample3"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
disk="disk_value",
snapshot_resource=compute.Snapshot(auto_created=True),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.create_snapshot_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/disks/{disk}/createSnapshot"
% client.transport._host,
args[1],
)
def test_create_snapshot_unary_rest_flattened_error(transport: str = "rest"):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_snapshot_unary(
compute.CreateSnapshotRegionDiskRequest(),
project="project_value",
region="region_value",
disk="disk_value",
snapshot_resource=compute.Snapshot(auto_created=True),
)
def test_create_snapshot_unary_rest_error():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.DeleteRegionDiskRequest, dict,])
def test_delete_unary_rest(request_type):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_delete_unary_rest_required_fields(
request_type=compute.DeleteRegionDiskRequest,
):
transport_class = transports.RegionDisksRestTransport
request_init = {}
request_init["disk"] = ""
request_init["project"] = ""
request_init["region"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).delete._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["disk"] = "disk_value"
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).delete._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "disk" in jsonified_request
assert jsonified_request["disk"] == "disk_value"
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "delete",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_delete_unary_rest_unset_required_fields():
transport = transports.RegionDisksRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.delete._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",)) & set(("disk", "project", "region",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_delete_unary_rest_interceptors(null_interceptor):
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionDisksRestInterceptor(),
)
client = RegionDisksClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionDisksRestInterceptor, "post_delete"
) as post, mock.patch.object(
transports.RegionDisksRestInterceptor, "pre_delete"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.DeleteRegionDiskRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Operation
client.delete_unary(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_delete_unary_rest_bad_request(
transport: str = "rest", request_type=compute.DeleteRegionDiskRequest
):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.delete_unary(request)
def test_delete_unary_rest_flattened():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "region": "sample2", "disk": "sample3"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value", region="region_value", disk="disk_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.delete_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/disks/{disk}"
% client.transport._host,
args[1],
)
def test_delete_unary_rest_flattened_error(transport: str = "rest"):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_unary(
compute.DeleteRegionDiskRequest(),
project="project_value",
region="region_value",
disk="disk_value",
)
def test_delete_unary_rest_error():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.GetRegionDiskRequest, dict,])
def test_get_rest(request_type):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Disk(
creation_timestamp="creation_timestamp_value",
description="description_value",
id=205,
kind="kind_value",
label_fingerprint="label_fingerprint_value",
last_attach_timestamp="last_attach_timestamp_value",
last_detach_timestamp="last_detach_timestamp_value",
license_codes=[1360],
licenses=["licenses_value"],
location_hint="location_hint_value",
name="name_value",
options="options_value",
physical_block_size_bytes=2663,
provisioned_iops=1740,
region="region_value",
replica_zones=["replica_zones_value"],
resource_policies=["resource_policies_value"],
satisfies_pzs=True,
self_link="self_link_value",
size_gb=739,
source_disk="source_disk_value",
source_disk_id="source_disk_id_value",
source_image="source_image_value",
source_image_id="source_image_id_value",
source_snapshot="source_snapshot_value",
source_snapshot_id="source_snapshot_id_value",
source_storage_object="source_storage_object_value",
status="status_value",
type_="type__value",
users=["users_value"],
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Disk.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Disk)
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.id == 205
assert response.kind == "kind_value"
assert response.label_fingerprint == "label_fingerprint_value"
assert response.last_attach_timestamp == "last_attach_timestamp_value"
assert response.last_detach_timestamp == "last_detach_timestamp_value"
assert response.license_codes == [1360]
assert response.licenses == ["licenses_value"]
assert response.location_hint == "location_hint_value"
assert response.name == "name_value"
assert response.options == "options_value"
assert response.physical_block_size_bytes == 2663
assert response.provisioned_iops == 1740
assert response.region == "region_value"
assert response.replica_zones == ["replica_zones_value"]
assert response.resource_policies == ["resource_policies_value"]
assert response.satisfies_pzs is True
assert response.self_link == "self_link_value"
assert response.size_gb == 739
assert response.source_disk == "source_disk_value"
assert response.source_disk_id == "source_disk_id_value"
assert response.source_image == "source_image_value"
assert response.source_image_id == "source_image_id_value"
assert response.source_snapshot == "source_snapshot_value"
assert response.source_snapshot_id == "source_snapshot_id_value"
assert response.source_storage_object == "source_storage_object_value"
assert response.status == "status_value"
assert response.type_ == "type__value"
assert response.users == ["users_value"]
assert response.zone == "zone_value"
def test_get_rest_required_fields(request_type=compute.GetRegionDiskRequest):
transport_class = transports.RegionDisksRestTransport
request_init = {}
request_init["disk"] = ""
request_init["project"] = ""
request_init["region"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).get._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["disk"] = "disk_value"
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).get._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "disk" in jsonified_request
assert jsonified_request["disk"] == "disk_value"
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Disk()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Disk.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_get_rest_unset_required_fields():
transport = transports.RegionDisksRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.get._get_unset_required_fields({})
assert set(unset_fields) == (set(()) & set(("disk", "project", "region",)))
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_get_rest_interceptors(null_interceptor):
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionDisksRestInterceptor(),
)
client = RegionDisksClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionDisksRestInterceptor, "post_get"
) as post, mock.patch.object(
transports.RegionDisksRestInterceptor, "pre_get"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Disk.to_json(compute.Disk())
request = compute.GetRegionDiskRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Disk
client.get(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
pre.assert_called_once()
post.assert_called_once()
def test_get_rest_bad_request(
transport: str = "rest", request_type=compute.GetRegionDiskRequest
):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.get(request)
def test_get_rest_flattened():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Disk()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "region": "sample2", "disk": "sample3"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value", region="region_value", disk="disk_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Disk.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.get(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/disks/{disk}"
% client.transport._host,
args[1],
)
def test_get_rest_flattened_error(transport: str = "rest"):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get(
compute.GetRegionDiskRequest(),
project="project_value",
region="region_value",
disk="disk_value",
)
def test_get_rest_error():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.GetIamPolicyRegionDiskRequest, dict,])
def test_get_iam_policy_rest(request_type):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Policy(etag="etag_value", iam_owned=True, version=774,)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Policy.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get_iam_policy(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Policy)
assert response.etag == "etag_value"
assert response.iam_owned is True
assert response.version == 774
def test_get_iam_policy_rest_required_fields(
request_type=compute.GetIamPolicyRegionDiskRequest,
):
transport_class = transports.RegionDisksRestTransport
request_init = {}
request_init["project"] = ""
request_init["region"] = ""
request_init["resource"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).get_iam_policy._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
jsonified_request["resource"] = "resource_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).get_iam_policy._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
assert not set(unset_fields) - set(("options_requested_policy_version",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
assert "resource" in jsonified_request
assert jsonified_request["resource"] == "resource_value"
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Policy()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Policy.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get_iam_policy(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_get_iam_policy_rest_unset_required_fields():
transport = transports.RegionDisksRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.get_iam_policy._get_unset_required_fields({})
assert set(unset_fields) == (
set(("optionsRequestedPolicyVersion",))
& set(("project", "region", "resource",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_get_iam_policy_rest_interceptors(null_interceptor):
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionDisksRestInterceptor(),
)
client = RegionDisksClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionDisksRestInterceptor, "post_get_iam_policy"
) as post, mock.patch.object(
transports.RegionDisksRestInterceptor, "pre_get_iam_policy"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Policy.to_json(compute.Policy())
request = compute.GetIamPolicyRegionDiskRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Policy
client.get_iam_policy(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_get_iam_policy_rest_bad_request(
transport: str = "rest", request_type=compute.GetIamPolicyRegionDiskRequest
):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.get_iam_policy(request)
def test_get_iam_policy_rest_flattened():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Policy()
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"resource": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value", region="region_value", resource="resource_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Policy.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.get_iam_policy(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/disks/{resource}/getIamPolicy"
% client.transport._host,
args[1],
)
def test_get_iam_policy_rest_flattened_error(transport: str = "rest"):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_iam_policy(
compute.GetIamPolicyRegionDiskRequest(),
project="project_value",
region="region_value",
resource="resource_value",
)
def test_get_iam_policy_rest_error():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.InsertRegionDiskRequest, dict,])
def test_insert_unary_rest(request_type):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2"}
request_init["disk_resource"] = {
"creation_timestamp": "creation_timestamp_value",
"description": "description_value",
"disk_encryption_key": {
"kms_key_name": "kms_key_name_value",
"kms_key_service_account": "kms_key_service_account_value",
"raw_key": "raw_key_value",
"rsa_encrypted_key": "rsa_encrypted_key_value",
"sha256": "sha256_value",
},
"guest_os_features": [{"type_": "type__value"}],
"id": 205,
"kind": "kind_value",
"label_fingerprint": "label_fingerprint_value",
"labels": {},
"last_attach_timestamp": "last_attach_timestamp_value",
"last_detach_timestamp": "last_detach_timestamp_value",
"license_codes": [1361, 1362],
"licenses": ["licenses_value_1", "licenses_value_2"],
"location_hint": "location_hint_value",
"name": "name_value",
"options": "options_value",
"physical_block_size_bytes": 2663,
"provisioned_iops": 1740,
"region": "region_value",
"replica_zones": ["replica_zones_value_1", "replica_zones_value_2"],
"resource_policies": ["resource_policies_value_1", "resource_policies_value_2"],
"satisfies_pzs": True,
"self_link": "self_link_value",
"size_gb": 739,
"source_disk": "source_disk_value",
"source_disk_id": "source_disk_id_value",
"source_image": "source_image_value",
"source_image_encryption_key": {},
"source_image_id": "source_image_id_value",
"source_snapshot": "source_snapshot_value",
"source_snapshot_encryption_key": {},
"source_snapshot_id": "source_snapshot_id_value",
"source_storage_object": "source_storage_object_value",
"status": "status_value",
"type_": "type__value",
"users": ["users_value_1", "users_value_2"],
"zone": "zone_value",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_insert_unary_rest_required_fields(
request_type=compute.InsertRegionDiskRequest,
):
transport_class = transports.RegionDisksRestTransport
request_init = {}
request_init["project"] = ""
request_init["region"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).insert._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).insert._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
assert not set(unset_fields) - set(("request_id", "source_image",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_insert_unary_rest_unset_required_fields():
transport = transports.RegionDisksRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.insert._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId", "sourceImage",)) & set(("diskResource", "project", "region",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_insert_unary_rest_interceptors(null_interceptor):
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionDisksRestInterceptor(),
)
client = RegionDisksClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionDisksRestInterceptor, "post_insert"
) as post, mock.patch.object(
transports.RegionDisksRestInterceptor, "pre_insert"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.InsertRegionDiskRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Operation
client.insert_unary(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_insert_unary_rest_bad_request(
transport: str = "rest", request_type=compute.InsertRegionDiskRequest
):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2"}
request_init["disk_resource"] = {
"creation_timestamp": "creation_timestamp_value",
"description": "description_value",
"disk_encryption_key": {
"kms_key_name": "kms_key_name_value",
"kms_key_service_account": "kms_key_service_account_value",
"raw_key": "raw_key_value",
"rsa_encrypted_key": "rsa_encrypted_key_value",
"sha256": "sha256_value",
},
"guest_os_features": [{"type_": "type__value"}],
"id": 205,
"kind": "kind_value",
"label_fingerprint": "label_fingerprint_value",
"labels": {},
"last_attach_timestamp": "last_attach_timestamp_value",
"last_detach_timestamp": "last_detach_timestamp_value",
"license_codes": [1361, 1362],
"licenses": ["licenses_value_1", "licenses_value_2"],
"location_hint": "location_hint_value",
"name": "name_value",
"options": "options_value",
"physical_block_size_bytes": 2663,
"provisioned_iops": 1740,
"region": "region_value",
"replica_zones": ["replica_zones_value_1", "replica_zones_value_2"],
"resource_policies": ["resource_policies_value_1", "resource_policies_value_2"],
"satisfies_pzs": True,
"self_link": "self_link_value",
"size_gb": 739,
"source_disk": "source_disk_value",
"source_disk_id": "source_disk_id_value",
"source_image": "source_image_value",
"source_image_encryption_key": {},
"source_image_id": "source_image_id_value",
"source_snapshot": "source_snapshot_value",
"source_snapshot_encryption_key": {},
"source_snapshot_id": "source_snapshot_id_value",
"source_storage_object": "source_storage_object_value",
"status": "status_value",
"type_": "type__value",
"users": ["users_value_1", "users_value_2"],
"zone": "zone_value",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.insert_unary(request)
def test_insert_unary_rest_flattened():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "region": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
disk_resource=compute.Disk(creation_timestamp="creation_timestamp_value"),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.insert_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/disks"
% client.transport._host,
args[1],
)
def test_insert_unary_rest_flattened_error(transport: str = "rest"):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.insert_unary(
compute.InsertRegionDiskRequest(),
project="project_value",
region="region_value",
disk_resource=compute.Disk(creation_timestamp="creation_timestamp_value"),
)
def test_insert_unary_rest_error():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.ListRegionDisksRequest, dict,])
def test_list_rest(request_type):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.DiskList(
id="id_value",
kind="kind_value",
next_page_token="next_page_token_value",
self_link="self_link_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.DiskList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPager)
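    # The pager proxies attribute access to the underlying DiskList response,
    # which is why the raw list fields below remain directly accessible.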
assert response.id == "id_value"
assert response.kind == "kind_value"
assert response.next_page_token == "next_page_token_value"
assert response.self_link == "self_link_value"
def test_list_rest_required_fields(request_type=compute.ListRegionDisksRequest):
transport_class = transports.RegionDisksRestTransport
request_init = {}
request_init["project"] = ""
request_init["region"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).list._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).list._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
assert not set(unset_fields) - set(
("filter", "max_results", "order_by", "page_token", "return_partial_success",)
)
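    # Only the optional List query parameters (filter, max_results, order_by,
    # page_token, return_partial_success) may remain unset at this point.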
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.DiskList()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.DiskList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_list_rest_unset_required_fields():
transport = transports.RegionDisksRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.list._get_unset_required_fields({})
assert set(unset_fields) == (
set(("filter", "maxResults", "orderBy", "pageToken", "returnPartialSuccess",))
& set(("project", "region",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_list_rest_interceptors(null_interceptor):
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionDisksRestInterceptor(),
)
client = RegionDisksClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionDisksRestInterceptor, "post_list"
) as post, mock.patch.object(
transports.RegionDisksRestInterceptor, "pre_list"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.DiskList.to_json(compute.DiskList())
request = compute.ListRegionDisksRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.DiskList
client.list(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
pre.assert_called_once()
post.assert_called_once()
def test_list_rest_bad_request(
transport: str = "rest", request_type=compute.ListRegionDisksRequest
):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.list(request)
def test_list_rest_flattened():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.DiskList()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "region": "sample2"}
# get truthy value for each flattened field
mock_args = dict(project="project_value", region="region_value",)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.DiskList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.list(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/disks"
% client.transport._host,
args[1],
)
def test_list_rest_flattened_error(transport: str = "rest"):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list(
compute.ListRegionDisksRequest(),
project="project_value",
region="region_value",
)
def test_list_rest_pager(transport: str = "rest"):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# TODO(kbandes): remove this mock unless there's a good reason for it.
# with mock.patch.object(path_template, 'transcode') as transcode:
# Set the response as a series of pages
response = (
compute.DiskList(
items=[compute.Disk(), compute.Disk(), compute.Disk(),],
next_page_token="abc",
),
compute.DiskList(items=[], next_page_token="def",),
compute.DiskList(items=[compute.Disk(),], next_page_token="ghi",),
compute.DiskList(items=[compute.Disk(), compute.Disk(),],),
)
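        # Four pages containing 3, 0, 1 and 2 items respectively (6 in total);
        # the final page has no next_page_token, which terminates iteration.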
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(compute.DiskList.to_json(x) for x in response)
return_values = tuple(Response() for i in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
sample_request = {"project": "sample1", "region": "sample2"}
pager = client.list(request=sample_request)
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, compute.Disk) for i in results)
pages = list(client.list(request=sample_request).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
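# Illustrative only: outside of tests a caller would typically iterate the
# pager directly, e.g.
#
#     for disk in client.list(project="my-project", region="us-central1"):
#         ...
#
# where "my-project" and "us-central1" are placeholder values.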
@pytest.mark.parametrize(
"request_type", [compute.RemoveResourcePoliciesRegionDiskRequest, dict,]
)
def test_remove_resource_policies_unary_rest(request_type):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"}
request_init["region_disks_remove_resource_policies_request_resource"] = {
"resource_policies": ["resource_policies_value_1", "resource_policies_value_2"]
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.remove_resource_policies_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_remove_resource_policies_unary_rest_required_fields(
request_type=compute.RemoveResourcePoliciesRegionDiskRequest,
):
transport_class = transports.RegionDisksRestTransport
request_init = {}
request_init["disk"] = ""
request_init["project"] = ""
request_init["region"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).remove_resource_policies._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["disk"] = "disk_value"
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).remove_resource_policies._get_unset_required_fields(jsonified_request)
    # Check that path parameters and body parameters do not overlap.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "disk" in jsonified_request
assert jsonified_request["disk"] == "disk_value"
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.remove_resource_policies_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_remove_resource_policies_unary_rest_unset_required_fields():
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
unset_fields = transport.remove_resource_policies._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",))
& set(
(
"disk",
"project",
"region",
"regionDisksRemoveResourcePoliciesRequestResource",
)
)
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_remove_resource_policies_unary_rest_interceptors(null_interceptor):
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionDisksRestInterceptor(),
)
client = RegionDisksClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionDisksRestInterceptor, "post_remove_resource_policies"
) as post, mock.patch.object(
transports.RegionDisksRestInterceptor, "pre_remove_resource_policies"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.RemoveResourcePoliciesRegionDiskRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Operation
client.remove_resource_policies_unary(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
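# Illustrative sketch, not part of the generated suite: the interceptor test
# above drives the pre/post hooks on RegionDisksRestInterceptor. Calling code
# would normally subclass the interceptor and override those hooks; the
# parameter shapes below (request plus metadata in, parsed response out) are
# inferred from the mock return values used in the test and may not match the
# real signatures exactly.
class _RegionDisksInterceptorSketch(transports.RegionDisksRestInterceptor):
    def pre_remove_resource_policies(self, request, metadata):
        # Inspect or amend the outgoing request/metadata before the HTTP call.
        metadata = list(metadata) + [("x-sketch-note", "pre-hook-ran")]
        return request, metadata
    def post_remove_resource_policies(self, response):
        # Inspect the parsed compute.Operation before it is returned to the caller.
        return response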
def test_remove_resource_policies_unary_rest_bad_request(
transport: str = "rest",
request_type=compute.RemoveResourcePoliciesRegionDiskRequest,
):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"}
request_init["region_disks_remove_resource_policies_request_resource"] = {
"resource_policies": ["resource_policies_value_1", "resource_policies_value_2"]
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.remove_resource_policies_unary(request)
def test_remove_resource_policies_unary_rest_flattened():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "region": "sample2", "disk": "sample3"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
disk="disk_value",
region_disks_remove_resource_policies_request_resource=compute.RegionDisksRemoveResourcePoliciesRequest(
resource_policies=["resource_policies_value"]
),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.remove_resource_policies_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/disks/{disk}/removeResourcePolicies"
% client.transport._host,
args[1],
)
def test_remove_resource_policies_unary_rest_flattened_error(transport: str = "rest"):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.remove_resource_policies_unary(
compute.RemoveResourcePoliciesRegionDiskRequest(),
project="project_value",
region="region_value",
disk="disk_value",
region_disks_remove_resource_policies_request_resource=compute.RegionDisksRemoveResourcePoliciesRequest(
resource_policies=["resource_policies_value"]
),
)
def test_remove_resource_policies_unary_rest_error():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.ResizeRegionDiskRequest, dict,])
def test_resize_unary_rest(request_type):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"}
request_init["region_disks_resize_request_resource"] = {"size_gb": 739}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.resize_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_resize_unary_rest_required_fields(
request_type=compute.ResizeRegionDiskRequest,
):
transport_class = transports.RegionDisksRestTransport
request_init = {}
request_init["disk"] = ""
request_init["project"] = ""
request_init["region"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).resize._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["disk"] = "disk_value"
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).resize._get_unset_required_fields(jsonified_request)
    # Check that path parameters and body parameters do not overlap.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "disk" in jsonified_request
assert jsonified_request["disk"] == "disk_value"
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.resize_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_resize_unary_rest_unset_required_fields():
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
unset_fields = transport.resize._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",))
& set(("disk", "project", "region", "regionDisksResizeRequestResource",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_resize_unary_rest_interceptors(null_interceptor):
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionDisksRestInterceptor(),
)
client = RegionDisksClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionDisksRestInterceptor, "post_resize"
) as post, mock.patch.object(
transports.RegionDisksRestInterceptor, "pre_resize"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.ResizeRegionDiskRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Operation
client.resize_unary(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_resize_unary_rest_bad_request(
transport: str = "rest", request_type=compute.ResizeRegionDiskRequest
):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"}
request_init["region_disks_resize_request_resource"] = {"size_gb": 739}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.resize_unary(request)
def test_resize_unary_rest_flattened():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "region": "sample2", "disk": "sample3"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
disk="disk_value",
region_disks_resize_request_resource=compute.RegionDisksResizeRequest(
size_gb=739
),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.resize_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/disks/{disk}/resize"
% client.transport._host,
args[1],
)
def test_resize_unary_rest_flattened_error(transport: str = "rest"):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.resize_unary(
compute.ResizeRegionDiskRequest(),
project="project_value",
region="region_value",
disk="disk_value",
region_disks_resize_request_resource=compute.RegionDisksResizeRequest(
size_gb=739
),
)
def test_resize_unary_rest_error():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.SetIamPolicyRegionDiskRequest, dict,])
def test_set_iam_policy_rest(request_type):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"}
request_init["region_set_policy_request_resource"] = {
"bindings": [
{
"binding_id": "binding_id_value",
"condition": {
"description": "description_value",
"expression": "expression_value",
"location": "location_value",
"title": "title_value",
},
"members": ["members_value_1", "members_value_2"],
"role": "role_value",
}
],
"etag": "etag_value",
"policy": {
"audit_configs": [
{
"audit_log_configs": [
{
"exempted_members": [
"exempted_members_value_1",
"exempted_members_value_2",
],
"ignore_child_exemptions": True,
"log_type": "log_type_value",
}
],
"exempted_members": [
"exempted_members_value_1",
"exempted_members_value_2",
],
"service": "service_value",
}
],
"bindings": {},
"etag": "etag_value",
"iam_owned": True,
"rules": [
{
"action": "action_value",
"conditions": [
{
"iam": "iam_value",
"op": "op_value",
"svc": "svc_value",
"sys": "sys_value",
"values": ["values_value_1", "values_value_2"],
}
],
"description": "description_value",
"ins": ["ins_value_1", "ins_value_2"],
"log_configs": [
{
"cloud_audit": {
"authorization_logging_options": {
"permission_type": "permission_type_value"
},
"log_name": "log_name_value",
},
"counter": {
"custom_fields": [
{"name": "name_value", "value": "value_value"}
],
"field": "field_value",
"metric": "metric_value",
},
"data_access": {"log_mode": "log_mode_value"},
}
],
"not_ins": ["not_ins_value_1", "not_ins_value_2"],
"permissions": ["permissions_value_1", "permissions_value_2"],
}
],
"version": 774,
},
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Policy(etag="etag_value", iam_owned=True, version=774,)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Policy.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.set_iam_policy(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Policy)
assert response.etag == "etag_value"
assert response.iam_owned is True
assert response.version == 774
def test_set_iam_policy_rest_required_fields(
request_type=compute.SetIamPolicyRegionDiskRequest,
):
transport_class = transports.RegionDisksRestTransport
request_init = {}
request_init["project"] = ""
request_init["region"] = ""
request_init["resource"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).set_iam_policy._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
jsonified_request["resource"] = "resource_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).set_iam_policy._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
assert "resource" in jsonified_request
assert jsonified_request["resource"] == "resource_value"
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Policy()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Policy.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.set_iam_policy(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_set_iam_policy_rest_unset_required_fields():
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
unset_fields = transport.set_iam_policy._get_unset_required_fields({})
assert set(unset_fields) == (
set(())
& set(("project", "region", "regionSetPolicyRequestResource", "resource",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_set_iam_policy_rest_interceptors(null_interceptor):
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionDisksRestInterceptor(),
)
client = RegionDisksClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionDisksRestInterceptor, "post_set_iam_policy"
) as post, mock.patch.object(
transports.RegionDisksRestInterceptor, "pre_set_iam_policy"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Policy.to_json(compute.Policy())
request = compute.SetIamPolicyRegionDiskRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Policy
client.set_iam_policy(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_set_iam_policy_rest_bad_request(
transport: str = "rest", request_type=compute.SetIamPolicyRegionDiskRequest
):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"}
request_init["region_set_policy_request_resource"] = {
"bindings": [
{
"binding_id": "binding_id_value",
"condition": {
"description": "description_value",
"expression": "expression_value",
"location": "location_value",
"title": "title_value",
},
"members": ["members_value_1", "members_value_2"],
"role": "role_value",
}
],
"etag": "etag_value",
"policy": {
"audit_configs": [
{
"audit_log_configs": [
{
"exempted_members": [
"exempted_members_value_1",
"exempted_members_value_2",
],
"ignore_child_exemptions": True,
"log_type": "log_type_value",
}
],
"exempted_members": [
"exempted_members_value_1",
"exempted_members_value_2",
],
"service": "service_value",
}
],
"bindings": {},
"etag": "etag_value",
"iam_owned": True,
"rules": [
{
"action": "action_value",
"conditions": [
{
"iam": "iam_value",
"op": "op_value",
"svc": "svc_value",
"sys": "sys_value",
"values": ["values_value_1", "values_value_2"],
}
],
"description": "description_value",
"ins": ["ins_value_1", "ins_value_2"],
"log_configs": [
{
"cloud_audit": {
"authorization_logging_options": {
"permission_type": "permission_type_value"
},
"log_name": "log_name_value",
},
"counter": {
"custom_fields": [
{"name": "name_value", "value": "value_value"}
],
"field": "field_value",
"metric": "metric_value",
},
"data_access": {"log_mode": "log_mode_value"},
}
],
"not_ins": ["not_ins_value_1", "not_ins_value_2"],
"permissions": ["permissions_value_1", "permissions_value_2"],
}
],
"version": 774,
},
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.set_iam_policy(request)
def test_set_iam_policy_rest_flattened():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Policy()
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"resource": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
resource="resource_value",
region_set_policy_request_resource=compute.RegionSetPolicyRequest(
bindings=[compute.Binding(binding_id="binding_id_value")]
),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Policy.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.set_iam_policy(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/disks/{resource}/setIamPolicy"
% client.transport._host,
args[1],
)
def test_set_iam_policy_rest_flattened_error(transport: str = "rest"):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.set_iam_policy(
compute.SetIamPolicyRegionDiskRequest(),
project="project_value",
region="region_value",
resource="resource_value",
region_set_policy_request_resource=compute.RegionSetPolicyRequest(
bindings=[compute.Binding(binding_id="binding_id_value")]
),
)
def test_set_iam_policy_rest_error():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.SetLabelsRegionDiskRequest, dict,])
def test_set_labels_unary_rest(request_type):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"}
request_init["region_set_labels_request_resource"] = {
"label_fingerprint": "label_fingerprint_value",
"labels": {},
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.set_labels_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_set_labels_unary_rest_required_fields(
request_type=compute.SetLabelsRegionDiskRequest,
):
transport_class = transports.RegionDisksRestTransport
request_init = {}
request_init["project"] = ""
request_init["region"] = ""
request_init["resource"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).set_labels._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
jsonified_request["resource"] = "resource_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).set_labels._get_unset_required_fields(jsonified_request)
    # Check that path parameters and body parameters do not overlap.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
assert "resource" in jsonified_request
assert jsonified_request["resource"] == "resource_value"
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.set_labels_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_set_labels_unary_rest_unset_required_fields():
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
unset_fields = transport.set_labels._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",))
& set(("project", "region", "regionSetLabelsRequestResource", "resource",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_set_labels_unary_rest_interceptors(null_interceptor):
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionDisksRestInterceptor(),
)
client = RegionDisksClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionDisksRestInterceptor, "post_set_labels"
) as post, mock.patch.object(
transports.RegionDisksRestInterceptor, "pre_set_labels"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.SetLabelsRegionDiskRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Operation
client.set_labels_unary(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_set_labels_unary_rest_bad_request(
transport: str = "rest", request_type=compute.SetLabelsRegionDiskRequest
):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"}
request_init["region_set_labels_request_resource"] = {
"label_fingerprint": "label_fingerprint_value",
"labels": {},
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.set_labels_unary(request)
def test_set_labels_unary_rest_flattened():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"resource": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
resource="resource_value",
region_set_labels_request_resource=compute.RegionSetLabelsRequest(
label_fingerprint="label_fingerprint_value"
),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.set_labels_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/disks/{resource}/setLabels"
% client.transport._host,
args[1],
)
def test_set_labels_unary_rest_flattened_error(transport: str = "rest"):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.set_labels_unary(
compute.SetLabelsRegionDiskRequest(),
project="project_value",
region="region_value",
resource="resource_value",
region_set_labels_request_resource=compute.RegionSetLabelsRequest(
label_fingerprint="label_fingerprint_value"
),
)
def test_set_labels_unary_rest_error():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize(
"request_type", [compute.TestIamPermissionsRegionDiskRequest, dict,]
)
def test_test_iam_permissions_rest(request_type):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"}
request_init["test_permissions_request_resource"] = {
"permissions": ["permissions_value_1", "permissions_value_2"]
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.TestPermissionsResponse(
permissions=["permissions_value"],
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.TestPermissionsResponse.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.test_iam_permissions(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.TestPermissionsResponse)
assert response.permissions == ["permissions_value"]
def test_test_iam_permissions_rest_required_fields(
request_type=compute.TestIamPermissionsRegionDiskRequest,
):
transport_class = transports.RegionDisksRestTransport
request_init = {}
request_init["project"] = ""
request_init["region"] = ""
request_init["resource"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).test_iam_permissions._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
jsonified_request["resource"] = "resource_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).test_iam_permissions._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
assert "resource" in jsonified_request
assert jsonified_request["resource"] == "resource_value"
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.TestPermissionsResponse()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.TestPermissionsResponse.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.test_iam_permissions(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_test_iam_permissions_rest_unset_required_fields():
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
unset_fields = transport.test_iam_permissions._get_unset_required_fields({})
assert set(unset_fields) == (
set(())
& set(("project", "region", "resource", "testPermissionsRequestResource",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_test_iam_permissions_rest_interceptors(null_interceptor):
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionDisksRestInterceptor(),
)
client = RegionDisksClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionDisksRestInterceptor, "post_test_iam_permissions"
) as post, mock.patch.object(
transports.RegionDisksRestInterceptor, "pre_test_iam_permissions"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.TestPermissionsResponse.to_json(
compute.TestPermissionsResponse()
)
request = compute.TestIamPermissionsRegionDiskRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.TestPermissionsResponse
client.test_iam_permissions(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_test_iam_permissions_rest_bad_request(
transport: str = "rest", request_type=compute.TestIamPermissionsRegionDiskRequest
):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"}
request_init["test_permissions_request_resource"] = {
"permissions": ["permissions_value_1", "permissions_value_2"]
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.test_iam_permissions(request)
def test_test_iam_permissions_rest_flattened():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.TestPermissionsResponse()
# get arguments that satisfy an http rule for this method
sample_request = {
"project": "sample1",
"region": "sample2",
"resource": "sample3",
}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
resource="resource_value",
test_permissions_request_resource=compute.TestPermissionsRequest(
permissions=["permissions_value"]
),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.TestPermissionsResponse.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.test_iam_permissions(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/disks/{resource}/testIamPermissions"
% client.transport._host,
args[1],
)
def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.test_iam_permissions(
compute.TestIamPermissionsRegionDiskRequest(),
project="project_value",
region="region_value",
resource="resource_value",
test_permissions_request_resource=compute.TestPermissionsRequest(
permissions=["permissions_value"]
),
)
def test_test_iam_permissions_rest_error():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RegionDisksClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = RegionDisksClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = RegionDisksClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RegionDisksClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.RegionDisksRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = RegionDisksClient(transport=transport)
assert client.transport is transport
@pytest.mark.parametrize("transport_class", [transports.RegionDisksRestTransport,])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_region_disks_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.RegionDisksTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_region_disks_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.compute_v1.services.region_disks.transports.RegionDisksTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.RegionDisksTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"add_resource_policies",
"create_snapshot",
"delete",
"get",
"get_iam_policy",
"insert",
"list",
"remove_resource_policies",
"resize",
"set_iam_policy",
"set_labels",
"test_iam_permissions",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_region_disks_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.region_disks.transports.RegionDisksTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.RegionDisksTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
def test_region_disks_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.compute_v1.services.region_disks.transports.RegionDisksTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.RegionDisksTransport()
adc.assert_called_once()
def test_region_disks_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
RegionDisksClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
def test_region_disks_http_transport_client_cert_source_for_mtls():
cred = ga_credentials.AnonymousCredentials()
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
) as mock_configure_mtls_channel:
transports.RegionDisksRestTransport(
credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
)
mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
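# Illustrative sketch, assumption-labelled: the test above simply forwards
# client_cert_source_for_mtls to AuthorizedSession.configure_mtls_channel. Such
# a callback is conventionally a zero-argument callable returning a
# (cert_pem_bytes, key_pem_bytes) tuple; the PEM literals below are placeholders
# only, not working credentials.
def _client_cert_source_sketch():
    cert_pem = b"-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"
    key_pem = b"-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n"
    return cert_pem, key_pem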
@pytest.mark.parametrize("transport_name", ["rest",])
def test_region_disks_host_no_port(transport_name):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com"
),
transport=transport_name,
)
assert client.transport._host == (
"compute.googleapis.com:443"
if transport_name in ["grpc", "grpc_asyncio"]
else "https://compute.googleapis.com"
)
@pytest.mark.parametrize("transport_name", ["rest",])
def test_region_disks_host_with_port(transport_name):
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com:8000"
),
transport=transport_name,
)
assert client.transport._host == (
"compute.googleapis.com:8000"
if transport_name in ["grpc", "grpc_asyncio"]
else "https://compute.googleapis.com:8000"
)
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = RegionDisksClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = RegionDisksClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = RegionDisksClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = RegionDisksClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = RegionDisksClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = RegionDisksClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = RegionDisksClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = RegionDisksClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = RegionDisksClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = RegionDisksClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = RegionDisksClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = RegionDisksClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = RegionDisksClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = RegionDisksClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = RegionDisksClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.RegionDisksTransport, "_prep_wrapped_messages"
) as prep:
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.RegionDisksTransport, "_prep_wrapped_messages"
) as prep:
transport_class = RegionDisksClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
def test_transport_close():
transports = {
"rest": "_session",
}
for transport, close_name in transports.items():
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"rest",
]
for transport in transports:
client = RegionDisksClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[(RegionDisksClient, transports.RegionDisksRestTransport),],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-compute
|
tests/unit/gapic/compute_v1/test_region_disks.py
|
Python
|
apache-2.0
| 183,067
|
[
"Octopus"
] |
40a79702f2b10b056300b5f13b0e3f40ecf046b23e2a354e4879d643afe286c4
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkImageBlend(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkImageBlend(), 'Processing.',
('vtkImageData', 'vtkImageStencilData'), ('vtkImageData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
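# (vtkImageBlend combines several vtkImageData inputs into a single output
#  image, weighting each input by a per-input opacity; the optional
#  vtkImageStencilData input restricts where the blending is applied.)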
|
chrisidefix/devide
|
modules/vtk_basic/vtkImageBlend.py
|
Python
|
bsd-3-clause
| 505
|
[
"VTK"
] |
44533d6fa5f9b151d00323fe593e6841bc96377531ae7a72273445b3cb357b48
|
import numpy as np
from numpy.linalg import inv
import vtk
#import vtk.util.numpy_support as converter
from vtk import vtkPLYReader
import h5py
from pipeline_config import COLOR_OCTOMAP_H5_FILE,\
GPS_FILE, EXPORT_START, EXPORT_STEP, EXPORT_NUM,\
OCTOMAP_H5_FILE, STATIC_VTK_FILE, DYNAMIC_VTK_FILE, MAP_FILE
from VtkRenderer import VtkPointCloud, VtkBoundingBox
from transformations import euler_from_matrix
from RadarTransforms import loadRDRCamMap, loadRDR, calibrateRadarPts
from GPSTransforms import IMUTransforms
from GPSReader import GPSReader
from Q50_config import LoadParameters
def load_ply(ply_file):
reader = vtkPLYReader()
reader.SetFileName(ply_file)
    reader.Update()
ply_mapper = vtk.vtkPolyDataMapper()
ply_mapper.SetInputConnection(reader.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(ply_mapper)
return actor
def vtk_transform_from_np(np4x4):
vtk_matrix = vtk.vtkMatrix4x4()
for r in range(4):
for c in range(4):
vtk_matrix.SetElement(r, c, np4x4[r, c])
transform = vtk.vtkTransform()
transform.SetMatrix(vtk_matrix)
return transform
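# Illustrative usage sketch (names are placeholders, not defined in this module):
#   actor.SetUserTransform(vtk_transform_from_np(pose_4x4))
# i.e. any 4x4 numpy pose can be pushed directly onto a vtkProp.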
def load_octomap(octomap_h5_file, conf=0.9, wireframe=False):
h5f = h5py.File(octomap_h5_file, 'r')
octree_data = h5f['octree'][...]
octree_data = octree_data[octree_data[:, 4] > conf]
pts = vtk.vtkPoints()
#vtk_pt_data = converter.numpy_to_vtk(np.ascontiguousarray(octree_data[:, 0:3]))
#pts.SetData(vtk_pt_data)
use_colors = octree_data.shape[1] > 5
colors = vtk.vtkUnsignedCharArray()
colors.SetNumberOfComponents(3)
#color_data = np.ascontiguousarray(octree_data[:, 5:8])
#colors = converter.numpy_to_vtk(color_data)
#colors.SetName('ColorArray')
#polydata.GetPointData().SetActiveScalars('ColorArray')
for k in range(octree_data.shape[0]):
pts.InsertNextPoint(*octree_data[k, 0:3])
if use_colors:
r = int(octree_data[k, 5])
g = int(octree_data[k, 6])
b = int(octree_data[k, 7])
colors.InsertNextTupleValue((r, g, b))
polydata = vtk.vtkPolyData()
polydata.SetPoints(pts)
if use_colors:
polydata.GetPointData().SetScalars(colors)
cube = vtk.vtkCubeSource()
cube.SetXLength(octree_data[0, 3])
cube.SetYLength(octree_data[0, 3])
cube.SetZLength(octree_data[0, 3])
glyph = vtk.vtkGlyph3D()
if use_colors:
glyph.SetColorModeToColorByScalar()
glyph.SetSourceConnection(cube.GetOutputPort())
glyph.SetInput(polydata)
glyph.ScalingOff()
glyph.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(glyph.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
if wireframe:
actor.GetProperty().SetRepresentationToWireframe()
actor.GetProperty().SetLineWidth(1)
actor.GetProperty().SetOpacity(0.2)
actor.GetProperty().LightingOff()
return actor
def load_vtk_cloud(vtk_cloud_file):
reader = vtk.vtkDataSetReader()
reader.SetFileName(vtk_cloud_file)
reader.Update()
actor = vtk.vtkActor()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(reader.GetOutputPort())
actor.SetMapper(mapper)
return actor
def get_transforms():
gps_reader = GPSReader(GPS_FILE)
gps_data = gps_reader.getNumericData()
imu_transforms = IMUTransforms(gps_data)
return imu_transforms
def export_scene_vrml(win, output_file):
writer = vtk.vtkVRMLExporter()
writer.SetInput(win)
writer.SetFileName(output_file)
writer.Write()
class Blockworld:
def __init__(self):
self.start = EXPORT_START
self.end = EXPORT_START + EXPORT_NUM * EXPORT_STEP
self.step = EXPORT_STEP
self.count = 0
self.ren = vtk.vtkRenderer()
''' Transforms '''
self.imu_transforms = get_transforms()
self.trans_wrt_imu = self.imu_transforms[self.start:self.end:self.step,
0:3, 3]
self.params = LoadParameters('q50_4_3_14_params')
self.radar_params = self.params['radar']
self.lidar_params = self.params['lidar']
''' Radar '''
self.rdr_pts = loadRDRCamMap(MAP_FILE)
self.radar_actors = []
print 'Adding transforms'
gps_cloud = VtkPointCloud(self.trans_wrt_imu[:, 0:3],
0 * self.trans_wrt_imu[:, 0])
self.ren.AddActor(gps_cloud.get_vtk_cloud())
#print 'Adding octomap'
#octomap_actor = load_octomap(OCTOMAP_H5_FILE)
#self.ren.AddActor(octomap_actor)
print 'Adding point cloud'
cloud_actor = load_vtk_cloud(STATIC_VTK_FILE)
self.ren.AddActor(cloud_actor)
#dynamic_actor = load_vtk_cloud(DYNAMIC_VTK_FILE)
#dynamic_actor.GetProperty().SetColor(0, 0, 1)
#dynamic_actor.GetMapper().ScalarVisibilityOff()
#self.ren.AddActor(dynamic_actor)
print 'Adding car'
self.car = load_ply('gtr.ply')
self.ren.AddActor(self.car)
print 'Rendering'
self.ren.ResetCamera()
self.win = vtk.vtkRenderWindow()
self.win.AddRenderer(self.ren)
self.win.SetSize(400, 400)
self.iren = vtk.vtkRenderWindowInteractor()
self.iren.SetRenderWindow(self.win)
mouseInteractor = vtk.vtkInteractorStyleTrackballCamera()
self.iren.SetInteractorStyle(mouseInteractor)
self.iren.Initialize()
# Whether to write video
self.record = False
# Set up time
self.iren.AddObserver('TimerEvent', self.update)
self.timer = self.iren.CreateRepeatingTimer(100)
# Add keypress event
self.iren.AddObserver('KeyPressEvent', self.keyhandler)
self.mode = 'ahead'
self.iren.Start()
def getCameraPosition(self):
t = self.start + self.step * self.count
if self.mode == 'ahead':
position = self.imu_transforms[t, 0:3, 3]
focal_point = self.imu_transforms[t + self.step, 0:3, 3]
elif self.mode == 'behind':
# FIXME Tune this
position = self.imu_transforms[t - 5*self.step, 0:3, 3]
focal_point = self.imu_transforms[t - 4*self.step, 0:3, 3]
elif self.mode == 'above':
position = self.imu_transforms[t - self.step, 0:3, 3] + np.array([0, 0, 200.0])
focal_point = self.imu_transforms[t, 0:3, 3]
elif self.mode == 'passenger':
# TODO Not sure being inside mesh works...
pass
return position, focal_point
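    # NOTE: the 'passenger' mode above is only a stub; selecting it would leave
    # position/focal_point undefined. Key bindings (handled below): 'a' above,
    # 'b' behind, 'd' ahead, '0' restart the sequence, 'r' toggle recording.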
def keyhandler(self, obj, event):
key = obj.GetKeySym()
if key == 'a':
self.mode = 'above'
elif key == 'b':
self.mode = 'behind'
elif key == 'd':
self.mode = 'ahead'
elif key == '0':
self.count = 0
elif key == 'r':
if self.record:
self.closeVideo()
self.record = False
else:
self.startVideo()
self.record = True
else:
pass
def updateRadar(self):
# Taken from testDrawRadarOnMap.py
fren = self.iren.GetRenderWindow().GetRenderers().GetFirstRenderer()
t = self.start + self.step * self.count
radar_data = loadRDR(self.rdr_pts[t])[0]
if radar_data.shape[0] > 0:
#Convert from radar to lidar ref-frame
radar_data[:, :3] = calibrateRadarPts(radar_data[:, :3], self.radar_params)
#Convert from lidar to IMU ref-frame
radar_data[:, :3] = np.dot(self.lidar_params['T_from_l_to_i'][:3, :3],
radar_data[:, :3].transpose()).transpose()
h_radar_data = np.hstack((radar_data[:, :3], np.ones((radar_data.shape[0], 1))))
radar_data[:, :3] = np.dot(self.imu_transforms[t],
h_radar_data.transpose()).transpose()[:, :3]
for i in xrange(len(self.radar_actors)):
fren.RemoveActor(self.radar_actors[i])
self.radar_actors = []
self.radar_clouds = []
for i in xrange(radar_data.shape[0]):
self.radar_clouds.append(VtkBoundingBox(radar_data[i, :]))
(ax, ay, az) = euler_from_matrix(self.imu_transforms[t])
box = self.radar_clouds[i].get_vtk_box(rot=az*180/np.pi)
self.radar_actors.append(box)
fren.AddActor(self.radar_actors[i])
def update(self, iren, event):
# Transform the car
t = self.start + self.step * self.count
imu_transform = self.imu_transforms[t, :, :]
transform = vtk_transform_from_np(imu_transform)
transform.RotateZ(90)
transform.Translate(-2, -3, -2)
self.car.SetUserTransform(transform)
# Add the radar
#self.updateRadar()
# Set camera position
fren = iren.GetRenderWindow().GetRenderers().GetFirstRenderer()
cam = fren.GetActiveCamera()
position, focal_point = self.getCameraPosition()
cam.SetPosition(position)
cam.SetFocalPoint(focal_point)
cam.SetViewUp(0, 0, 1)
fren.ResetCameraClippingRange()
cam.SetClippingRange(0.1,1600)
iren.GetRenderWindow().Render()
if self.record:
self.writeVideo()
self.count += 1
def startVideo(self):
self.win2img = vtk.vtkWindowToImageFilter()
self.win2img.SetInput(self.win)
self.videoWriter = vtk.vtkFFMPEGWriter()
self.videoWriter.SetFileName('/home/zxie/Desktop/blockworld.avi')
self.videoWriter.SetInputConnection(self.win2img.GetOutputPort())
self.videoWriter.SetRate(10) # 10 fps
self.videoWriter.SetQuality(2) # Highest
self.videoWriter.SetBitRate(1000) # kilobits/s
self.videoWriter.SetBitRateTolerance(1000)
self.videoWriter.Start()
def writeVideo(self):
self.win2img.Modified()
self.videoWriter.Write()
def closeVideo(self):
self.videoWriter.End()
self.videoWriter.Delete()
self.win2img.Delete()
if __name__ == '__main__':
blockworld = Blockworld()
|
sameeptandon/sail-car-log
|
mapping/viz/blockworld.py
|
Python
|
bsd-2-clause
| 10,247
|
[
"VTK"
] |
082d8c2db3df584602b023a28fca0088593fde271b5283dce41f225cde28afcc
|
# -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Scientific Package. This package holds all simulators, and
# analysers necessary to run brain-simulations. You can use it stand alone or
# in conjunction with TheVirtualBrain-Framework Package. See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
Demonstrate using the simulator at the region level with a stimulus.
``Run time``: approximately 2 seconds (workstation circa 2010).
``Memory requirement``: < 1GB
.. moduleauthor:: Stuart A. Knock <[email protected]>
"""
from tvb.simulator.lab import *
##----------------------------------------------------------------------------##
##- Perform the simulation -##
##----------------------------------------------------------------------------##
LOG.info("Configuring...")
#Initialize a Model, Coupling, and Connectivity.
oscillator = models.Generic2dOscillator()
white_matter = connectivity.Connectivity.from_file("connectivity_96.zip")
white_matter.speed = numpy.array([4.0])
white_matter_coupling = coupling.Linear(a=0.0126)
#Initialise an Integrator
heunint = integrators.HeunDeterministic(dt=2 ** -4)
#Initialise some Monitors with period in physical time
momo = monitors.Raw()
mama = monitors.TemporalAverage(period=2 ** -2)
#Bundle them
what_to_watch = (momo, mama)
#Define the stimulus
#Specify a weighting for regions to receive stimuli...
white_matter.configure()
nodes = [0, 7, 13, 33, 42]
weighting = numpy.zeros((white_matter.number_of_regions,))
weighting[nodes] = numpy.array([2.0 ** -2, 2.0 ** -3, 2.0 ** -4, 2.0 ** -5, 2.0 ** -6])
eqn_t = equations.Gaussian()
eqn_t.parameters["midpoint"] = 16.0
stimulus = patterns.StimuliRegion(temporal=eqn_t,
connectivity=white_matter,
weight=weighting)
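#NOTE: eqn_t is a Gaussian in time peaking at midpoint=16.0 (in the simulator's
#time units, ms), so each node listed above receives a brief input pulse scaled
#by its entry in `weighting`.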
#Initialise Simulator -- Model, Connectivity, Integrator, Monitors, and stimulus.
sim = simulator.Simulator(model=oscillator,
connectivity=white_matter,
coupling=white_matter_coupling,
integrator=heunint,
monitors=what_to_watch,
stimulus=stimulus)
sim.configure()
#Clear the initial transient, so that the effect of the stimulus is clearer.
#NOTE: the stimulus timing ignores this initial run; stimuli are defined relative to each simulation call.
LOG.info("Initial integration to clear transient...")
for _, _ in sim(simulation_length=128):
pass
LOG.info("Starting simulation...")
#Perform the simulation
raw_data = []
raw_time = []
tavg_data = []
tavg_time = []
for raw, tavg in sim(simulation_length=64):
if not raw is None:
raw_time.append(raw[0])
raw_data.append(raw[1])
if not tavg is None:
tavg_time.append(tavg[0])
tavg_data.append(tavg[1])
LOG.info("Finished simulation.")
##----------------------------------------------------------------------------##
##- Plot pretty pictures of what we just did -##
##----------------------------------------------------------------------------##
#Plot defaults in a few combinations
#Plot the stimulus
plot_pattern(sim.stimulus)
#Make the lists numpy.arrays for easier use.
RAW = numpy.array(raw_data)
TAVG = numpy.array(tavg_data)
#Plot raw time series
figure(1)
plot(raw_time, RAW[:, 0, :, 0])
title("Raw -- State variable 0")
figure(2)
plot(raw_time, RAW[:, 1, :, 0])
title("Raw -- State variable 1")
#Plot temporally averaged time series
figure(3)
plot(tavg_time, TAVG[:, 0, :, 0])
title("Temporal average")
#Show them
show()
###EoF###
|
echohenry2006/tvb-library
|
tvb/simulator/demos/region_deterministic_stimulus.py
|
Python
|
gpl-2.0
| 4,737
|
[
"Gaussian"
] |
b2941f07ec9b9e1d3c7d90f65f911edad40517c9bc2df4cbb900bc57b58a37fd
|
#!/usr/bin/env python
import ROOT
import numpy
import array
import io, os, sys
import optparse
import commands
from UserCode.HGCanalysis.PlotUtils import *
from UserCode.HGCanalysis.HGCTree2Workspace import *
INTEGMIPEM=None
WEIGHTINGSCHEME=None
"""
Initialises the global weighting scheme (INTEGMIPEM and WEIGHTINGSCHEME)
"""
def initWeightingScheme(opt) :
#electromagnetic energy scale of each section
emCalibMap={'EE':None,'HEF':None,'HEB':None}
if opt.emCalibUrl :
for iurl in opt.emCalibUrl.split(','):
subDet,url=iurl.split(':')
print 'Replacing default energy scale in %s with calibration from %s'%(subDet,url)
calibF=ROOT.TFile.Open(url)
#emCalibMap[subDet]=(calibF.Get('simple_calib'),calibF.Get('calib_3_simple_res'))
#don't apply residuals! dangerous for low energy!!!
emCalibMap[subDet]=(calibF.Get('simple_calib'),None)
calibF.Close()
#readout material overburden file
matParamBeforeHGCMap={}
matFurl='%s/src/UserCode/HGCanalysis/data/HGCMaterialOverburden.root'%os.environ['CMSSW_BASE']
matF=ROOT.TFile.Open(matFurl)
matParamBeforeHGCGr=matF.Get("lambdaOverburden")
matParamBeforeHGCGr=None
matF.Close()
if not( matParamBeforeHGCGr is None) : print 'Material overburden has been read from %s'%matFurl
global INTEGMIPEM
global WEIGHTINGSCHEME
if opt.weighting=='lambda':
INTEGMIPEM={'EE':0.447,'HEF':0.661,'HEB':1.326}
WEIGHTINGSCHEME={
'EE': [([1, 1 ], matParamBeforeHGCGr, 0.0136, emCalibMap['EE']),
([2, 2], None, 0.0461, emCalibMap['EE']),
([3, 3], None, 0.0448, emCalibMap['EE']),
([4, 4], None, 0.0241, emCalibMap['EE']),
([5, 5], None, 0.0448, emCalibMap['EE']),
([6, 6], None, 0.0241, emCalibMap['EE']),
([7, 7], None, 0.0448, emCalibMap['EE']),
([8, 8], None, 0.0241, emCalibMap['EE']),
([9, 9], None, 0.0448, emCalibMap['EE']),
([10, 10], None, 0.0241, emCalibMap['EE']),
([11, 11], None, 0.0448, emCalibMap['EE']),
([12,12], None, 0.0347, emCalibMap['EE']),
([13,13], None, 0.0511, emCalibMap['EE']),
([14,14], None, 0.0347, emCalibMap['EE']),
([15,15], None, 0.0511, emCalibMap['EE']),
([16,16], None, 0.0347, emCalibMap['EE']),
([17,17], None, 0.0511, emCalibMap['EE']),
([18,18], None, 0.0347, emCalibMap['EE']),
([19,19], None, 0.0511, emCalibMap['EE']),
([20,20], None, 0.0347, emCalibMap['EE']),
([21,21], None, 0.0511, emCalibMap['EE']),
([22,22], None, 0.0488, emCalibMap['EE']),
([23,23], None, 0.0642, emCalibMap['EE']),
([24,24], None, 0.0488, emCalibMap['EE']),
([25,25], None, 0.0642, emCalibMap['EE']),
([26,26], None, 0.0488, emCalibMap['EE']),
([27,27], None, 0.0642, emCalibMap['EE']),
([28,28], None, 0.0488, emCalibMap['EE']),
([29,29], None, 0.0642, emCalibMap['EE']),
([30,30], None, 0.0488, emCalibMap['EE'])],
'HEF':[([31,31], None, 0.3377, emCalibMap['HEF']),
([32,42], None, 0.2727, emCalibMap['HEF'])],
'HEB':[([43,54], None, 0.4760, emCalibMap['HEB'])]
}
elif opt.weighting=='dedx':
INTEGMIPEM={'EE':0.722,'HEF':0.661,'HEB':1.322}
WEIGHTINGSCHEME={
'EE': [([1, 1 ], None, 2.372, emCalibMap['EE']),
([2, 2], None, 9.541, emCalibMap['EE']),
([3, 3], None, 8.816, emCalibMap['EE']),
([4, 4], None, 5.125, emCalibMap['EE']),
([5, 5], None, 8.816, emCalibMap['EE']),
([6, 6], None, 5.125, emCalibMap['EE']),
([7, 7], None, 8.816, emCalibMap['EE']),
([8, 8], None, 5.125, emCalibMap['EE']),
([9, 9], None, 8.816, emCalibMap['EE']),
([10, 10], None, 5.125, emCalibMap['EE']),
([11, 11], None, 8.816, emCalibMap['EE']),
([12,12], None, 7.445, emCalibMap['EE']),
([13,13], None, 10.217, emCalibMap['EE']),
([14,14], None, 7.445, emCalibMap['EE']),
([15,15], None, 10.217, emCalibMap['EE']),
([16,16], None, 7.445, emCalibMap['EE']),
([17,17], None, 10.217, emCalibMap['EE']),
([18,18], None, 7.445, emCalibMap['EE']),
([19,19], None, 10.217, emCalibMap['EE']),
([20,20], None, 7.445, emCalibMap['EE']),
([21,21], None, 10.217, emCalibMap['EE']),
([22,22], None, 10.539, emCalibMap['EE']),
([23,23], None, 13.148, emCalibMap['EE']),
([24,24], None, 10.539, emCalibMap['EE']),
([25,25], None, 13.148, emCalibMap['EE']),
([26,26], None, 10.539, emCalibMap['EE']),
([27,27], None, 13.148, emCalibMap['EE']),
([28,28], None, 10.539, emCalibMap['EE']),
([29,29], None, 13.148, emCalibMap['EE']),
([30,30], None, 10.539, emCalibMap['EE'])],
'HEF':[([31,31], None, 65.001, emCalibMap['HEF']),
([32,42], None, 52.954, emCalibMap['HEF'])],
'HEB':[([43,54], None, 92.196, emCalibMap['HEB'])]
}
else:
INTEGMIPEM={'EE':0.466,'HEF':0.665,'HEB':1.326}
WEIGHTINGSCHEME={
'EE': [([1, 1 ], matParamBeforeHGCGr, 0.0798, emCalibMap['EE']),
([2, 2], None, 0.9214, emCalibMap['EE']),
([3, 3], None, 0.5960, emCalibMap['EE']),
([4, 4], None, 0.5691, emCalibMap['EE']),
([5, 5], None, 0.5960, emCalibMap['EE']),
([6, 6], None, 0.5691, emCalibMap['EE']),
([7, 7], None, 0.5960, emCalibMap['EE']),
([8, 8], None, 0.5691, emCalibMap['EE']),
([9, 9], None, 0.5960, emCalibMap['EE']),
([10, 10], None, 0.5691, emCalibMap['EE']),
([11, 11], None, 0.5960, emCalibMap['EE']),
([12,12], None, 0.8687, emCalibMap['EE']),
([13,13], None, 0.7920, emCalibMap['EE']),
([14,14], None, 0.8687, emCalibMap['EE']),
([15,15], None, 0.7920, emCalibMap['EE']),
([16,16], None, 0.8687, emCalibMap['EE']),
([17,17], None, 0.7920, emCalibMap['EE']),
([18,18], None, 0.8687, emCalibMap['EE']),
([19,19], None, 0.7920, emCalibMap['EE']),
([20,20], None, 0.8687, emCalibMap['EE']),
([21,21], None, 0.7920, emCalibMap['EE']),
([22,22], None, 1.2683, emCalibMap['EE']),
([23,23], None, 1.2019, emCalibMap['EE']),
([24,24], None, 1.2683, emCalibMap['EE']),
([25,25], None, 1.2019, emCalibMap['EE']),
([26,26], None, 1.2683, emCalibMap['EE']),
([27,27], None, 1.2019, emCalibMap['EE']),
([28,28], None, 1.2683, emCalibMap['EE']),
([29,29], None, 1.2019, emCalibMap['EE']),
([30,30], None, 1.2683, emCalibMap['EE'])],
'HEF':[([31,31], None, 3.5803, emCalibMap['HEF']),
([32,42], None, 3.1029, emCalibMap['HEF'])],
'HEB':[([43,54], None, 5.2279, emCalibMap['HEB'])]
}
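    # The three branches above weight each layer, respectively, by its nuclear
    # interaction-length equivalent ('lambda'), by dE/dx ('dedx'), or by a
    # default set of per-layer weights (presumably radiation-length based); the
    # numerical constants are taken as given by this configuration.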
"""
"""
def getMedianFor(x):
if len(x)==0: return (0,0)
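    # 1.253 ~ sqrt(pi/2): for (roughly) Gaussian data the standard error of the
    # sample median is ~ 1.2533*sigma/sqrt(n), which is what is returned below.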
return (numpy.median(x), 1.253*numpy.std(x)/numpy.sqrt(len(x)))
"""
creates a histogram and fits a Gaussian to its peak
"""
def fitGaussianToPeak(data,hrange=None,nbins=None):
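    # `data` may be a ROOT TH1 (fitted directly) or a plain python sequence, in
    # which case it is first histogrammed with `nbins` bins over `hrange`.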
meanX,meanXerr=0,0
try:
data.InheritsFrom('TH1')
maxBin=data.GetMaximumBin()
#fitRangeMin=data.GetXaxis().GetBinCenter(maxBin-7)
#fitRangeMax=data.GetXaxis().GetBinCenter(maxBin+7)
#data.Fit('gaus','LMRQ0+','',fitRangeMin,fitRangeMax)
data.Fit('gaus','LMRQ0+','')
meanX,meanXerr=data.GetFunction('gaus').GetParameter(1),data.GetFunction('gaus').GetParError(1)
except:
if len(data)<5: return 0,0
h=ROOT.TH1F('datah','',nbins,hrange[0],hrange[1])
for d in data: h.Fill(d)
#spec=ROOT.TSpectrum()
#spec.Search(h,1,"nobackground goff")
#maxBin=h.GetXaxis().FindBin(spec.GetPositionX()[0])
maxBin=h.GetMaximumBin()
if maxBin>h.GetXaxis().GetNbins()-3: maxBin=h.GetXaxis().FindBin(h.GetMean())
fitRangeMin=h.GetXaxis().GetBinCenter(maxBin-7)
fitRangeMax=h.GetXaxis().GetBinCenter(maxBin+7)
gaus=ROOT.TF1('gaus','gaus',hrange[0],hrange[1])
gaus.SetParLimits(1,hrange[0],hrange[1])
h.Fit(gaus,'LMRQ+','',fitRangeMin,fitRangeMax)
meanX,meanXerr=h.GetFunction('gaus').GetParameter(1),h.GetFunction('gaus').GetParError(1)
# h.SaveAs('/tmp/psilva/temp.root')
# raw_input('...')
h.Delete()
gaus.Delete()
return meanX,meanXerr
"""
fits pi/e arc
"""
def fitPiOverEArc(piovereArcGr):
if piovereArcGr.GetN()<3 : return None
gr=ROOT.TGraphErrors()
x, y = ROOT.Double(0), ROOT.Double(0)
for i in xrange(0,piovereArcGr.GetN()):
piovereArcGr.GetPoint(i,x,y)
ex=piovereArcGr.GetErrorX(i)
ey=piovereArcGr.GetErrorY(i)
gr.SetPoint(i,y,x)
gr.SetPointError(i,ey,ex)
gr.Fit('pol2','MRQ+')
piovereArcFunc=gr.GetFunction('pol2').Clone()
gr.Delete()
return piovereArcFunc
"""
wraps the computation of pi/e
"""
def computePiOverE(x,xresp,gr,byMode):
#require minimum 3 events for pi/e by mode...
median, medianErr = getMedianFor(x=x)
medianResp, medianRespErr = getMedianFor(x=xresp)
if byMode and len(x)>3:
mode, modeErr = fitGaussianToPeak(data=x, hrange=(0,2*median), nbins=400)
modeResp, modeRespErr = fitGaussianToPeak(data=xresp, hrange=(0,2), nbins=50)
np=gr.GetN()
gr.SetPoint(np,mode,modeResp)
gr.SetPointError(np,modeErr,modeRespErr)
return [mode,modeErr,modeResp,modeRespErr]
elif not byMode and len(x)>0:
np=gr.GetN()
gr.SetPoint(np,median,medianResp)
gr.SetPointError(np,medianErr,medianRespErr)
return [median,medianErr,medianResp,medianRespErr]
#nothing done
return []
"""
draws the correlation for different sub-dets
"""
def computeSubdetectorResponse(enRanges,etaRanges,xaxis,yaxis,banana,ws,outDir,byMode):
#auxiliary, for plotting purposes
canvas = ROOT.TCanvas('c','c',1000,1000)
allLegs = []
allProjs = []
#pi/e model to adjust
#responseFunc=ROOT.TF1('responseFunc','TMath::Min(TMath::Max([0]*(x-[1])/(x+[2])+[3],0.001),10.0)',0,1000)
#responseFunc=ROOT.TF1('responseFunc','TMath::Min(TMath::Max([0]*(x-[1])/(x+[2])*exp(-[3]*x)+[4],0.001),10.0)',0,1000)
responseFunc=ROOT.TF1('responseFunc','[0]*(1-exp(-[1]*x))/(1+exp(-[2]*x))*exp(-[3]*x)+[4]',0,1000)
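    #(empirical pi/e parameterisation: equals [4] at E=0 and, for [3]=0,
    # saturates towards [0]+[4] at high energy; [3]>0 adds an exponential damping)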
#determine the names for each axis
xaxisName,hasCorrX='',False
for var,corrFunc in xaxis:
xaxisName+=var
if not (corrFunc is None) : hasCorrX=True
yaxisName,hasCorrY='',False
for var,corrFunc in yaxis:
yaxisName+=var
if not (corrFunc is None) : hasCorrY=True
postfix=''
if hasCorrX : postfix += '_corrx'
if hasCorrY : postfix += '_corry'
if not (banana is None) : postfix += '_banana'
    #prepare to derive the responses from the projections of the scatter plots
all2DScatters={
xaxisName : ROOT.TH2F('hscatrecx',';E(%s);E(rec)/E(gen);Events'%(xaxisName), 50,0,400,100,0,2.5),
yaxisName : ROOT.TH2F('hscatrecy',';E(%s);E(rec)/E(gen);Events'%(yaxisName), 50,0,400,100,0,2.5)
}
for key in all2DScatters:
all2DScatters[key].SetDirectory(0)
all2DScatters[key].Sumw2()
incResolutionScatter={}
piovereArcFuncEvol={0:ROOT.TGraphErrors(),1:ROOT.TGraphErrors(),2:ROOT.TGraphErrors()}
for key in piovereArcFuncEvol:
piovereArcFuncEvol[key].SetMarkerStyle(20)
piovereArcFuncEvol[key].SetName('piovereres_p%d'%key)
piovereArcFuncEvol[key].SetTitle('p(%d)'%key)
responseProfiles={xaxisName:ROOT.TGraphErrors(),
yaxisName:ROOT.TGraphErrors(),
xaxisName+'_gen':ROOT.TGraphErrors(),
yaxisName+'_gen':ROOT.TGraphErrors(),
xaxisName+yaxisName+'_comb':ROOT.TGraphErrors(),
xaxisName+yaxisName+'_combgen':ROOT.TGraphErrors(),
xaxisName+yaxisName+'_combdiag':ROOT.TGraphErrors()}
for key in responseProfiles:
marker=20
if yaxisName in key: marker=24
if yaxisName in key and xaxisName in key : marker=22
responseProfiles[key].SetTitle(key)
responseProfiles[key].SetMarkerStyle(marker)
responseProfiles[key].SetFillStyle(0)
responseProfiles[key].SetName('%s_prof'%key)
#loop over available energies
thrMipFrac=1.1
for ien in xrange(0,len(enRanges)):
genEn_min=enRanges[ien][0]
genEn_max=enRanges[ien][1]
genEn_mean=0.5*(genEn_max+genEn_min)
genEnKey='%3.0f'%genEn_mean
all2DScatters[genEnKey]=ROOT.TH2F('hprof%d'%(ien),';E(%s)/E(gen);E(%s)/E(gen);Events'%(xaxisName,yaxisName),100,0,2.5,100,0,2.5)
all2DScatters[genEnKey].SetDirectory(0)
all2DScatters[genEnKey].Sumw2()
all2DScatters[genEnKey+'_'+yaxisName]=ROOT.TH2F('hprof%d_%s'%(ien,yaxisName),';E(rec)/E(gen);E(%s)/{E(%s)+max[E(%s)-MIP,0]};Events'%(yaxisName,yaxisName,xaxisName),100,0,2.5,100,0,1.0)
all2DScatters[genEnKey+'_'+yaxisName].SetDirectory(0)
all2DScatters[genEnKey+'_'+yaxisName].Sumw2()
incResolutionScatter[genEnKey]=ROOT.TH1F('hincprof%d'%(ien),';Total energy/E(gen);Events',100,0,2.5)
incResolutionScatter[genEnKey].SetDirectory(0)
incResolutionScatter[genEnKey].Sumw2()
incResolutionScatter[genEnKey+'_diag']=incResolutionScatter[genEnKey].Clone('hincprof%d_diag'%(ien))
incResolutionScatter[genEnKey+'_diag'].SetDirectory(0)
redData=ws.data('data').reduce('en>=%f && en<=%f && eta>=1.6 && eta<=2.8'%(genEn_min,genEn_max))
if redData.numEntries()<10 : continue
xvaluesAtY0, yvaluesAtX0 = [], []
yfracvaluesAtY0, yfracvaluesAtX0, yfracvaluesAtDiag = [], [], []
xrespvaluesAtY0, yrespvaluesAtX0 = [], []
xyvalues, xyrespvalues = [], []
xydiagvalues, xydiagrespvalues = [], []
for ientry in xrange(0,redData.numEntries()):
entryVars=redData.get(ientry)
#raw energy
xvalRaw, yvalRaw = 0, 0
for var,_ in xaxis: xvalRaw+=entryVars.find('en_%s'%var).getVal()
for var,_ in yaxis: yvalRaw+=entryVars.find('en_%s'%var).getVal()
xyvalRaw = xvalRaw + yvalRaw
if xyvalRaw<0.01 : continue
#corrected energy for front calorimeter combination
xval, xmipThr, xmip = 0,0,0
for var,corrFunc in xaxis:
imip=0
if corrFunc is None :
xmipThr += thrMipFrac*INTEGMIPEM[var]
xmip += INTEGMIPEM[var]
else :
xmipThr += thrMipFrac*INTEGMIPEM[var]/corrFunc.Eval(thrMipFrac*INTEGMIPEM[var])
xmip += INTEGMIPEM[var]/corrFunc.Eval(INTEGMIPEM[var])
ienVal=entryVars.find('en_%s'%var).getVal()
if not (corrFunc is None) : ienVal /= corrFunc.Eval(xyvalRaw)
xval += ienVal
#corrected energy for back calorimeter
yval, ymipThr, ymip = 0, 0, 0
for var,corrFunc in yaxis:
if corrFunc is None :
ymipThr += thrMipFrac*INTEGMIPEM[var]
ymip += INTEGMIPEM[var]
else :
ymipThr += thrMipFrac*INTEGMIPEM[var]/corrFunc.Eval(thrMipFrac*INTEGMIPEM[var])
ymip += INTEGMIPEM[var]/corrFunc.Eval(INTEGMIPEM[var])
ienVal=entryVars.find('en_%s'%var).getVal()
if not (corrFunc is None): ienVal /= corrFunc.Eval(xyvalRaw)
yval += ienVal
#total sums and energy sharing
xyval = xval+yval
xyval_m_mip = ROOT.TMath.Max(xval-xmip,0.)+yval
yval_over_xyval_m_mip=-1
if yval==0: yval_over_xyval_m_mip=0
if xyval_m_mip>0:yval_over_xyval_m_mip=yval/xyval_m_mip
#Si vs Silicone banana correction
if not (banana is None):
p0=banana[0].Eval(xyval)
p1=banana[1].Eval(xyval)
p2=banana[2].Eval(xyval)
resCorrection = p0
resCorrection += p1*yval_over_xyval_m_mip
resCorrection += p2*yval_over_xyval_m_mip*yval_over_xyval_m_mip
if resCorrection>0: xyval /= resCorrection
#final responses
xresp, yresp, xyresp = xval/genEn_mean, yval/genEn_mean, xyval/genEn_mean
if genEn_mean>100 and xyresp<0.1 : continue
if genEn_mean<100 and xyresp<0.05: continue
#########
# residual correction depending on energy fraction TODO
#########
all2DScatters[genEnKey].Fill(xresp,yresp)
all2DScatters[yaxisName].Fill(yval,xyresp)
all2DScatters[xaxisName].Fill(xval,xyresp)
incResolutionScatter[genEnKey].Fill(xyresp)
xyvalues.append(xyval)
xyrespvalues.append(xyresp)
all2DScatters[genEnKey+'_'+yaxisName].Fill(xyresp,yval_over_xyval_m_mip)
if yval_over_xyval_m_mip>0.4 and yval_over_xyval_m_mip<0.6:
incResolutionScatter[genEnKey+'_diag'].Fill(xyresp)
xydiagvalues.append(xyval)
xydiagrespvalues.append(xyresp)
yfracvaluesAtDiag.append(yval/xyval_m_mip)
            #require MIP-like deposits in the front calorimeter
if (xmipThr==0 or xval<xmipThr) and yval>0:
yvaluesAtX0.append(yval)
yrespvaluesAtX0.append(xyresp)
yfracvaluesAtX0.append(yval_over_xyval_m_mip)
if yresp<0.05 and xval>0:
#if (ymipThr==0 or yval<0.5*ymipThr) and xval>0:
xvaluesAtY0.append(xval)
xrespvaluesAtY0.append(xyresp)
yfracvaluesAtY0.append(yval_over_xyval_m_mip)
#pi/e
piovereArcGr=ROOT.TGraphErrors()
piovereArcGr.SetMarkerStyle(34)
piovereArcGr.SetMarkerColor(ROOT.kRed)
piovereArcGr.SetLineColor(ROOT.kRed)
piovereArcGr.SetMarkerSize(1.5)
xPiOverE = computePiOverE(x=xvaluesAtY0, xresp=xrespvaluesAtY0, gr=responseProfiles[xaxisName],byMode=byMode)
if len(xPiOverE)==4:
np=responseProfiles[xaxisName+'_gen'].GetN()
responseProfiles[xaxisName+'_gen'].SetPoint(np,genEn_mean,xPiOverE[2])
responseProfiles[xaxisName+'_gen'].SetPointError(np,0,xPiOverE[3])
fracCoord, fracCoordErr = getMedianFor(x=yfracvaluesAtY0)
if byMode:
fracCoord,fracCoordErr= fitGaussianToPeak(data=yfracvaluesAtY0,hrange=(0,1),nbins=20)
piovereArcGr.SetPoint(0,xPiOverE[2],fracCoord)
piovereArcGr.SetPointError(0,xPiOverE[3],0)
yPiOverE = computePiOverE(x=yvaluesAtX0, xresp=yrespvaluesAtX0, gr=responseProfiles[yaxisName],byMode=byMode)
if len(yPiOverE)==4:
np=responseProfiles[yaxisName+'_gen'].GetN()
responseProfiles[yaxisName+'_gen'].SetPoint(np,genEn_mean,yPiOverE[2])
responseProfiles[yaxisName+'_gen'].SetPointError(np,0,yPiOverE[3])
fracCoord, fracCoordErr = getMedianFor(x=yfracvaluesAtX0)
if byMode:
fracCoord,fracCoordErr= fitGaussianToPeak(data=yfracvaluesAtX0,hrange=(0,1),nbins=20)
piovereArcGr.SetPoint(1,yPiOverE[2],fracCoord)
piovereArcGr.SetPointError(1,yPiOverE[3],0)
combPiOverE = computePiOverE(x=xyvalues, xresp=xyrespvalues, gr=responseProfiles[xaxisName+yaxisName+'_comb'],byMode=byMode)
if len(combPiOverE)==4:
np=responseProfiles[xaxisName+yaxisName+'_combgen'].GetN()
responseProfiles[xaxisName+yaxisName+'_combgen'].SetPoint(np,genEn_mean,combPiOverE[2])
responseProfiles[xaxisName+yaxisName+'_combgen'].SetPointError(np,0,combPiOverE[3])
combdiagPiOverE = computePiOverE(x=xydiagvalues, xresp=xydiagrespvalues, gr=responseProfiles[xaxisName+yaxisName+'_combdiag'],byMode=byMode)
if len(combdiagPiOverE)==4:
fracCoord, fracCoordErr = getMedianFor(x=yfracvaluesAtDiag)
if byMode:
fracCoord,fracCoordErr= fitGaussianToPeak(data=yfracvaluesAtDiag,hrange=(0,1),nbins=20)
piovereArcGr.SetPoint(2,combdiagPiOverE[2],fracCoord)
piovereArcGr.SetPointError(2,combdiagPiOverE[3],0)
piovereArcGr.Sort()
piovereArcFunc=fitPiOverEArc(piovereArcGr)
if not (piovereArcFunc is None):
for ip in xrange(0,3):
np=piovereArcFuncEvol[ip].GetN()
piovereArcFuncEvol[ip].SetPoint(np,combPiOverE[0],piovereArcFunc.GetParameter(ip))
piovereArcFuncEvol[ip].SetPointError(np,combPiOverE[1],piovereArcFunc.GetParError(ip))
#
# SHOW ENERGY SLICE
#
canvas.Clear()
canvas.Divide(2,2)
#scatter 1
p=canvas.cd(1)
p.SetLogz()
p.SetRightMargin(0.1)
all2DScatters[genEnKey].Draw('colz')
all2DScatters[genEnKey].GetZaxis().SetTitleOffset(-0.5)
line=ROOT.TLine(0,1,1,0)
line.SetLineStyle(7)
line.Draw('same')
#projections
p=canvas.cd(3)
nLegs=len(allLegs)
allLegs.append( ROOT.TLegend(0.2,0.7,0.9,0.94) )
allLegs[nLegs].SetFillStyle(0)
allLegs[nLegs].SetBorderSize(0)
allLegs[nLegs].SetTextFont(42)
allLegs[nLegs].SetTextSize(0.035)
drawOpt='hist'
for profKey in [genEnKey,genEnKey+'_diag']:
totalFound=incResolutionScatter[profKey].Integral()
if totalFound<=0: continue
profTitle, color, fill = 'inc', ROOT.kGray, 1001
if 'diag' in profKey : profTitle, color, fill = 'inc, [0.4-0.6]E_{rec}', 38, 3344
incResolutionScatter[profKey].Rebin()
incResolutionScatter[profKey].SetLineColor(color)
incResolutionScatter[profKey].SetFillStyle(fill)
incResolutionScatter[profKey].SetFillColor(color)
incResolutionScatter[profKey].SetMarkerColor(color)
incResolutionScatter[profKey].SetLineWidth(1)
incResolutionScatter[profKey].Scale(1./totalFound)
fixExtremities(incResolutionScatter[profKey])
incResolutionScatter[profKey].GetXaxis().SetTitle('E(rec)/E(gen)')
incResolutionScatter[profKey].GetYaxis().SetRangeUser(0,0.3)
title='<#pi/e>(%s) = '%profTitle
if 'diag' in profKey :
if len(combdiagPiOverE)==4 : title += '%3.2f'%combdiagPiOverE[2]
else : title += 'n/a'
else:
if len(combPiOverE)==4 : title += '%3.2f'%combPiOverE[2]
else : title += 'n/a'
incResolutionScatter[profKey].SetTitle(title)
incResolutionScatter[profKey].Draw(drawOpt)
allLegs[nLegs].AddEntry( incResolutionScatter[profKey],title,'fp')
drawOpt='histsame'
for iaxis in xrange(0,2):
nProjs=len(allProjs)
title=''
if iaxis==1:
if len(xaxisName)==0 : continue
allProjs.append( all2DScatters[genEnKey].ProjectionX('projx_%d'%nProjs,1,1) )
allProjs[nProjs].Reset('ICE')
allProjs[nProjs].SetTitle( xaxisName )
for ix in xrange(0,len(xrespvaluesAtY0)) :
allProjs[nProjs].Fill(xrespvaluesAtY0[ix])
title='<#pi/e>(%s) = '%xaxisName
if len(xPiOverE)==4 : title += '%3.2f'%xPiOverE[2]
else : title += 'n/a'
else:
if len(yaxisName)==0 : continue
allProjs.append( all2DScatters[genEnKey].ProjectionY('projy_%d'%nProjs,1,1) )
allProjs[nProjs].Reset('ICE')
allProjs[nProjs].SetTitle( xaxisName )
for iy in xrange(0,len(yvaluesAtX0)) : allProjs[nProjs].Fill(yrespvaluesAtX0[iy])
allProjs[nProjs].SetTitle( yaxisName )
title='<#pi/e>(%s) = '%yaxisName
if len(yPiOverE)==4 : title += '%3.2f'%yPiOverE[2]
else : title += 'n/a'
totalEvts=allProjs[nProjs].Integral()
if totalEvts<1: continue
allProjs[nProjs].Scale(1./totalEvts)
fixExtremities(allProjs[nProjs])
allProjs[nProjs].Rebin()
allProjs[nProjs].SetLineColor(1+iaxis)
allProjs[nProjs].SetMarkerColor(1+iaxis)
allProjs[nProjs].SetMarkerStyle(20+4*iaxis)
allProjs[nProjs].SetTitle(title)
allProjs[nProjs].SetDirectory(0)
allProjs[nProjs].Draw(drawOpt)
allLegs[nLegs].AddEntry( allProjs[nProjs],title,'p')
drawOpt='histsame'
#all done
allLegs[nLegs].Draw()
line1d=ROOT.TLine(1,0,1,0.3)
line1d.SetLineStyle(7)
line1d.Draw('same')
#scatter 2
p=canvas.cd(2)
p.SetLogz()
p.SetRightMargin(0.1)
all2DScatters[genEnKey+'_'+yaxisName].Draw('colz')
all2DScatters[genEnKey+'_'+yaxisName].GetZaxis().SetTitleOffset(-0.5)
line2d=ROOT.TLine(1,0,1,1.0)
line2d.SetLineStyle(7)
line2d.SetLineWidth(3)
line2d.Draw('same')
if not (piovereArcGr is None) and not (piovereArcFunc is None):
gr=ROOT.TGraph()
gr.SetLineColor(1)
gr.SetLineStyle(7)
gr.SetLineWidth(3)
for iy in xrange(0,100):
yfrac=float(iy)/100.
gr.SetPoint(iy,piovereArcFunc.Eval(yfrac),yfrac)
gr.Draw('l')
piovereArcGr.Draw('p')
#finalize
p=canvas.cd(1)
MyPaveText('#bf{CMS} #it{simulation} Energy=%s GeV'%genEnKey)
canvas.Modified()
canvas.Update()
canvas.SaveAs('%s/profile_%s%s.png'%(outDir,genEnKey,postfix))
#show residual parameterisations
canvas.Clear()
canvas.Divide(2,2)
for key in piovereArcFuncEvol:
p=canvas.cd(key+1)
p.SetLogx()
piovereArcFuncEvol[key].Draw('ap')
piovereArcFuncEvol[key].GetXaxis().SetTitle('<E(rec)>')
piovereArcFuncEvol[key].GetYaxis().SetTitle(piovereArcFuncEvol[key].GetTitle())
if key==0: MyPaveText('#bf{CMS} #it{simulation}')
#do the fit only after both corrections are applied
if not ('corrx' in postfix and 'corry' in postfix) : continue
resEvolFunc=ROOT.TF1('resEvolFunc','[0]+[1]*(1-exp(-[2]*x))',0,10000)
resEvolFunc.SetParameter(0, piovereArcFuncEvol[key].Eval(500))
resEvolFunc.SetParLimits(0,-5,5)
resEvolFunc.SetParameter(1, piovereArcFuncEvol[key].Eval(0)-piovereArcFuncEvol[key].Eval(500))
resEvolFunc.SetParLimits(1,-5,5)
resEvolFunc.SetParameter(2,0.6)
resEvolFunc.SetParLimits(2,0.01,2)
if key==0 and xaxisName=='HEF': resEvolFunc.SetParLimits(2,0.4,1)
#resEvolFunc=ROOT.TF1('resEvolFunc','[0]+[1]*(1-[2]*x)/(1+[3]*x)',0,10000)
#resEvolFunc.SetParLimits(0,-2,2)
#resEvolFunc.SetParLimits(1,-2,2)
#resEvolFunc.SetParLimits(2,0.1,100)
#resEvolFunc.SetParLimits(3,0.1,100)
piovereArcFuncEvol[key].Fit( resEvolFunc, 'MR+','',0,100 )
piovereArcFuncEvol[key].GetFunction(resEvolFunc.GetName()).SetRange(0,10000)
limit=piovereArcFuncEvol[key].GetFunction(resEvolFunc.GetName()).Eval(10000)
piovereArcFuncEvol[key].GetYaxis().SetRangeUser(limit-2,limit+2)
canvas.Modified()
canvas.Update()
canvas.SaveAs('%s/piovereparams%s_residuals.png'%(outDir,postfix))
#final pi/e for x/y-axis separate
canvas.Clear()
canvas.SetWindowSize(500,500)
canvas.SetCanvasSize(500,500)
drawOpt='ap'
nLegs=len(allLegs)
allLegs.append( ROOT.TLegend(0.2,0.85,0.9,0.94) )
allLegs[nLegs].SetFillStyle(0)
allLegs[nLegs].SetBorderSize(0)
allLegs[nLegs].SetTextFont(42)
allLegs[nLegs].SetTextSize(0.035)
canvas.SetLogx()
responseCtr=0
frame=ROOT.TGraph()
frame.SetPoint(0,0,0)
frame.SetPoint(1,1000,2)
frame.SetMarkerStyle(1)
frame.Draw('ap')
frame.GetXaxis().SetRangeUser(0.5,800)
frame.GetYaxis().SetRangeUser(0.1,1.5)
frame.GetXaxis().SetTitle('<E(reco)> [GeV]')
frame.GetYaxis().SetTitle('<#pi/e>')
ibaseCtr=0
for var,corrFunc in xaxis:
if corrFunc is None: continue
corrFunc.SetTitle(var+' base')
corrFunc.SetLineColor(ROOT.kCyan-2)
corrFunc.SetLineStyle(7+ibaseCtr)
ibaseCtr+=1
corrFunc.Draw('same')
allLegs[nLegs].AddEntry(corrFunc,corrFunc.GetTitle(),'l')
ibaseCtr=0
for var,corrFunc in yaxis:
if corrFunc is None: continue
corrFunc.SetTitle(var+' base')
corrFunc.SetLineColor(ROOT.kYellow+2)
corrFunc.SetLineStyle(7+ibaseCtr)
ibaseCtr+=1
corrFunc.Draw('same')
allLegs[nLegs].AddEntry(corrFunc,corrFunc.GetTitle(),'l')
for key in [yaxisName,xaxisName]:
if responseProfiles[key].GetN()==0: continue
if key==xaxisName:
responseProfiles[key].SetMarkerStyle(20)
responseProfiles[key].SetMarkerColor(1)
responseProfiles[key].SetLineColor(1)
if key==yaxisName:
responseProfiles[key].SetMarkerStyle(24)
responseProfiles[key].SetMarkerColor(ROOT.kRed)
responseProfiles[key].SetLineColor(ROOT.kRed)
responseProfiles[key].Draw('p')
responseProfiles[key].GetYaxis().SetRangeUser(0.1,1.5)
responseProfiles[key].GetYaxis().SetTitle('<#pi/e>')
responseProfiles[key].GetXaxis().SetTitle('<E(rec)> [GeV]')
allLegs[nLegs].AddEntry(responseProfiles[key],responseProfiles[key].GetTitle(),'p')
responseFunc.SetParameter(0,1.0)
responseFunc.SetParLimits(0,0,2)
responseFunc.SetParameter(1,0.6)
responseFunc.SetParLimits(1,0,10)
responseFunc.SetParameter(2,0.12)
responseFunc.SetParLimits(2,0,10)
responseFunc.SetParameter(3,0.0002)
responseFunc.SetParLimits(3,0,0.01)
responseFunc.SetParameter(4,0)
responseFunc.SetParLimits(4,0,2)
if key=='HEB':
#responseFunc.SetParameter(0,0.30)
#responseFunc.SetParameter(1,0.28)
#responseFunc.SetParameter(2,0.009)
#responseFunc.SetParameter(3,0.003)
responseFunc.FixParameter(3,0)
#responseFunc.SetParameter(4,0.7)
responseProfiles[key].Fit(responseFunc,'MRQ+','',0,100)
responseProfiles[key].GetFunction(responseFunc.GetName()).SetRange(0,1000)
else:
responseFunc.FixParameter(3,0)
#responseProfiles[key].Fit(responseFunc,'MRQ+','',0,100)
responseProfiles[key].Fit(responseFunc,'MR+')
responseProfiles[key].GetFunction(responseFunc.GetName()).SetRange(0,1000)
#else:
#responseProfiles[key].Fit(responseFunc,'MR+')
responseProfiles[key].GetFunction(responseFunc.GetName()).SetLineStyle(2-responseCtr)
responseProfiles[key].GetFunction(responseFunc.GetName()).SetLineColor(2-responseCtr)
responseCtr+=1
allLegs[nLegs].SetNColumns(2)
allLegs[nLegs].Draw()
MyPaveText('#bf{CMS} #it{simulation}')
canvas.Modified()
canvas.Update()
canvas.SaveAs('%s/piovereprofiles%s.png'%(outDir,postfix))
#save response parameterisations
fOut=ROOT.TFile.Open('%s/%s%s_response%s.root'%(outDir,xaxisName,yaxisName,postfix),'RECREATE')
toReturn=[None,None,None,None,None]
try:
toReturn[0]=responseProfiles[xaxisName].GetFunction('responseFunc').Clone('%s_responseFunc'%xaxisName)
responseProfiles[xaxisName].Write()
toReturn[0].Clone('%s_responseFunc'%xaxisName).Write()
except:
pass
try:
toReturn[1]=responseProfiles[yaxisName].GetFunction('responseFunc').Clone('%s_responseFunc'%yaxisName)
responseProfiles[yaxisName].Write()
toReturn[1].Clone('%s_responseFunc'%yaxisName).Write()
except:
pass
try:
for key in piovereArcFuncEvol:
piovereArcFuncEvol[key].Write()
toReturn[2+key]=piovereArcFuncEvol[key].GetFunction('resEvolFunc').Clone('resEvolFunc_%s'%key)
toReturn[2+key].Clone('resEvolFunc_%s'%key).Write()
except:
pass
print 'pi/e written for %s, %s in %s'%(xaxisName,yaxisName,fOut.GetName())
fOut.Close()
#all done here
return toReturn
"""
draws the correlation between showerVolume and energy estimator
"""
#def computeCompensationWeights(enRanges,etaRanges,ws,outDir):
#
# print '[computeCompensationWeights] will look at correlations between reconstructed energy and hit fraction (global) or shower density (local)'
#
# #wgtfunc = ROOT.TF1('wgtfunc','[0]+[1]*pow(x,[2])',0,2)
# wgtfunc = ROOT.TF1('wgtfunc','[0]+[1]*x',0,2)
# weightOptimGr = {'rho':[],'c':[]}
# aGr = {'rho':[],'c':[]}
# for key in aGr:
# for ip in xrange(0,wgtfunc.GetNpar()):
# aGr[key].append( ROOT.TGraphErrors() )
# aGr[key][ip].SetMarkerStyle(20)
# aGr[key][ip].SetName('%s_swweights_%d'%(key,ip))
#
#
# canvas=ROOT.TCanvas('c','c',1200,800)
# all2DScatters={'c_Si':[],'c_Sci':[],'rho_Si':[],'rho_Sci':[]}
# all2DProfiles={'c_Si':[],'c_Sci':[],'rho_Si':[],'rho_Sci':[]}
# all1DProfiles={'c_Si':[],'c_Sci':[],'rho_Si':[],'rho_Sci':[]}
# for ien in xrange(0,len(enRanges)):
#
# genEn_min=enRanges[ien][0]
# genEn_max=enRanges[ien][1]
# genEn_mean=0.5*(genEn_max+genEn_min)
# iprof1d=len(all1DProfiles['rho_Si'])
#
# #optimize in energy density ranges, based on the inclusive profile
# all1DProfiles['rho_Si'].append( ROOT.TH1F('rho_Si_hprof1d_%d'%ien,'%d GeV;Shower density [GeV/Volume];PDF'%genEn_mean,25,0,2) )
# all1DProfiles['rho_Si'][iprof1d].SetDirectory(0)
# all1DProfiles['rho_Si'][iprof1d].Sumw2()
# all1DProfiles['rho_Sci'].append( all1DProfiles['rho_Si'][iprof1d].Clone('rho_Sci_hprof1d_%d'%ien) )
# all1DProfiles['rho_Sci'][iprof1d].SetDirectory(0)
#
# all1DProfiles['c_Si'].append( ROOT.TH1F('c_Si_hprof1d_%d'%ien,'%d GeV;C(10 MIP);PDF'%genEn_mean,25,0.5,2.0) )
# all1DProfiles['c_Si'][iprof1d].SetDirectory(0)
# all1DProfiles['c_Si'][iprof1d].Sumw2()
# all1DProfiles['c_Sci'].append( all1DProfiles['c_Si'][iprof1d].Clone( 'c_Sci_hprof1d_%d'%ien ) )
# all1DProfiles['c_Sci'][iprof1d].SetDirectory(0)
#
# #fill histograms and prepare for quantile computation
# rho_values,c_values=[],[]
# redIncData=ws.data('data').reduce('en>=%f && en<=%f'%(genEn_min,genEn_max))
# nEntries=redIncData.numEntries()
# if nEntries<10 : continue
# for ientry in xrange(0,nEntries):
# entryVars=redIncData.get(ientry)
# for subDet in ['EE','HEF','HEB']:
# rhoval=entryVars.find('rho_%s'%subDet).getVal()
# cval=entryVars.find('c_%s'%subDet).getVal()
# if subDet=='HEF':
# rho_values.append(rhoval)
# c_values.append(cval)
# if rhoval>0 : all1DProfiles['rho_%s'%subDet][iprof1d].Fill(rhoval,1./nEntries)
# if cval>0 : all1DProfiles['c_%s'%subDet][iprof1d].Fill(cval,1./nEntries)
#
# #sums for weight optimization
# rhoRanges, cRanges = [], []
# prevRhoQuantile, prevCQuantile = 0, 0
# for q in [5,25,50,75]:
# rhoQuantile, cQuantile = numpy.percentile(rho_values,q), numpy.percentile(c_values,q)
# if prevRhoQuantile!=rhoQuantile : rhoRanges.append([prevRhoQuantile,rhoQuantile])
# if prevCQuantile!=cQuantile : cRanges.append([prevCQuantile,cQuantile])
# prevRhoQuantile, prevCQuantile = rhoQuantile, cQuantile
# rhoRanges.append([prevRhoQuantile,ROOT.TMath.Min(numpy.percentile(rho_values,99),2)])
# cRanges.append([prevCQuantile,ROOT.TMath.Min(numpy.percentile(c_values,99),2)])
# print 'For E=%d GeV compensation weights will be optimised in the following en. density ranges'%genEn_mean
# print 'Density : ',rhoRanges
# print 'C : ',cRanges
#
# #arrays for optimisation: delta^2 = [ w Erec / Egen - 1]^2
# # => [ Erec/Egen ] [ w Erec / Egen - 1 ] = 0
# # ~ a(w.b +c ) = 0 <=> w = - ca / ab
# # with ca = - [Erec/Egen]
# # ab = [Erec/Egen]^2
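# # (equivalently: minimising sum_i (w*r_i - 1)^2 with r_i = Erec_i/Egen_i
# #  gives w = sum_i r_i / sum_i r_i^2, i.e. the ca_Sum/ab_Sum ratio used below)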
# ca_rho_Sum, ab_rho_Sum = [0]*len(rhoRanges), [0]*len(rhoRanges)
# ca_c_Sum, ab_c_Sum = [0]*len(cRanges), [0]*len(cRanges)
# rho_values, c_values = [], []
# for irho in xrange(0,len(rhoRanges)) : rho_values.append( [] )
# for ic in xrange(0,len(cRanges)) : c_values.append( [] )
#
# iprof=len(all2DScatters['rho_EE'])
# all2DScatters['rho_EE'].append( ROOT.TH2F('rho_EE_hprof2d_%d'%ien,';E_{rec} [GeV];Shower density [GeV/Volume];Events',30,0.5*genEn_mean,3*genEn_mean,25,0,2) )
# all2DScatters['rho_EE'][iprof].SetDirectory(0)
# all2DScatters['rho_EE'][iprof].Sumw2()
# all2DScatters['rho_HEF'].append( all2DScatters['rho_EE'][iprof].Clone('rho_HEF_hprof2d_%d'%ien) )
# all2DScatters['rho_HEF'][iprof].SetDirectory(0)
# all2DScatters['rho_HEB'].append( all2DScatters['rho_EE'][iprof].Clone('rho_HEB_hprof2d_%d'%ien) )
# all2DScatters['rho_HEB'][iprof].SetDirectory(0)
# all2DScatters['c_EE'].append( ROOT.TH2F('c_EE_hprof2d_%d'%ien,';E_{rec} [GeV];C(10 MIP);Events',30,0.5*genEn_mean,3*genEn_mean,25,0.5,2) )
# all2DScatters['c_EE'][iprof].SetDirectory(0)
# all2DScatters['c_EE'][iprof].Sumw2()
# all2DScatters['c_HEF'].append( all2DScatters['c_EE'][iprof].Clone('c_HEF_hprof2d_%d'%ien) )
# all2DScatters['c_HEF'][iprof].SetDirectory(0)
# all2DScatters['c_HEB'].append( all2DScatters['c_EE'][iprof].Clone('c_HEB_hprof2d_%d'%ien) )
# all2DScatters['c_HEB'][iprof].SetDirectory(0)
#
# for ieta in xrange(0,len(etaRanges)):
# genEta_min=etaRanges[ieta][0]
# genEta_max=etaRanges[ieta][1]
#
# #fill a new profile
# redData=ws.data('data').reduce('en>=%f && en<=%f && eta>=%f && eta<=%f'%(genEn_min,genEn_max,genEta_min,genEta_max))
# if redData.numEntries()<10 : continue
#
# for ientry in xrange(0,redData.numEntries()):
# entryVars=redData.get(ientry)
#
# egen=entryVars.find('en').getVal()
# #FIXME use pi/e corrections
# e_EE = entryVars.find('en_EE').getVal() #*ws.var('k_EE').getVal()
# e_HEF = entryVars.find('en_HEF').getVal() #*ws.var('k_HEF').getVal()
# e_HEB = entryVars.find('en_HEB').getVal() #*ws.var('k_HEB').getVal()
# e_tot = e_EE+e_HEF+e_HEB
#
# for subDet in ['EE','HEF','HEB']:
#
# rhoval=entryVars.find('rho_%s'%subDet).getVal()
# all2DScatters['rho_%s'%subDet][iprof].Fill(e_tot,rhoval)
#
# cval=entryVars.find('c_%s'%subDet).getVal()
# all2DScatters['c_%s'%subDet][iprof].Fill(e_tot,cval)
#
# if subDet!='HEF': continue
#
# for irho in xrange(0,len(rhoRanges)):
# if rhoval<rhoRanges[irho][0] or rhoval>rhoRanges[irho][1] : continue
# rho_values[ irho ].append( rhoval )
# ca_rho_Sum[ irho ] += (e_tot/egen)
# ab_rho_Sum[ irho ] += (e_tot/egen)*(e_tot/egen)
#
# for ic in xrange(0,len(cRanges)):
# if cval<cRanges[ic][0] or cval>cRanges[ic][1] : continue
# c_values[ ic ].append( cval )
# ca_c_Sum[ ic ] += (e_tot/egen)
# ab_c_Sum[ ic ] += (e_tot/egen)*(e_tot/egen)
#
# #now optimize weights for this energy
# for key in weightOptimGr:
# weightOptimGr[key].append( ROOT.TGraph() )
# weightOptimGr[key][ien].SetName('%s_sweights_%d'%(key,ien))
# weightOptimGr[key][ien].SetTitle('%d GeV'%genEn_mean)
# weightOptimGr[key][ien].SetMarkerStyle(20)
# if key=='rho':
# for irho in xrange(0,len(rhoRanges)):
# if ab_rho_Sum[irho]==0: continue
# np=weightOptimGr[key][ien].GetN()
# weightOptimGr[key][ien].SetPoint(np,numpy.mean(rho_values[irho]),ca_rho_Sum[irho]/ab_rho_Sum[irho])
# else:
# for ic in xrange(0,len(cRanges)):
# if ab_c_Sum[ic]==0: continue
# np=weightOptimGr[key][ien].GetN()
# weightOptimGr[key][ien].SetPoint(np,numpy.mean(c_values[ic]),ca_c_Sum[ic]/ab_c_Sum[ic])
#
# #save evolution of the parameters for this energy
# weightOptimGr[key][ien].Fit(wgtfunc,'MQR+')
# for ip in xrange(0,wgtfunc.GetNpar()):
# np=aGr[key][ip].GetN()
# aGr[key][ip].SetPoint(np,genEn_mean,wgtfunc.GetParameter(ip))
# aGr[key][ip].SetPointError(np,0,wgtfunc.GetParError(ip))
#
# #show profile
# canvas.Clear()
# canvas.Divide(3,2)
# ipad=0
# for var in ['rho','c']:
# for subDet in ['EE','HEF','HEB']:
# ipad+=1
# p=canvas.cd(ipad)
# p.SetLogz()
# p.SetRightMargin(0.1)
# key='%s_%s'%(var,subDet)
# all2DProfiles[key].append( all2DScatters[key][iprof].ProfileY('%s_prof'%all2DScatters[key][iprof].GetName()) )
# all2DProfiles[key][iprof].SetMarkerStyle(20)
# all2DScatters[key][iprof].Draw('colz')
# all2DScatters[key][iprof].GetZaxis().SetTitleOffset(-0.5)
# all2DScatters[key][iprof].GetYaxis().SetTitleOffset(1.2)
# #all2DProfiles[key][iprof].Draw('e1same')
# MyPaveText('[ %s ]'%subDet,0.8,0.95,0.95,0.99)
# if ipad>1: continue
# MyPaveText('#bf{CMS} #it{simulation} Energy=%d'%genEn_mean)
# canvas.Modified()
# canvas.Update()
# canvas.SaveAs('%s/showerdens_%d_profile.png'%(outDir,ien))
#
# #compare shower densities for different events
# canvas.Clear()
# canvas.Divide(3,2)
# ipad=0
# for var in ['rho','c']:
# for subDet in ['EE','HEF','HEB']:
# key='%s_%s'%(var,subDet)
# ipad+=1
# p=canvas.cd(ipad)
# p.Clear()
# p.SetLogy(True)
# for iprof1d in xrange(0,len(all1DProfiles[key])):
# all1DProfiles[key][iprof1d].SetLineColor(45-2*iprof1d)
# all1DProfiles[key][iprof1d].SetMarkerColor(45-2*iprof1d)
# all1DProfiles[key][iprof1d].SetMarkerStyle(1)
# all1DProfiles[key][iprof1d].SetLineWidth(2)
# if iprof1d==0 :
# all1DProfiles[key][iprof1d].Draw('hist')
# all1DProfiles[key][iprof1d].GetYaxis().SetRangeUser(1e-3,1.0)
# all1DProfiles[key][iprof1d].GetYaxis().SetTitleOffset(1.2)
# else:
# all1DProfiles[key][iprof1d].Draw('histsame')
#
# MyPaveText('[ %s ]'%subDet,0.8,0.95,0.95,0.99)
#
# if ipad>1: continue
# MyPaveText('#bf{CMS} #it{simulation}')
# leg=p.BuildLegend(0.6,0.6,0.9,0.94)
# leg.SetFillStyle(0)
# leg.SetBorderSize(0)
# leg.SetTextFont(42)
# leg.SetTextSize(0.03)
#
# canvas.Modified()
# canvas.Update()
# canvas.SaveAs('%s/showerdens.png'%outDir)
#
#
# #compare evolution
# npar=wgtfunc.GetNpar()
# canvas.Clear()
# canvas.SetWindowSize(400*npar,400)
# canvas.SetCanvasSize(400*npar,400)
# canvas.Divide(npar,1)
# for ip in xrange(0,wgtfunc.GetNpar()):
# p=canvas.cd(ip+1)
# p.SetLogy(False)
# key='rho'
# aGr[key][ip].Draw('ap')
# aGr[key][ip].GetXaxis().SetTitle('Energy [GeV]')
# aGr[key][ip].GetYaxis().SetTitle('Weight function parameter # %d'%ip)
# aGr[key][ip].GetYaxis().SetTitleOffset(1.4)
# aFunc=ROOT.TF1('%s_evfunc_%d'%(key,ip),'[0]+[1]*(1-exp(-x*[2]))',0,1000)
# aFunc.SetParLimits(0,-100,100)
# aFunc.SetParLimits(1,-50,50)
# aFunc.SetParLimits(2,0.01,100)
# aGr[key][ip].Fit(aFunc,'MR+')
# if ip==0 : MyPaveText('#bf{CMS} #it{simulation}')
# MyPaveText('Parameter evolution\\p_{%d}(E)=q_{1}+q_{2}(1-e^{-q_{3}E})\\q_{1}=%3.3f#pm%3.3f\\q_{2}=%3.3f#pm%3.3f\\q_{3}=%3.3f#pm%3.3f'
# %(ip,aFunc.GetParameter(0),aFunc.GetParError(0),
# aFunc.GetParameter(1),aFunc.GetParError(1),
# aFunc.GetParameter(2),aFunc.GetParError(2)),
# 0.4,0.3,0.9,0.6).SetTextSize(0.035)
# canvas.Modified()
# canvas.Update()
# canvas.SaveAs('%s/swcompweights.png'%(outDir))
# canvas.SaveAs('%s/swcompweights.C'%(outDir))
#
#
# #save weights and weight function to file
# swcompUrl='%s/swcompweights.root'%outDir
# fOut=ROOT.TFile(swcompUrl,'RECREATE')
# fOut.cd()
# for key in aGr :
# for gr in aGr[key] : gr.Write()
# for w in weightOptimGr[key] : w.Write()
# wgtfunc.Write()
# fOut.Close()
#
# #all done
# return swcompUrl
#
"""
Adapts the workspace for pion calibration
"""
def adaptWorkspaceForPionCalibration(opt,outDir):
wsUrl = opt.wsUrl
#prepare workspace (if needed) and output
if wsUrl is None :
#prepare the workspace and get new url
wsUrl=prepareWorkspace(url=opt.input,weightingScheme=WEIGHTINGSCHEME,vetoTrackInt=opt.vetoTrackInt,vetoHEBLeaks=opt.vetoHEBLeaks,treeVarName=opt.treeVarName)
#all done here
print 'Workspace ready for pion calibration, stored @ %s'%wsUrl
#return the workspace
wsOutF=ROOT.TFile.Open(wsUrl)
ws=wsOutF.Get('w')
wsOutF.Close()
return ws
"""
Steers the calibration study
"""
def runCalibrationStudy(opt):
#prepare the output
outDir="./"
if opt.wsUrl is None:
outDir=os.path.basename(opt.input).replace('.root','')
os.system('mkdir -p '+outDir)
else:
outDir=os.path.dirname(opt.wsUrl)
#get the workspace
ws=adaptWorkspaceForPionCalibration(opt,outDir=outDir)
#init phase space regions of interest
etaRanges = [[1.55,1.75],[1.75,2.0],[2.0,2.25],[2.25,2.5],[2.5,2.7],[2.7,2.9]]
enRanges = [[1.8,2.2],[2.8,3.2],[4.5,5.5],[7.5,8.5],[9,11],[19,21],[39,41],[49,51],[74,76],[99,101],[124,126],[174,176],[249,251],[399,401],[499,501]]
#if opt.noEE and opt.noHEF:
# enRanges=enRanges[:len(enRanges)-3]
pioverE_EE, pioverE_HEF, pioverE_HEB, banana = None,None,None, None
#get pi/e for HEB
try:
hebFin=ROOT.TFile.Open(opt.hebRespUrl)
pioverE_HEB = hebFin.Get('HEB_responseFunc')
print 'Readout pi/e response for HEB from %s'%hebFin.GetName()
if opt.noEE and opt.noHEF:
computeSubdetectorResponse(enRanges=enRanges,etaRanges=etaRanges,xaxis=[('HEB',pioverE_HEB)],yaxis=[],banana=banana,ws=ws,outDir=outDir,byMode=opt.byMode)
hebFin.Close()
except:
if opt.noEE and opt.noHEF:
print 'Will compute pi/e response for HEB sub-detector'
pioverE_HEB = computeSubdetectorResponse(enRanges=enRanges,etaRanges=etaRanges,xaxis=[('HEB',pioverE_HEB)],yaxis=[],banana=banana,ws=ws,outDir=outDir,byMode=opt.byMode)[0]
#full Si combination
if opt.combineSi==True:
try:
eehefFin=ROOT.TFile.Open(opt.eeRespUrl)
pioverE_EE = eehefFin.Get('EEHEF_responseFunc')
pioverE_HEF = pioverE_EE
print 'Readout pi/e response for EE+HEF from %s'%eehefFin.GetName()
eehefFin.Close()
if not (opt.bananaUrl is None):
bananaFin=ROOT.TFile.Open(opt.bananaUrl)
banana=(bananaFin.Get('resEvolFunc_0'),bananaFin.Get('resEvolFunc_1'),bananaFin.Get('resEvolFunc_2'))
print 'Readout banana correction for EE+HEF from %s'%bananaFin.GetName()
bananaFin.Close()
computeSubdetectorResponse(enRanges=enRanges,etaRanges=etaRanges,
xaxis=[('EE',pioverE_EE),('HEF',pioverE_HEF)],yaxis=[('HEB',pioverE_HEB)],banana=banana,
ws=ws,outDir=outDir,byMode=opt.byMode)
except:
print 'Will compute pi/e for EE+HEF'
pioverE_EE =computeSubdetectorResponse(enRanges=enRanges,etaRanges=etaRanges,
xaxis=[('EE',None),('HEF',None)],yaxis=[('HEB',pioverE_HEB)],banana=banana,
ws=ws,outDir=outDir,byMode=opt.byMode) [0]
pioverE_HEF=pioverE_EE
#get pi/e for HEF
else:
if opt.noHEF==False:
try:
hefhebFin=ROOT.TFile.Open(opt.hefRespUrl)
pioverE_HEF = hefhebFin.Get('HEF_responseFunc')
print 'Readout pi/e responses for HEF from %s'%hefhebFin.GetName()
if opt.noEE :
if not (opt.bananaUrl is None):
bananaFin=ROOT.TFile.Open(opt.bananaUrl)
banana=(bananaFin.Get('resEvolFunc_0'),bananaFin.Get('resEvolFunc_1'),bananaFin.Get('resEvolFunc_2'))
print 'Readout banana correction for HEF+HEB from %s'%bananaFin.GetName()
bananaFin.Close()
computeSubdetectorResponse(enRanges=enRanges, etaRanges=etaRanges,
xaxis=[('HEF',pioverE_HEF)], yaxis=[('HEB',pioverE_HEB)],banana=banana,
ws=ws, outDir=outDir,byMode=opt.byMode)
hefhebFin.Close()
except:
if opt.noEE :
print 'Will compute pi/e response for HEF sub-detector'
pioverE_HEF = computeSubdetectorResponse(enRanges=enRanges,etaRanges=etaRanges,
xaxis=[('HEF',None)],yaxis=[('HEB',pioverE_HEB)],banana=banana,
ws=ws,outDir=outDir,byMode=opt.byMode) [0]
#get pi/e for EE
if opt.noEE==False:
try:
ehFin=ROOT.TFile.Open(opt.eeRespUrl)
pioverE_EE = ehFin.Get('EE_responseFunc')
print 'Readout pi/e response for EE from %s'%ehFin.GetName()
ehFin.Close()
if not (opt.bananaUrl is None):
bananaFin=ROOT.TFile.Open(opt.bananaUrl)
banana=(bananaFin.Get('resEvolFunc_0'),bananaFin.Get('resEvolFunc_1'),bananaFin.Get('resEvolFunc_2'))
print 'Readout banana correction for EE from %s'%bananaFin.GetName()
bananaFin.Close()
computeSubdetectorResponse(enRanges=enRanges,etaRanges=etaRanges,
xaxis=[('EE',pioverE_EE)],yaxis=[('HEF',pioverE_HEF),('HEB',pioverE_HEB)],banana=banana,
ws=ws,outDir=outDir,byMode=opt.byMode)
except:
print 'Will compute pi/e for EE'
pioverE_EE =computeSubdetectorResponse(enRanges=enRanges,etaRanges=etaRanges,
xaxis=[('EE',None)],yaxis=[('HEF',pioverE_HEF),('HEB',pioverE_HEB)],banana=banana,
ws=ws,outDir=outDir,byMode=opt.byMode) [0]
#read sw compensation weights
# swCompParams=[]
# if opt.compWeights is None:
# opt.compWeights=computeCompensationWeights(enRanges,etaRanges,ws,outDir)
# swF=ROOT.TFile.Open(opt.compWeights)
# print 'Reading out compensation weights from %s'%(opt.compWeights)
# swwgtfunc=swF.Get('wgtfunc')
# for ip in xrange(0,swwgtfunc.GetNpar()):
# swCompParams.append( swF.Get('rho_swweights_%d'%ip).GetFunction('rho_evfunc_%d'%ip) )
# #swCompParams.append( swF.Get('rho_swweights_%d'%ip) )
# swF.Close()
#nothing else to be done
if opt.noResCalib==True :
print 'Bailing out, no residual calibration needs to be run'
return
#read calibrations from file, if available
weightTitles={'simple':'Simple sum '}
if opt.noEE==False and opt.noHEF==False: weightTitles['gc']='Global compensation'
calibPostFix='uncalib'
calibMap={}
calibMapRes={}
try:
calibPostFix=os.path.basename(opt.calibUrl)
calibPostFix='_'+calibPostFix.replace('.root','')
calibF=ROOT.TFile.Open(opt.calibUrl)
print 'Reading out calibrations from %s'%opt.calibUrl
for wType in weightTitles:
calibMap[wType]=calibF.Get('%s_calib'%wType).Clone()
calibMapRes[wType]=calibF.Get('calib_3_%s_res'%wType).Clone() #use the 2.0-2.25 eta range for residuals
calibF.Close()
except:
print 'No calibration will be applied'
#create the dataset for calibration
uncalibDataVars=ROOT.RooArgSet(ws.var('en'), ws.var('eta'), ws.var('phi'))
for wType in weightTitles:
ws.factory('%sEn[0,0,999999999]'%wType)
ws.var('%sEn'%wType).SetTitle( '%s'%weightTitles[wType] )
uncalibDataVars.add( ws.var('%sEn'%wType) )
getattr(ws,'import')( ROOT.RooDataSet('data_uncalib_final','data_uncalib_final',uncalibDataVars) )
for ientry in xrange(0,ws.data('data').numEntries()):
entryVars=ws.data('data').get(ientry)
#check phi
phi=entryVars.find('phi').getVal()
nphi=ROOT.TMath.Floor(ROOT.TMath.Abs(9*phi/ROOT.TMath.Pi()))
phi=ROOT.TMath.Abs(phi)-nphi*ROOT.TMath.Pi()/9.
if ROOT.TMath.Abs(phi-ROOT.TMath.Pi()/18.)<0.03 : continue
newEntry=ROOT.RooArgSet()
for baseVar in ['en','eta','phi']:
ws.var(baseVar).setVal( entryVars.find(baseVar).getVal() )
newEntry.add( ws.var(baseVar) )
enEstimators={}
#raw energies
rawe_EE = entryVars.find('en_EE').getVal()
if opt.noEE : rawe_EE=0
rawe_HEF = entryVars.find('en_HEF').getVal()
if opt.noHEF : rawe_HEF=0
rawe_HEB = entryVars.find('en_HEB').getVal()
rawe_Total = rawe_EE+rawe_HEF+rawe_HEB
#global compensation weights
c_EE = entryVars.find('c_EE').getVal()
c_HEF = entryVars.find('c_HEF').getVal()
c_HEB = entryVars.find('c_HEB').getVal()
#corrected energies
e_EE, e_HEF, e_HEB = 0, 0, 0
if not (pioverE_HEB is None) and not opt.noComp : e_HEB = rawe_HEB/pioverE_HEB.Eval(rawe_Total)
if not (pioverE_HEF is None) and not opt.noComp : e_HEF = rawe_HEF/pioverE_HEF.Eval(rawe_Total)
if not (pioverE_EE is None) and not opt.noComp : e_EE = rawe_EE/pioverE_EE.Eval(rawe_Total)
e_tot = e_EE + e_HEF + e_HEB
e_comp_tot = e_EE + c_HEF*e_HEF + e_HEB
#residual energy sharing correction
if not (banana is None):
if opt.noHEF is False:
yval_over_xyval_m_mip = -1
e_front, e_back = 0, 0
e_comp_front, e_comp_back = 0, 0
if opt.noEE:
e_front, e_comp_front = e_HEF, c_HEF*e_HEF
e_back, e_comp_back = e_HEB, e_HEB
mipEm = 0
if not(pioverE_HEF is None) : mipEm += INTEGMIPEM['HEF']/pioverE_HEF.Eval(INTEGMIPEM['HEF'])
else : mipEm += INTEGMIPEM['HEF']
elif opt.combineSi:
e_front, e_comp_front = e_EE+e_HEF, e_EE+c_HEF*e_HEF
e_back, e_comp_back = e_HEB, e_HEB
mipEm = 0
if not(pioverE_HEF is None) : mipEm += INTEGMIPEM['HEF']/pioverE_HEF.Eval(INTEGMIPEM['HEF'])
else : mipEm += INTEGMIPEM['HEF']
if not(pioverE_EE is None) : mipEm += INTEGMIPEM['EE']/pioverE_EE.Eval(INTEGMIPEM['EE'])
else : mipEm += INTEGMIPEM['EE']
else:
e_front, e_comp_front = e_EE, e_EE
e_back, e_comp_back = e_HEB+e_HEF, e_HEB+c_HEF*e_HEF
mipEm = 0
if not(pioverE_EE is None) : mipEm += INTEGMIPEM['EE']/pioverE_EE.Eval(INTEGMIPEM['EE'])
else : mipEm += INTEGMIPEM['EE']
#do the banana
xyval_m_mip = ROOT.TMath.Max(e_front-mipEm,0.)+e_back
if e_back==0 : yval_over_xyval_m_mip=0
if xyval_m_mip>0 : yval_over_xyval_m_mip=e_front/xyval_m_mip
p0=banana[0].Eval(e_tot)
p1=banana[1].Eval(e_tot)
p2=banana[2].Eval(e_tot)
resCorrection = p0
resCorrection += p1*yval_over_xyval_m_mip
resCorrection += p2*yval_over_xyval_m_mip*yval_over_xyval_m_mip
if resCorrection>0: e_tot /=resCorrection
#do the banana
xyval_m_mip = ROOT.TMath.Max(e_comp_front-mipEm,0.)+e_comp_back
if e_comp_back==0 : yval_over_xyval_m_mip=0
if xyval_m_mip>0 : yval_over_xyval_m_mip=e_comp_front/xyval_m_mip
p0=banana[0].Eval(e_comp_tot)
p1=banana[1].Eval(e_comp_tot)
p2=banana[2].Eval(e_comp_tot)
resCorrection = p0
resCorrection += p1*yval_over_xyval_m_mip
resCorrection += p2*yval_over_xyval_m_mip*yval_over_xyval_m_mip
if resCorrection>0: e_comp_tot /=resCorrection
if e_tot<=0: continue
enEstimators['simple'] = e_tot
enEstimators['gc'] = e_comp_tot
#software compensation weight
# e_rho_tot = e_tot
# rho_HEF = entryVars.find('rho_HEF').getVal()
# if e_HEF > 0 and rho_HEF>0:
# for ip in xrange(0,swwgtfunc.GetNpar()):
# swwgtfunc.SetParameter(ip,swCompParams[ip].Eval(e_tot))
# swWgt=swwgtfunc.Eval(ROOT.TMath.Min(rho_HEF,2.0))
# e_rho_tot = e_EE + swWgt*e_HEF + e_HEB
# enEstimators['lc']=e_rho_tot
#now apply calibration
for wType in weightTitles :
ienVal=enEstimators[wType]
if wType in calibMap:
ienVal=calibMap[wType].GetX(ienVal)
if wType in calibMapRes:
resCorr=calibMapRes[wType].Eval(ienVal)
ienVal*=(1-resCorr/100.)
#add corrected value
ws.var('%sEn'%wType).setVal(ienVal)
newEntry.add(ws.var('%sEn'%wType))
#all filled, add new row
ws.data('data_uncalib_final').add(newEntry)
#calibrate the energy estimators (split up in different energies and pseudo-rapidity ranges)
nSigmasToFit=3.0
calibGr={}
resGr={}
for wType in weightTitles:
calibGr[wType]=ROOT.TMultiGraph()
calibGr[wType].SetName('calib_%s'%wType)
calibGr[wType].SetTitle( weightTitles[wType] )
resGr[wType]=calibGr[wType].Clone('res_%s'%wType)
for iEtaRange in xrange(0,len(etaRanges)):
genEta_min=etaRanges[iEtaRange][0]
genEta_max=etaRanges[iEtaRange][1]
genEta_mean=0.5*(genEta_max+genEta_min)
#keep track of eta slices separately
etaSliceCalibGr={}
etaSliceResGr={}
iwtype=0
for wType in weightTitles:
iwtype=iwtype+1
etaSliceCalibGr[wType]=ROOT.TGraphErrors()
etaSliceCalibGr[wType].SetName('calib_%s_%s'%(iEtaRange,wType))
etaSliceCalibGr[wType].SetTitle('%3.1f<|#eta|<%3.1f'%(genEta_min,genEta_max))
etaSliceCalibGr[wType].SetLineColor(iwtype)
etaSliceCalibGr[wType].SetMarkerColor(iwtype)
etaSliceCalibGr[wType].SetMarkerStyle(iEtaRange+20)
etaSliceResGr[wType]=etaSliceCalibGr[wType].Clone('res_%s_%s'%(iEtaRange,wType))
for iEnRange in xrange(0,len(enRanges)):
genEn_min = enRanges[iEnRange][0]
genEn_max = enRanges[iEnRange][1]
redData=ws.data('data_uncalib_final').reduce('en>=%f && en<=%f && eta>=%f && eta<=%f'%(genEn_min,genEn_max,genEta_min,genEta_max))
if redData.numEntries()<10 : continue
genEn_mean, genEn_sigma = redData.mean(ws.var('en')), redData.sigma(ws.var('en'))
for wType in weightTitles :
#prepare the fit to this slice
vName = '%sEn'%wType
v_mean, v_sigma, v_skew = redData.mean(ws.function(vName)), redData.sigma(ws.function(vName)), redData.skewness(ws.function(vName))
#v_min, v_max = ROOT.TMath.Max(v_mean*0.5,v_mean-5*v_sigma), v_mean+5*v_sigma
#v_fitMin, v_fitMax = ROOT.TMath.Max(1,v_mean-nSigmasToFit*v_sigma), v_mean+nSigmasToFit*v_sigma
v_min, v_max = ROOT.TMath.Max(1,v_mean-3*v_sigma), v_mean+3*v_sigma
v_fitMin, v_fitMax = ROOT.TMath.Max(1,v_mean-nSigmasToFit*v_sigma), v_mean+nSigmasToFit*v_sigma
#define PDF
fitName = 'range%d%d_%s'%(iEtaRange,iEnRange,vName)
ws.var(vName).setRange(fitName,v_min,v_max)
ws.var(vName).setRange('fit_%s'%fitName,v_fitMin, v_fitMax)
iniAlphaValue=0
if v_skew<0 : iniAlphaValue=2
if v_skew>0 : iniAlphaValue=-2
ws.factory('RooCBShape::resol_%s(%s,mean_%s[%f,%f,%f],sigma_%s[%f,%f,%f],alpha_%s[%f,-10.0,10.0],n_%s[2,1,3])'%
(fitName,vName,
fitName,v_mean,v_min,v_max,
fitName,v_sigma,v_sigma*0.001, v_sigma*1.5,
fitName,iniAlphaValue,
fitName)
)
#fit
theVar=ws.var(vName)
thePDF=ws.pdf('resol_%s'%fitName)
fres = thePDF.fitTo( redData, ROOT.RooFit.Range('fit_%s'%fitName), ROOT.RooFit.Save(True) )
meanFit, meanFit_error = ws.var('mean_%s'%fitName).getVal(), ws.var('mean_%s'%fitName).getError()
sigmaFit, sigmaFit_error = ws.var('sigma_%s'%fitName).getVal(), ws.var('sigma_%s'%fitName).getError()
scanStep=(v_max-v_min)*1e-4
tol=scanStep/4
effSigma = ROOT.getEffSigma(theVar,thePDF,v_min,v_max,scanStep,tol)
sigmaEffVal=0.5*(effSigma.second-effSigma.first)
#save results
np=etaSliceCalibGr[wType].GetN()
etaSliceCalibGr[wType].SetPoint(np,genEn_mean,meanFit)
etaSliceCalibGr[wType].SetPointError(np,0,meanFit_error)
etaSliceResGr[wType].SetPoint(np,genEn_mean,sigmaFit/meanFit)
etaSliceResGr[wType].SetPointError(np,0,ROOT.TMath.Sqrt( ROOT.TMath.Power(meanFit*sigmaFit_error,2)+
ROOT.TMath.Power(meanFit_error*sigmaFit,2) ) /
ROOT.TMath.Power(meanFit,2) )
#for debug purposes
showCalibrationFitResults( theVar=ws.var(vName),
theData=redData,
thePDF=ws.pdf('resol_%s'%fitName),
theLabel='#it{Energy=%3.0f GeV, %3.1f<#eta<%3.1f}\\#mu=%3.2f#pm%3.2f\\#sigma=%3.2f#pm%3.2f\\#sigma_{eff}=%3.2f'%(genEn_mean,genEta_min,genEta_max,meanFit,meanFit_error,sigmaFit,sigmaFit_error,sigmaEffVal),
fitName=fitName,
outDir=outDir)
for wType in weightTitles:
calibGr[wType].Add(etaSliceCalibGr[wType],'p')
resGr[wType].Add(etaSliceResGr[wType],'p')
#derive calibration
calibModel=ROOT.TF1('calibmodel',"[0]*x",0,1000)
calibModel.SetLineWidth(1)
for wType in weightTitles :
calibGr[wType].Fit(calibModel,'MER+','',5,1000)
calibGr[wType].GetFunction(calibModel.GetName()).SetRange(0,1000)
calibGr[wType].GetFunction(calibModel.GetName()).SetLineColor(calibGr[wType].GetListOfGraphs().At(0).GetLineColor())
#show results
resCorrectionGr,resCalibGr=showCalibrationCurves(calibGr=calibGr,calibRanges=etaRanges,outDir=outDir,calibPostFix=calibPostFix)
showResolutionCurves(resGr=resGr,outDir=outDir,calibPostFix=calibPostFix,model=0)
#save all to file
calibModelRes=ROOT.TF1('calibmodelres',"[0]*x*x+[1]*x+[2]",1.45,3.1)
calibF=ROOT.TFile.Open('%s/calib_%s.root'%(outDir,calibPostFix),'RECREATE')
for wType in weightTitles :
calibGr[wType].Write()
calibGr[wType].GetFunction(calibModel.GetName()).Write('%s_calib'%wType)
for gr in resCalibGr[wType].GetListOfGraphs():
gr.Fit(calibModelRes,'WMR+')
gr.Write()
resCorrectionGr[wType].Write()
resCorrectionGr[wType].Fit(calibModelRes,'WMR+')
resCorrectionGr[wType].GetFunction(calibModelRes.GetName()).Write('%s_calib_res'%wType)
for gr in resGr[wType].GetListOfGraphs():
gr.Write()
calibF.Close()
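"""
Illustrative sketch (hypothetical helper, not called anywhere in this script):
the residual 'banana' correction applied above divides the summed energy by a
quadratic in the front/back energy-sharing variable, with p0, p1, p2 taken
from banana[0..2].Eval(energy)
"""
def bananaCorrectedEnergySketch(eTotal,yShare,p0,p1,p2):
    resCorrection = p0 + p1*yShare + p2*yShare*yShare
    if resCorrection>0 : return eTotal/resCorrection
    return eTotal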
"""
steer
"""
def main():
usage = 'usage: %prog [options]'
parser = optparse.OptionParser(usage)
parser.add_option('-i', '--in' , dest='input', help='Input file', default=None)
parser.add_option('-w', '--ws' , dest='wsUrl', help='Workspace file', default=None)
    parser.add_option('--emCalib' , dest='emCalibUrl', help='em calibration files (e.g. EE:calib_ee.root,HEF:calib_hef.root)', default=None)
parser.add_option('--noComp' , dest='noComp', help='no attempt to correct for compensation', default=False, action="store_true")
parser.add_option('--calib' , dest='calibUrl', help='pion calibration file', default=None)
parser.add_option('--compWeights' , dest='compWeights', help='file with software compensation weights', default=None)
parser.add_option('--byMode', dest='byMode', help='flag pi/e is to be determined by mode', default=False, action="store_true")
parser.add_option('--weighting', dest='weighting', help='em,lambda,dedx based weights', default='em')
parser.add_option('--vetoTrackInt', dest='vetoTrackInt', help='flag if tracker interactions should be removed', default=False, action="store_true")
    parser.add_option('--vetoHEBLeaks', dest='vetoHEBLeaks', help='flag if events with HEB leakage should be vetoed', default=False, action='store_true')
parser.add_option('--banana', dest='bananaUrl', help='Apply banana correction from this url', default=None)
parser.add_option('--noResCalib', dest='noResCalib', help='Don\'t run calibration fits in E/eta slices', default=False, action='store_true')
parser.add_option('--noEE', dest='noEE', help='Assign weight 0 to EE', default=False, action='store_true')
parser.add_option('--noHEF', dest='noHEF', help='Assign weight 0 to HEF', default=False, action='store_true')
parser.add_option('--combineSi', dest='combineSi', help='Combine Si detectors', default=False, action='store_true')
parser.add_option('--hebResp', dest='hebRespUrl', help='Location of the parameterization for HEB pi/e', default=None)
    parser.add_option('--hefResp', dest='hefRespUrl', help='Location of the parameterization for HEF pi/e', default=None)
parser.add_option('--eeResp', dest='eeRespUrl', help='Location of the parameterization for EE pi/e', default=None)
parser.add_option('-v', '--var' , dest='treeVarName', help='Variable to use as energy estimator', default='edep_rec')
(opt, args) = parser.parse_args()
#check inputs
if opt.input is None and opt.wsUrl is None:
parser.print_help()
sys.exit(1)
#basic ROOT customization
customROOTstyle()
ROOT.gSystem.Load( "libUserCodeHGCanalysis")
#ROOT.gROOT.SetBatch(False)
ROOT.gROOT.SetBatch(True)
ROOT.gStyle.SetPalette(1)
ROOT.gStyle.SetOptTitle(0)
ROOT.gStyle.SetOptStat(0)
ROOT.RooMsgService.instance().setSilentMode(True);
ROOT.RooMsgService.instance().getStream(0).removeTopic(ROOT.RooFit.Minimization);
ROOT.RooMsgService.instance().getStream(1).removeTopic(ROOT.RooFit.Minimization);
ROOT.RooMsgService.instance().getStream(1).removeTopic(ROOT.RooFit.ObjectHandling);
ROOT.RooMsgService.instance().getStream(1).removeTopic(ROOT.RooFit.DataHandling);
ROOT.RooMsgService.instance().getStream(1).removeTopic(ROOT.RooFit.Fitting);
ROOT.RooMsgService.instance().getStream(1).removeTopic(ROOT.RooFit.Plotting);
ROOT.RooMsgService.instance().getStream(0).removeTopic(ROOT.RooFit.InputArguments);
ROOT.RooMsgService.instance().getStream(1).removeTopic(ROOT.RooFit.InputArguments);
ROOT.RooMsgService.instance().getStream(0).removeTopic(ROOT.RooFit.Eval);
ROOT.RooMsgService.instance().getStream(1).removeTopic(ROOT.RooFit.Eval);
ROOT.RooMsgService.instance().getStream(1).removeTopic(ROOT.RooFit.Integration);
ROOT.RooMsgService.instance().getStream(1).removeTopic(ROOT.RooFit.NumIntegration);
ROOT.RooMsgService.instance().getStream(1).removeTopic(ROOT.RooFit.NumIntegration);
initWeightingScheme(opt=opt)
runCalibrationStudy(opt=opt)
if __name__ == "__main__":
sys.exit(main())
|
lgray/HGCanalysis
|
test/analysis/runPionCalibration.py
|
Python
|
gpl-3.0
| 74,020
|
[
"Gaussian"
] |
47ec75f64731eecb2f2e26ca74771e201a8aa96c5d7b762023c96e13bb71d202
|
from __future__ import division, print_function, absolute_import
import networkx as nx
import numpy as np
from matplotlib import pyplot as plt
from scipy.spatial.distance import cdist
from .utilities import DifferenceKernel
from .SafeMDP_class import (reachable_set, returnable_set, SafeMDP,
link_graph_and_safe_set)
__all__ = ['compute_true_safe_set', 'compute_true_S_hat', 'compute_S_hat0',
'grid_world_graph', 'grid', 'GridWorld', 'draw_gp_sample',
'states_to_nodes', 'nodes_to_states', 'shortest_path',
'path_to_boolean_matrix', 'safe_subpath']
def compute_true_safe_set(world_shape, altitude, h):
"""
Computes the safe set given a perfect knowledge of the map
Parameters
----------
world_shape: tuple
altitude: np.array
1-d vector with altitudes for each node
h: float
Safety threshold for height differences
Returns
-------
true_safe: np.array
Boolean array n_states x (n_actions + 1).
"""
true_safe = np.zeros((world_shape[0] * world_shape[1], 5), dtype=np.bool)
altitude_grid = altitude.reshape(world_shape)
# Reshape so that first dimensions are actions, the rest is the grid world.
safe_grid = true_safe.T.reshape((5,) + world_shape)
    # Height difference (current height - next height) --> positive if downhill
up_diff = altitude_grid[:, :-1] - altitude_grid[:, 1:]
right_diff = altitude_grid[:-1, :] - altitude_grid[1:, :]
    # States are always safe
true_safe[:, 0] = True
# Going in the opposite direction
safe_grid[1, :, :-1] = up_diff >= h
safe_grid[2, :-1, :] = right_diff >= h
safe_grid[3, :, 1:] = -up_diff >= h
safe_grid[4, 1:, :] = -right_diff >= h
return true_safe
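# Minimal usage sketch (illustrative helper, not called by the module): the
# true safe set of a 2 x 3 world is a boolean array of shape (6, 5), i.e.
# n_states x (1 state column + 4 actions); h is the safety threshold above.
def _example_true_safe_set():
    altitudes = np.array([1.0, 0.5, 0.5,
                          1.0, 0.5, 0.5])
    return compute_true_safe_set((2, 3), altitudes, h=-0.6)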
def dynamics_vec_ind(states_vec_ind, action, world_shape):
"""
Dynamic evolution of the system defined in vector representation of
the states
Parameters
----------
states_vec_ind: np.array
Contains all the vector indexes of the states we want to compute
the dynamic evolution for
action: int
action performed by the agent
Returns
-------
next_states_vec_ind: np.array
vector index of states resulting from applying the action given
as input to the array of starting points given as input
"""
n, m = world_shape
next_states_vec_ind = np.copy(states_vec_ind)
if action == 1:
next_states_vec_ind[:] = states_vec_ind + 1
condition = np.mod(next_states_vec_ind, m) == 0
next_states_vec_ind[condition] = states_vec_ind[condition]
elif action == 2:
next_states_vec_ind[:] = states_vec_ind + m
condition = next_states_vec_ind >= m * n
next_states_vec_ind[condition] = states_vec_ind[condition]
elif action == 3:
next_states_vec_ind[:] = states_vec_ind - 1
condition = np.mod(states_vec_ind, m) == 0
next_states_vec_ind[condition] = states_vec_ind[condition]
elif action == 4:
next_states_vec_ind[:] = states_vec_ind - m
condition = next_states_vec_ind <= -1
next_states_vec_ind[condition] = states_vec_ind[condition]
else:
raise ValueError("Unknown action")
return next_states_vec_ind
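# Minimal usage sketch (illustrative helper): on a 2 x 3 world with states
# numbered row-wise 0..5, action 1 moves one column to the right and saturates
# at the right edge, so state 2 is mapped onto itself.
def _example_dynamics_vec_ind():
    return dynamics_vec_ind(np.array([0, 2]), 1, (2, 3))  # -> array([1, 2])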
def compute_S_hat0(s, world_shape, n_actions, altitudes, step_size, h):
"""
Compute a valid initial safe seed.
Parameters
---------
s: int or nan
Vector index of the state where we start computing the safe seed
from. If it is equal to nan, a state is chosen at random
world_shape: tuple
Size of the grid world (rows, columns)
n_actions: int
Number of actions available to the agent
altitudes: np.array
It contains the flattened n x m matrix where the altitudes of all
the points in the map are stored
step_size: tuple
step sizes along each direction to create a linearly spaced grid
h: float
Safety threshold
Returns
------
S_hat: np.array
Boolean array n_states x (n_actions + 1).
"""
# Initialize
n, m = world_shape
n_states = n * m
S_hat = np.zeros((n_states, n_actions + 1), dtype=bool)
# In case an initial state is given
if not np.isnan(s):
S_hat[s, 0] = True
valid_initial_seed = False
vertical = False
horizontal = False
altitude_prev = altitudes[s]
if not isinstance(s, np.ndarray):
s = np.array([s])
# Loop through actions
for action in range(1, n_actions + 1):
# Compute next state to check steepness
next_vec_ind = dynamics_vec_ind(s, action, world_shape)
altitude_next = altitudes[next_vec_ind]
if s != next_vec_ind and -np.abs(altitude_prev - altitude_next) / \
step_size[0] >= h:
S_hat[s, action] = True
S_hat[next_vec_ind, 0] = True
S_hat[next_vec_ind, reverse_action(action)] = True
if action == 1 or action == 3:
vertical = True
if action == 2 or action == 4:
horizontal = True
if vertical and horizontal:
valid_initial_seed = True
if valid_initial_seed:
return S_hat
else:
print ("No valid initial seed starting from this state")
S_hat[:] = False
return S_hat
# If an explicit initial state is not given
else:
while np.all(np.logical_not(S_hat)):
initial_state = np.random.choice(n_states)
S_hat = compute_S_hat0(initial_state, world_shape, n_actions,
altitudes, step_size, h)
return S_hat
def reverse_action(action):
# Computes the action that is the opposite of the one given as input
rev_a = np.mod(action + 2, 4)
if rev_a == 0:
rev_a = 4
return rev_a
def grid_world_graph(world_size):
"""Create a graph that represents a grid world.
In the grid world there are four actions, (1, 2, 3, 4), which correspond
to going (up, right, down, left) in the x-y plane. The states are
ordered so that `np.arange(np.prod(world_size)).reshape(world_size)`
corresponds to a matrix where increasing the row index corresponds to the
x direction in the graph, and increasing y index corresponds to the y
direction.
Parameters
----------
world_size: tuple
The size of the grid world (rows, columns)
Returns
-------
graph: nx.DiGraph()
The directed graph representing the grid world.
"""
nodes = np.arange(np.prod(world_size))
grid_nodes = nodes.reshape(world_size)
graph = nx.DiGraph()
# action 1: go right
graph.add_edges_from(zip(grid_nodes[:, :-1].reshape(-1),
grid_nodes[:, 1:].reshape(-1)),
action=1)
# action 2: go down
graph.add_edges_from(zip(grid_nodes[:-1, :].reshape(-1),
grid_nodes[1:, :].reshape(-1)),
action=2)
# action 3: go left
graph.add_edges_from(zip(grid_nodes[:, 1:].reshape(-1),
grid_nodes[:, :-1].reshape(-1)),
action=3)
# action 4: go up
graph.add_edges_from(zip(grid_nodes[1:, :].reshape(-1),
grid_nodes[:-1, :].reshape(-1)),
action=4)
return graph
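# Minimal usage sketch (illustrative helper): a 2 x 3 grid world graph has 6
# nodes and 14 directed edges (7 grid transitions, each in both directions).
def _example_grid_world_graph():
    graph = grid_world_graph((2, 3))
    return graph.number_of_nodes(), graph.number_of_edges()  # -> (6, 14)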
def compute_true_S_hat(graph, safe_set, initial_nodes, reverse_graph=None):
"""
Compute the true safe set with reachability and returnability.
Parameters
----------
graph: nx.DiGraph
safe_set: np.array
initial_nodes: list of int
reverse_graph: nx.DiGraph
graph.reverse()
Returns
-------
true_safe: np.array
Boolean array n_states x (n_actions + 1).
"""
graph = graph.copy()
link_graph_and_safe_set(graph, safe_set)
if reverse_graph is None:
reverse_graph = graph.reverse()
reach = reachable_set(graph, initial_nodes)
ret = returnable_set(graph, reverse_graph, initial_nodes)
ret &= reach
return ret
class GridWorld(SafeMDP):
"""
Grid world with Safe exploration
Parameters
----------
gp: GPy.core.GP
Gaussian process that expresses our current belief over the safety
feature
world_shape: shape
Tuple that contains the shape of the grid world n x m
step_size: tuple of floats
Tuple that contains the step sizes along each direction to
create a linearly spaced grid
beta: float
Scaling factor to determine the amplitude of the confidence
intervals
altitudes: np.array
It contains the flattened n x m matrix where the altitudes
of all the points in the map are stored
h: float
Safety threshold
S0: np.array
n_states x (n_actions + 1) array of booleans that indicates which
states (first column) and which state-action pairs belong to the
initial safe seed. Notice that, by convention we initialize all
the states to be safe
S_hat0: np.array or nan
n_states x (n_actions + 1) array of booleans that indicates which
states (first column) and which state-action pairs belong to the
initial safe seed and satisfy recovery and reachability properties.
If it is nan, such a boolean matrix is computed during
initialization
noise: float
Standard deviation of the measurement noise
L: float
Lipschitz constant to compute expanders
update_dist: int
Distance in unweighted graph used for confidence interval update.
A sample will only influence other nodes within this distance.
"""
def __init__(self, gp, world_shape, step_size, beta, altitudes, h, S0,
S_hat0, L, update_dist=0):
# Safe set
self.S = S0.copy()
graph = grid_world_graph(world_shape)
link_graph_and_safe_set(graph, self.S)
        super(GridWorld, self).__init__(graph, gp, S_hat0, h, L, beta=beta)
self.altitudes = altitudes
self.world_shape = world_shape
self.step_size = step_size
self.update_dist = update_dist
# Grids for the map
self.coord = grid(self.world_shape, self.step_size)
# Distances
self.distance_matrix = cdist(self.coord, self.coord)
# Confidence intervals
self.l = np.empty(self.S.shape, dtype=float)
self.u = np.empty(self.S.shape, dtype=float)
self.l[:] = -np.inf
self.u[:] = np.inf
self.l[self.S] = h
# Prediction with difference of altitudes
states_ind = np.arange(np.prod(self.world_shape))
states_grid = states_ind.reshape(world_shape)
self._prev_up = states_grid[:, :-1].flatten()
self._next_up = states_grid[:, 1:].flatten()
self._prev_right = states_grid[:-1, :].flatten()
self._next_right = states_grid[1:, :].flatten()
self._mat_up = np.hstack((self.coord[self._prev_up, :],
self.coord[self._next_up, :]))
self._mat_right = np.hstack((self.coord[self._prev_right, :],
self.coord[self._next_right, :]))
def update_confidence_interval(self, jacobian=False):
"""
Updates the lower and the upper bound of the confidence intervals
using then posterior distribution over the gradients of the altitudes
Returns
-------
l: np.array
lower bound of the safety feature (mean - beta*std)
u: np.array
            upper bound of the safety feature (mean + beta*std)
"""
if jacobian:
# Predict safety feature
mu, s = self.gp.predict_jacobian(self.coord, full_cov=False)
mu = np.squeeze(mu)
# Confidence interval
s = self.beta * np.sqrt(s)
            # States are always safe
self.l[:, 0] = self.u[:, 0] = self.h
# Update safety feature
self.l[:, [1, 2]] = -mu[:, ::-1] - s[:, ::-1]
self.l[:, [3, 4]] = mu[:, ::-1] - s[:, ::-1]
self.u[:, [1, 2]] = -mu[:, ::-1] + s[:, ::-1]
self.u[:, [3, 4]] = mu[:, ::-1] + s[:, ::-1]
elif self.update_dist > 0:
# States are always safe
self.l[:, 0] = self.u[:, 0] = self.h
# Extract last two sampled states in the grid
last_states = self.gp.X[-2:]
last_nodes = states_to_nodes(last_states, self.world_shape,
self.step_size)
# Extract nodes to be updated
nodes1 = nx.single_source_shortest_path(self.graph, last_nodes[0],
self.update_dist).keys()
nodes2 = nx.single_source_shortest_path(self.graph, last_nodes[1],
self.update_dist).keys()
update_nodes = np.union1d(nodes1, nodes2)
subgraph = self.graph.subgraph(update_nodes)
# Sort states to be updated according to actions
prev_up = []
next_up = []
prev_right = []
next_right = []
for node1, node2, act in subgraph.edges_iter(data='action'):
if act == 2:
prev_right.append(node1)
next_right.append(node2)
elif act == 1:
prev_up.append(node1)
next_up.append(node2)
mat_up = np.hstack((self.coord[prev_up, :],
self.coord[next_up, :]))
mat_right = np.hstack((self.coord[prev_right, :],
self.coord[next_right, :]))
# Update confidence for nodes around last sample
mu_up, s_up = self.gp.predict(mat_up,
kern=DifferenceKernel(self.gp.kern),
full_cov=False)
s_up = self.beta * np.sqrt(s_up)
self.l[prev_up, 1, None] = mu_up - s_up
self.u[prev_up, 1, None] = mu_up + s_up
self.l[next_up, 3, None] = -mu_up - s_up
self.u[next_up, 3, None] = -mu_up + s_up
mu_right, s_right = self.gp.predict(mat_right,
kern=DifferenceKernel(
self.gp.kern),
full_cov=False)
s_right = self.beta * np.sqrt(s_right)
self.l[prev_right, 2, None] = mu_right - s_right
self.u[prev_right, 2, None] = mu_right + s_right
self.l[next_right, 4, None] = -mu_right - s_right
self.u[next_right, 4, None] = -mu_right + s_right
else:
# Initialize to unsafe
self.l[:] = self.u[:] = self.h - 1
# States are always safe
self.l[:, 0] = self.u[:, 0] = self.h
# Actions up and down
mu_up, s_up = self.gp.predict(self._mat_up,
kern=DifferenceKernel(self.gp.kern),
full_cov=False)
s_up = self.beta * np.sqrt(s_up)
self.l[self._prev_up, 1, None] = mu_up - s_up
self.u[self._prev_up, 1, None] = mu_up + s_up
self.l[self._next_up, 3, None] = -mu_up - s_up
self.u[self._next_up, 3, None] = -mu_up + s_up
# Actions left and right
mu_right, s_right = self.gp.predict(self._mat_right,
kern=DifferenceKernel(
self.gp.kern),
full_cov=False)
s_right = self.beta * np.sqrt(s_right)
self.l[self._prev_right, 2, None] = mu_right - s_right
self.u[self._prev_right, 2, None] = mu_right + s_right
self.l[self._next_right, 4, None] = -mu_right - s_right
self.u[self._next_right, 4, None] = -mu_right + s_right
def compute_expanders(self):
"""Compute the expanders based on the current estimate of S_hat."""
self.G[:] = False
for action in range(1, self.S_hat.shape[1]):
# action-specific safe set
s_hat = self.S_hat[:, action]
# Extract distance from safe points to non safe ones
distance = self.distance_matrix[np.ix_(s_hat, ~self.S[:, action])]
# Update expanders for this particular action
self.G[s_hat, action] = np.any(
self.u[s_hat, action, None] - self.L * distance >= self.h,
axis=1)
def update_sets(self):
"""
Update the sets S, S_hat and G taking with the available observation
"""
self.update_confidence_interval()
# self.S[:] = self.l >= self.h
self.S |= self.l >= self.h
self.compute_S_hat()
self.compute_expanders()
def plot_S(self, safe_set, action=0):
"""
Plot the set of safe states
Parameters
----------
safe_set: np.array(dtype=bool)
n_states x (n_actions + 1) array of boolean values that indicates
the safe set
action: int
The action for which we want to plot the safe set.
"""
plt.figure(action)
plt.imshow(np.reshape(safe_set[:, action], self.world_shape).T,
origin='lower', interpolation='nearest', vmin=0, vmax=1)
plt.title('action {0}'.format(action))
plt.show()
def add_observation(self, node, action):
"""
Add an observation of the given state-action pair.
Observing the pair (s, a) means adding an observation of the altitude
at s and an observation of the altitude at f(s, a)
Parameters
----------
node: int
Node index
action: int
Action index
"""
# Observation of next state
for _, next_node, data in self.graph.edges_iter(node, data=True):
if data['action'] == action:
break
self.add_gp_observations(self.coord[[node, next_node], :],
self.altitudes[[node, next_node], None])
def target_sample(self):
"""
Compute the next target (s, a) to sample (highest uncertainty within
G or S_hat)
Returns
-------
node: int
The next node to sample
action: int
The next action to sample
"""
if np.any(self.G):
# Extract elements in G
expander_id = np.nonzero(self.G)
# Compute uncertainty
w = self.u[self.G] - self.l[self.G]
# Find max uncertainty
max_id = np.argmax(w)
else:
print('No expanders, using most uncertain element in S_hat'
'instead.')
# Extract elements in S_hat
expander_id = np.nonzero(self.S_hat)
# Compute uncertainty
w = self.u[self.S_hat] - self.l[self.S_hat]
# Find max uncertainty
max_id = np.argmax(w)
return expander_id[0][max_id], expander_id[1][max_id]
def states_to_nodes(states, world_shape, step_size):
"""Convert physical states to node numbers.
Parameters
----------
states: np.array
States with physical coordinates
world_shape: tuple
The size of the grid_world
step_size: tuple
The step size of the grid world
Returns
-------
nodes: np.array
The node indices corresponding to the states
"""
states = np.asanyarray(states)
node_indices = np.rint(states / step_size).astype(np.int)
return node_indices[:, 1] + world_shape[1] * node_indices[:, 0]
def nodes_to_states(nodes, world_shape, step_size):
"""Convert node numbers to physical states.
Parameters
----------
nodes: np.array
Node indices of the grid world
world_shape: tuple
The size of the grid_world
step_size: np.array
The step size of the grid world
Returns
-------
states: np.array
The states in physical coordinates
"""
nodes = np.asanyarray(nodes)
step_size = np.asanyarray(step_size)
return np.vstack((nodes // world_shape[1],
nodes % world_shape[1])).T * step_size
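# Minimal usage sketch (illustrative helper): converting node indices to
# physical coordinates and back is the identity for on-grid states.
def _example_node_state_roundtrip():
    world_shape, step_size = (2, 3), (0.5, 0.5)
    states = nodes_to_states(np.arange(6), world_shape, step_size)
    return states_to_nodes(states, world_shape, step_size)  # -> array([0, ..., 5])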
def grid(world_shape, step_size):
"""
Creates grids of coordinates and indices of state space
Parameters
----------
world_shape: tuple
Size of the grid world (rows, columns)
step_size: tuple
        Physical step size in the grid world
Returns
-------
states_ind: np.array
(n*m) x 2 array containing the indices of the states
states_coord: np.array
(n*m) x 2 array containing the coordinates of the states
"""
nodes = np.arange(0, world_shape[0] * world_shape[1])
return nodes_to_states(nodes, world_shape, step_size)
def draw_gp_sample(kernel, world_shape, step_size):
"""
Draws a sample from a Gaussian process distribution over a user
specified grid
Parameters
----------
kernel: GPy kernel
Defines the GP we draw a sample from
world_shape: tuple
Shape of the grid we use for sampling
step_size: tuple
Step size along any axis to find linearly spaced points
"""
# Compute linearly spaced grid
coord = grid(world_shape, step_size)
# Draw a sample from GP
cov = kernel.K(coord) + np.eye(coord.shape[0]) * 1e-10
sample = np.random.multivariate_normal(np.zeros(coord.shape[0]), cov)
return sample, coord
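# Minimal usage sketch (illustrative helper, assuming the GPy package that
# provides the GP objects used by this module is available): draw one altitude
# sample on a 4 x 4 grid; 'sample' has one value per node, 'coord' one (x, y)
# row per node.
def _example_draw_gp_sample():
    import GPy
    kernel = GPy.kern.RBF(input_dim=2)
    sample, coord = draw_gp_sample(kernel, (4, 4), (0.25, 0.25))
    return sample.shape, coord.shape  # -> ((16,), (16, 2))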
def shortest_path(source, next_sample, G):
"""
Computes shortest safe path from a source to the next state-action pair
the agent needs to sample
Parameters
----------
source: int
        Starting node for the path
next_sample: (int, int)
Next state-action pair the agent needs to sample. First entry is the
number that indicates the state. Second entry indicates the action
G: networkx DiGraph
Graph that indicates the dynamics. It is linked to S matrix
Returns
-------
path: list
shortest safe path
"""
# Extract safe graph
safe_edges = [edge for edge in G.edges_iter(data=True) if edge[2]['safe']]
graph_safe = nx.DiGraph(safe_edges)
# Compute shortest path
target = next_sample[0]
action = next_sample[1]
path = nx.astar_path(graph_safe, source, target)
for _, next_node, data in graph_safe.out_edges(nbunch=target, data=True):
if data["action"] == action:
path = path + [next_node]
return path
def path_to_boolean_matrix(path, graph, S):
"""
    Computes an S-like matrix for approaches where performance is based
on the trajectory of the agent (e.g. unsafe or random exploration)
Parameters
----------
path: np.array
Contains the nodes that are visited along the path
graph: networkx.DiGraph
Graph that indicates the dynamics
S: np.array
Array describing the safe set (needed for initialization)
Returns
-------
bool_mat: np.array
S-like array that is true for all the states and state-action pairs
along the path
"""
# Initialize matrix
bool_mat = np.zeros_like(S, dtype=bool)
# Go through path to find actions
for i in range(len(path) - 1):
prev = path[i]
succ = path[i + 1]
for _, next_node, data in graph.out_edges(nbunch=prev, data=True):
if next_node == succ:
bool_mat[prev, 0] = True
a = data["action"]
bool_mat[prev, a] = True
break
bool_mat[succ, 0] = True
return bool_mat
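# Minimal usage sketch (illustrative helper): mark the states and state-action
# pairs visited along the path 0 -> 1 -> 4 of a 2 x 3 grid world (action 1 for
# the first step, action 2 for the second).
def _example_path_to_boolean_matrix():
    graph = grid_world_graph((2, 3))
    S = np.zeros((6, 5), dtype=bool)
    return path_to_boolean_matrix([0, 1, 4], graph, S)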
def safe_subpath(path, altitudes, h):
"""
Computes the maximum subpath of path along which the safety constraint is
not violated
Parameters
----------
path: np.array
Contains the nodes that are visited along the path
altitudes: np.array
1-d vector with altitudes for each node
h: float
Safety threshold
Returns
-------
subpath: np.array
Maximum subpath of path that fulfills the safety constraint
"""
# Initialize subpath
subpath = [path[0]]
# Loop through path
for j in range(len(path) - 1):
prev = path[j]
succ = path[j + 1]
# Check safety constraint
if altitudes[prev] - altitudes[succ] >= h:
subpath = subpath + [succ]
else:
break
return subpath
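# Minimal usage sketch (illustrative helper): with h = -0.2 the climb of 0.5
# from node 1 to node 2 violates the constraint, so only [0, 1] is kept.
def _example_safe_subpath():
    altitudes = np.array([0.0, 0.1, 0.6, 0.7])
    return safe_subpath([0, 1, 2, 3], altitudes, h=-0.2)  # -> [0, 1]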
|
befelix/SafeMDP
|
safemdp/grid_world.py
|
Python
|
mit
| 25,021
|
[
"Gaussian"
] |
9685f44f862f354f2adde8883f6d22d193ab46bad946123d623c05d1d15f3598
|
#!/usr/bin/env python
########################################################################
# File : dirac-jobexec
# Author : Stuart Paterson
########################################################################
""" The dirac-jobexec script is equipped to execute workflows that
are specified via their XML description. The main client of
this script is the Job Wrapper.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
import sys
import DIRAC
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
@Script()
def main():
# Register workflow parameter switch
Script.registerSwitch("p:", "parameter=", "Parameters that are passed directly to the workflow")
# Registering arguments will automatically add their description to the help menu
Script.registerArgument("jobXMLfile: specify path to the Job XML file description")
Script.parseCommandLine()
# from DIRAC.Core.Workflow.Parameter import *
from DIRAC import gLogger
from DIRAC.Core.Workflow.Workflow import fromXMLFile
from DIRAC.WorkloadManagementSystem.Client.JobReport import JobReport
from DIRAC.AccountingSystem.Client.DataStoreClient import DataStoreClient
from DIRAC.RequestManagementSystem.Client.Request import Request
# Forcing the current directory to be the first in the PYTHONPATH
sys.path.insert(0, os.path.realpath("."))
gLogger.showHeaders(True)
def jobexec(jobxml, wfParameters):
jobfile = os.path.abspath(jobxml)
if not os.path.exists(jobfile):
gLogger.warn("Path to specified workflow %s does not exist" % (jobfile))
sys.exit(1)
workflow = fromXMLFile(jobfile)
gLogger.debug(workflow)
code = workflow.createCode()
gLogger.debug(code)
jobID = 0
if "JOBID" in os.environ:
jobID = os.environ["JOBID"]
gLogger.info("DIRAC JobID %s is running at site %s" % (jobID, DIRAC.siteName()))
workflow.addTool("JobReport", JobReport(jobID))
workflow.addTool("AccountingReport", DataStoreClient())
workflow.addTool("Request", Request())
# Propagate the command line parameters to the workflow if any
for pName, pValue in wfParameters.items():
workflow.setValue(pName, pValue)
# Propagate the command line parameters to the workflow module instances of each step
for stepdefinition in workflow.step_definitions.values():
for moduleInstance in stepdefinition.module_instances:
for pName, pValue in wfParameters.items():
if moduleInstance.parameters.find(pName):
moduleInstance.parameters.setValue(pName, pValue)
return workflow.execute()
positionalArgs = Script.getPositionalArgs()
if len(positionalArgs) != 1:
gLogger.debug("Positional arguments were %s" % (positionalArgs))
DIRAC.abort(1, "Must specify the Job XML file description")
if "JOBID" in os.environ:
gLogger.info("JobID: %s" % (os.environ["JOBID"]))
jobXMLfile = positionalArgs[0]
parList = Script.getUnprocessedSwitches()
parDict = {}
for switch, parameter in parList:
if switch == "p":
name, value = parameter.split("=")
value = value.strip()
# The comma separated list in curly brackets is interpreted as a list
if value.startswith("{"):
value = value[1:-1].replace('"', "").replace(" ", "").split(",")
value = ";".join(value)
parDict[name] = value
gLogger.debug("PYTHONPATH:\n%s" % ("\n".join(sys.path)))
jobExec = jobexec(jobXMLfile, parDict)
if not jobExec["OK"]:
gLogger.debug("Workflow execution finished with errors, exiting")
if jobExec["Errno"]:
os._exit(jobExec["Errno"])
else:
os._exit(1)
else:
gLogger.debug("Workflow execution successful, exiting")
# dirac_jobexec might interact with ARC library which cannot be closed using a simple sys.exit(0)
# See https://bugzilla.nordugrid.org/show_bug.cgi?id=4022 for further details
os._exit(0)
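# Minimal sketch (illustrative only, with made-up LFNs): a '-p' switch value
# given in curly brackets, e.g. -p InputData='{"/lfn/a","/lfn/b"}', is
# flattened to a semicolon-separated string exactly as in the loop above.
def _exampleParameterValue(value='{"/lfn/a","/lfn/b"}'):
    if value.startswith("{"):
        value = value[1:-1].replace('"', "").replace(" ", "").split(",")
        value = ";".join(value)
    return value  # -> '/lfn/a;/lfn/b'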
if __name__ == "__main__":
main()
|
ic-hep/DIRAC
|
src/DIRAC/WorkloadManagementSystem/scripts/dirac_jobexec.py
|
Python
|
gpl-3.0
| 4,315
|
[
"DIRAC"
] |
362205e8069b1689a2fe489e01ce4f23f551aeec7f442d528ddd6078cfbac148
|
##
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing OpenFOAM, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Xavier Besseron (University of Luxembourg)
@author: Ward Poelmans (Ghent University)
@author: Balazs Hajgato (Free University Brussels (VUB))
"""
import glob
import os
import re
import shutil
import stat
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import adjust_permissions, apply_regex_substitutions, mkdir
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd, run_cmd_qa
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_OpenFOAM(EasyBlock):
"""Support for building and installing OpenFOAM."""
def __init__(self, *args, **kwargs):
"""Specify that OpenFOAM should be built in install dir."""
super(EB_OpenFOAM, self).__init__(*args, **kwargs)
self.build_in_installdir = True
self.wm_compiler = None
self.wm_mplib = None
self.openfoamdir = None
self.thrdpartydir = None
if 'extend' in self.name.lower():
if LooseVersion(self.version) >= LooseVersion('3.0'):
self.openfoamdir = 'foam-extend-%s' % self.version
else:
self.openfoamdir = 'OpenFOAM-%s-ext' % self.version
else:
self.openfoamdir = '-'.join([self.name, '-'.join(self.version.split('-')[:2])])
self.log.debug("openfoamdir: %s" % self.openfoamdir)
def extract_step(self):
"""Extract sources as expected by the OpenFOAM(-Extend) build scripts."""
super(EB_OpenFOAM, self).extract_step()
# make sure that the expected subdir is really there after extracting
# if not, the build scripts (e.g., the etc/bashrc being sourced) will likely fail
openfoam_installdir = os.path.join(self.installdir, self.openfoamdir)
if not os.path.exists(openfoam_installdir):
self.log.warning("Creating expected directory %s, and moving everything there" % openfoam_installdir)
try:
contents_installdir = os.listdir(self.installdir)
source = os.path.join(self.installdir, contents_installdir[0])
# it's one directory but has a wrong name
if len(contents_installdir) == 1 and os.path.isdir(source):
target = os.path.join(self.installdir, self.openfoamdir)
self.log.debug("Renaming %s to %s", source, target)
os.rename(source, target)
else:
mkdir(openfoam_installdir)
for fil in contents_installdir:
if fil != self.openfoamdir:
source = os.path.join(self.installdir, fil)
target = os.path.join(openfoam_installdir, fil)
self.log.debug("Moving %s to %s", source, target)
shutil.move(source, target)
os.chdir(openfoam_installdir)
except OSError, err:
raise EasyBuildError("Failed to move all files to %s: %s", openfoam_installdir, err)
def patch_step(self, beginpath=None):
"""Adjust start directory and start path for patching to correct directory."""
self.cfg['start_dir'] = os.path.join(self.installdir, self.openfoamdir)
super(EB_OpenFOAM, self).patch_step(beginpath=self.cfg['start_dir'])
def prepare_step(self):
"""Prepare for OpenFOAM install procedure."""
super(EB_OpenFOAM, self).prepare_step()
comp_fam = self.toolchain.comp_family()
if comp_fam == toolchain.GCC: # @UndefinedVariable
self.wm_compiler = 'Gcc'
elif comp_fam == toolchain.INTELCOMP: # @UndefinedVariable
self.wm_compiler = 'Icc'
else:
raise EasyBuildError("Unknown compiler family, don't know how to set WM_COMPILER")
# set to an MPI unknown by OpenFOAM, since we're handling the MPI settings ourselves (via mpicc, etc.)
# Note: this name must contain 'MPI' so the MPI version of the
# Pstream library is built (cf src/Pstream/Allwmake)
self.wm_mplib = "EASYBUILDMPI"
def configure_step(self):
"""Configure OpenFOAM build by setting appropriate environment variables."""
# compiler & compiler flags
comp_fam = self.toolchain.comp_family()
extra_flags = ''
if comp_fam == toolchain.GCC: # @UndefinedVariable
            if LooseVersion(get_software_version('GCC')) >= LooseVersion('4.8'):
# make sure non-gold version of ld is used, since OpenFOAM requires it
# see http://www.openfoam.org/mantisbt/view.php?id=685
extra_flags = '-fuse-ld=bfd'
# older versions of OpenFOAM-Extend require -fpermissive
if 'extend' in self.name.lower() and LooseVersion(self.version) < LooseVersion('2.0'):
extra_flags += ' -fpermissive'
elif comp_fam == toolchain.INTELCOMP: # @UndefinedVariable
# make sure -no-prec-div is used with Intel compilers
extra_flags = '-no-prec-div'
for env_var in ['CFLAGS', 'CXXFLAGS']:
env.setvar(env_var, "%s %s" % (os.environ.get(env_var, ''), extra_flags))
# patch out hardcoding of WM_* environment variables
# for example, replace 'export WM_COMPILER=Gcc' with ': ${WM_COMPILER:=Gcc}; export WM_COMPILER'
for script in [os.path.join(self.builddir, self.openfoamdir, x) for x in ['etc/bashrc', 'etc/cshrc']]:
self.log.debug("Patching out hardcoded $WM_* env vars in %s", script)
# disable any third party stuff, we use EB controlled builds
regex_subs = [(r"^(setenv|export) WM_THIRD_PARTY_USE_.*[ =].*$", r"# \g<0>")]
WM_env_var = ['WM_COMPILER', 'WM_MPLIB', 'WM_THIRD_PARTY_DIR']
# OpenFOAM >= 3.0.0 can use 64 bit integers
if 'extend' not in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0'):
WM_env_var.append('WM_LABEL_SIZE')
for env_var in WM_env_var:
regex_subs.append((r"^(setenv|export) (?P<var>%s)[ =](?P<val>.*)$" % env_var,
r": ${\g<var>:=\g<val>}; export \g<var>"))
apply_regex_substitutions(script, regex_subs)
# inject compiler variables into wmake/rules files
ldirs = glob.glob(os.path.join(self.builddir, self.openfoamdir, 'wmake', 'rules', 'linux*'))
langs = ['c', 'c++']
suffixes = ['', 'Opt']
wmake_rules_files = [os.path.join(ldir, lang + suff) for ldir in ldirs for lang in langs for suff in suffixes]
mpicc = os.environ['MPICC']
mpicxx = os.environ['MPICXX']
cc_seq = os.environ.get('CC_SEQ', os.environ['CC'])
cxx_seq = os.environ.get('CXX_SEQ', os.environ['CXX'])
if self.toolchain.mpi_family() == toolchain.OPENMPI:
# no -cc/-cxx flags supported in OpenMPI compiler wrappers
c_comp_cmd = 'OMPI_CC="%s" %s' % (cc_seq, mpicc)
cxx_comp_cmd = 'OMPI_CXX="%s" %s' % (cxx_seq, mpicxx)
else:
# -cc/-cxx should work for all MPICH-based MPIs (including Intel MPI)
c_comp_cmd = '%s -cc="%s"' % (mpicc, cc_seq)
cxx_comp_cmd = '%s -cxx="%s"' % (mpicxx, cxx_seq)
comp_vars = {
# specify MPI compiler wrappers and compiler commands + sequential compiler that should be used by them
'cc': c_comp_cmd,
'CC': cxx_comp_cmd,
'cOPT': os.environ['CFLAGS'],
'c++OPT': os.environ['CXXFLAGS'],
}
for wmake_rules_file in wmake_rules_files:
fullpath = os.path.join(self.builddir, self.openfoamdir, wmake_rules_file)
self.log.debug("Patching compiler variables in %s", fullpath)
regex_subs = []
for comp_var, newval in comp_vars.items():
regex_subs.append((r"^(%s\s*=\s*).*$" % re.escape(comp_var), r"\1%s" % newval))
apply_regex_substitutions(fullpath, regex_subs)
# enable verbose build for debug purposes
# starting with openfoam-extend 3.2, PS1 also needs to be set
env.setvar("FOAM_VERBOSE", '1')
# installation directory
env.setvar("FOAM_INST_DIR", self.installdir)
# third party directory
self.thrdpartydir = "ThirdParty-%s" % self.version
# only if third party stuff is actually installed
if os.path.exists(self.thrdpartydir):
os.symlink(os.path.join("..", self.thrdpartydir), self.thrdpartydir)
env.setvar("WM_THIRD_PARTY_DIR", os.path.join(self.installdir, self.thrdpartydir))
env.setvar("WM_COMPILER", self.wm_compiler)
env.setvar("WM_MPLIB", self.wm_mplib)
# parallel build spec
env.setvar("WM_NCOMPPROCS", str(self.cfg['parallel']))
# OpenFOAM >= 3.0.0 can use 64 bit integers
if 'extend' not in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0'):
if self.toolchain.options['i8']:
env.setvar("WM_LABEL_SIZE", '64')
else:
env.setvar("WM_LABEL_SIZE", '32')
# make sure lib/include dirs for dependencies are found
openfoam_extend_v3 = 'extend' in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0')
if LooseVersion(self.version) < LooseVersion("2") or openfoam_extend_v3:
self.log.debug("List of deps: %s" % self.cfg.dependencies())
for dep in self.cfg.dependencies():
                dep_name = dep['name'].upper()
dep_root = get_software_root(dep['name'])
env.setvar("%s_SYSTEM" % dep_name, "1")
dep_vars = {
"%s_DIR": "%s",
"%s_BIN_DIR": "%s/bin",
"%s_LIB_DIR": "%s/lib",
"%s_INCLUDE_DIR": "%s/include",
}
for var, val in dep_vars.iteritems():
env.setvar(var % dep_name, val % dep_root)
else:
for depend in ['SCOTCH', 'METIS', 'CGAL', 'Paraview']:
dependloc = get_software_root(depend)
if dependloc:
if depend == 'CGAL' and get_software_root('Boost'):
env.setvar("CGAL_ROOT", dependloc)
env.setvar("BOOST_ROOT", get_software_root('Boost'))
else:
env.setvar("%s_ROOT" % depend.upper(), dependloc)
def build_step(self):
"""Build OpenFOAM using make after sourcing script to set environment."""
precmd = "source %s" % os.path.join(self.builddir, self.openfoamdir, "etc", "bashrc")
# make directly in install directory
cmd_tmpl = "%(precmd)s && %(prebuildopts)s %(makecmd)s" % {
'precmd': precmd,
'prebuildopts': self.cfg['prebuildopts'],
'makecmd': os.path.join(self.builddir, self.openfoamdir, '%s'),
}
if 'extend' in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0'):
qa = {
"Proceed without compiling ParaView [Y/n]": 'Y',
"Proceed without compiling cudaSolvers? [Y/n]": 'Y',
}
noqa = [
".* -o .*",
"checking .*",
"warning.*",
"configure: creating.*",
"%s .*" % os.environ['CC'],
"wmake .*",
"Making dependency list for source file.*",
r"\s*\^\s*", # warning indicator
"Cleaning .*",
]
run_cmd_qa(cmd_tmpl % 'Allwmake.firstInstall', qa, no_qa=noqa, log_all=True, simple=True)
else:
run_cmd(cmd_tmpl % 'Allwmake', log_all=True, simple=True, log_output=True)
def install_step(self):
"""Building was performed in install dir, so just fix permissions."""
# fix permissions of OpenFOAM dir
fullpath = os.path.join(self.installdir, self.openfoamdir)
adjust_permissions(fullpath, stat.S_IROTH, add=True, recursive=True, ignore_errors=True)
adjust_permissions(fullpath, stat.S_IXOTH, add=True, recursive=True, onlydirs=True, ignore_errors=True)
# fix permissions of ThirdParty dir and subdirs (also for 2.x)
# if the thirdparty tarball is installed
fullpath = os.path.join(self.installdir, self.thrdpartydir)
if os.path.exists(fullpath):
adjust_permissions(fullpath, stat.S_IROTH, add=True, recursive=True, ignore_errors=True)
adjust_permissions(fullpath, stat.S_IXOTH, add=True, recursive=True, onlydirs=True, ignore_errors=True)
def sanity_check_step(self):
"""Custom sanity check for OpenFOAM"""
shlib_ext = get_shared_lib_ext()
# OpenFOAM >= 3.0.0 can use 64 bit integers
if 'extend' not in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0'):
if self.toolchain.options['i8']:
int_size = 'Int64'
else:
int_size = 'Int32'
else:
int_size = ''
psubdir = "linux64%sDP%sOpt" % (self.wm_compiler, int_size)
openfoam_extend_v3 = 'extend' in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0')
if openfoam_extend_v3 or LooseVersion(self.version) < LooseVersion("2"):
toolsdir = os.path.join(self.openfoamdir, "applications", "bin", psubdir)
libsdir = os.path.join(self.openfoamdir, "lib", psubdir)
dirs = [toolsdir, libsdir]
else:
toolsdir = os.path.join(self.openfoamdir, "platforms", psubdir, "bin")
libsdir = os.path.join(self.openfoamdir, "platforms", psubdir, "lib")
dirs = [toolsdir, libsdir]
# some randomly selected binaries
# if one of these is missing, it's very likely something went wrong
bins = [os.path.join(self.openfoamdir, "bin", x) for x in ["foamExec", "paraFoam"]] + \
[os.path.join(toolsdir, "buoyant%sSimpleFoam" % x) for x in ["", "Boussinesq"]] + \
[os.path.join(toolsdir, "%sFoam" % x) for x in ["boundary", "engine", "sonic"]] + \
[os.path.join(toolsdir, "surface%s" % x) for x in ["Add", "Find", "Smooth"]] + \
[os.path.join(toolsdir, x) for x in ["deformedGeom", "engineSwirl", "modifyMesh",
"refineMesh", "wdot"]]
# check for the Pstream and scotchDecomp libraries, there must be a dummy one and an mpi one
if 'extend' in self.name.lower():
libs = [os.path.join(libsdir, "libscotchDecomp.%s" % shlib_ext),
os.path.join(libsdir, "libmetisDecomp.%s" % shlib_ext)]
if LooseVersion(self.version) < LooseVersion('3.2'):
# Pstream should have both a dummy and a mpi one
libs.extend([os.path.join(libsdir, x, "libPstream.%s" % shlib_ext) for x in ["dummy", "mpi"]])
libs.extend([os.path.join(libsdir, "mpi", "libparMetisDecomp.%s" % shlib_ext)])
else:
libs.extend([os.path.join(libsdir, "libparMetisDecomp.%s" % shlib_ext)])
else:
# there must be a dummy one and an mpi one for both
libs = [os.path.join(libsdir, x, "libPstream.%s" % shlib_ext) for x in ["dummy", "mpi"]] + \
[os.path.join(libsdir, x, "libptscotchDecomp.%s" % shlib_ext) for x in ["dummy", "mpi"]] +\
[os.path.join(libsdir, "libscotchDecomp.%s" % shlib_ext)] + \
[os.path.join(libsdir, "dummy", "libscotchDecomp.%s" % shlib_ext)]
if 'extend' not in self.name.lower() and LooseVersion(self.version) >= LooseVersion("2.3.0"):
            # surfaceSmooth is replaced by surfaceLambdaMuSmooth in OpenFOAM v2.3.0
bins.remove(os.path.join(toolsdir, "surfaceSmooth"))
bins.append(os.path.join(toolsdir, "surfaceLambdaMuSmooth"))
custom_paths = {
'files': [os.path.join(self.openfoamdir, 'etc', x) for x in ["bashrc", "cshrc"]] + bins + libs,
'dirs': dirs,
}
super(EB_OpenFOAM, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self, altroot=None, altversion=None):
"""Define extra environment variables required by OpenFOAM"""
txt = super(EB_OpenFOAM, self).make_module_extra()
env_vars = [
('WM_PROJECT_VERSION', self.version),
('FOAM_INST_DIR', self.installdir),
('WM_COMPILER', self.wm_compiler),
('WM_MPLIB', self.wm_mplib),
('FOAM_BASH', os.path.join(self.installdir, self.openfoamdir, 'etc', 'bashrc')),
('FOAM_CSH', os.path.join(self.installdir, self.openfoamdir, 'etc', 'cshrc')),
]
# OpenFOAM >= 3.0.0 can use 64 bit integers
if 'extend' not in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0'):
if self.toolchain.options['i8']:
env_vars += [('WM_LABEL_SIZE', '64')]
else:
env_vars += [('WM_LABEL_SIZE', '32')]
for (env_var, val) in env_vars:
# check whether value is defined for compatibility with --module-only
if val:
txt += self.module_generator.set_environment(env_var, val)
return txt
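# Minimal sketch (illustrative only): configure_step above turns hard-coded
# 'export WM_*=...' lines in etc/bashrc into overridable defaults; the same
# substitution with the plain 're' module looks as follows (the bashrc line is
# a made-up example).
def _example_wm_env_var_patch():
    line = "export WM_COMPILER=Gcc"
    regex = r"^(setenv|export) (?P<var>WM_COMPILER)[ =](?P<val>.*)$"
    repl = r": ${\g<var>:=\g<val>}; export \g<var>"
    return re.sub(regex, repl, line, flags=re.M)  # -> ': ${WM_COMPILER:=Gcc}; export WM_COMPILER'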
|
ocaisa/easybuild-easyblocks
|
easybuild/easyblocks/o/openfoam.py
|
Python
|
gpl-2.0
| 19,042
|
[
"ParaView"
] |
1070fe13f5617aaa0d82518d74c5269b8b39533a880930902a166f4d2ddba3bf
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Orca documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 7 16:19:37 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'numpydoc',
'sphinx.ext.autosummary'
]
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Orca'
copyright = '2021, UrbanSim Inc'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.6'
# The full version, including alpha/beta/rc tags.
release = '1.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Orcadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Orca.tex', 'Orca Documentation',
'UrbanSim Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'orca', 'Orca Documentation',
['UrbanSim Inc.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Orca', 'Orca Documentation',
'UrbanSim Inc.', 'Orca', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
UDST/orca
|
docs/source/conf.py
|
Python
|
bsd-3-clause
| 8,328
|
[
"ORCA"
] |
c5ab5182a56d4fc6a8abfec81dffffa3ab6229e108b04e54c6be9672b3c88b22
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Deep learning via word2vec's "skip-gram and CBOW models", using either
hierarchical softmax or negative sampling [1]_ [2]_.
The training algorithms were originally ported from the C package https://code.google.com/p/word2vec/
and extended with additional functionality.
For a blog tutorial on gensim word2vec, with an interactive web app trained on GoogleNews, visit http://radimrehurek.com/2014/02/word2vec-tutorial/
**Make sure you have a C compiler before installing gensim, to use optimized (compiled) word2vec training**
(70x speedup compared to plain NumPy implementation [3]_).
Initialize a model with e.g.::
>>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)
Persist a model to disk with::
>>> model.save(fname)
>>> model = Word2Vec.load(fname) # you can continue training with the loaded model!
The model can also be instantiated from an existing file on disk in the word2vec C format::
>>> model = Word2Vec.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
>>> model = Word2Vec.load_word2vec_format('/tmp/vectors.bin', binary=True) # C binary format
You can perform various syntactic/semantic NLP word tasks with the model. Some of them
are already built-in::
>>> model.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
>>> model.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
>>> model.similarity('woman', 'man')
0.73723527
>>> model['computer'] # raw numpy vector of a word
array([-0.00449447, -0.00310097, 0.02421786, ...], dtype=float32)
and so on.
If you're finished training a model (=no more updates, only querying), you can do
>>> model.init_sims(replace=True)
to trim unneeded model memory = use (much) less RAM.
Note that there is a :mod:`gensim.models.phrases` module which lets you automatically
detect phrases longer than one word. Using phrases, you can learn a word2vec model
where "words" are actually multiword expressions, such as `new_york_times` or `financial_crisis`:
>>> bigram_transformer = gensim.models.Phrases(sentences)
>>> model = Word2Vec(bigram_transformer[sentences], size=100, ...)
.. [1] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient Estimation of Word Representations in Vector Space. In Proceedings of Workshop at ICLR, 2013.
.. [2] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. Distributed Representations of Words and Phrases and their Compositionality.
In Proceedings of NIPS, 2013.
.. [3] Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/
"""
from __future__ import division # py3 "true division"
import logging
import sys
import os
import heapq
from timeit import default_timer
from copy import deepcopy
from collections import defaultdict
import threading
import itertools
from gensim.utils import keep_vocab_item
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
from numpy import exp, log, dot, zeros, outer, random, dtype, float32 as REAL,\
double, uint32, seterr, array, uint8, vstack, fromstring, sqrt, newaxis,\
ndarray, empty, sum as np_sum, prod, ones, ascontiguousarray, vstack
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from gensim.corpora.dictionary import Dictionary
from six import iteritems, itervalues, string_types
from six.moves import xrange
from types import GeneratorType
logger = logging.getLogger(__name__)
try:
from gensim.models.word2vec_inner import train_batch_sg, train_batch_cbow
from gensim.models.word2vec_inner import score_sentence_sg, score_sentence_cbow
from gensim.models.word2vec_inner import FAST_VERSION, MAX_WORDS_IN_BATCH
except ImportError:
# failed... fall back to plain numpy (20-80x slower training than the above)
# Can't log here because logger is usually not configured at import time
FAST_VERSION = -1
MAX_WORDS_IN_BATCH = 10000
def train_batch_sg(model, sentences, alpha, work=None):
"""
Update skip-gram model by training on a sequence of sentences.
Each sentence is a list of string tokens, which are looked up in the model's
vocab dictionary. Called internally from `Word2Vec.train()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
result = 0
for sentence in sentences:
word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab and
model.vocab[w].sample_int > model.random.rand() * 2**32]
for pos, word in enumerate(word_vocabs):
reduced_window = model.random.randint(model.window) # `b` in the original word2vec code
# now go over all words from the (reduced) window, predicting each one in turn
start = max(0, pos - model.window + reduced_window)
for pos2, word2 in enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start):
# don't train on the `word` itself
if pos2 != pos:
train_sg_pair(model, model.index2word[word.index], word2.index, alpha)
result += len(word_vocabs)
return result
def train_batch_cbow(model, sentences, alpha, work=None, neu1=None):
"""
Update CBOW model by training on a sequence of sentences.
Each sentence is a list of string tokens, which are looked up in the model's
vocab dictionary. Called internally from `Word2Vec.train()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
result = 0
for sentence in sentences:
word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab and
model.vocab[w].sample_int > model.random.rand() * 2**32]
for pos, word in enumerate(word_vocabs):
reduced_window = model.random.randint(model.window) # `b` in the original word2vec code
start = max(0, pos - model.window + reduced_window)
window_pos = enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start)
word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
l1 = np_sum(model.syn0[word2_indices], axis=0) # 1 x vector_size
if word2_indices and model.cbow_mean:
l1 /= len(word2_indices)
train_cbow_pair(model, word, word2_indices, l1, alpha)
result += len(word_vocabs)
return result
def score_sentence_sg(model, sentence, work=None):
"""
Obtain likelihood score for a single sentence in a fitted skip-gram representation.
The sentence is a list of Vocab objects (or None, when the corresponding
word is not in the vocabulary). Called internally from `Word2Vec.score()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
log_prob_sentence = 0.0
if model.negative:
raise RuntimeError("scoring is only available for HS=True")
word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab]
for pos, word in enumerate(word_vocabs):
if word is None:
continue # OOV word in the input sentence => skip
# now go over all words from the window, predicting each one in turn
start = max(0, pos - model.window)
for pos2, word2 in enumerate(word_vocabs[start : pos + model.window + 1], start):
# don't train on OOV words and on the `word` itself
if word2 is not None and pos2 != pos:
log_prob_sentence += score_sg_pair(model, word, word2)
return log_prob_sentence
def score_sentence_cbow(model, sentence, alpha, work=None, neu1=None):
"""
Obtain likelihood score for a single sentence in a fitted CBOW representation.
The sentence is a list of Vocab objects (or None, when the corresponding
word is not in the vocabulary). Called internally from `Word2Vec.score()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
log_prob_sentence = 0.0
if model.negative:
raise RuntimeError("scoring is only available for HS=True")
word_vocabs = [model.vocab[w] for w in sentence if w in model.vocab]
for pos, word in enumerate(word_vocabs):
if word is None:
continue # OOV word in the input sentence => skip
start = max(0, pos - model.window)
window_pos = enumerate(word_vocabs[start:(pos + model.window + 1)], start)
word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
l1 = np_sum(model.syn0[word2_indices], axis=0) # 1 x layer1_size
if word2_indices and model.cbow_mean:
l1 /= len(word2_indices)
log_prob_sentence += score_cbow_pair(model, word, word2_indices, l1)
return log_prob_sentence
# If pyemd C extension is available, import it.
# If pyemd is attempted to be used, but isn't installed, ImportError will be raised.
try:
from pyemd import emd
PYEMD_EXT = True
except ImportError:
PYEMD_EXT = False
def train_sg_pair(model, word, context_index, alpha, learn_vectors=True, learn_hidden=True,
context_vectors=None, context_locks=None):
if context_vectors is None:
context_vectors = model.syn0
if context_locks is None:
context_locks = model.syn0_lockf
if word not in model.vocab:
return
predict_word = model.vocab[word] # target word (NN output)
l1 = context_vectors[context_index] # input word (NN input/projection layer)
lock_factor = context_locks[context_index]
neu1e = zeros(l1.shape)
if model.hs:
# work on the entire tree at once, to push as much work into numpy's C routines as possible (performance)
l2a = deepcopy(model.syn1[predict_word.point]) # 2d matrix, codelen x layer1_size
fa = 1.0 / (1.0 + exp(-dot(l1, l2a.T))) # propagate hidden -> output
ga = (1 - predict_word.code - fa) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1[predict_word.point] += outer(ga, l1) # learn hidden -> output
neu1e += dot(ga, l2a) # save error
if model.negative:
# use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
word_indices = [predict_word.index]
while len(word_indices) < model.negative + 1:
w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
if w != predict_word.index:
word_indices.append(w)
l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
fb = 1. / (1. + exp(-dot(l1, l2b.T))) # propagate hidden -> output
gb = (model.neg_labels - fb) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
neu1e += dot(gb, l2b) # save error
if learn_vectors:
l1 += neu1e * lock_factor # learn input -> hidden (mutates model.syn0[word2.index], if that is l1)
return neu1e
def sigmoid(p):
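# Note added for clarity (this helper is only relevant to the pure-Python code paths):
# a numerically stable logistic function. For p > 0, exp(-p) cannot overflow; for
# p <= 0, the algebraically equivalent exp(p) / (1 + exp(p)) avoids overflow of exp(-p)
# when p is very negative. The final branch is reachable only for NaN input.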
if p > 0:
return 1. / (1. + exp(-p))
elif p <= 0:
return exp(p) / (1 + exp(p))
else:
raise ValueError
def train_cbow_pair(model, word, input_word_indices, l1, alpha, learn_vectors=True, learn_hidden=True):
neu1e = zeros(l1.shape)
if model.hs:
l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size
fa = 1. / (1. + exp(-dot(l1, l2a.T))) # propagate hidden -> output
ga = (1. - word.code - fa) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1[word.point] += outer(ga, l1) # learn hidden -> output
neu1e += dot(ga, l2a) # save error
if model.negative:
# use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
word_indices = [word.index]
while len(word_indices) < model.negative + 1:
w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
if w != word.index:
word_indices.append(w)
l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
# fb = sigmoid(dot(l1, l2b.T)) # propagate hidden -> output
fb = 1. / (1. + exp(-dot(l1, l2b.T)))
gb = (model.neg_labels - fb) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
neu1e += dot(gb, l2b) # save error
if learn_vectors:
# learn input -> hidden, here for all words in the window separately
if not model.cbow_mean and input_word_indices:
neu1e /= len(input_word_indices)
for i in input_word_indices:
model.syn0[i] += neu1e * model.syn0_lockf[i]
return neu1e
def score_sg_pair(model, word, word2):
l1 = model.syn0[word2.index]
l2a = deepcopy(model.syn1[word.point]) # 2d matrix, codelen x layer1_size
sgn = (-1.0)**word.code # ch function, 0-> 1, 1 -> -1
lprob = -log(1.0 + exp(-sgn*dot(l1, l2a.T)))
return sum(lprob)
def score_cbow_pair(model, word, word2_indices, l1):
l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size
sgn = (-1.0)**word.code # ch function, 0-> 1, 1 -> -1
lprob = -log(1.0 + exp(-sgn*dot(l1, l2a.T)))
return sum(lprob)
class Vocab(object):
"""
A single vocabulary item, used internally for collecting per-word frequency/sampling info,
and for constructing binary trees (incl. both word leaves and inner nodes).
"""
def __init__(self, **kwargs):
self.count = 0
self.__dict__.update(kwargs)
def __lt__(self, other): # used for sorting in a priority queue
return self.count < other.count
def __str__(self):
vals = ['%s:%r' % (key, self.__dict__[key]) for key in sorted(self.__dict__) if not key.startswith('_')]
return "%s(%s)" % (self.__class__.__name__, ', '.join(vals))
class Word2Vec(utils.SaveLoad):
"""
Class for training, using and evaluating neural networks described in https://code.google.com/p/word2vec/
The model can be stored/loaded via its `save()` and `load()` methods, or stored/loaded in a format
compatible with the original word2vec implementation via `save_word2vec_format()` and `load_word2vec_format()`.
"""
def __init__(
self, sentences=None, size=100, alpha=0.025, window=5, min_count=5,
max_vocab_size=None, sample=1e-3, seed=1, workers=3, min_alpha=0.0001,
sg=0, hs=0, negative=5, cbow_mean=1, hashfxn=hash, iter=5, null_word=0,
trim_rule=None, sorted_vocab=1, batch_words=MAX_WORDS_IN_BATCH):
"""
Initialize the model from an iterable of `sentences`. Each sentence is a
list of words (unicode strings) that will be used for training.
The `sentences` iterable can be simply a list, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`BrownCorpus`, :class:`Text8Corpus` or :class:`LineSentence` in
this module for such examples.
If you don't supply `sentences`, the model is left uninitialized -- use if
you plan to initialize it in some other way.
`sg` defines the training algorithm. By default (`sg=0`), CBOW is used.
Otherwise (`sg=1`), skip-gram is employed.
`size` is the dimensionality of the feature vectors.
`window` is the maximum distance between the current and predicted word within a sentence.
`alpha` is the initial learning rate (will linearly drop to `min_alpha` as training progresses).
`seed` = for the random number generator. Initial vectors for each
word are seeded with a hash of the concatenation of word + str(seed).
Note that for a fully deterministically-reproducible run, you must also limit the model to
a single worker thread, to eliminate ordering jitter from OS thread scheduling. (In Python
3, reproducibility between interpreter launches also requires use of the PYTHONHASHSEED
environment variable to control hash randomization.)
`min_count` = ignore all words with total frequency lower than this.
`max_vocab_size` = limit RAM during vocabulary building; if there are more unique
words than this, then prune the infrequent ones. Every 10 million word types
need about 1GB of RAM. Set to `None` for no limit (default).
`sample` = threshold for configuring which higher-frequency words are randomly downsampled;
default is 1e-3, useful range is (0, 1e-5).
`workers` = use this many worker threads to train the model (=faster training with multicore machines).
`hs` = if 1, hierarchical softmax will be used for model training.
If set to 0 (default), and `negative` is non-zero, negative sampling will be used.
`negative` = if > 0, negative sampling will be used, the int for negative
specifies how many "noise words" should be drawn (usually between 5-20).
Default is 5. If set to 0, no negative sampling is used.
`cbow_mean` = if 0, use the sum of the context word vectors. If 1 (default), use the mean.
Only applies when cbow is used.
`hashfxn` = hash function to use to randomly initialize weights, for increased
training reproducibility. Default is Python's rudimentary built in hash function.
`iter` = number of iterations (epochs) over the corpus. Default is 5.
`trim_rule` = vocabulary trimming rule, specifies whether certain words should remain
in the vocabulary, be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used), or a callable that accepts parameters (word, count, min_count) and
returns either `utils.RULE_DISCARD`, `utils.RULE_KEEP` or `utils.RULE_DEFAULT`.
Note: The rule, if given, is only used to prune the vocabulary during build_vocab() and is not stored as part
of the model.
`sorted_vocab` = if 1 (default), sort the vocabulary by descending frequency before
assigning word indexes.
`batch_words` = target size (in words) for batches of examples passed to worker threads (and
thus cython routines). Default is 10000. (Larger batches will be passed if individual
texts are longer than 10000 words, but the standard cython code truncates to that maximum.)
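Example (an illustrative sketch, not a recommendation; `sentences` is assumed to be a
restartable iterable of tokenized sentences)::
>>> model = Word2Vec(sentences, size=200, window=5, min_count=5, workers=4, sg=1, hs=1, negative=0)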
"""
if FAST_VERSION == -1:
logger.warning('Slow version of {0} is being used'.format(__name__))
else:
logger.debug('Fast version of {0} is being used'.format(__name__))
self.vocab = {} # mapping from a word (string) to a Vocab object
self.index2word = [] # map from a word's matrix index (int) to word (string)
self.sg = int(sg)
self.cum_table = None # for negative sampling
self.vector_size = int(size)
self.layer1_size = int(size)
if size % 4 != 0:
logger.warning("consider setting layer size to a multiple of 4 for greater performance")
self.alpha = float(alpha)
self.min_alpha_yet_reached = float(alpha) # To warn user if alpha increases
self.window = int(window)
self.max_vocab_size = max_vocab_size
self.seed = seed
self.random = random.RandomState(seed)
self.min_count = min_count
self.sample = sample
self.workers = int(workers)
self.min_alpha = float(min_alpha)
self.hs = hs
self.negative = negative
self.cbow_mean = int(cbow_mean)
self.hashfxn = hashfxn
self.iter = iter
self.null_word = null_word
self.train_count = 0
self.total_train_time = 0
self.sorted_vocab = sorted_vocab
self.batch_words = batch_words
if sentences is not None:
if isinstance(sentences, GeneratorType):
raise TypeError("You can't pass a generator as the sentences argument. Try a restartable iterable instead.")
self.build_vocab(sentences, trim_rule=trim_rule)
self.train(sentences)
def make_cum_table(self, power=0.75, domain=2**31 - 1):
"""
Create a cumulative-distribution table using stored vocabulary word counts for
drawing random words in the negative-sampling training routines.
To draw a word index, choose a random integer up to the maximum value in the
table (cum_table[-1]), then find that integer's sorted insertion point
(as if by bisect_left or ndarray.searchsorted()). That insertion point is the
drawn index, coming up in proportion equal to the increment at that slot.
Called internally from 'build_vocab()'.
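Illustrative sketch (assumes the vocabulary has already been built)::
>>> model.make_cum_table()
>>> # draw one negative-sample word index, exactly as the training routines do
>>> w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))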
"""
vocab_size = len(self.index2word)
self.cum_table = zeros(vocab_size, dtype=uint32)
# compute sum of all power (Z in paper)
train_words_pow = float(sum([self.vocab[word].count**power for word in self.vocab]))
cumulative = 0.0
for word_index in range(vocab_size):
cumulative += self.vocab[self.index2word[word_index]].count**power
self.cum_table[word_index] = round(cumulative / train_words_pow * domain)
if len(self.cum_table) > 0:
assert self.cum_table[-1] == domain
def create_binary_tree(self):
"""
Create a binary Huffman tree using stored vocabulary word counts. Frequent words
will have shorter binary codes. Called internally from `build_vocab()`.
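Illustrative sketch (the words are hypothetical; assumes both are in the vocabulary
and 'the' is the more frequent of the two)::
>>> model.create_binary_tree()
>>> len(model.vocab['the'].code) <= len(model.vocab['serendipity'].code)
True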
"""
logger.info("constructing a huffman tree from %i words", len(self.vocab))
# build the huffman tree
heap = list(itervalues(self.vocab))
heapq.heapify(heap)
for i in xrange(len(self.vocab) - 1):
min1, min2 = heapq.heappop(heap), heapq.heappop(heap)
heapq.heappush(heap, Vocab(count=min1.count + min2.count, index=i + len(self.vocab), left=min1, right=min2))
# recurse over the tree, assigning a binary code to each vocabulary word
if heap:
max_depth, stack = 0, [(heap[0], [], [])]
while stack:
node, codes, points = stack.pop()
if node.index < len(self.vocab):
# leaf node => store its path from the root
node.code, node.point = codes, points
max_depth = max(len(codes), max_depth)
else:
# inner node => continue recursion
points = array(list(points) + [node.index - len(self.vocab)], dtype=uint32)
stack.append((node.left, array(list(codes) + [0], dtype=uint8), points))
stack.append((node.right, array(list(codes) + [1], dtype=uint8), points))
logger.info("built huffman tree with maximum node depth %i", max_depth)
def build_vocab(self, sentences, keep_raw_vocab=False, trim_rule=None, progress_per=10000, update=False):
"""
Build vocabulary from a sequence of sentences (can be a once-only generator stream).
Each sentence must be a list of unicode strings.
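Illustrative sketch (`sentences` is assumed to be a restartable iterable of token lists)::
>>> model = Word2Vec()  # defer vocabulary building
>>> model.build_vocab(sentences)
>>> model.train(sentences, total_examples=model.corpus_count)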
"""
self.scan_vocab(sentences, progress_per=progress_per, trim_rule=trim_rule, update=update) # initial survey
self.scale_vocab(keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule, update=update) # trim by min_count & precalculate downsampling
self.finalize_vocab(update=update) # build tables & arrays
def scan_vocab(self, sentences, progress_per=10000, trim_rule=None, update=False):
"""Do an initial scan of all words appearing in sentences."""
logger.info("collecting all words and their counts")
sentence_no = -1
total_words = 0
min_reduce = 1
vocab = defaultdict(int)
checked_string_types = 0
for sentence_no, sentence in enumerate(sentences):
if not checked_string_types:
if isinstance(sentence, string_types):
logger.warn("Each 'sentences' item should be a list of words (usually unicode strings)."
"First item here is instead plain %s.", type(sentence))
checked_string_types += 1
if sentence_no % progress_per == 0:
logger.info("PROGRESS: at sentence #%i, processed %i words, keeping %i word types",
sentence_no, sum(itervalues(vocab)) + total_words, len(vocab))
for word in sentence:
vocab[word] += 1
if self.max_vocab_size and len(vocab) > self.max_vocab_size:
total_words += utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)
min_reduce += 1
total_words += sum(itervalues(vocab))
logger.info("collected %i word types from a corpus of %i raw words and %i sentences",
len(vocab), total_words, sentence_no + 1)
self.corpus_count = sentence_no + 1
self.raw_vocab = vocab
def scale_vocab(self, min_count=None, sample=None, dry_run=False, keep_raw_vocab=False, trim_rule=None, update=False):
"""
Apply vocabulary settings for `min_count` (discarding less-frequent words)
and `sample` (controlling the downsampling of more-frequent words).
Calling with `dry_run=True` will only simulate the provided settings and
report the size of the retained vocabulary, effective corpus length, and
estimated memory requirements. Results are both printed via logging and
returned as a dict.
Delete the raw vocabulary after the scaling is done to free up RAM,
unless `keep_raw_vocab` is set.
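Illustrative sketch (assumes `scan_vocab()` has already populated the raw vocabulary)::
>>> report = model.scale_vocab(min_count=10, sample=1e-4, dry_run=True)
>>> sorted(report)
['downsample_total', 'downsample_unique', 'drop_unique', 'memory', 'retain_total']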
"""
min_count = min_count or self.min_count
sample = sample or self.sample
drop_total = drop_unique = 0
if not update:
logger.info("Loading a fresh vocabulary")
retain_total, retain_words = 0, []
# Discard words less-frequent than min_count
if not dry_run:
self.index2word = []
# make stored settings match these applied settings
self.min_count = min_count
self.sample = sample
self.vocab = {}
for word, v in iteritems(self.raw_vocab):
if keep_vocab_item(word, v, min_count, trim_rule=trim_rule):
retain_words.append(word)
retain_total += v
if not dry_run:
self.vocab[word] = Vocab(count=v, index=len(self.index2word))
self.index2word.append(word)
else:
drop_unique += 1
drop_total += v
original_unique_total = len(retain_words) + drop_unique
retain_unique_pct = len(retain_words) * 100 / max(original_unique_total, 1)
logger.info("min_count=%d retains %i unique words (%i%% of original %i, drops %i)",
min_count, len(retain_words), retain_unique_pct, original_unique_total, drop_unique)
original_total = retain_total + drop_total
retain_pct = retain_total * 100 / max(original_total, 1)
logger.info("min_count=%d leaves %i word corpus (%i%% of original %i, drops %i)",
min_count, retain_total, retain_pct, original_total, drop_total)
else:
logger.info("Updating model with new vocabulary")
new_total = pre_exist_total = 0
new_words = pre_exist_words = []
for word, v in iteritems(self.raw_vocab):
if keep_vocab_item(word, v, min_count, trim_rule=trim_rule):
if word in self.vocab:
pre_exist_words.append(word)
pre_exist_total += v
if not dry_run:
self.vocab[word].count += v
else:
new_words.append(word)
new_total += v
if not dry_run:
self.vocab[word] = Vocab(count=v, index=len(self.index2word))
self.index2word.append(word)
else:
drop_unique += 1
drop_total += v
original_unique_total = len(pre_exist_words) + len(new_words) + drop_unique
pre_exist_unique_pct = len(pre_exist_words) * 100 / max(original_unique_total, 1)
new_unique_pct = len(new_words) * 100 / max(original_unique_total, 1)
logger.info("""New added %i unique words (%i%% of original %i)
and increased the count of %i pre-existing words (%i%% of original %i)""",
len(new_words), new_unique_pct, original_unique_total,
len(pre_exist_words), pre_exist_unique_pct, original_unique_total)
retain_words = new_words + pre_exist_words
retain_total = new_total + pre_exist_total
# Precalculate each vocabulary item's threshold for sampling
if not sample:
# no words downsampled
threshold_count = retain_total
elif sample < 1.0:
# traditional meaning: set parameter as proportion of total
threshold_count = sample * retain_total
else:
# new shorthand: sample >= 1 means downsample all words with higher count than sample
threshold_count = int(sample * (3 + sqrt(5)) / 2)
downsample_total, downsample_unique = 0, 0
for w in retain_words:
v = self.raw_vocab[w]
word_probability = (sqrt(v / threshold_count) + 1) * (threshold_count / v)
if word_probability < 1.0:
downsample_unique += 1
downsample_total += word_probability * v
else:
word_probability = 1.0
downsample_total += v
if not dry_run:
self.vocab[w].sample_int = int(round(word_probability * 2**32))
if not dry_run and not keep_raw_vocab:
logger.info("deleting the raw counts dictionary of %i items", len(self.raw_vocab))
self.raw_vocab = defaultdict(int)
logger.info("sample=%g downsamples %i most-common words", sample, downsample_unique)
logger.info("downsampling leaves estimated %i word corpus (%.1f%% of prior %i)",
downsample_total, downsample_total * 100.0 / max(retain_total, 1), retain_total)
# return from each step: words-affected, resulting-corpus-size
report_values = {'drop_unique': drop_unique, 'retain_total': retain_total,
'downsample_unique': downsample_unique, 'downsample_total': int(downsample_total)}
# print extra memory estimates
report_values['memory'] = self.estimate_memory(vocab_size=len(retain_words))
return report_values
def finalize_vocab(self, update=False):
"""Build tables and model weights based on final vocabulary settings."""
if not self.index2word:
self.scale_vocab()
if self.sorted_vocab and not update:
self.sort_vocab()
if self.hs:
# add info about each word's Huffman encoding
self.create_binary_tree()
if self.negative:
# build the table for drawing random words (for negative sampling)
self.make_cum_table()
if self.null_word:
# create null pseudo-word for padding when using concatenative L1 (run-of-words)
# this word is only ever input – never predicted – so count, huffman-point, etc doesn't matter
word, v = '\0', Vocab(count=1, sample_int=0)
v.index = len(self.vocab)
self.index2word.append(word)
self.vocab[word] = v
# set initial input/projection and hidden weights
if not update:
self.reset_weights()
else:
self.update_weights()
def sort_vocab(self):
"""Sort the vocabulary so the most frequent words have the lowest indexes."""
if hasattr(self, 'syn0'):
raise RuntimeError("must sort before initializing vectors/weights")
self.index2word.sort(key=lambda word: self.vocab[word].count, reverse=True)
for i, word in enumerate(self.index2word):
self.vocab[word].index = i
def reset_from(self, other_model):
"""
Borrow shareable pre-built structures (like vocab) from the other_model. Useful
if testing multiple models in parallel on the same corpus.
"""
self.vocab = other_model.vocab
self.index2word = other_model.index2word
self.cum_table = other_model.cum_table
self.corpus_count = other_model.corpus_count
self.reset_weights()
def _do_train_job(self, sentences, alpha, inits):
"""
Train a single batch of sentences. Return 2-tuple `(effective word count after
ignoring unknown words and sentence length trimming, total word count)`.
"""
work, neu1 = inits
tally = 0
if self.sg:
tally += train_batch_sg(self, sentences, alpha, work)
else:
tally += train_batch_cbow(self, sentences, alpha, work, neu1)
return tally, self._raw_word_count(sentences)
def _raw_word_count(self, job):
"""Return the number of words in a given job."""
return sum(len(sentence) for sentence in job)
def train(self, sentences, total_words=None, word_count=0,
total_examples=None, queue_factor=2, report_delay=1.0):
"""
Update the model's neural weights from a sequence of sentences (can be a once-only generator stream).
For Word2Vec, each sentence must be a list of unicode strings. (Subclasses may accept other examples.)
To support linear learning-rate decay from (initial) alpha to min_alpha, either total_examples
(count of sentences) or total_words (count of raw words in sentences) should be provided, unless the
sentences are the same as those that were used to initially build the vocabulary.
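Illustrative sketch (assumes `sentences` is the same corpus that was passed to `build_vocab()`)::
>>> model.train(sentences, total_examples=model.corpus_count)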
"""
# if FAST_VERSION < 0:
# import warnings
# warnings.warn("C extension not loaded for Word2Vec, training will be slow. "
# "Install a C compiler and reinstall gensim for fast training.")
self.neg_labels = []
if self.negative > 0:
# precompute negative labels optimization for pure-python training
self.neg_labels = zeros(self.negative + 1)
self.neg_labels[0] = 1.
logger.info(
"training model with %i workers on %i vocabulary and %i features, "
"using sg=%s hs=%s sample=%s negative=%s window=%s",
self.workers, len(self.vocab), self.layer1_size, self.sg,
self.hs, self.sample, self.negative, self.window)
if not self.vocab:
raise RuntimeError("you must first build vocabulary before training the model")
if not hasattr(self, 'syn0'):
raise RuntimeError("you must first finalize vocabulary before training the model")
if total_words is None and total_examples is None:
if self.corpus_count:
total_examples = self.corpus_count
logger.info("expecting %i sentences, matching count from corpus used for vocabulary survey", total_examples)
else:
raise ValueError("you must provide either total_words or total_examples, to enable alpha and progress calculations")
job_tally = 0
if self.iter > 1:
sentences = utils.RepeatCorpusNTimes(sentences, self.iter)
total_words = total_words and total_words * self.iter
total_examples = total_examples and total_examples * self.iter
def worker_loop():
"""Train the model, lifting lists of sentences from the job_queue."""
work = matutils.zeros_aligned(self.layer1_size, dtype=REAL) # per-thread private work memory
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
jobs_processed = 0
while True:
job = job_queue.get()
if job is None:
progress_queue.put(None)
break # no more jobs => quit this worker
sentences, alpha = job
tally, raw_tally = self._do_train_job(sentences, alpha, (work, neu1))
progress_queue.put((len(sentences), tally, raw_tally)) # report back progress
jobs_processed += 1
logger.debug("worker exiting, processed %i jobs", jobs_processed)
def job_producer():
"""Fill jobs queue using the input `sentences` iterator."""
job_batch, batch_size = [], 0
pushed_words, pushed_examples = 0, 0
next_alpha = self.alpha
if next_alpha > self.min_alpha_yet_reached:
logger.warn("Effective 'alpha' higher than previous training cycles")
self.min_alpha_yet_reached = next_alpha
job_no = 0
for sent_idx, sentence in enumerate(sentences):
sentence_length = self._raw_word_count([sentence])
# can we fit this sentence into the existing job batch?
if batch_size + sentence_length <= self.batch_words:
# yes => add it to the current job
job_batch.append(sentence)
batch_size += sentence_length
else:
# no => submit the existing job
logger.debug(
"queueing job #%i (%i words, %i sentences) at alpha %.05f",
job_no, batch_size, len(job_batch), next_alpha)
job_no += 1
job_queue.put((job_batch, next_alpha))
# update the learning rate for the next job
if self.min_alpha < next_alpha:
if total_examples:
# examples-based decay
pushed_examples += len(job_batch)
progress = 1.0 * pushed_examples / total_examples
else:
# words-based decay
pushed_words += self._raw_word_count(job_batch)
progress = 1.0 * pushed_words / total_words
next_alpha = self.alpha - (self.alpha - self.min_alpha) * progress
next_alpha = max(self.min_alpha, next_alpha)
# add the sentence that didn't fit as the first item of a new job
job_batch, batch_size = [sentence], sentence_length
# add the last job too (may be significantly smaller than batch_words)
if job_batch:
logger.debug(
"queueing job #%i (%i words, %i sentences) at alpha %.05f",
job_no, batch_size, len(job_batch), next_alpha)
job_no += 1
job_queue.put((job_batch, next_alpha))
if job_no == 0 and self.train_count == 0:
logger.warning(
"train() called with an empty iterator (if not intended, "
"be sure to provide a corpus that offers restartable "
"iteration = an iterable)."
)
# give the workers heads up that they can finish -- no more work!
for _ in xrange(self.workers):
job_queue.put(None)
logger.debug("job loop exiting, total %i jobs", job_no)
# buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
job_queue = Queue(maxsize=queue_factor * self.workers)
progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)
workers = [threading.Thread(target=worker_loop) for _ in xrange(self.workers)]
unfinished_worker_count = len(workers)
workers.append(threading.Thread(target=job_producer))
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
example_count, trained_word_count, raw_word_count = 0, 0, word_count
start, next_report = default_timer() - 0.00001, 1.0
while unfinished_worker_count > 0:
report = progress_queue.get() # blocks if workers too slow
if report is None: # a thread reporting that it finished
unfinished_worker_count -= 1
logger.info("worker thread finished; awaiting finish of %i more threads", unfinished_worker_count)
continue
examples, trained_words, raw_words = report
job_tally += 1
# update progress stats
example_count += examples
trained_word_count += trained_words # only words in vocab & sampled
raw_word_count += raw_words
# log progress once every report_delay seconds
elapsed = default_timer() - start
if elapsed >= next_report:
if total_examples:
# examples-based progress %
logger.info(
"PROGRESS: at %.2f%% examples, %.0f words/s, in_qsize %i, out_qsize %i",
100.0 * example_count / total_examples, trained_word_count / elapsed,
utils.qsize(job_queue), utils.qsize(progress_queue))
else:
# words-based progress %
logger.info(
"PROGRESS: at %.2f%% words, %.0f words/s, in_qsize %i, out_qsize %i",
100.0 * raw_word_count / total_words, trained_word_count / elapsed,
utils.qsize(job_queue), utils.qsize(progress_queue))
next_report = elapsed + report_delay
# all done; report the final stats
elapsed = default_timer() - start
logger.info(
"training on %i raw words (%i effective words) took %.1fs, %.0f effective words/s",
raw_word_count, trained_word_count, elapsed, trained_word_count / elapsed)
if job_tally < 10 * self.workers:
logger.warn("under 10 jobs per worker: consider setting a smaller `batch_words' for smoother alpha decay")
# check that the input corpus hasn't changed during iteration
if total_examples and total_examples != example_count:
logger.warn("supplied example count (%i) did not equal expected count (%i)", example_count, total_examples)
if total_words and total_words != raw_word_count:
logger.warn("supplied raw word count (%i) did not equal expected count (%i)", raw_word_count, total_words)
self.train_count += 1 # number of times train() has been called
self.total_train_time += elapsed
self.clear_sims()
return trained_word_count
# basics copied from the train() function
def score(self, sentences, total_sentences=int(1e6), chunksize=100, queue_factor=2, report_delay=1):
"""
Score the log probability for a sequence of sentences (can be a once-only generator stream).
Each sentence must be a list of unicode strings.
This does not change the fitted model in any way (see Word2Vec.train() for that).
We have currently only implemented score for the hierarchical softmax scheme,
so you need to have run word2vec with hs=1 and negative=0 for this to work.
Note that you should specify total_sentences; we'll run into problems if you ask to
score more than this number of sentences but it is inefficient to set the value too high.
See the article by [taddy]_ and the gensim demo at [deepir]_ for examples of how to use such scores in document classification.
.. [taddy] Taddy, Matt. Document Classification by Inversion of Distributed Language Representations, in Proceedings of the 2015 Conference of the Association of Computational Linguistics.
.. [deepir] https://github.com/piskvorky/gensim/blob/develop/docs/notebooks/deepir.ipynb
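Illustrative sketch (hierarchical softmax is required for scoring; the sentence is hypothetical)::
>>> model = Word2Vec(sentences, hs=1, negative=0)
>>> log_probs = model.score([['the', 'quick', 'brown', 'fox']], total_sentences=1)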
"""
if FAST_VERSION < 0:
import warnings
warnings.warn("C extension compilation failed, scoring will be slow. "
"Install a C compiler and reinstall gensim for fastness.")
logger.info(
"scoring sentences with %i workers on %i vocabulary and %i features, "
"using sg=%s hs=%s sample=%s and negative=%s",
self.workers, len(self.vocab), self.layer1_size, self.sg, self.hs, self.sample, self.negative)
if not self.vocab:
raise RuntimeError("you must first build vocabulary before scoring new data")
if not self.hs:
raise RuntimeError("we have only implemented score for hs")
def worker_loop():
"""Train the model, lifting lists of sentences from the jobs queue."""
work = zeros(1, dtype=REAL) # for sg hs, we actually only need one memory loc (running sum)
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
while True:
job = job_queue.get()
if job is None: # signal to finish
break
ns = 0
for sentence_id, sentence in job:
if sentence_id >= total_sentences:
break
if self.sg:
score = score_sentence_sg(self, sentence, work)
else:
score = score_sentence_cbow(self, sentence, work, neu1)
sentence_scores[sentence_id] = score
ns += 1
progress_queue.put(ns) # report progress
start, next_report = default_timer(), 1.0
# buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
job_queue = Queue(maxsize=queue_factor * self.workers)
progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)
workers = [threading.Thread(target=worker_loop) for _ in xrange(self.workers)]
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
sentence_count = 0
sentence_scores = matutils.zeros_aligned(total_sentences, dtype=REAL)
push_done = False
done_jobs = 0
jobs_source = enumerate(utils.grouper(enumerate(sentences), chunksize))
# fill jobs queue with (id, sentence) job items
while True:
try:
job_no, items = next(jobs_source)
if (job_no - 1) * chunksize > total_sentences:
logger.warning(
"terminating after %i sentences (set higher total_sentences if you want more).",
total_sentences)
job_no -= 1
raise StopIteration()
logger.debug("putting job #%i in the queue", job_no)
job_queue.put(items)
except StopIteration:
logger.info(
"reached end of input; waiting to finish %i outstanding jobs",
job_no - done_jobs + 1)
for _ in xrange(self.workers):
job_queue.put(None) # give the workers heads up that they can finish -- no more work!
push_done = True
try:
while done_jobs < (job_no + 1) or not push_done:
ns = progress_queue.get(push_done) # only block after all jobs pushed
sentence_count += ns
done_jobs += 1
elapsed = default_timer() - start
if elapsed >= next_report:
logger.info(
"PROGRESS: at %.2f%% sentences, %.0f sentences/s",
100.0 * sentence_count / total_sentences, sentence_count / elapsed)
next_report = elapsed + report_delay # don't flood log, wait report_delay seconds
else:
# loop ended by job count; really done
break
except Empty:
pass # already out of loop; continue to next push
elapsed = default_timer() - start
self.clear_sims()
logger.info(
"scoring %i sentences took %.1fs, %.0f sentences/s",
sentence_count, elapsed, sentence_count / elapsed)
return sentence_scores[:sentence_count]
def clear_sims(self):
self.syn0norm = None
def update_weights(self):
"""
Copy all the existing weights, and reset the weights for the newly
added vocabulary.
"""
logger.info("updating layer weights")
gained_vocab = len(self.vocab) - len(self.syn0)
newsyn0 = empty((gained_vocab, self.vector_size), dtype=REAL)
# randomize the remaining words
for i in xrange(len(self.syn0), len(self.vocab)):
# construct deterministic seed from word AND seed argument
newsyn0[i-len(self.syn0)] = self.seeded_vector(self.index2word[i] + str(self.seed))
self.syn0 = vstack([self.syn0, newsyn0])
if self.hs:
self.syn1 = vstack([self.syn1, zeros((gained_vocab, self.layer1_size), dtype=REAL)])
if self.negative:
self.syn1neg = vstack([self.syn1neg, zeros((gained_vocab, self.layer1_size), dtype=REAL)])
self.syn0norm = None
# do not suppress learning for already learned words
self.syn0_lockf = ones(len(self.vocab), dtype=REAL) # zeros suppress learning
def reset_weights(self):
"""Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary."""
logger.info("resetting layer weights")
self.syn0 = empty((len(self.vocab), self.vector_size), dtype=REAL)
# randomize weights vector by vector, rather than materializing a huge random matrix in RAM at once
for i in xrange(len(self.vocab)):
# construct deterministic seed from word AND seed argument
self.syn0[i] = self.seeded_vector(self.index2word[i] + str(self.seed))
if self.hs:
self.syn1 = zeros((len(self.vocab), self.layer1_size), dtype=REAL)
if self.negative:
self.syn1neg = zeros((len(self.vocab), self.layer1_size), dtype=REAL)
self.syn0norm = None
self.syn0_lockf = ones(len(self.vocab), dtype=REAL) # zeros suppress learning
def seeded_vector(self, seed_string):
"""Create one 'random' vector (but deterministic by seed_string)"""
# Note: built-in hash() may vary by Python version or even (in Py3.x) per launch
once = random.RandomState(self.hashfxn(seed_string) & 0xffffffff)
return (once.rand(self.vector_size) - 0.5) / self.vector_size
def save_word2vec_format(self, fname, fvocab=None, binary=False):
"""
Store the input-hidden weight matrix in the same format used by the original
C word2vec-tool, for compatibility.
`fname` is the file used to save the vectors in
`fvocab` is an optional file used to save the vocabulary
`binary` is an optional boolean indicating whether the data is to be saved
in binary word2vec format (default: False)
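Illustrative sketch (the paths are hypothetical)::
>>> model.save_word2vec_format('/tmp/vectors.bin', fvocab='/tmp/vocab.txt', binary=True)
>>> model2 = Word2Vec.load_word2vec_format('/tmp/vectors.bin', fvocab='/tmp/vocab.txt', binary=True)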
"""
if fvocab is not None:
logger.info("storing vocabulary in %s" % (fvocab))
with utils.smart_open(fvocab, 'wb') as vout:
for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count):
vout.write(utils.to_utf8("%s %s\n" % (word, vocab.count)))
logger.info("storing %sx%s projection weights into %s" % (len(self.vocab), self.vector_size, fname))
assert (len(self.vocab), self.vector_size) == self.syn0.shape
with utils.smart_open(fname, 'wb') as fout:
fout.write(utils.to_utf8("%s %s\n" % self.syn0.shape))
# store in sorted order: most frequent words at the top
for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count):
row = self.syn0[vocab.index]
if binary:
fout.write(utils.to_utf8(word) + b" " + row.tostring())
else:
fout.write(utils.to_utf8("%s %s\n" % (word, ' '.join("%f" % val for val in row))))
@classmethod
def load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
limit=None, datatype=REAL):
"""
Load the input-hidden weight matrix from the original C word2vec-tool format.
Note that the information stored in the file is incomplete (the binary tree is missing),
so while you can query for word similarity etc., you cannot continue training
with a model loaded this way.
`binary` is a boolean indicating whether the data is in binary word2vec format.
Word counts are read from `fvocab` filename, if set (this is the file generated
by `-save-vocab` flag of the original C tool).
If you trained the C model using non-utf8 encoding for words, specify that
encoding in `encoding`.
`unicode_errors`, default 'strict', is a string suitable to be passed as the `errors`
argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source
file may include word tokens truncated in the middle of a multibyte unicode character
(as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.
`limit` sets a maximum number of word-vectors to read from the file. The default,
None, means read all.
`datatype` (experimental) can coerce dimensions to a non-default float type (such
as np.float16) to save memory. (Such types may result in much slower bulk operations
or incompatibility with optimized routines.)
"""
counts = None
if fvocab is not None:
logger.info("loading word counts from %s", fvocab)
counts = {}
with utils.smart_open(fvocab) as fin:
for line in fin:
word, count = utils.to_unicode(line).strip().split()
counts[word] = int(count)
logger.info("loading projection weights from %s", fname)
with utils.smart_open(fname) as fin:
header = utils.to_unicode(fin.readline(), encoding=encoding)
vocab_size, vector_size = map(int, header.split()) # throws for invalid file format
if limit:
vocab_size = min(vocab_size, limit)
result = cls(size=vector_size)
result.syn0 = zeros((vocab_size, vector_size), dtype=datatype)
def add_word(word, weights):
word_id = len(result.vocab)
if word in result.vocab:
logger.warning("duplicate word '%s' in %s, ignoring all but first", word, fname)
return
if counts is None:
# most common scenario: no vocab file given. just make up some bogus counts, in descending order
result.vocab[word] = Vocab(index=word_id, count=vocab_size - word_id)
elif word in counts:
# use count from the vocab file
result.vocab[word] = Vocab(index=word_id, count=counts[word])
else:
# vocab file given, but word is missing -- set count to None (TODO: or raise?)
logger.warning("vocabulary file is incomplete: '%s' is missing", word)
result.vocab[word] = Vocab(index=word_id, count=None)
result.syn0[word_id] = weights
result.index2word.append(word)
if binary:
binary_len = dtype(REAL).itemsize * vector_size
for line_no in xrange(vocab_size):
# mixed text and binary: read text first, then binary
word = []
while True:
ch = fin.read(1)
if ch == b' ':
break
if ch == b'':
raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
if ch != b'\n': # ignore newlines in front of words (some binary files have)
word.append(ch)
word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)
weights = fromstring(fin.read(binary_len), dtype=REAL)
add_word(word, weights)
else:
for line_no in xrange(vocab_size):
line = fin.readline()
if line == b'':
raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
if len(parts) != vector_size + 1:
raise ValueError("invalid vector on line %s (is this really the text format?)" % (line_no))
word, weights = parts[0], list(map(REAL, parts[1:]))
add_word(word, weights)
if result.syn0.shape[0] != len(result.vocab):
logger.info(
"duplicate words detected, shrinking matrix size from %i to %i",
result.syn0.shape[0], len(result.vocab)
)
result.syn0 = ascontiguousarray(result.syn0[: len(result.vocab)])
assert (len(result.vocab), result.vector_size) == result.syn0.shape
logger.info("loaded %s matrix from %s" % (result.syn0.shape, fname))
return result
def intersect_word2vec_format(self, fname, lockf=0.0, binary=False, encoding='utf8', unicode_errors='strict'):
"""
Merge the input-hidden weight matrix from the original C word2vec-tool format
given, where it intersects with the current vocabulary. (No words are added to the
existing vocabulary, but intersecting words adopt the file's weights, and
non-intersecting words are left alone.)
`binary` is a boolean indicating whether the data is in binary word2vec format.
`lockf` is a lock-factor value to be set for any imported word-vectors; the
default value of 0.0 prevents further updating of the vector during subsequent
training. Use 1.0 to allow further training updates of merged vectors.
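Illustrative sketch (the path is hypothetical; the vocabulary must already be built)::
>>> model.build_vocab(sentences)
>>> model.intersect_word2vec_format('/tmp/vectors.bin', lockf=1.0, binary=True)
>>> model.train(sentences, total_examples=model.corpus_count)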
"""
overlap_count = 0
logger.info("loading projection weights from %s" % (fname))
with utils.smart_open(fname) as fin:
header = utils.to_unicode(fin.readline(), encoding=encoding)
vocab_size, vector_size = map(int, header.split()) # throws for invalid file format
if not vector_size == self.vector_size:
raise ValueError("incompatible vector size %d in file %s" % (vector_size, fname))
# TOCONSIDER: maybe mismatched vectors still useful enough to merge (truncating/padding)?
if binary:
binary_len = dtype(REAL).itemsize * vector_size
for line_no in xrange(vocab_size):
# mixed text and binary: read text first, then binary
word = []
while True:
ch = fin.read(1)
if ch == b' ':
break
if ch != b'\n': # ignore newlines in front of words (some binary files have)
word.append(ch)
word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)
weights = fromstring(fin.read(binary_len), dtype=REAL)
if word in self.vocab:
overlap_count += 1
self.syn0[self.vocab[word].index] = weights
self.syn0_lockf[self.vocab[word].index] = lockf # lock-factor: 0.0 stops further changes
else:
for line_no, line in enumerate(fin):
parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
if len(parts) != vector_size + 1:
raise ValueError("invalid vector on line %s (is this really the text format?)" % (line_no))
word, weights = parts[0], list(map(REAL, parts[1:]))
if word in self.vocab:
overlap_count += 1
self.syn0[self.vocab[word].index] = weights
logger.info("merged %d vectors into %s matrix from %s" % (overlap_count, self.syn0.shape, fname))
def most_similar(self, positive=[], negative=[], topn=10, restrict_vocab=None, indexer=None):
"""
Find the top-N most similar words. Positive words contribute positively towards the
similarity, negative words negatively.
This method computes cosine similarity between a simple mean of the projection
weight vectors of the given words and the vectors for each word in the model.
The method corresponds to the `word-analogy` and `distance` scripts in the original
word2vec implementation.
If topn is False, most_similar returns the vector of similarity scores.
`restrict_vocab` is an optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Example::
>>> trained_model.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
"""
self.init_sims()
if isinstance(positive, string_types) and not negative:
# allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
positive = [positive]
# add weights for each word, if not already present; default to 1.0 for positive and -1.0 for negative words
positive = [
(word, 1.0) if isinstance(word, string_types + (ndarray,)) else word
for word in positive
]
negative = [
(word, -1.0) if isinstance(word, string_types + (ndarray,)) else word
for word in negative
]
# compute the weighted average of all words
all_words, mean = set(), []
for word, weight in positive + negative:
if isinstance(word, ndarray):
mean.append(weight * word)
elif word in self.vocab:
mean.append(weight * self.syn0norm[self.vocab[word].index])
all_words.add(self.vocab[word].index)
else:
raise KeyError("word '%s' not in vocabulary" % word)
if not mean:
raise ValueError("cannot compute similarity with no input")
mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
if indexer is not None:
return indexer.most_similar(mean, topn)
limited = self.syn0norm if restrict_vocab is None else self.syn0norm[:restrict_vocab]
dists = dot(limited, mean)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
# ignore (don't return) words from the input
result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
return result[:topn]
def wmdistance(self, document1, document2):
"""
Compute the Word Mover's Distance between two documents. When using this
code, please consider citing the following papers:
.. Ofir Pele and Michael Werman, "A linear time histogram metric for improved SIFT matching".
.. Ofir Pele and Michael Werman, "Fast and robust earth mover's distances".
.. Matt Kusner et al. "From Word Embeddings To Document Distances".
Note that if one of the documents has no words that exist in the
Word2Vec vocab, `float('inf')` (i.e. infinity) will be returned.
This method only works if `pyemd` is installed (can be installed via pip, but requires a C compiler).
Example:
>>> # Train word2vec model.
>>> model = Word2Vec(sentences)
>>> # Some sentences to test.
>>> sentence_obama = 'Obama speaks to the media in Illinois'.lower().split()
>>> sentence_president = 'The president greets the press in Chicago'.lower().split()
>>> # Remove their stopwords.
>>> from nltk.corpus import stopwords
>>> stopwords = stopwords.words('english')
>>> sentence_obama = [w for w in sentence_obama if w not in stopwords]
>>> sentence_president = [w for w in sentence_president if w not in stopwords]
>>> # Compute WMD.
>>> distance = model.wmdistance(sentence_obama, sentence_president)
"""
if not PYEMD_EXT:
raise ImportError("Please install pyemd Python package to compute WMD.")
# Remove out-of-vocabulary words.
len_pre_oov1 = len(document1)
len_pre_oov2 = len(document2)
document1 = [token for token in document1 if token in self]
document2 = [token for token in document2 if token in self]
diff1 = len_pre_oov1 - len(document1)
diff2 = len_pre_oov2 - len(document2)
if diff1 > 0 or diff2 > 0:
logger.info('Removed %d and %d OOV words from document 1 and 2 (respectively).',
diff1, diff2)
if len(document1) == 0 or len(document2) == 0:
logger.info('At least one of the documents had no words that were '
'in the vocabulary. Aborting (returning inf).')
return float('inf')
dictionary = Dictionary(documents=[document1, document2])
vocab_len = len(dictionary)
# Sets for faster look-up.
docset1 = set(document1)
docset2 = set(document2)
# Compute distance matrix.
distance_matrix = zeros((vocab_len, vocab_len), dtype=double)
for i, t1 in dictionary.items():
for j, t2 in dictionary.items():
if t1 not in docset1 or t2 not in docset2:
continue
# Compute Euclidean distance between word vectors.
distance_matrix[i, j] = sqrt(np_sum((self[t1] - self[t2])**2))
if np_sum(distance_matrix) == 0.0:
# `emd` gets stuck if the distance matrix contains only zeros.
logger.info('The distance matrix is all zeros. Aborting (returning inf).')
return float('inf')
def nbow(document):
d = zeros(vocab_len, dtype=double)
nbow = dictionary.doc2bow(document) # Word frequencies.
doc_len = len(document)
for idx, freq in nbow:
d[idx] = freq / float(doc_len) # Normalized word frequencies.
return d
# Compute nBOW representation of documents.
d1 = nbow(document1)
d2 = nbow(document2)
# Compute WMD.
return emd(d1, d2, distance_matrix)
def most_similar_cosmul(self, positive=[], negative=[], topn=10):
"""
Find the top-N most similar words, using the multiplicative combination objective
proposed by Omer Levy and Yoav Goldberg in [4]_. Positive words still contribute
positively towards the similarity, negative words negatively, but with less
susceptibility to one large distance dominating the calculation.
In the common analogy-solving case, of two positive and one negative examples,
this method is equivalent to the "3CosMul" objective (equation (4)) of Levy and Goldberg.
Additional positive or negative examples contribute to the numerator or denominator,
respectively – a potentially sensible but untested extension of the method. (With
a single positive example, rankings will be the same as in the default most_similar.)
Example::
>>> trained_model.most_similar_cosmul(positive=['baghdad', 'england'], negative=['london'])
[(u'iraq', 0.8488819003105164), ...]
.. [4] Omer Levy and Yoav Goldberg. Linguistic Regularities in Sparse and Explicit Word Representations, 2014.
"""
self.init_sims()
if isinstance(positive, string_types) and not negative:
# allow calls like most_similar_cosmul('dog'), as a shorthand for most_similar_cosmul(['dog'])
positive = [positive]
all_words = set()
def word_vec(word):
if isinstance(word, ndarray):
return word
elif word in self.vocab:
all_words.add(self.vocab[word].index)
return self.syn0norm[self.vocab[word].index]
else:
raise KeyError("word '%s' not in vocabulary" % word)
positive = [word_vec(word) for word in positive]
negative = [word_vec(word) for word in negative]
if not positive:
raise ValueError("cannot compute similarity with no input")
# equation (4) of Levy & Goldberg "Linguistic Regularities...",
# with distances shifted to [0,1] per footnote (7)
pos_dists = [((1 + dot(self.syn0norm, term)) / 2) for term in positive]
neg_dists = [((1 + dot(self.syn0norm, term)) / 2) for term in negative]
dists = prod(pos_dists, axis=0) / (prod(neg_dists, axis=0) + 0.000001)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
# ignore (don't return) words from the input
result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
return result[:topn]
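# For reference, the objective implemented above for the classic analogy
# a : b :: c : ? is (with all cosines shifted into [0, 1]):
#
#     argmax_d  cos(d, b) * cos(d, c) / (cos(d, a) + 0.000001)
#
# A hedged usage sketch (the vocabulary words are illustrative only):
#
#     >>> model.most_similar_cosmul(positive=['woman', 'king'], negative=['man'], topn=3)
#     [('queen', ...), ...]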
def similar_by_word(self, word, topn=10, restrict_vocab=None):
"""
Find the top-N most similar words.
If topn is False, similar_by_word returns the vector of similarity scores.
`restrict_vocab` is an optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Example::
>>> trained_model.similar_by_word('graph')
[('user', 0.9999163150787354), ...]
"""
return self.most_similar(positive=[word], topn=topn, restrict_vocab=restrict_vocab)
def similar_by_vector(self, vector, topn=10, restrict_vocab=None):
"""
Find the top-N most similar words by vector.
If topn is False, similar_by_vector returns the vector of similarity scores.
`restrict_vocab` is an optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Example::
>>> trained_model.similar_by_vector([1,2])
[('survey', 0.9942699074745178), ...]
"""
return self.most_similar(positive=[vector], topn=topn, restrict_vocab=restrict_vocab)
def doesnt_match(self, words):
"""
Which word from the given list doesn't go with the others?
Example::
>>> trained_model.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
"""
self.init_sims()
words = [word for word in words if word in self.vocab] # filter out OOV words
logger.debug("using words %s" % words)
if not words:
raise ValueError("cannot select a word from an empty list")
vectors = vstack([self.syn0norm[self.vocab[word].index] for word in words]).astype(REAL)
mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL)
dists = dot(vectors, mean)
return sorted(zip(dists, words))[0][1]
def __getitem__(self, words):
"""
Accept a single word or a list of words as input.
If a single word: returns the word's representations in vector space, as
a 1D numpy array.
Multiple words: return the words' representations in vector space, as a
2d numpy array: #words x #vector_size. Matrix rows are in the same order
as in input.
Example::
>>> trained_model['office']
array([ -1.40128313e-02, ...])
>>> trained_model[['office', 'products']]
array([ -1.40128313e-02, ...]
[ -1.70425311e-03, ...]
...)
"""
if isinstance(words, string_types):
# allow calls like trained_model['office'], as a shorthand for trained_model[['office']]
return self.syn0[self.vocab[words].index]
return vstack([self.syn0[self.vocab[word].index] for word in words])
def __contains__(self, word):
return word in self.vocab
def similarity(self, w1, w2):
"""
Compute cosine similarity between two words.
Example::
>>> trained_model.similarity('woman', 'man')
0.73723527
>>> trained_model.similarity('woman', 'woman')
1.0
"""
return dot(matutils.unitvec(self[w1]), matutils.unitvec(self[w2]))
def n_similarity(self, ws1, ws2):
"""
Compute cosine similarity between two sets of words.
Example::
>>> trained_model.n_similarity(['sushi', 'shop'], ['japanese', 'restaurant'])
0.61540466561049689
>>> trained_model.n_similarity(['restaurant', 'japanese'], ['japanese', 'restaurant'])
1.0000000000000004
>>> trained_model.n_similarity(['sushi'], ['restaurant']) == trained_model.similarity('sushi', 'restaurant')
True
"""
if not(len(ws1) and len(ws2)):
raise ZeroDivisionError('At least one of the passed lists is empty.')
v1 = [self[word] for word in ws1]
v2 = [self[word] for word in ws2]
return dot(matutils.unitvec(array(v1).mean(axis=0)),
matutils.unitvec(array(v2).mean(axis=0)))
def init_sims(self, replace=False):
"""
Precompute L2-normalized vectors.
If `replace` is set, forget the original vectors and only keep the normalized
ones (saves lots of memory!).
Note that you **cannot continue training** after doing a replace. The model becomes
effectively read-only: you can call `most_similar`, `similarity` etc., but not `train`.
"""
if getattr(self, 'syn0norm', None) is None or replace:
logger.info("precomputing L2-norms of word weight vectors")
if replace:
for i in xrange(self.syn0.shape[0]):
self.syn0[i, :] /= sqrt((self.syn0[i, :] ** 2).sum(-1))
self.syn0norm = self.syn0
if hasattr(self, 'syn1'):
del self.syn1
else:
self.syn0norm = (self.syn0 / sqrt((self.syn0 ** 2).sum(-1))[..., newaxis]).astype(REAL)
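# The normalization above is a plain row-wise L2 scaling; a quick standalone
# sketch of the same expression (numbers chosen so the result is exact):
#
#     >>> import numpy as np
#     >>> syn0 = np.array([[3.0, 4.0]])
#     >>> syn0 / np.sqrt((syn0 ** 2).sum(-1))[..., np.newaxis]
#     array([[ 0.6,  0.8]])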
def estimate_memory(self, vocab_size=None, report=None):
"""Estimate required memory for a model using current settings and provided vocabulary size."""
vocab_size = vocab_size or len(self.vocab)
report = report or {}
report['vocab'] = vocab_size * (700 if self.hs else 500)
report['syn0'] = vocab_size * self.vector_size * dtype(REAL).itemsize
if self.hs:
report['syn1'] = vocab_size * self.layer1_size * dtype(REAL).itemsize
if self.negative:
report['syn1neg'] = vocab_size * self.layer1_size * dtype(REAL).itemsize
report['total'] = sum(report.values())
logger.info("estimated required memory for %i words and %i dimensions: %i bytes",
vocab_size, self.vector_size, report['total'])
return report
@staticmethod
def log_accuracy(section):
correct, incorrect = len(section['correct']), len(section['incorrect'])
if correct + incorrect > 0:
logger.info("%s: %.1f%% (%i/%i)" %
(section['section'], 100.0 * correct / (correct + incorrect),
correct, correct + incorrect))
def accuracy(self, questions, restrict_vocab=30000, most_similar=most_similar, case_insensitive=True):
"""
Compute accuracy of the model. `questions` is a filename where lines are
4-tuples of words, split into sections by ": SECTION NAME" lines.
See questions-words.txt in https://storage.googleapis.com/google-code-archive-source/v2/code.google.com/word2vec/source-archive.zip for an example.
The accuracy is reported (=printed to log and returned as a list) for each
section separately, plus there's one aggregate summary at the end.
Use `restrict_vocab` to ignore all questions containing a word not in the first `restrict_vocab`
words (default 30,000). This may be meaningful if you've sorted the vocabulary by descending frequency.
In case `case_insensitive` is True, the first `restrict_vocab` words are taken first, and then
case normalization is performed.
Use `case_insensitive` to convert all words in questions and vocab to their uppercase form before
evaluating the accuracy (default True). Useful in case of case-mismatch between training tokens
and question words. In case of multiple case variants of a single word, the vector for the first
occurrence (also the most frequent if vocabulary is sorted) is taken.
This method corresponds to the `compute-accuracy` script of the original C word2vec.
"""
ok_vocab = [(w, self.vocab[w]) for w in self.index2word[:restrict_vocab]]
ok_vocab = dict((w.upper(), v) for w, v in reversed(ok_vocab)) if case_insensitive else dict(ok_vocab)
sections, section = [], None
for line_no, line in enumerate(utils.smart_open(questions)):
# TODO: use level3 BLAS (=evaluate multiple questions at once), for speed
line = utils.to_unicode(line)
if line.startswith(': '):
# a new section starts => store the old section
if section:
sections.append(section)
self.log_accuracy(section)
section = {'section': line.lstrip(': ').strip(), 'correct': [], 'incorrect': []}
else:
if not section:
raise ValueError("missing section header before line #%i in %s" % (line_no, questions))
try:
if case_insensitive:
a, b, c, expected = [word.upper() for word in line.split()]
else:
a, b, c, expected = [word for word in line.split()]
except ValueError:
logger.info("skipping invalid line #%i in %s" % (line_no, questions))
continue
if a not in ok_vocab or b not in ok_vocab or c not in ok_vocab or expected not in ok_vocab:
logger.debug("skipping line #%i with OOV words: %s" % (line_no, line.strip()))
continue
original_vocab = self.vocab
self.vocab = ok_vocab
ignore = set([a, b, c]) # input words to be ignored
predicted = None
# find the most likely prediction, ignoring OOV words and input words
sims = most_similar(self, positive=[b, c], negative=[a], topn=False, restrict_vocab=restrict_vocab)
self.vocab = original_vocab
for index in matutils.argsort(sims, reverse=True):
predicted = self.index2word[index].upper() if case_insensitive else self.index2word[index]
if predicted in ok_vocab and predicted not in ignore:
if predicted != expected:
logger.debug("%s: expected %s, predicted %s", line.strip(), expected, predicted)
break
if predicted == expected:
section['correct'].append((a, b, c, expected))
else:
section['incorrect'].append((a, b, c, expected))
if section:
# store the last section, too
sections.append(section)
self.log_accuracy(section)
total = {
'section': 'total',
'correct': sum((s['correct'] for s in sections), []),
'incorrect': sum((s['incorrect'] for s in sections), []),
}
self.log_accuracy(total)
sections.append(total)
return sections
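# For reference, the expected `questions` file layout is plain text with section
# headers and four words per line (a hedged sketch; the lines below mirror the
# well-known questions-words.txt, and the local path is an assumption):
#
#     : capital-common-countries
#     Athens Greece Baghdad Iraq
#     Athens Greece Bangkok Thailand
#     : gram1-adjective-to-adverb
#     amazing amazingly calm calmly
#
#     >>> sections = model.accuracy('questions-words.txt')
#     >>> sections[-1]['section']    # the aggregate summary appended at the end
#     'total'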
def __str__(self):
return "%s(vocab=%s, size=%s, alpha=%s)" % (self.__class__.__name__, len(self.index2word), self.vector_size, self.alpha)
def save(self, *args, **kwargs):
# don't bother storing the cached normalized vectors, recalculable table
kwargs['ignore'] = kwargs.get('ignore', ['syn0norm', 'table', 'cum_table'])
super(Word2Vec, self).save(*args, **kwargs)
save.__doc__ = utils.SaveLoad.save.__doc__
@classmethod
def load(cls, *args, **kwargs):
model = super(Word2Vec, cls).load(*args, **kwargs)
# update older models
if hasattr(model, 'table'):
delattr(model, 'table') # discard in favor of cum_table
if model.negative and hasattr(model, 'index2word'):
model.make_cum_table() # rebuild cum_table from vocabulary
if not hasattr(model, 'corpus_count'):
model.corpus_count = None
for v in model.vocab.values():
if hasattr(v, 'sample_int'):
break # already 0.12.0+ style int probabilities
elif hasattr(v, 'sample_probability'):
v.sample_int = int(round(v.sample_probability * 2**32))
del v.sample_probability
if not hasattr(model, 'syn0_lockf') and hasattr(model, 'syn0'):
model.syn0_lockf = ones(len(model.syn0), dtype=REAL)
if not hasattr(model, 'random'):
model.random = random.RandomState(model.seed)
if not hasattr(model, 'train_count'):
model.train_count = 0
model.total_train_time = 0
return model
class BrownCorpus(object):
"""Iterate over sentences from the Brown corpus (part of NLTK data)."""
def __init__(self, dirname):
self.dirname = dirname
def __iter__(self):
for fname in os.listdir(self.dirname):
fname = os.path.join(self.dirname, fname)
if not os.path.isfile(fname):
continue
for line in utils.smart_open(fname):
line = utils.to_unicode(line)
# each file line is a single sentence in the Brown corpus
# each token is WORD/POS_TAG
token_tags = [t.split('/') for t in line.split() if len(t.split('/')) == 2]
# ignore words with non-alphabetic tags like ",", "!" etc (punctuation, weird stuff)
words = ["%s/%s" % (token.lower(), tag[:2]) for token, tag in token_tags if tag[:2].isalpha()]
if not words: # don't bother sending out empty sentences
continue
yield words
class Text8Corpus(object):
"""Iterate over sentences from the "text8" corpus, unzipped from http://mattmahoney.net/dc/text8.zip ."""
def __init__(self, fname, max_sentence_length=MAX_WORDS_IN_BATCH):
self.fname = fname
self.max_sentence_length = max_sentence_length
def __iter__(self):
# the entire corpus is one gigantic line -- there are no sentence marks at all
# so just split the sequence of tokens arbitrarily: 1 sentence = 1000 tokens
sentence, rest = [], b''
with utils.smart_open(self.fname) as fin:
while True:
text = rest + fin.read(8192) # avoid loading the entire file (=1 line) into RAM
if text == rest: # EOF
words = utils.to_unicode(text).split()
sentence.extend(words) # return the last chunk of words, too (may be shorter/longer)
if sentence:
yield sentence
break
last_token = text.rfind(b' ') # last token may have been split in two... keep for next iteration
words, rest = (utils.to_unicode(text[:last_token]).split(),
text[last_token:].strip()) if last_token >= 0 else ([], text)
sentence.extend(words)
while len(sentence) >= self.max_sentence_length:
yield sentence[:self.max_sentence_length]
sentence = sentence[self.max_sentence_length:]
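# A minimal usage sketch (hedged: assumes the unzipped "text8" file from the URL in
# the class docstring sits in the working directory):
#
#     >>> sentences = Text8Corpus('text8')
#     >>> model = Word2Vec(sentences, size=200, workers=4)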
class LineSentence(object):
"""
Simple format: one sentence = one line; words already preprocessed and separated by whitespace.
"""
def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):
"""
`source` can be either a string or a file object. Clip the file to the first
`limit` lines (or no clipping if limit is None, the default).
Example::
sentences = LineSentence('myfile.txt')
Or for compressed files::
sentences = LineSentence('compressed_text.txt.bz2')
sentences = LineSentence('compressed_text.txt.gz')
"""
self.source = source
self.max_sentence_length = max_sentence_length
self.limit = limit
def __iter__(self):
"""Iterate through the lines in the source."""
try:
# Assume it is a file-like object and try treating it as such
# Things that don't have seek will trigger an exception
self.source.seek(0)
for line in itertools.islice(self.source, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i : i + self.max_sentence_length]
i += self.max_sentence_length
except AttributeError:
# If it didn't work like a file, use it as a string filename
with utils.smart_open(self.source) as fin:
for line in itertools.islice(fin, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i : i + self.max_sentence_length]
i += self.max_sentence_length
# Example: ./word2vec.py -train data.txt -output vec.txt -size 200 -window 5 -sample 1e-4 -negative 5 -hs 0 -binary 0 -cbow 1 -iter 3
if __name__ == "__main__":
import argparse
logging.basicConfig(
format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s',
level=logging.INFO)
logging.info("running %s", " ".join(sys.argv))
logging.info("using optimization %s", FAST_VERSION)
# check and process cmdline input
program = os.path.basename(sys.argv[0])
if len(sys.argv) < 2:
print(globals()['__doc__'] % locals())
sys.exit(1)
from gensim.models.word2vec import Word2Vec # avoid referencing __main__ in pickle
seterr(all='raise') # don't ignore numpy errors
parser = argparse.ArgumentParser()
parser.add_argument("-train", help="Use text data from file TRAIN to train the model", required=True)
parser.add_argument("-output", help="Use file OUTPUT to save the resulting word vectors")
parser.add_argument("-window", help="Set max skip length WINDOW between words; default is 5", type=int, default=5)
parser.add_argument("-size", help="Set size of word vectors; default is 100", type=int, default=100)
parser.add_argument("-sample", help="Set threshold for occurrence of words. Those that appear with higher frequency in the training data will be randomly down-sampled; default is 1e-3, useful range is (0, 1e-5)", type=float, default=1e-3)
parser.add_argument("-hs", help="Use Hierarchical Softmax; default is 0 (not used)", type=int, default=0, choices=[0, 1])
parser.add_argument("-negative", help="Number of negative examples; default is 5, common values are 3 - 10 (0 = not used)", type=int, default=5)
parser.add_argument("-threads", help="Use THREADS threads (default 12)", type=int, default=12)
parser.add_argument("-iter", help="Run more training iterations (default 5)", type=int, default=5)
parser.add_argument("-min_count", help="This will discard words that appear less than MIN_COUNT times; default is 5", type=int, default=5)
parser.add_argument("-cbow", help="Use the continuous bag of words model; default is 1 (use 0 for skip-gram model)", type=int, default=1, choices=[0, 1])
parser.add_argument("-binary", help="Save the resulting vectors in binary mode; default is 0 (off)", type=int, default=0, choices=[0, 1])
parser.add_argument("-accuracy", help="Use questions from file ACCURACY to evaluate the model")
args = parser.parse_args()
if args.cbow == 0:
skipgram = 1
else:
skipgram = 0
corpus = LineSentence(args.train)
model = Word2Vec(
corpus, size=args.size, min_count=args.min_count, workers=args.threads,
window=args.window, sample=args.sample, sg=skipgram, hs=args.hs,
negative=args.negative, cbow_mean=1, iter=args.iter)
if args.output:
outfile = args.output
model.save_word2vec_format(outfile, binary=args.binary)
else:
outfile = args.train
model.save(outfile + '.model')
if args.binary == 1:
model.save_word2vec_format(outfile + '.model.bin', binary=True)
else:
model.save_word2vec_format(outfile + '.model.txt', binary=False)
if args.accuracy:
model.accuracy(args.accuracy)
logger.info("finished running %s", program)
|
wtgme/labeldoc2vec
|
build/lib.linux-x86_64-2.7/gensim/models/word2vec.py
|
Python
|
lgpl-2.1
| 92,365
|
[
"VisIt"
] |
ed070889c72e224773d97d0227c785e1f552302d568ca6c74a7331a7fea90145
|
"""A meta-component that allows a component to be optionally enabled
or disabled. This component is mostly for illustration and is not
used anywhere. This is because it is usually much easier to simply
add a trait in the module to enable/disable a particular component.
"""
# Author: Prabhu Ramachandran <[email protected]>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance, Bool, Str, Property
from traitsui.api import View, Group, Item
# Local imports.
from mayavi.core.component import Component
######################################################################
# `Optional` class.
######################################################################
class Optional(Component):
# The version of this class. Used for persistence.
__version__ = 0
# The outputs of this component is a property and not a list.
outputs = Property
# The component that is enabled or disabled.
component = Instance(Component)
# Is the component enabled or not.
enabled = Bool(True, desc='if the component is enabled')
# The label of the checkbox to use in the view.
label = Str
########################################
# The component's view
# This is defined outside the view so that the label may be easily
# changed.
enabled_item = Item(name='enabled')
view = View(Group(Group(enabled_item),
Group(Item(name='component', style='custom',
visible_when='object.enabled'),
show_labels=False)
)
)
######################################################################
# `Component` interface
######################################################################
def update_pipeline(self):
"""Override this method so that it *updates* the tvtk pipeline
when data upstream is known to have changed.
This method is invoked (automatically) when the input fires a
`pipeline_changed` event.
"""
comp = self.component
if self.inputs != comp.inputs:
comp.inputs = self.inputs
self.pipeline_changed = True
def update_data(self):
"""Override this method to do what is necessary when upstream
data changes.
This method is invoked (automatically) when any of the inputs
sends a `data_changed` event.
"""
self.data_changed = True
######################################################################
# `Base` interface
######################################################################
def start(self):
"""This is invoked when this object is added to the mayavi
pipeline. Note that when start is invoked, all the other
information for the pipeline should be already set.
"""
# Do nothing if we are already running.
if self.running:
return
super(Optional, self).start()
self.component.start()
def stop(self):
"""Invoked when this object is removed from the mayavi
pipeline.
"""
if not self.running:
return
self.component.stop()
super(Optional, self).stop()
######################################################################
# Non-public methods.
######################################################################
def _get_outputs(self):
if self.enabled:
return self.component.outputs
else:
return self.inputs[0].get_output_object()
def _enabled_changed(self, value):
# Force downstream modules to update.
self.pipeline_changed = True
def _label_changed(self, value):
# Change the displayed label for the enable trait in the view.
item = self.trait_view_elements().content['enabled_item']
item.label = value
def _component_changed(self, old, new):
if old is not None:
old.on_trait_change(self._fire_pipeline_changed,
'pipeline_changed', remove=True)
old.on_trait_change(self._fire_data_changed,
'data_changed', remove=True)
new.on_trait_change(self._fire_pipeline_changed, 'pipeline_changed')
new.on_trait_change(self._fire_data_changed, 'data_changed')
def _fire_pipeline_changed(self):
self.pipeline_changed = True
def _fire_data_changed(self):
self.data_changed = True
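# A minimal usage sketch for the class above (hedged: `some_component` stands for any
# existing mayavi Component instance already wired into a pipeline; creating and
# attaching one is out of scope here):
#
#     opt = Optional(component=some_component, label='Enable component')
#     opt.enabled = False   # outputs fall back to the unmodified input; downstream updates
#     opt.enabled = True    # re-enable; fires pipeline_changed so downstream modules refresh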
|
dmsurti/mayavi
|
mayavi/components/optional.py
|
Python
|
bsd-3-clause
| 4,594
|
[
"Mayavi"
] |
fb943d6bed6685e3ff83aa2831194933a38f995a58356c800fa8953731213cdd
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
cc_plugin_ncei/ncei_timeseries_profile.py
'''
from compliance_checker.base import BaseCheck
from cc_plugin_ncei.ncei_base import TestCtx, NCEI1_1Check, NCEI2_0Check
from cc_plugin_ncei import util
from isodate import parse_duration
class NCEITimeSeriesProfileOrthogonalBase(BaseCheck):
_cc_spec = 'ncei-timeseries-profile-orthogonal'
valid_feature_types = [
'timeseries',
'timeseries_id',
'timeSeriesProfile'
]
def check_dimensions(self, dataset):
'''
Checks that the feature types of this dataset are consistent with a timeseries-profile-orthogonal dataset.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'All geophysical variables are timeseries-profile-orthogonal feature types')
message = '{} must be a valid timeseries-profile-orthogonal feature type. It must have dimensions of (station, time, z).'
message += ' If it\'s a single station, it must have dimensions (time, z). x and y dimensions must be scalar or have'
message += ' dimensions (station). time must be a coordinate variable with dimension (time) and z must be a'
message += ' coordinate variable with dimension (z).'
for variable in util.get_geophysical_variables(dataset):
is_valid = util.is_timeseries_profile_single_station(dataset, variable)
is_valid = is_valid or util.is_timeseries_profile_multi_station(dataset, variable)
required_ctx.assert_true(
is_valid,
message.format(variable)
)
results.append(required_ctx.to_result())
return results
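# For reference, a conforming multi-station layout looks roughly like this
# (a hedged sketch built directly with netCDF4; the variable names are illustrative only):
#
#     >>> from netCDF4 import Dataset
#     >>> nc = Dataset('example.nc', 'w')
#     >>> for name, size in [('station', 2), ('time', 100), ('z', 10)]:
#     ...     dim = nc.createDimension(name, size)
#     >>> time = nc.createVariable('time', 'f8', ('time',))   # coordinate variable
#     >>> z = nc.createVariable('z', 'f8', ('z',))            # coordinate variable
#     >>> lon = nc.createVariable('lon', 'f8', ('station',))
#     >>> lat = nc.createVariable('lat', 'f8', ('station',))
#     >>> temp = nc.createVariable('temperature', 'f4', ('station', 'time', 'z'))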
def check_timeseries_id(self, dataset):
'''
Checks that if a variable exists for the timeseries id it has the appropriate attributes
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
exists_ctx = TestCtx(BaseCheck.MEDIUM, 'Variable defining "timeseries_id" exists')
timeseries_ids = dataset.get_variables_by_attributes(cf_role='timeseries_id')
# No need to check
exists_ctx.assert_true(timeseries_ids, 'variable defining cf_role="timeseries_id" exists')
if not timeseries_ids:
return exists_ctx.to_result()
results.append(exists_ctx.to_result())
test_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended attributes for the {} variable'.format(timeseries_ids[0].name))
test_ctx.assert_true(
getattr(timeseries_ids[0], 'long_name', '') != "",
"long_name attribute should exist and not be empty"
)
results.append(test_ctx.to_result())
return results
class NCEITimeSeriesProfileOrthogonal1_1(NCEI1_1Check, NCEITimeSeriesProfileOrthogonalBase):
register_checker = True
_cc_spec_version = '1.1'
_cc_description = (
'This test checks the selected file against the NCEI netCDF timeSeriesProfile Orthogonal '
'Time and Depth template version 1.1 (found at https://www.nodc.noaa.gov/data/formats/'
'netcdf/v1.1/timeSeriesProfileOrthoVOrthoT.cdl). The NCEI version 1.1 templates are based '
'on “feature types”, as identified by Unidata and CF, and conform to ACDD version 1.0 and '
'CF version 1.6. You can find more information about the version 1.1 templates at '
'https://www.nodc.noaa.gov/data/formats/netcdf/v1.1/. This test is specifically for the '
'timeSeriesProfile feature type in an Orthogonal time and depth multidimensional array '
'representation. This representation is typically used for a series of profile features at'
' the same horizontal position with monotonically increasing time and all instruments are '
'at the same depths and measuring at the same points in time.')
_cc_url = 'http://www.nodc.noaa.gov/data/formats/netcdf/v1.1/timeSeriesOrthogonal.cdl'
_cc_authors = 'Luke Campbell, Dan Maher'
_cc_checker_version = '2.1.0'
valid_templates = [
"NODC_NetCDF_TimeSeriesProfile_Orthogonal_Template_v1.1",
]
@classmethod
def beliefs(cls):
'''
Not applicable for gliders
'''
return {}
def check_required_attributes(self, dataset):
'''
Feature type specific check of global required and highly recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'Required Global Attributes for Timeseries Profile orthogonal dataset')
required_ctx.assert_true(
getattr(dataset, 'nodc_template_version', '').lower() == self.valid_templates[0].lower(),
'nodc_template_version attribute must be {}'.format(self.valid_templates[0])
)
required_ctx.assert_true(
getattr(dataset, 'cdm_data_type', '') == 'Station',
'cdm_data_type attribute must be set to Station'
)
required_ctx.assert_true(
getattr(dataset, 'featureType', '') == 'timeSeriesProfile',
'featureType attribute must be set to timeSeriesProfile'
)
results.append(required_ctx.to_result())
return results
class NCEITimeSeriesProfileOrthogonal2_0(NCEI2_0Check, NCEITimeSeriesProfileOrthogonalBase):
register_checker = True
_cc_spec_version = '2.0'
_cc_description = (
'This test checks the selected file against the NCEI netCDF timeSeriesProfile Orthogonal '
'Time and Depth template version 2.0 (found at https://www.nodc.noaa.gov/data/formats/'
'netcdf/v2.0/timeSeriesProfileOrthoVOrthoT.cdl). The NCEI version 2.0 templates are based '
'on “feature types”, as identified by Unidata and CF, and conform to ACDD version 1.3 and '
'CF version 1.6. You can find more information about the version 2.0 templates at '
'https://www.nodc.noaa.gov/data/formats/netcdf/v2.0/. This test is specifically for the '
'timeSeriesProfile feature type in an Orthogonal time and depth multidimensional array '
'representation. This representation is typically used for a series of profile features at'
' the same horizontal position with monotonically increasing time and all instruments are '
'at the same depths and measuring at the same points in time.')
_cc_url = 'http://www.nodc.noaa.gov/data/formats/netcdf/v2.0/timeSeriesOrthogonal.cdl'
_cc_authors = 'Luke Campbell, Dan Maher'
_cc_checker_version = '2.3.0'
valid_templates = [
"NCEI_NetCDF_TimeSeriesProfile_Orthogonal_Template_v2.0",
]
def check_required_attributes(self, dataset):
'''
Feature type specific check of global required and highly recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'Required Global Attributes for Timeseries Profile orthogonal dataset')
required_ctx.assert_true(
getattr(dataset, 'ncei_template_version', '').lower() == self.valid_templates[0].lower(),
'ncei_template_version attribute must be {}'.format(self.valid_templates[0])
)
required_ctx.assert_true(
getattr(dataset, 'cdm_data_type', '') == 'Station',
'cdm_data_type attribute must be set to Station'
)
required_ctx.assert_true(
getattr(dataset, 'featureType', '') == 'timeSeriesProfile',
'featureType attribute must be set to timeSeriesProfile'
)
results.append(required_ctx.to_result())
return results
def check_recommended_attributes(self, dataset):
'''
Feature type specific check of global recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
recommended_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended global attributes')
# Check time_coverage_duration and resolution
for attr in ['time_coverage_duration', 'time_coverage_resolution']:
attr_value = getattr(dataset, attr, '')
try:
parse_duration(attr_value)
recommended_ctx.assert_true(True, '') # Score it True!
except Exception:
recommended_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'.format(attr, attr_value))
results.append(recommended_ctx.to_result())
return results
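# For reference, the loop above accepts any string that isodate can parse as an
# ISO-8601 duration and scores everything else False (a quick hedged sketch):
#
#     >>> from isodate import parse_duration
#     >>> parse_duration('PT1M30S')      # 1 minute 30 seconds -> passes the check
#     >>> parse_duration('90 seconds')   # not ISO-8601 -> raises, scored False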
class NCEITimeSeriesProfileOrthTimeIncompleteDepthBase(BaseCheck):
_cc_spec = 'ncei-timeseries-profile-orthtime-incompletedepth'
valid_feature_types = [
'timeSeries',
'timeseries_id',
'timeSeriesProfile',
'timeseriesprofile_id'
]
def check_dimensions(self, dataset):
'''
Checks that the feature types of this dataset are consistent with a timeseries-profile-orthogonal dataset.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'All geophysical variables are timeseries-profile-ortho-time-incomplete-depth feature types')
message = '{} must be a valid timeseries-profile-ortho-time-incomplete-depth feature type.'
message += ' If it\'s multiple stations, it must have dimensions (station, time, z).'
message += ' If it\'s a single station, it must have dimensions (time, z). x and y dimensions must be scalar or have'
message += ' dimensions (station). time must be a coordinate variable with dimension (time) and z must'
message += ' have dimensions (time, z) or (station, time, z) if it\'s a multi-station dataset.'
for variable in util.get_geophysical_variables(dataset):
is_valid = util.is_timeseries_profile_single_ortho_time(dataset, variable)
is_valid = is_valid or util.is_timeseries_profile_multi_ortho_time(dataset, variable)
required_ctx.assert_true(
is_valid,
message.format(variable)
)
results.append(required_ctx.to_result())
return results
def check_timeseries_id(self, dataset):
'''
Checks that if a variable exists for the timeseries id it has the appropriate attributes
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
exists_ctx = TestCtx(BaseCheck.MEDIUM, 'Variable defining "timeseries_id" exists')
timeseries_ids = dataset.get_variables_by_attributes(cf_role='timeseries_id')
# No need to check
exists_ctx.assert_true(timeseries_ids, 'variable defining cf_role="timeseries_id" exists')
if not timeseries_ids:
return exists_ctx.to_result()
results.append(exists_ctx.to_result())
test_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended attributes for the {} variable'.format(timeseries_ids[0].name))
test_ctx.assert_true(
getattr(timeseries_ids[0], 'long_name', '') != "",
"long_name attribute should exist and not be empty"
)
results.append(test_ctx.to_result())
return results
class NCEITimeSeriesProfileOrthTimeIncompleteDepth1_1(NCEI1_1Check, NCEITimeSeriesProfileOrthTimeIncompleteDepthBase):
register_checker = True
_cc_spec_version = '1.1'
_cc_description = (
'This test checks the selected file against the NCEI netCDF timeSeriesProfile Orthogonal '
'Time and Incomplete Depth template version 1.1 (found at https://www.nodc.noaa.gov/data/'
'formats/netcdf/v1.1/timeSeriesProfileIncomVOrthoT.cdl). The NCEI version 1.1 templates '
'are based on “feature types”, as identified by Unidata and CF, and conform to ACDD '
'version 1.0 and CF version 1.6. You can find more information about the version 1.1 '
'templates at https://www.nodc.noaa.gov/data/formats/netcdf/v1.1/. This test is '
'specifically for the timeSeriesProfile feature type in an Orthogonal time and Incomplete '
'depth multidimensional array representation. This representation is typically used for a '
'series of profile features at the same horizontal position with monotonically increasing '
'time and the stationary instruments measure at different depths but at the same points '
'in time.')
_cc_url = 'http://www.nodc.noaa.gov/data/formats/netcdf/v1.1/timeSeriesIncomplete.cdl'
_cc_authors = 'Luke Campbell, Dan Maher'
_cc_checker_version = '2.1.0'
valid_templates = [
"NODC_NetCDF_TimeSeriesProfile_IncompleteVertical_OrthogonalTemporal_Template_v1.1"
]
@classmethod
def beliefs(cls):
'''
Not applicable for gliders
'''
return {}
def check_required_attributes(self, dataset):
'''
Feature type specific check of global required and highly recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'Required Global Attributes for Timeseries Profile orthogonal dataset')
required_ctx.assert_true(
getattr(dataset, 'nodc_template_version', '').lower() == self.valid_templates[0].lower(),
'nodc_template_version attribute must be {}'.format(self.valid_templates[0])
)
required_ctx.assert_true(
getattr(dataset, 'cdm_data_type', '') == 'Station',
'cdm_data_type attribute must be set to Station'
)
required_ctx.assert_true(
getattr(dataset, 'featureType', '') == 'timeSeriesProfile',
'featureType attribute must be set to timeSeriesProfile'
)
results.append(required_ctx.to_result())
return results
class NCEITimeSeriesProfileOrthTimeIncompleteDepth2_0(NCEI2_0Check, NCEITimeSeriesProfileOrthTimeIncompleteDepthBase):
register_checker = True
_cc_spec_version = '2.0'
_cc_description = (
'This test checks the selected file against the NCEI netCDF timeSeriesProfile Orthogonal '
'Time and Incomplete Depth template version 2.0 (found at https://www.nodc.noaa.gov/data/'
'formats/netcdf/v2.0/timeSeriesProfileIncomVOrthoT.cdl). The NCEI version 2.0 templates '
'are based on “feature types”, as identified by Unidata and CF, and conform to ACDD '
'version 1.3 and CF version 1.6. You can find more information about the version 2.0 '
'templates at https://www.nodc.noaa.gov/data/formats/netcdf/v2.0/. This test is '
'specifically for the timeSeriesProfile feature type in an Orthogonal time and Incomplete '
'depth multidimensional array representation. This representation is typically used for a '
'series of profile features at the same horizontal position with monotonically increasing '
'time and the stationary instruments measure at different depths but at the same points '
'in time.')
_cc_url = 'http://www.nodc.noaa.gov/data/formats/netcdf/v2.0/timeSeriesIncomplete.cdl'
_cc_authors = 'Luke Campbell, Dan Maher'
_cc_checker_version = '2.3.0'
valid_templates = [
"NCEI_NetCDF_TimeSeriesProfile_IncompleteVertical_OrthogonalTemporal_Template_v2.0"
]
def check_required_attributes(self, dataset):
'''
Feature type specific check of global required and highly recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'Required Global Attributes for Timeseries Profile orthogonal dataset')
required_ctx.assert_true(
getattr(dataset, 'ncei_template_version', '').lower() == self.valid_templates[0].lower(),
'ncei_template_version attribute must be {}'.format(self.valid_templates[0])
)
required_ctx.assert_true(
getattr(dataset, 'cdm_data_type', '') == 'Station',
'cdm_data_type attribute must be set to Station'
)
required_ctx.assert_true(
getattr(dataset, 'featureType', '') == 'timeSeriesProfile',
'featureType attribute must be set to timeSeriesProfile'
)
results.append(required_ctx.to_result())
return results
def check_recommended_attributes(self, dataset):
'''
Feature type specific check of global recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
recommended_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended global attributes')
# Check time_coverage_duration and resolution
for attr in ['time_coverage_duration', 'time_coverage_resolution']:
attr_value = getattr(dataset, attr, '')
try:
parse_duration(attr_value)
recommended_ctx.assert_true(True, '') # Score it True!
except Exception:
recommended_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'.format(attr, attr_value))
results.append(recommended_ctx.to_result())
return results
class NCEITimeSeriesProfileIncompleteBase(BaseCheck):
_cc_spec = 'ncei-timeseries-profile-incomplete'
valid_feature_types = [
'timeSeries',
'timeseries_id',
'timeSeriesProfile',
'timeseriesprofile_id'
]
def check_dimensions(self, dataset):
'''
Checks that the feature types of this dataset are consistent with a timeseries-profile-incomplete dataset.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'All geophysical variables are timeseries-profile-incomplete feature types')
message = '{} must be a valid timeseries-profile-incomplete feature type.'
message += ' It must have dimensions (station, nTimeMax, zMax). x and y must have dimensions (station).'
message += ' time must have dimensions (station, nTimeMax). And z must have dimensions (station, nTimeMax, zMax).'
for variable in util.get_geophysical_variables(dataset):
is_valid = util.is_timeseries_profile_incomplete(dataset, variable)
required_ctx.assert_true(
is_valid,
message.format(variable)
)
results.append(required_ctx.to_result())
return results
def check_timeseries_id(self, dataset):
'''
Checks that if a variable exists for the timeseries id it has the appropriate attributes
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
exists_ctx = TestCtx(BaseCheck.MEDIUM, 'Variable defining "timeseries_id" exists')
timeseries_ids = dataset.get_variables_by_attributes(cf_role='timeseries_id')
# No need to check
exists_ctx.assert_true(timeseries_ids, 'variable defining cf_role="timeseries_id" exists')
if not timeseries_ids:
return exists_ctx.to_result()
results.append(exists_ctx.to_result())
test_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended attributes for the {} variable'.format(timeseries_ids[0].name))
test_ctx.assert_true(
getattr(timeseries_ids[0], 'long_name', '') != "",
"long_name attribute should exist and not be empty"
)
results.append(test_ctx.to_result())
return results
class NCEITimeSeriesProfileIncomplete1_1(NCEI1_1Check, NCEITimeSeriesProfileIncompleteBase):
register_checker = True
_cc_spec_version = '1.1'
_cc_description = (
'These templates are intended as a service to our community of Data Producers, and are '
'also being used internally at NCEI in our own data development efforts. We hope the '
'templates will serve as good starting points for Data Producers who wish to create '
'preservable, discoverable, accessible, and interoperable data. It is important to note '
'that these templates do not represent an attempt to create a new standard, and they are '
'not absolutely required for archiving data at NCEI. However, we do hope that you will '
'see the benefits in structuring your data following these conventions and NCEI stands '
'ready to assist you in doing so.')
_cc_url = 'http://www.nodc.noaa.gov/data/formats/netcdf/v1.1/timeSeriesIncomplete.cdl'
_cc_authors = 'Luke Campbell, Dan Maher'
_cc_checker_version = '2.1.0'
valid_templates = [
"NODC_NetCDF_TimeSeriesProfile_Incomplete_Template_v1.1"
]
@classmethod
def beliefs(cls):
'''
Not applicable for gliders
'''
return {}
def check_required_attributes(self, dataset):
'''
Feature type specific check of global required and highly recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'Required Global Attributes for Timeseries Profile Incomplete dataset')
required_ctx.assert_true(
getattr(dataset, 'nodc_template_version', '').lower() == self.valid_templates[0].lower(),
'nodc_template_version attribute must be {}'.format(self.valid_templates[0])
)
required_ctx.assert_true(
getattr(dataset, 'cdm_data_type', '') == 'Station',
'cdm_data_type attribute must be set to Station'
)
required_ctx.assert_true(
getattr(dataset, 'featureType', '') == 'timeSeriesProfile',
'featureType attribute must be set to timeSeriesProfile'
)
results.append(required_ctx.to_result())
return results
class NCEITimeSeriesProfileIncomplete2_0(NCEI2_0Check, NCEITimeSeriesProfileIncompleteBase):
register_checker = True
_cc_spec_version = '2.0'
_cc_description = (
'These templates are intended as a service to our community of Data Producers, and are '
'also being used internally at NCEI in our own data development efforts. We hope the '
'templates will serve as good starting points for Data Producers who wish to create '
'preservable, discoverable, accessible, and interoperable data. It is important to note '
'that these templates do not represent an attempt to create a new standard, and they are '
'not absolutely required for archiving data at NCEI. However, we do hope that you will '
'see the benefits in structuring your data following these conventions and NCEI stands '
'ready to assist you in doing so.')
_cc_url = 'http://www.nodc.noaa.gov/data/formats/netcdf/v2.0/timeSeriesIncomplete.cdl'
_cc_authors = 'Luke Campbell, Dan Maher'
_cc_checker_version = '2.3.0'
valid_templates = [
"NCEI_NetCDF_TimeSeriesProfile_Incomplete_Template_v2.0"
]
def check_required_attributes(self, dataset):
'''
Feature type specific check of global required and highly recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'Required Global Attributes for Timeseries Profile Incomplete dataset')
required_ctx.assert_true(
getattr(dataset, 'ncei_template_version', '').lower() == self.valid_templates[0].lower(),
'ncei_template_version attribute must be {}'.format(self.valid_templates[0])
)
required_ctx.assert_true(
getattr(dataset, 'cdm_data_type', '') == 'Station',
'cdm_data_type attribute must be set to Station'
)
required_ctx.assert_true(
getattr(dataset, 'featureType', '') == 'timeSeriesProfile',
'featureType attribute must be set to timeSeriesProfile'
)
results.append(required_ctx.to_result())
return results
def check_recommended_attributes(self, dataset):
'''
Feature type specific check of global recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
recommended_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended global attributes')
# Check time_coverage_duration and resolution
for attr in ['time_coverage_duration', 'time_coverage_resolution']:
attr_value = getattr(dataset, attr, '')
try:
parse_duration(attr_value)
recommended_ctx.assert_true(True, '') # Score it True!
except Exception:
recommended_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'.format(attr, attr_value))
results.append(recommended_ctx.to_result())
return results
class NCEITimeSeriesProfileIncompleteTimeOrthDepthBase(BaseCheck):
_cc_spec = 'ncei-timeseries-profile-incompletetime-orthdepth'
valid_feature_types = [
'timeSeries',
'timeseries_id',
'timeSeriesProfile',
'timeseriesprofile_id'
]
def check_dimensions(self, dataset):
'''
Checks that the feature types of this dataset are consistent with a timeseries-profile-orthogonal dataset.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'All geophysical variables are timeseries-profile-ortho-depth-incomplete-time feature types')
message = '{} must be a valid timeseries-profile-ortho-depth-incomplete-time feature type.'
message += ' It must have dimensions (station, time, z). x and y must have dimensions (station).'
message += ' time must have dimensions (station, time). And z must be a coordinate variable with'
message += ' dimension (z).'
for variable in util.get_geophysical_variables(dataset):
is_valid = util.is_timeseries_profile_ortho_depth(dataset, variable)
required_ctx.assert_true(
is_valid,
message.format(variable)
)
results.append(required_ctx.to_result())
return results
def check_timeseries_id(self, dataset):
'''
Checks that if a variable exists for the timeseries id it has the appropriate attributes
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
exists_ctx = TestCtx(BaseCheck.MEDIUM, 'Variable defining "timeseries_id" exists')
timeseries_ids = dataset.get_variables_by_attributes(cf_role='timeseries_id')
# No need to check
exists_ctx.assert_true(timeseries_ids, 'variable defining cf_role="timeseries_id" exists')
if not timeseries_ids:
return exists_ctx.to_result()
results.append(exists_ctx.to_result())
test_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended attributes for the {} variable'.format(timeseries_ids[0].name))
test_ctx.assert_true(
getattr(timeseries_ids[0], 'long_name', '') != "",
"long_name attribute should exist and not be empty"
)
results.append(test_ctx.to_result())
return results
class NCEITimeSeriesProfileIncompleteTimeOrthDepth1_1(NCEI1_1Check, NCEITimeSeriesProfileIncompleteTimeOrthDepthBase):
register_checker = True
_cc_spec_version = '1.1'
_cc_description = (
'These templates are intended as a service to our community of Data Producers, and are '
'also being used internally at NCEI in our own data development efforts. We hope the '
'templates will serve as good starting points for Data Producers who wish to create '
'preservable, discoverable, accessible, and interoperable data. It is important to note '
'that these templates do not represent an attempt to create a new standard, and they are '
'not absolutely required for archiving data at NCEI. However, we do hope that you will '
'see the benefits in structuring your data following these conventions and NCEI stands '
'ready to assist you in doing so.')
_cc_url = 'http://www.nodc.noaa.gov/data/formats/netcdf/v1.1/timeSeriesIncomplete.cdl'
_cc_authors = 'Luke Campbell, Dan Maher'
_cc_checker_version = '2.1.0'
valid_templates = [
"NODC_NetCDF_TimeSeriesProfile_OrthogonalVertical_IncompleteTemporal_Template_v1.1"
]
@classmethod
def beliefs(cls):
'''
Not applicable for gliders
'''
return {}
def check_required_attributes(self, dataset):
'''
Feature type specific check of global required and highly recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'Required Global Attributes for Timeseries Profile Incomplete Time and Depth')
required_ctx.assert_true(
getattr(dataset, 'nodc_template_version', '') == self.valid_templates[0],
'nodc_template_version attribute must be {}'.format(self.valid_templates[0])
)
required_ctx.assert_true(
getattr(dataset, 'cdm_data_type', '') == 'Station',
'cdm_data_type attribute must be set to Station'
)
required_ctx.assert_true(
getattr(dataset, 'featureType', '') == 'timeSeriesProfile',
'featureType attribute must be set to timeSeriesProfile'
)
results.append(required_ctx.to_result())
return results
class NCEITimeSeriesProfileIncompleteTimeOrthDepth2_0(NCEI2_0Check, NCEITimeSeriesProfileIncompleteTimeOrthDepthBase):
register_checker = True
_cc_spec_version = '2.0'
_cc_description = (
'These templates are intended as a service to our community of Data Producers, and are '
'also being used internally at NCEI in our own data development efforts. We hope the '
'templates will serve as good starting points for Data Producers who wish to create '
'preservable, discoverable, accessible, and interoperable data. It is important to note '
'that these templates do not represent an attempt to create a new standard, and they are '
'not absolutely required for archiving data at NCEI. However, we do hope that you will '
'see the benefits in structuring your data following these conventions and NCEI stands '
'ready to assist you in doing so.')
_cc_url = 'http://www.nodc.noaa.gov/data/formats/netcdf/v2.0/timeSeriesIncomplete.cdl'
_cc_authors = 'Luke Campbell, Dan Maher'
_cc_checker_version = '2.3.0'
valid_templates = [
"NCEI_NetCDF_TimeSeriesProfile_OrthogonalVertical_IncompleteTemporal_Template_v2.0"
]
def check_required_attributes(self, dataset):
'''
Feature type specific check of global required and highly recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'Required Global Attributes for Timeseries Profile Incomplete Time and Depth')
required_ctx.assert_true(
getattr(dataset, 'ncei_template_version', '') == self.valid_templates[0],
'ncei_template_version attribute must be {}'.format(self.valid_templates[0])
)
required_ctx.assert_true(
getattr(dataset, 'cdm_data_type', '') == 'Station',
'cdm_data_type attribute must be set to Station'
)
required_ctx.assert_true(
getattr(dataset, 'featureType', '') == 'timeSeriesProfile',
'featureType attribute must be set to timeSeriesProfile'
)
results.append(required_ctx.to_result())
return results
def check_recommended_attributes(self, dataset):
'''
Feature type specific check of global recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
recommended_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended global attributes')
# Check time_coverage_duration and resolution
for attr in ['time_coverage_duration', 'time_coverage_resolution']:
attr_value = getattr(dataset, attr, '')
try:
parse_duration(attr_value)
recommended_ctx.assert_true(True, '') # Score it True!
except Exception:
recommended_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'.format(attr, attr_value))
results.append(recommended_ctx.to_result())
return results
|
ioos/cc-plugin-ncei
|
cc_plugin_ncei/ncei_timeseries_profile.py
|
Python
|
apache-2.0
| 32,990
|
[
"NetCDF"
] |
b3cd526f2e2ecf21df1f548cc02e0c459893618c7e4861042bf6b42347d96fa0
|
"""
EvMenu
This implements a full menu system for Evennia. It is considerably
more flexible than the older contrib/menusystem.py and also uses
menu plugin modules.
To start the menu, just import the EvMenu class from this module.
Example usage:
```python
from evennia.utils.evmenu import EvMenu
EvMenu(caller, menu_module_path,
startnode="node1",
cmdset_mergetype="Replace", cmdset_priority=1,
auto_quit=True, cmd_on_exit="look", persistent=True)
```
Where `caller` is the Object to use the menu on - it will get a new
cmdset while using the Menu. The menu_module_path is the python path
to a python module containing function definitions. By adjusting the
keyword options of the Menu() initialization call you can start the
menu at different places in the menu definition file, adjust if the
menu command should overload the normal commands or not, etc.
The `persistent` keyword will make the menu survive a server reboot.
It is `False` by default. Note that if using persistent mode, every
node and callback in the menu must be possible to be *pickled*, this
excludes e.g. callables that are class methods or functions defined
dynamically or as part of another function. In non-persistent mode
no such restrictions exist.
The menu is defined in a module (this can be the same module as the
command definition too) with function definitions:
```python
def node1(caller):
# (this is the start node if called like above)
# code
return text, options
def node_with_other_name(caller, input_string):
# code
return text, options
```
Where caller is the object using the menu and input_string is the
command entered by the user on the *previous* node (the command
entered to get to this node). The node function code will only be
executed once per node-visit and the system will accept nodes with
either one or two arguments interchangeably.
The menu tree itself is available on the caller as
`caller.ndb._menutree`. This makes it a convenient place to store
temporary state variables between nodes, since this NAttribute is
deleted when the menu is exited.
The return values must be given in the above order, but each can be
returned as None as well. If the options are returned as None, the
menu is immediately exited and the default "look" command is called.
text (str, tuple or None): Text shown at this node. If a tuple, the
second element in the tuple is a help text to display at this
node when the user enters the menu help command there.
options (tuple, dict or None): (
{'key': name, # can also be a list of aliases. A special key is
# "_default", which marks this option as the default
# fallback when no other option matches the user input.
'desc': description, # optional description
'goto': nodekey, # node to go to when chosen
'exec': nodekey}, # node or callback to trigger as callback when chosen.
# If a node key is given, the node will be executed once
# but its return values are ignored. If a callable is
# given, it must accept one or two args, like any node.
{...}, ...)
If key is not given, the option will automatically be identified by
its number 1..N.
Example:
```python
# in menu_module.py
def node1(caller):
text = ("This is a node text",
"This is help text for this node")
options = ({"key": "testing",
"desc": "Select this to go to node 2",
"goto": "node2",
"exec": "callback1"},
{"desc": "Go to node 3.",
"goto": "node3"})
return text, options
def callback1(caller):
# this is called when choosing the "testing" option in node1
# (before going to node2). It needs not have return values.
caller.msg("Callback called!")
def node2(caller):
text = '''
This is node 2. It only allows you to go back
to the original node1. This extra indent will
be stripped. We don't include a help text.
'''
options = {"goto": "node1"}
return text, options
def node3(caller):
text = "This ends the menu since there are no options."
return text, None
```
When starting this menu with `Menu(caller, "path.to.menu_module")`,
the first node will look something like this:
This is a node text
______________________________________
testing: Select this to go to node 2
2: Go to node 3
Where you can both enter "testing" and "1" to select the first option.
If the client supports MXP, they may also mouse-click on "testing" to
do the same. When making this selection, a function "callback1" in the
same module will be called (before moving on to node2).
Using `help` will show the help text, otherwise a list of
available commands while in menu mode.
The menu tree is exited either by using the in-menu quit command or by
reaching a node without any options.
For a menu demo, import CmdTestMenu from this module and add it to
your default cmdset. Run it with this module, like `testmenu
evennia.utils.evmenu`.
"""
from __future__ import print_function
from builtins import object, range
from textwrap import dedent
from inspect import isfunction, getargspec
from django.conf import settings
from evennia import Command, CmdSet
from evennia.utils import logger
from evennia.utils.evtable import EvTable
from evennia.utils.ansi import ANSIString, strip_ansi
from evennia.utils.utils import mod_import, make_iter, pad, m_len
from evennia.commands import cmdhandler
# read from protocol NAWS later?
_MAX_TEXT_WIDTH = settings.CLIENT_DEFAULT_WIDTH
# we use cmdhandler instead of evennia.syscmdkeys to
# avoid some cases of loading before evennia init'd
_CMD_NOMATCH = cmdhandler.CMD_NOMATCH
_CMD_NOINPUT = cmdhandler.CMD_NOINPUT
# Return messages
# i18n
from django.utils.translation import ugettext as _
_ERR_NOT_IMPLEMENTED = _("Menu node '{nodename}' is not implemented. Make another choice.")
_ERR_GENERAL = _("Error in menu node '{nodename}'.")
_ERR_NO_OPTION_DESC = _("No description.")
_HELP_FULL = _("Commands: <menu option>, help, quit")
_HELP_NO_QUIT = _("Commands: <menu option>, help")
_HELP_NO_OPTIONS = _("Commands: help, quit")
_HELP_NO_OPTIONS_NO_QUIT = _("Commands: help")
_HELP_NO_OPTION_MATCH = _("Choose an option or try 'help'.")
_ERROR_PERSISTENT_SAVING = \
"""
{error}
|rThe menu state could not be saved for persistent mode. Switching
to non-persistent mode (which means the menu session won't survive
an eventual server reload).|n
"""
_TRACE_PERSISTENT_SAVING = \
"EvMenu persistent-mode error. Commonly, this is because one or " \
"more of the EvMenu callbacks could not be pickled, for example " \
"because it's a class method or is defined inside another function."
class EvMenuError(RuntimeError):
"""
Error raised by menu when facing internal errors.
"""
pass
#------------------------------------------------------------
#
# Menu command and command set
#
#------------------------------------------------------------
class CmdEvMenuNode(Command):
"""
Menu options.
"""
key = _CMD_NOINPUT
aliases = [_CMD_NOMATCH]
locks = "cmd:all()"
help_category = "Menu"
def func(self):
"""
Implement all menu commands.
"""
def _restore(caller):
# check if there is a saved menu available.
# this will re-start a completely new evmenu call.
saved_options = caller.attributes.get("_menutree_saved")
if saved_options:
startnode_tuple = caller.attributes.get("_menutree_saved_startnode")
try:
startnode, startnode_input = startnode_tuple
except ValueError: # old form of startnode storage
startnode, startnode_input = startnode_tuple, ""
if startnode:
saved_options[1]["startnode"] = startnode
saved_options[1]["startnode_input"] = startnode_input
# this will create a completely new menu call
EvMenu(caller, *saved_options[0], **saved_options[1])
return True
caller = self.caller
menu = caller.ndb._menutree
if not menu:
if _restore(caller):
return
orig_caller = caller
caller = caller.player if hasattr(caller, "player") else None
menu = caller.ndb._menutree if caller else None
if not menu:
if caller and _restore(caller):
return
caller = self.session
menu = caller.ndb._menutree
if not menu:
# can't restore from a session
err = "Menu object not found as %s.ndb._menutree!" % (orig_caller)
orig_caller.msg(err)
raise EvMenuError(err)
# we have a menu, use it.
menu._input_parser(menu, self.raw_string, caller)
class EvMenuCmdSet(CmdSet):
"""
The Menu cmdset replaces the current cmdset.
"""
key = "menu_cmdset"
priority = 1
mergetype = "Replace"
no_objs = True
no_exits = True
no_channels = False
def at_cmdset_creation(self):
"""
Called when creating the set.
"""
self.add(CmdEvMenuNode())
# These are default node formatters
def dedent_strip_nodetext_formatter(nodetext, has_options, caller=None):
"""
Simple dedent formatter that also strips text
"""
return dedent(nodetext).strip()
def dedent_nodetext_formatter(nodetext, has_options, caller=None):
"""
Just dedent text.
"""
return dedent(nodetext)
def evtable_options_formatter(optionlist, caller=None):
"""
Formats the option list display.
"""
if not optionlist:
return ""
# column separation distance
colsep = 4
nlist = len(optionlist)
# get the widest option line in the table.
table_width_max = -1
table = []
for key, desc in optionlist:
if not (key or desc):
continue
table_width_max = max(table_width_max,
max(m_len(p) for p in key.split("\n")) +
max(m_len(p) for p in desc.split("\n")) + colsep)
raw_key = strip_ansi(key)
if raw_key != key:
# already decorations in key definition
table.append(" |lc%s|lt%s|le: %s" % (raw_key, key, desc))
else:
# add a default white color to key
table.append(" |lc%s|lt|w%s|n|le: %s" % (raw_key, raw_key, desc))
ncols = (_MAX_TEXT_WIDTH // table_width_max) + 1 # number of ncols
nlastcol = nlist % ncols # number of elements left in last row
# get the amount of rows needed (start with 4 rows)
nrows = 4
while nrows * ncols < nlist:
nrows += 1
ncols = nlist // nrows # number of full columns
nlastcol = nlist % nrows # number of elements in last column
# get the final column count
ncols = ncols + 1 if nlastcol > 0 else ncols
if ncols > 1:
# only extend if longer than one column
table.extend([" " for i in range(nrows - nlastcol)])
# build the actual table grid
table = [table[icol * nrows : (icol * nrows) + nrows] for icol in range(0, ncols)]
# adjust the width of each column
for icol in range(len(table)):
col_width = max(max(m_len(p) for p in part.split("\n")) for part in table[icol]) + colsep
table[icol] = [pad(part, width=col_width + colsep, align="l") for part in table[icol]]
# format the table into columns
return unicode(EvTable(table=table, border="none"))
def underline_node_formatter(nodetext, optionstext, caller=None):
"""
Draws a node with underlines '_____' around it.
"""
nodetext_width_max = max(m_len(line) for line in nodetext.split("\n"))
options_width_max = max(m_len(line) for line in optionstext.split("\n"))
total_width = max(options_width_max, nodetext_width_max)
separator1 = "_" * total_width + "\n\n" if nodetext_width_max else ""
separator2 = "\n" + "_" * total_width + "\n\n" if total_width else ""
return separator1 + "|n" + nodetext + "|n" + separator2 + "|n" + optionstext
def null_node_formatter(nodetext, optionstext, caller=None):
"""
A minimalistic node formatter, no lines or frames.
"""
return nodetext + "\n\n" + optionstext
def evtable_parse_input(menuobject, raw_string, caller):
"""
Processes the user's node inputs.
Args:
menuobject (EvMenu): The EvMenu instance
raw_string (str): The incoming raw_string from the menu
command.
caller (Object, Player or Session): The entity using
the menu.
"""
cmd = raw_string.strip().lower()
if cmd in menuobject.options:
# this will take precedence over the default commands
# below
goto, callback = menuobject.options[cmd]
menuobject.callback_goto(callback, goto, raw_string)
elif menuobject.auto_look and cmd in ("look", "l"):
menuobject.display_nodetext()
elif menuobject.auto_help and cmd in ("help", "h"):
menuobject.display_helptext()
elif menuobject.auto_quit and cmd in ("quit", "q", "exit"):
menuobject.close_menu()
elif menuobject.default:
goto, callback = menuobject.default
menuobject.callback_goto(callback, goto, raw_string)
else:
caller.msg(_HELP_NO_OPTION_MATCH)
if not (menuobject.options or menuobject.default):
# no options - we are at the end of the menu.
menuobject.close_menu()
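# --- Hedged, illustrative sketch (not part of the original Evennia module) ---
# A custom parser for EvMenu's `input_parser` keyword (documented in
# EvMenu.__init__ below) must use the signature func(menuobject, raw_string,
# caller). This variant only shows where custom pre-processing would go
# before deferring to the default parser above; the name is invented here.
def _example_custom_parse_input(menuobject, raw_string, caller):
    """Pre-process the raw input, then defer to evtable_parse_input."""
    cleaned = raw_string.strip()
    # custom handling (logging, extra aliases, etc.) could inspect `cleaned` here
    evtable_parse_input(menuobject, cleaned, caller)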
#------------------------------------------------------------
#
# Menu main class
#
#------------------------------------------------------------
class EvMenu(object):
"""
This object represents an operational menu. It is initialized from
a menufile.py instruction.
"""
def __init__(self, caller, menudata, startnode="start",
cmdset_mergetype="Replace", cmdset_priority=1,
auto_quit=True, auto_look=True, auto_help=True,
cmd_on_exit="look",
nodetext_formatter=dedent_strip_nodetext_formatter,
options_formatter=evtable_options_formatter,
node_formatter=underline_node_formatter,
input_parser=evtable_parse_input,
persistent=False, startnode_input="", **kwargs):
"""
Initialize the menu tree and start the caller onto the first node.
Args:
caller (Object, Player or Session): The user of the menu.
menudata (str, module or dict): The full or relative path to the module
holding the menu tree data. All global functions in this module
whose name doesn't start with '_' will be parsed as menu nodes.
Also the module itself is accepted as input. Finally, a dictionary
menu tree can be given directly. This must then be a mapping
`{"nodekey": callable, ...}` where each `callable` must accept the same
arguments as, and return the same data as, a menu node. This allows for
dynamic menu creation.
startnode (str, optional): The starting node name in the menufile.
cmdset_mergetype (str, optional): 'Replace' (default) means the menu
commands will be exclusive - no other normal commands will
be usable while the user is in the menu. 'Union' means the
menu commands will be integrated with the existing commands
(it will merge with `merge_priority`), if so, make sure that
the menu's command names don't collide with existing commands
in an unexpected way. Also the CMD_NOMATCH and CMD_NOINPUT will
be overloaded by the menu cmdset. Other cmdset mergetypes
have little purpose for the menu.
cmdset_priority (int, optional): The merge priority for the
menu command set. The default (1) is usually enough for most
types of menus.
auto_quit (bool, optional): Allow user to use "q", "quit" or
"exit" to leave the menu at any point. Recommended during
development!
auto_look (bool, optional): Automatically make "look" or "l"
re-show the last node. Turning this off means you have to handle
re-showing nodes yourself, but may be useful if you need to
use "l" for some other purpose.
auto_help (bool, optional): Automatically make "help" or "h" show
the current help entry for the node. If turned off, any
help display must be handled manually, but this may be useful
if you need 'h' for some other purpose.
cmd_on_exit (callable, str or None, optional): When exiting the menu
(either by reaching a node with no options or by using the
in-built quit command (activated with `auto_quit`), this
callback function or command string will be executed.
The callback function takes two parameters, the caller then the
EvMenu object. This is called after cleanup is complete.
Set to None to not call any command.
nodetext_formatter (callable, optional): This callable should be on
the form `function(nodetext, has_options, caller=None)`, where `nodetext` is the
node text string and `has_options` a boolean specifying if there
are options associated with this node. It must return a formatted
string. `caller` is optionally a reference to the user of the menu.
options_formatter (callable, optional): This callable should be on
the form `function(optionlist, caller=None)`, where `optionlist` is a list
of option dictionaries, like
[{"key": ..., "desc": ..., "goto": ..., "exec": ...}, ...]
Each dictionary describes each possible option. Note that this
will also be called if there are no options, and so should be
able to handle an empty list. This should
be formatted into an options list and returned as a string,
including the required separator to use between the node text
and the options. If not given the default EvMenu style will be used.
`caller` is optionally a reference to the user of the menu.
node_formatter (callable, optional): This callable should be on the
form `func(nodetext, optionstext, caller=None)` where the arguments are strings
representing the node text and options respectively (possibly prepared
by `nodetext_formatter`/`options_formatter` or by the default styles).
It should return a string representing the final look of the node. This
can e.g. be used to create line separators that take into account the
dynamic width of the parts. `caller` is optionally a reference to the
user of the menu.
input_parser (callable, optional): This callable is responsible for parsing the
options dict from a node and has the form `func(menuobject, raw_string, caller)`,
where `menuobject` is the active `EvMenu` instance, `raw_string` is the
incoming text from the caller and `caller` is the user of the menu.
It should use the helper method of the menuobject to goto new nodes, show
help texts etc. See the default `evtable_parse_input` function for help
with parsing.
persistent (bool, optional): Make the Menu persistent (i.e. it will
survive a reload). This will also make the Menu cmdset persistent. Use
with caution - if your menu is buggy you may end up in a state
you can't get out of! Also note that persistent mode requires
that all formatters, menu nodes and callables are possible to
*pickle*. When the server is reloaded, the latest node shown will be completely
re-run with the same input arguments - so be careful if you are counting
up some persistent counter or similar - the counter may be run twice if
reload happens on the node that does that.
startnode_input (str, optional): Send an input text to `startnode` as if
a user input text from a fictional previous node. When the server reloads,
the latest visited node will be re-run using this kwarg.
Kwargs:
any (any): All kwargs will become initialization variables on `caller.ndb._menutree`,
to be available at runtime.
Raises:
EvMenuError: If the start/end node is not found in menu tree.
Notes:
In persistent mode, all nodes, formatters and callbacks in
the menu must be possible to be *pickled*, this excludes
e.g. callables that are class methods or functions defined
dynamically or as part of another function. In
non-persistent mode no such restrictions exist.
"""
self._startnode = startnode
self._menutree = self._parse_menudata(menudata)
self._nodetext_formatter = nodetext_formatter
self._options_formatter = options_formatter
self._node_formatter = node_formatter
self._input_parser = input_parser
self._persistent = persistent
if startnode not in self._menutree:
raise EvMenuError("Start node '%s' not in menu tree!" % startnode)
# public variables made available to the command
self.caller = caller
self.auto_quit = auto_quit
self.auto_look = auto_look
self.auto_help = auto_help
if isinstance(cmd_on_exit, str):
self.cmd_on_exit = lambda caller, menu: caller.execute_cmd(cmd_on_exit)
elif callable(cmd_on_exit):
self.cmd_on_exit = cmd_on_exit
else:
self.cmd_on_exit = None
self.default = None
self.nodetext = None
self.helptext = None
self.options = None
# assign kwargs as initialization vars on ourselves.
if set(("_startnode", "_menutree", "_nodetext_formatter", "_options_formatter",
"_node_formatter", "_input_parser", "_persistent", "cmd_on_exit", "default",
"nodetext", "helptext", "options")).intersection(set(kwargs.keys())):
raise RuntimeError("One or more of the EvMenu `**kwargs` is reserved by EvMenu for internal use.")
for key, val in kwargs.iteritems():
setattr(self, key, val)
# store ourself on the object
self.caller.ndb._menutree = self
if persistent:
# save the menu to the database
try:
caller.attributes.add("_menutree_saved",
((menudata, ),
{"startnode": startnode,
"cmdset_mergetype": cmdset_mergetype,
"cmdset_priority": cmdset_priority,
"auto_quit": auto_quit, "auto_look": auto_look, "auto_help": auto_help,
"cmd_on_exit": cmd_on_exit,
"nodetext_formatter": nodetext_formatter, "options_formatter": options_formatter,
"node_formatter": node_formatter, "input_parser": input_parser,
"persistent": persistent,}))
caller.attributes.add("_menutree_saved_startnode", (startnode, startnode_input))
except Exception as err:
caller.msg(_ERROR_PERSISTENT_SAVING.format(error=err))
logger.log_trace(_TRACE_PERSISTENT_SAVING)
persistent = False
# set up the menu command on the caller
menu_cmdset = EvMenuCmdSet()
menu_cmdset.mergetype = str(cmdset_mergetype).lower().capitalize() or "Replace"
menu_cmdset.priority = int(cmdset_priority)
self.caller.cmdset.add(menu_cmdset, permanent=persistent)
# start the menu
self.goto(self._startnode, startnode_input)
def _parse_menudata(self, menudata):
"""
Parse a menufile for node functions and store in dictionary
map. Alternatively, accept a pre-made mapping dictionary of
node functions.
Args:
menudata (str, module or dict): The python.path to the menufile,
or the python module itself. If a dict, this should be a
mapping nodename:callable, where the callable must match
the criteria for a menu node.
Returns:
menutree (dict): A {nodekey: func}
"""
if isinstance(menudata, dict):
# This is assumed to be a pre-loaded menu tree.
return menudata
else:
# a python path of a module
module = mod_import(menudata)
return dict((key, func) for key, func in module.__dict__.items()
if isfunction(func) and not key.startswith("_"))
def _format_node(self, nodetext, optionlist):
"""
Format the node text + option section
Args:
nodetext (str): The node text
optionlist (list): List of (key, desc) pairs.
Returns:
string (str): The fully formatted node, combining the
node text and the options section.
Notes:
This will adjust the columns of the options, first to use
a maximum of 4 rows (expanding in columns), then gradually
growing to make use of the screen space.
"""
# handle the node text
nodetext = self._nodetext_formatter(nodetext, len(optionlist), self.caller)
# handle the options
optionstext = self._options_formatter(optionlist, self.caller)
# format the entire node
return self._node_formatter(nodetext, optionstext, self.caller)
def _execute_node(self, nodename, raw_string):
"""
Execute a node.
Args:
nodename (str): Name of node.
raw_string (str): The raw default string entered on the
previous node (only used if the node accepts it as an
argument)
Returns:
nodetext, options (tuple): The node text (a string or a
tuple) and the options tuple, if any.
"""
try:
node = self._menutree[nodename]
except KeyError:
self.caller.msg(_ERR_NOT_IMPLEMENTED.format(nodename=nodename))
raise EvMenuError
try:
# the node should return data as (text, options)
if len(getargspec(node).args) > 1:
# a node accepting raw_string
nodetext, options = node(self.caller, raw_string)
else:
# a normal node, only accepting caller
nodetext, options = node(self.caller)
except KeyError:
self.caller.msg(_ERR_NOT_IMPLEMENTED.format(nodename=nodename))
raise EvMenuError
except Exception:
self.caller.msg(_ERR_GENERAL.format(nodename=nodename))
raise
return nodetext, options
def display_nodetext(self):
self.caller.msg(self.nodetext)
def display_helptext(self):
self.caller.msg(self.helptext)
def callback_goto(self, callback, goto, raw_string):
if callback:
self.callback(callback, raw_string)
if goto:
self.goto(goto, raw_string)
def callback(self, nodename, raw_string):
"""
Run a node as a callback. This makes no use of the return
values from the node.
Args:
nodename (str): Name of node.
raw_string (str): The raw default string entered on the
previous node (only used if the node accepts it as an
argument)
"""
if callable(nodename):
# this is a direct callable - execute it directly
try:
if len(getargspec(nodename).args) > 1:
# callable accepting raw_string
nodename(self.caller, raw_string)
else:
# normal callable, only the caller as arg
nodename(self.caller)
except Exception:
self.caller.msg(_ERR_GENERAL.format(nodename=nodename))
raise
else:
# nodename is a string; lookup as node
try:
# execute the node; we make no use of the return values here.
self._execute_node(nodename, raw_string)
except EvMenuError:
return
def goto(self, nodename, raw_string):
"""
Run a node by name
Args:
nodename (str): Name of node.
raw_string (str): The raw default string entered on the
previous node (only used if the node accepts it as an
argument)
"""
try:
# execute the node, make use of the returns.
nodetext, options = self._execute_node(nodename, raw_string)
except EvMenuError:
return
if self._persistent:
self.caller.attributes.add("_menutree_saved_startnode", (nodename, raw_string))
# validation of the node return values
helptext = ""
if hasattr(nodetext, "__iter__"):
if len(nodetext) > 1:
nodetext, helptext = nodetext[:2]
else:
nodetext = nodetext[0]
nodetext = "" if nodetext is None else str(nodetext)
options = [options] if isinstance(options, dict) else options
# this will be displayed in the given order
display_options = []
# this is used for lookup
self.options = {}
self.default = None
if options:
for inum, dic in enumerate(options):
# fix up the option dicts
keys = make_iter(dic.get("key"))
if "_default" in keys:
keys = [key for key in keys if key != "_default"]
desc = dic.get("desc", dic.get("text", _ERR_NO_OPTION_DESC).strip())
goto, execute = dic.get("goto", None), dic.get("exec", None)
self.default = (goto, execute)
else:
keys = list(make_iter(dic.get("key", str(inum+1).strip())))
desc = dic.get("desc", dic.get("text", _ERR_NO_OPTION_DESC).strip())
goto, execute = dic.get("goto", None), dic.get("exec", None)
if keys:
display_options.append((keys[0], desc))
for key in keys:
if goto or execute:
self.options[strip_ansi(key).strip().lower()] = (goto, execute)
self.nodetext = self._format_node(nodetext, display_options)
# handle the helptext
if helptext:
self.helptext = helptext
elif options:
self.helptext = _HELP_FULL if self.auto_quit else _HELP_NO_QUIT
else:
self.helptext = _HELP_NO_OPTIONS if self.auto_quit else _HELP_NO_OPTIONS_NO_QUIT
self.display_nodetext()
def close_menu(self):
"""
Shutdown menu; occurs when reaching the end node or using the quit command.
"""
self.caller.cmdset.remove(EvMenuCmdSet)
del self.caller.ndb._menutree
if self._persistent:
self.caller.attributes.remove("_menutree_saved")
self.caller.attributes.remove("_menutree_saved_startnode")
if self.cmd_on_exit is not None:
self.cmd_on_exit(self.caller, self)
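# --- Hedged, illustrative sketch (not part of the original Evennia module) ---
# The EvMenu.__init__ docstring above notes that `menudata` may also be a
# plain dict mapping node names to callables ("dynamic menu creation"), but
# gives no example of that form. The helper below is an assumption-laden
# sketch; `caller` is whatever object should use the menu. Nested functions
# like these cannot be pickled, so persistent=True would not work here.
def _example_dict_menu(caller):
    """Start a minimal two-node menu built from a dict instead of a module."""
    def start(caller):
        text = "This menu tree was built from a dict."
        options = ({"desc": "Finish", "goto": "end"},)
        return text, options

    def end(caller):
        return "Goodbye.", None

    EvMenu(caller, {"start": start, "end": end}, startnode="start")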
# -------------------------------------------------------------------------------------------------
#
# Simple input shortcuts
#
# -------------------------------------------------------------------------------------------------
class CmdGetInput(Command):
"""
Enter your data and press return.
"""
key = _CMD_NOMATCH
aliases = _CMD_NOINPUT
def func(self):
"This is called when user enters anything."
caller = self.caller
callback = caller.ndb._getinputcallback
if not callback:
# this can happen if called from a player command while IC (in-character)
caller = self.player
callback = caller.ndb._getinputcallback
if not callback:
raise RuntimeError("No input callback found.")
prompt = caller.ndb._getinputprompt
result = self.raw_string.strip() # we strip the ending line break caused by sending
ok = not callback(caller, prompt, result)
if ok:
# only clear the state if the callback does not return
# anything
del caller.ndb._getinputcallback
del caller.ndb._getinputprompt
caller.cmdset.remove(InputCmdSet)
class InputCmdSet(CmdSet):
"""
This stores the input command
"""
key = "input_cmdset"
priority = 1
mergetype = "Replace"
no_objs = True
no_exits = True
no_channels = False
def at_cmdset_creation(self):
"called once at creation"
self.add(CmdGetInput())
def get_input(caller, prompt, callback):
"""
This is a helper function for easily request input from
the caller.
Args:
caller (Player or Object): The entity being asked
the question. This should usually be an object
controlled by a user.
prompt (str): This text will be shown to the user,
in order to let them know their input is needed.
callback (callable): A function that will be called
when the user enters a reply. It must take three
arguments: the `caller`, the `prompt` text and the
`result` of the input given by the user. If the
callback doesn't return anything or return False,
the input prompt will be cleaned up and exited. If
returning True, the prompt will remain and continue to
accept input.
Raises:
RuntimeError: If the given callback is not callable.
Notes:
The result value sent to the callback is raw and not
processed in any way. This means that you will get
the ending line return character from most types of
client inputs. So make sure to strip that before
doing a comparison.
"""
if not callable(callback):
raise RuntimeError("get_input: input callback is not callable.")
caller.ndb._getinputcallback = callback
caller.ndb._getinputprompt = prompt
caller.cmdset.add(InputCmdSet)
caller.msg(prompt)
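# --- Hedged, illustrative sketch (not part of the original Evennia module) ---
# Example of the callback protocol described in get_input() above. The names
# are invented for illustration; `caller` is any msg()-capable entity.
def _example_ask_name(caller):
    """Prompt for a name and greet the user; re-prompt on empty input."""
    def _callback(caller, prompt, result):
        name = result.strip()
        if not name:
            caller.msg("Please enter a non-empty name.")
            return True  # returning True keeps the prompt active
        caller.msg("Hello, %s!" % name)
        # falling through returns None, which closes the input prompt
    get_input(caller, "What is your name?", _callback)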
#------------------------------------------------------------
#
# test menu structure and testing command
#
#------------------------------------------------------------
def test_start_node(caller):
menu = caller.ndb._menutree
text = """
This is an example menu.
If you enter anything except the valid options, your input will be
recorded and you will be brought to a menu entry showing your
input.
Select options or use 'quit' to exit the menu.
The menu was initialized with two variables: %s and %s.
""" % (menu.testval, menu.testval2)
options = ({"key": ("{yS{net", "s"),
"desc": "Set an attribute on yourself.",
"exec": lambda caller: caller.attributes.add("menuattrtest", "Test value"),
"goto": "test_set_node"},
{"key": ("{yL{nook", "l"),
"desc": "Look and see a custom message.",
"goto": "test_look_node"},
{"key": ("{yV{niew", "v"),
"desc": "View your own name",
"goto": "test_view_node"},
{"key": ("{yQ{nuit", "quit", "q", "Q"),
"desc": "Quit this menu example.",
"goto": "test_end_node"},
{"key": "_default",
"goto": "test_displayinput_node"})
return text, options
def test_look_node(caller):
text = ""
options = {"key": ("{yL{nook", "l"),
"desc": "Go back to the previous menu.",
"goto": "test_start_node"}
return text, options
def test_set_node(caller):
text = ("""
The attribute 'menuattrtest' was set to
{w%s{n
(check it with examine after quitting the menu).
This node has only one option, and one of its key aliases is the
string "_default", meaning it will catch any input, in this case
to return to the main menu. So you can e.g. press <return> to go
back now.
""" % caller.db.menuattrtest,
# optional help text for this node
"""
This is the help entry for this node. It is created by returning
the node text as a tuple - the second string in that tuple will be
used as the help text.
""")
options = {"key": ("back (default)", "_default"),
"desc": "back to main",
"goto": "test_start_node"}
return text, options
def test_view_node(caller):
text = """
Your name is {g%s{n!
click |lclook|lthere|le to trigger a look command under MXP.
This node's option has no explicit key (nor the "_default" key
set), and so gets assigned a number automatically. You can in fact
-always- use numbers (1...N) to refer to listed options, even if you
don't see a string option key (try it!).
""" % caller.key
options = {"desc": "back to main",
"goto": "test_start_node"}
return text, options
def test_displayinput_node(caller, raw_string):
text = """
You entered the text:
"{w%s{n"
... which could now be handled or stored here in some way if this
was not just an example.
This node has an option with a single alias "_default", which
makes it hidden from view. It catches all input (except the
in-menu help/quit commands) and will, in this case, bring you back
to the start node.
""" % raw_string
options = {"key": "_default",
"goto": "test_start_node"}
return text, options
def test_end_node(caller):
text = """
This is the end of the menu and since it has no options the menu
will exit here, followed by a call of the "look" command.
"""
return text, None
class CmdTestMenu(Command):
"""
Test menu
Usage:
testmenu <menumodule>
Starts a demo menu from a menu node definition module.
"""
key = "testmenu"
def func(self):
if not self.args:
self.caller.msg("Usage: testmenu menumodule")
return
# start menu
EvMenu(self.caller, self.args.strip(), startnode="test_start_node", persistent=True, cmdset_mergetype="Replace",
testval="val", testval2="val2")
|
titeuf87/evennia
|
evennia/utils/evmenu.py
|
Python
|
bsd-3-clause
| 39,605
|
[
"VisIt"
] |
45cb217936ef02c734b14ca541a36c8e2ddbe755eb29c5d07a23dc7f3d3fe31a
|
'''
hw4_2.py
A single cell driven by synaptic inputs.
Written by Sungho Hong, Computational Neuroscience Unit, OIST, 2017
'''
from neuron import h, gui
h.load_file("stdrun.hoc")
h.load_file("CNSutils.hoc") # CNSsaveVectors
# The simulation will run for 300 ms.
h.tstop = 300
# Global sampling period will be 0.1 ms -> 10 kHz sampling.
Dt = 0.1
# Creating a cell
soma = h.Section()
soma.diam = 100/h.PI
soma.L = 100 # With these, the membrane area = 1e-4 cm^2
soma.insert("pas")
soma.g_pas = 5e-5 # This makes tau = 20 ms
soma.e_pas = -70 # Reversal potential
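# Added commentary (sanity check, not in the original assignment script):
# with NEURON's default cm = 1 uF/cm^2, the passive membrane time constant is
# tau_m = cm / g_pas = 1e-6 / 5e-5 s = 20 ms, and diam = 100/pi um with
# L = 100 um gives a lateral area of pi * diam * L = 1e4 um^2 = 1e-4 cm^2,
# consistent with the comments above.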
# Add an excitatory and inhibitory synapse to the cells
syns = []
# 1 excitatory synapses
syns.append(h.Exp2Syn(0.5, sec=soma))
syns[-1].tau1 = 0.1 # rise time
syns[-1].tau2 = 1.25 # decay time
syns[-1].e = 0 # reversal potential
# 1 inhibitory synapses
syns.append(h.Exp2Syn(0.5, sec=soma))
syns[-1].tau1 = 1 # rise time
syns[-1].tau2 = 10 # decay time
syns[-1].e = -75 # reversal potential
# Define 100 excitatory and 20 inhibitory input spike trains (rates fexc, finh below)
Nexc = 100 # Number of excitatory stimuli
Ninh = 20 # Number of inhibitory stimuli
fexc = 10. # Average firing rate of excitatory inputs
finh = 0.001 # Average firing rate of inhibitory inputs
stims = []
# Define Nexc excitatory stimuli
for i in range(Nexc):
stims.append(h.NetStimFD(0.5))
stims[-1].noise = 1 # random firing
stims[-1].start = 0
stims[-1].duration = h.tstop
stims[-1].interval = 1e3/fexc
# Define Ninh inhibitory stimuli
for i in range(Ninh):
stims.append(h.NetStimFD(0.5))
stims[-1].noise = 1 # random firing
stims[-1].start = 0
stims[-1].duration = h.tstop
stims[-1].interval = 1e3/finh
stims[0].seed(1) # Set seed for noise
# Make connections
ncs = []
for i in range(Nexc):
ncs.append(h.NetCon(stims[i], syns[0]))
ncs[-1].weight[0] = 1e-3 # 1 nS
for i in range(Ninh):
ncs.append(h.NetCon(stims[i+Nexc], syns[1]))
ncs[-1].weight[0] = 0
# Insert a current clamp to probe the neuron
# ic = h.IClamp(0.5, sec=soma)
# ic.delay = 50
# ic.amp = 0.1
# ic.dur = 150
# Open the GUI
h.xopen("week2.ses")
# Set up the recording for membrane potential
vtemp = h.Vector()
vtemp.record(soma(0.5)._ref_v, Dt)
# For repeated run
vrecords = h.List()
Nrepeat = 100
for i in range(Nrepeat):
h.v_init = -70
h.init()
h.run()
vrecords.append(vtemp.c() )
print "Simulation:", i
h.CNSsaveListOfVectors("voltages_hw4_2.csv", Dt, vrecords)
|
shhong/a310_cns_2017
|
Assignment_4/hw4_2.py
|
Python
|
gpl-3.0
| 2,504
|
[
"NEURON"
] |
b2f755c75c2fb82ea050accd3d2f585babf91f9565a9084628ab572f8b494887
|
"""
=============================================
Integration and ODEs (:mod:`scipy.integrate`)
=============================================
.. currentmodule:: scipy.integrate
Integrating functions, given function object
============================================
.. autosummary::
:toctree: generated/
quad -- General purpose integration.
dblquad -- General purpose double integration.
tplquad -- General purpose triple integration.
fixed_quad -- Integrate func(x) using Gaussian quadrature of order n.
quadrature -- Integrate with given tolerance using Gaussian quadrature.
romberg -- Integrate func using Romberg integration.
Integrating functions, given fixed samples
==========================================
.. autosummary::
:toctree: generated/
trapz -- Use trapezoidal rule to compute integral from samples.
cumtrapz -- Use trapezoidal rule to cumulatively compute integral.
simps -- Use Simpson's rule to compute integral from samples.
romb -- Use Romberg Integration to compute integral from
-- (2**k + 1) evenly-spaced samples.
.. seealso::
:mod:`scipy.special` for orthogonal polynomials (special) for Gaussian
quadrature roots and weights for other weighting factors and regions.
Integrators of ODE systems
==========================
.. autosummary::
:toctree: generated/
odeint -- General integration of ordinary differential equations.
ode -- Integrate ODE using VODE and ZVODE routines.
complex_ode -- Convert a complex-valued ODE to real-valued and integrate.
"""
from quadrature import *
from odepack import *
from quadpack import *
from _ode import *
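# --- Hedged, illustrative sketch (not part of the original SciPy sources) ---
# The module docstring above lists quad for general-purpose integration; the
# helper below (never called at import time) shows the basic call pattern.
# quad returns a (value, estimated_absolute_error) pair.
def _example_quad_usage():
    """Integrate exp(-x**2) over [0, 1]; illustrative only."""
    import numpy as np
    value, abserr = quad(lambda x: np.exp(-x ** 2), 0, 1)
    return value, abserr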
__all__ = filter(lambda s:not s.startswith('_'),dir())
from numpy.testing import Tester
test = Tester().test
|
teoliphant/scipy
|
scipy/integrate/__init__.py
|
Python
|
bsd-3-clause
| 1,850
|
[
"Gaussian"
] |
be24f8b451d8b7d2002ef186d66697988e0abb72d88091019a17d2538ed64ce6
|
"""
KeepNote
General rich text editor that saves to HTML
"""
#
# KeepNote
# Copyright (c) 2008-2009 Matt Rasmussen
# Author: Matt Rasmussen <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
# python imports
import codecs
import gettext
import sys
import os
import tempfile
import re
import random
import urllib2
import StringIO
from itertools import chain
# pygtk imports
import pygtk
pygtk.require('2.0')
import gtk, gobject, pango
from gtk import gdk
import gtk.keysyms # this is necessary for py2exe discovery
# try to import spell check
try:
import gtkspell
except ImportError:
gtkspell = None
# textbuffer_tools imports
from .textbuffer_tools import \
iter_buffer_contents, iter_buffer_anchors, sanitize_text
# richtextbuffer imports
from .richtextbuffer import \
ignore_tag, \
add_child_to_buffer, \
RichTextBuffer, \
RichTextImage, \
RichTextIndentTag
# tag imports
from .richtext_tags import \
RichTextModTag, \
RichTextJustifyTag, \
RichTextFamilyTag, \
RichTextSizeTag, \
RichTextFGColorTag, \
RichTextBGColorTag, \
RichTextIndentTag, \
RichTextBulletTag, \
RichTextLinkTag, \
get_text_scale, \
set_text_scale
# richtext io
from .richtext_html import HtmlBuffer, HtmlError
from keepnote import safefile
import keepnote
_ = keepnote.translate
#=============================================================================
# constants
DEFAULT_FONT = "Sans 10"
TEXTVIEW_MARGIN = 5
if keepnote.get_platform() == "darwin":
CLIPBOARD_NAME = gdk.SELECTION_PRIMARY
else:
CLIPBOARD_NAME = "CLIPBOARD"
RICHTEXT_ID = -3 # application defined integer for the clipboard
CONTEXT_MENU_ACCEL_PATH = "<main>/richtext_context_menu"
# mime types
# richtext mime type is process specific
MIME_RICHTEXT = "application/x-richtext" + str(random.randint(1, 100000))
MIME_IMAGES = ["image/png",
"image/bmp",
"image/jpeg",
"image/xpm",
# Mac OS X MIME types
"public.png",
"public.bmp",
"public.jpeg",
"public.xpm"]
# TODO: add more text MIME types?
MIME_TEXT = ["text/plain",
"text/plain;charset=utf-8",
"text/plain;charset=UTF-8",
"UTF8_STRING",
"STRING",
"COMPOUND_TEXT",
"TEXT"]
MIME_HTML = ["HTML Format",
"text/html"]
# globals
_g_clipboard_contents = None
def parse_font(fontstr):
"""Parse a font string from the font chooser"""
tokens = fontstr.split(" ")
size = int(tokens.pop())
mods = []
# NOTE: underline is not part of the font string and is handled separately
while tokens[-1] in ["Bold", "Italic"]:
mods.append(tokens.pop().lower())
return " ".join(tokens), mods, size
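# --- Hedged, illustrative check (not part of the original KeepNote code) ---
# parse_font() above splits a font-chooser string into (family, modifiers,
# size); modifiers come out lower-cased in pop order (right to left).
def _parse_font_example():
    """Illustrative only."""
    family, mods, size = parse_font("Serif Bold Italic 12")
    assert (family, size) == ("Serif", 12)
    assert mods == ["italic", "bold"]  # popped right-to-left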
def parse_utf(text):
# TODO: lookup the standard way to do this
if text[:2] in (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE) or (
len(text) > 1 and text[1] == '\x00') or (
len(text) > 3 and text[3] == '\x00'):
return text.decode("utf16")
else:
text = text.replace("\x00", "")
return unicode(text, "utf8")
def is_relative_file(filename):
"""Returns True if filename is relative"""
return (not re.match("[^:/]+://", filename) and
not os.path.isabs(filename))
#=============================================================================
class RichTextError (StandardError):
"""Class for errors with RichText"""
# NOTE: this is only used for saving and loading in textview
# should this stay here?
def __init__(self, msg, error):
StandardError.__init__(self, msg)
self.msg = msg
self.error = error
def __str__(self):
if self.error:
return str(self.error) + "\n" + self.msg
else:
return self.msg
class RichTextMenu (gtk.Menu):
"""A popup menu for child widgets in a RichTextView"""
def __init__(self):
gtk.Menu.__init__(self)
self._child = None
def set_child(self, child):
self._child = child
def get_child(self):
return self._child
class RichTextIO (object):
"""Read/Writes the contents of a RichTextBuffer to disk"""
def __init__(self):
self._html_buffer = HtmlBuffer()
def save(self, textbuffer, filename, title=None, stream=None):
"""
Save buffer contents to file
textbuffer -- richtextbuffer to save
filename -- HTML filename to save to (optional if stream given)
title -- title of HTML file (optional)
stream -- output stream for HTML file (optional)
"""
print "help"
self._save_images(textbuffer, filename)
try:
buffer_contents = iter_buffer_contents(
textbuffer, None, None, ignore_tag)
if stream:
out = stream
else:
out = safefile.open(filename, "wb", codec="utf-8")
self._html_buffer.set_output(out)
self._html_buffer.write(buffer_contents,
textbuffer.tag_table,
title=title)
out.close()
except IOError, e:
raise RichTextError("Could not save '%s'." % filename, e)
textbuffer.set_modified(False)
def load(self, textview, textbuffer, filename, stream=None):
"""
Load buffer with data from file
textbuffer -- richtextbuffer to load
filename -- HTML filename to load (optional if stream given)
stream -- output stream for HTML file (optional)
"""
# unhook expensive callbacks
textbuffer.block_signals()
if textview:
spell = textview.is_spell_check_enabled()
textview.enable_spell_check(False)
textview.set_buffer(None)
# clear buffer
textbuffer.clear()
err = None
try:
if stream:
infile = stream
else:
infile = safefile.open(filename, "r", codec="utf-8")
buffer_contents = self._html_buffer.read(infile)
textbuffer.insert_contents(buffer_contents,
textbuffer.get_start_iter())
infile.close()
# put cursor at beginning
textbuffer.place_cursor(textbuffer.get_start_iter())
except (HtmlError, IOError, Exception), e:
err = e
textbuffer.clear()
if textview:
textview.set_buffer(textbuffer)
ret = False
else:
# finish loading
self._load_images(textbuffer, filename)
if textview:
textview.set_buffer(textbuffer)
textview.show_all()
ret = True
# rehook up callbacks
textbuffer.unblock_signals()
if textview:
textview.enable_spell_check(spell)
textview.enable()
textbuffer.set_modified(False)
# reraise error
if not ret:
raise RichTextError("Error loading '%s'." % filename, e)
def _load_images(self, textbuffer, html_filename):
"""Load images present in textbuffer"""
for kind, it, param in iter_buffer_anchors(textbuffer, None, None):
child, widgets = param
if isinstance(child, RichTextImage):
self._load_image(textbuffer, child, html_filename)
def _save_images(self, textbuffer, html_filename):
"""Save images present in text buffer"""
for kind, it, param in iter_buffer_anchors(textbuffer, None, None):
child, widgets = param
print "writing images"
if isinstance(child, RichTextImage):
self._save_image(textbuffer, child, html_filename)
def _load_image(self, textbuffer, image, html_filename):
image.set_from_file(
self._get_filename(html_filename, image.get_filename()))
def _save_image(self, textbuffer, image, html_filename):
if image.save_needed():
print "writing"
image.write(self._get_filename(html_filename, image.get_filename()))
def _get_filename(self, html_filename, filename):
if is_relative_file(filename):
path = os.path.dirname(html_filename)
return os.path.join(path, filename)
return filename
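# --- Hedged, illustrative sketch (not part of the original KeepNote code) ---
# A typical round trip with the RichTextIO class above: write a buffer out to
# an HTML file and read it back into a (textview, textbuffer) pair. The
# filename and title are invented for illustration.
def _example_richtext_roundtrip(textview, textbuffer):
    """Save a RichTextBuffer to HTML and reload it (illustrative only)."""
    io = RichTextIO()
    io.save(textbuffer, "note.html", title="Example note")
    io.load(textview, textbuffer, "note.html")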
class RichTextDragDrop (object):
"""Manages drag and drop events for a richtext editor"""
def __init__(self, targets=[]):
self._acceptable_targets = []
self._acceptable_targets.extend(targets)
def append_target(self, target):
self._acceptable_targets.append(target)
def extend_targets(self, targets):
self._acceptable_targets.extend(targets)
def find_acceptable_target(self, targets):
for target in self._acceptable_targets:
if target in targets:
return target
return None
class RichTextView (gtk.TextView):
"""A RichText editor widget"""
def __init__(self, textbuffer=None):
gtk.TextView.__init__(self, textbuffer)
self._textbuffer = None
self._buffer_callbacks = []
self._blank_buffer = RichTextBuffer()
self._popup_menu = None
self._html_buffer = HtmlBuffer()
self._accel_group = None
self._accel_path = CONTEXT_MENU_ACCEL_PATH
self.dragdrop = RichTextDragDrop(MIME_IMAGES + ["text/uri-list"] +
MIME_HTML + MIME_TEXT)
if textbuffer is None:
textbuffer = RichTextBuffer()
self.set_buffer(textbuffer)
self.set_default_font(DEFAULT_FONT)
# spell checker
self._spell_checker = None
self.enable_spell_check(True)
# signals
self.set_wrap_mode(gtk.WRAP_WORD)
self.set_property("right-margin", TEXTVIEW_MARGIN)
self.set_property("left-margin", TEXTVIEW_MARGIN)
self.connect("key-press-event", self.on_key_press_event)
#self.connect("insert-at-cursor", self.on_insert_at_cursor)
self.connect("backspace", self.on_backspace)
self.connect("button-press-event", self.on_button_press)
# drag and drop
self.connect("drag-data-received", self.on_drag_data_received)
self.connect("drag-motion", self.on_drag_motion)
self.connect("drag-data-get", self.on_drag_data_get)
self.drag_dest_add_image_targets()
# clipboard
self.connect("copy-clipboard", lambda w: self._on_copy())
self.connect("cut-clipboard", lambda w: self._on_cut())
self.connect("paste-clipboard", lambda w: self._on_paste())
#self.connect("button-press-event", self.on_button_press)
self.connect("populate-popup", self.on_popup)
# popup menus
self.init_menus()
# requires new pygtk
#self._textbuffer.register_serialize_format(MIME_TAKENOTE,
# self.serialize, None)
#self._textbuffer.register_deserialize_format(MIME_TAKENOTE,
# self.deserialize, None)
def init_menus(self):
"""Initialize popup menus"""
# image menu
self._image_menu = RichTextMenu()
self._image_menu.attach_to_widget(self, lambda w,m:None)
item = gtk.ImageMenuItem(gtk.STOCK_CUT)
item.connect("activate", lambda w: self.emit("cut-clipboard"))
self._image_menu.append(item)
item.show()
item = gtk.ImageMenuItem(gtk.STOCK_COPY)
item.connect("activate", lambda w: self.emit("copy-clipboard"))
self._image_menu.append(item)
item.show()
item = gtk.ImageMenuItem(gtk.STOCK_DELETE)
def func(widget):
if self._textbuffer:
self._textbuffer.delete_selection(True, True)
item.connect("activate", func)
self._image_menu.append(item)
item.show()
def set_buffer(self, textbuffer):
"""Attach this textview to a RichTextBuffer"""
# tell current buffer we are detached
if self._textbuffer:
for callback in self._buffer_callbacks:
self._textbuffer.disconnect(callback)
# change buffer
if textbuffer:
gtk.TextView.set_buffer(self, textbuffer)
else:
gtk.TextView.set_buffer(self, self._blank_buffer)
self._textbuffer = textbuffer
# tell new buffer we are attached
if self._textbuffer:
self._textbuffer.set_default_attr(self.get_default_attributes())
self._modified_id = self._textbuffer.connect(
"modified-changed", self._on_modified_changed)
self._buffer_callbacks = [
self._textbuffer.connect("font-change",
self._on_font_change),
self._textbuffer.connect("child-added",
self._on_child_added),
self._textbuffer.connect("child-activated",
self._on_child_activated),
self._textbuffer.connect("child-menu",
self._on_child_popup_menu),
self._modified_id
]
# add all deferred anchors
self._textbuffer.add_deferred_anchors(self)
def set_accel_group(self, accel_group):
self._accel_group = accel_group
def set_accel_path(self, accel_path):
self._accel_path = accel_path
#======================================================
# keyboard callbacks
def on_key_press_event(self, textview, event):
"""Callback from key press event"""
if self._textbuffer is None:
return
if event.keyval == gtk.keysyms.ISO_Left_Tab:
# shift+tab is pressed
it = self._textbuffer.get_iter_at_mark(self._textbuffer.get_insert())
# indent if there is a selection
if self._textbuffer.get_selection_bounds():
# tab at start of line should do unindentation
self.unindent()
return True
if event.keyval == gtk.keysyms.Tab:
# tab is pressed
it = self._textbuffer.get_iter_at_mark(self._textbuffer.get_insert())
# indent if cursor at start of paragraph or if there is a selection
if self._textbuffer.starts_par(it) or \
self._textbuffer.get_selection_bounds():
# tab at start of line should do indentation
self.indent()
return True
if event.keyval == gtk.keysyms.Delete:
# delete key pressed
# TODO: make sure selection with delete does not fracture
# unedititable regions.
it = self._textbuffer.get_iter_at_mark(self._textbuffer.get_insert())
if not self._textbuffer.get_selection_bounds() and \
self._textbuffer.starts_par(it) and \
not self._textbuffer.is_insert_allowed(it) and \
self._textbuffer.get_indent(it)[0] > 0:
# delete inside bullet phrase, removes bullet
self.toggle_bullet("none")
self.unindent()
return True
def on_backspace(self, textview):
"""Callback for backspace press"""
if not self._textbuffer:
return
it = self._textbuffer.get_iter_at_mark(self._textbuffer.get_insert())
if self._textbuffer.starts_par(it):
# look for indent tags
indent, par_type = self._textbuffer.get_indent()
if indent > 0:
self.unindent()
self.stop_emission("backspace")
#==============================================
# callbacks
def on_button_press(self, widget, event):
"""Process context popup menu"""
if event.button == 1 and event.type == gtk.gdk._2BUTTON_PRESS:
# double left click
x, y = self.window_to_buffer_coords(gtk.TEXT_WINDOW_TEXT,
int(event.x), int(event.y))
it = self.get_iter_at_location(x, y)
if self.click_iter(it):
self.stop_emission("button-press-event")
def click_iter(self, it=None):
"""Perform click action at TextIter it"""
if not self._textbuffer:
return
if it is None:
it = self._textbuffer.get_insert_iter()
for tag in chain(it.get_tags(), it.get_toggled_tags(False)):
if isinstance(tag, RichTextLinkTag):
self.emit("visit-url", tag.get_href())
return True
return False
#=======================================================
# Drag and drop
def on_drag_motion(self, textview, drag_context, x, y, timestamp):
"""Callback for when dragging over textview"""
if not self._textbuffer:
return
target = self.dragdrop.find_acceptable_target(drag_context.targets)
if target:
textview.drag_dest_set_target_list([(target, 0, 0)])
def on_drag_data_received(self, widget, drag_context, x, y,
selection_data, info, eventtime):
"""Callback for when drop event is received"""
if not self._textbuffer:
return
#TODO: make this pluggable.
target = self.dragdrop.find_acceptable_target(drag_context.targets)
if target in MIME_IMAGES:
# process image drop
pixbuf = selection_data.get_pixbuf()
if pixbuf != None:
image = RichTextImage()
image.set_from_pixbuf(pixbuf)
self.insert_image(image)
drag_context.finish(True, True, eventtime)
self.stop_emission("drag-data-received")
elif target == "text/uri-list":
# process URI drop
uris = parse_utf(selection_data.data)
# remove empty lines and comments
uris = [x for x in (uri.strip()
for uri in uris.split("\n"))
if len(x) > 0 and x[0] != "#"]
links = ['<a href="%s">%s</a> ' % (uri, uri) for uri in uris]
# insert links
self.insert_html("<br />".join(links))
elif target in MIME_HTML:
# process html drop
html = parse_utf(selection_data.data)
if target == "HTML Format":
# skip over headers
html = html[html.find("\r\n\r\n")+4:]
self.insert_html(html)
elif target in MIME_TEXT:
# process text drop
self._textbuffer.insert_at_cursor(selection_data.get_text())
def on_drag_data_get(self, widget, drag_context, selection_data,
info, timestamp):
"""
Callback for when data is requested by drag_get_data
"""
return
'''
# override gtk's data get code
self.stop_emission("drag-data-get")
sel = self._textbuffer.get_selection_bounds()
# do nothing if nothing is selected
if not sel:
text = ""
else:
start, end = sel
text = start.get_text(end)
print "get", repr(text)
selection_data.set_text(text.encode("utf8"), -1)
#self.emit("cut-clipboard")
'''
#==================================================================
# Copy and Paste
def _on_copy(self):
"""Callback for copy action"""
clipboard = self.get_clipboard(selection=CLIPBOARD_NAME)
self.stop_emission('copy-clipboard')
self.copy_clipboard(clipboard)
def _on_cut(self):
"""Callback for cut action"""
clipboard = self.get_clipboard(selection=CLIPBOARD_NAME)
self.stop_emission('cut-clipboard')
self.cut_clipboard(clipboard, self.get_editable())
def _on_paste(self):
"""Callback for paste action"""
clipboard = self.get_clipboard(selection=CLIPBOARD_NAME)
self.stop_emission('paste-clipboard')
self.paste_clipboard(clipboard, None, self.get_editable())
def copy_clipboard(self, clipboard):
"""Callback for copy event"""
#clipboard.set_can_store(None)
if not self._textbuffer:
return
sel = self._textbuffer.get_selection_bounds()
# do nothing if nothing is selected
if not sel:
return
start, end = sel
contents = list(self._textbuffer.copy_contents(start, end))
if len(contents) == 1 and \
contents[0][0] == "anchor" and \
isinstance(contents[0][2][0], RichTextImage):
# copy image
targets = [(MIME_RICHTEXT, gtk.TARGET_SAME_APP, RICHTEXT_ID)] + \
[(x, 0, RICHTEXT_ID) for x in MIME_HTML] + \
[(x, 0, RICHTEXT_ID) for x in MIME_IMAGES]
clipboard.set_with_data(targets, self._get_selection_data,
self._clear_selection_data,
(contents, ""))
else:
# copy text
targets = [(MIME_RICHTEXT, gtk.TARGET_SAME_APP, RICHTEXT_ID)] + \
[(x, 0, RICHTEXT_ID) for x in MIME_HTML] + \
[(x, 0, RICHTEXT_ID) for x in MIME_TEXT]
text = start.get_text(end)
clipboard.set_with_data(targets, self._get_selection_data,
self._clear_selection_data,
(contents, text))
def cut_clipboard(self, clipboard, default_editable):
"""Callback for cut event"""
if not self._textbuffer:
return
self.copy_clipboard(clipboard)
self._textbuffer.delete_selection(True, default_editable)
def paste_clipboard(self, clipboard, override_location, default_editable):
"""Callback for paste event"""
if not self._textbuffer:
return
targets = clipboard.wait_for_targets()
if targets is None:
# nothing on clipboard
return
targets = set(targets)
# check that insert is allowed
it = self._textbuffer.get_iter_at_mark(self._textbuffer.get_insert())
if not self._textbuffer.is_insert_allowed(it):
return
if MIME_RICHTEXT in targets:
# request RICHTEXT contents object
clipboard.request_contents(MIME_RICHTEXT, self._do_paste_object)
return
for mime_html in MIME_HTML:
if mime_html in targets:
# request HTML
if mime_html == "HTML Format":
clipboard.request_contents(mime_html,
self._do_paste_html_headers)
else:
clipboard.request_contents(mime_html, self._do_paste_html)
return
# test image formats
for mime_image in MIME_IMAGES:
if mime_image in targets:
clipboard.request_contents(mime_image, self._do_paste_image)
return
# request text
clipboard.request_text(self._do_paste_text)
def paste_clipboard_as_text(self):
"""Callback for paste action"""
clipboard = self.get_clipboard(selection=CLIPBOARD_NAME)
if not self._textbuffer:
return
targets = clipboard.wait_for_targets()
if targets is None:
# nothing on clipboard
return
# check that insert is allowed
it = self._textbuffer.get_iter_at_mark(self._textbuffer.get_insert())
if not self._textbuffer.is_insert_allowed(it):
return
# request text
clipboard.request_text(self._do_paste_text)
def _do_paste_text(self, clipboard, text, data):
"""Paste text into buffer"""
if text is None:
return
self._textbuffer.begin_user_action()
self._textbuffer.delete_selection(False, True)
self._textbuffer.insert_at_cursor(sanitize_text(text))
self._textbuffer.end_user_action()
self.scroll_mark_onscreen(self._textbuffer.get_insert())
def _do_paste_html(self, clipboard, selection_data, data):
"""Paste HTML into buffer"""
html = parse_utf(selection_data.data)
try:
self._textbuffer.begin_user_action()
self._textbuffer.delete_selection(False, True)
self.insert_html(html)
self._textbuffer.end_user_action()
self.scroll_mark_onscreen(self._textbuffer.get_insert())
except Exception, e:
pass
def _do_paste_html_headers(self, clipboard, selection_data, data):
"""Paste HTML into buffer"""
html = parse_utf(selection_data.data)
# skip over headers
index = html.find("<!--StartFragment")
if index == -1:
return
index = html.find(">", index)
html = html[index+1:]
try:
self._textbuffer.begin_user_action()
self._textbuffer.delete_selection(False, True)
self.insert_html(html)
self._textbuffer.end_user_action()
self.scroll_mark_onscreen(self._textbuffer.get_insert())
except Exception, e:
pass
def _do_paste_image(self, clipboard, selection_data, data):
"""Paste image into buffer"""
pixbuf = selection_data.get_pixbuf()
image = RichTextImage()
image.set_from_pixbuf(pixbuf)
self._textbuffer.begin_user_action()
self._textbuffer.delete_selection(False, True)
self._textbuffer.insert_image(image)
self._textbuffer.end_user_action()
self.scroll_mark_onscreen(self._textbuffer.get_insert())
def _do_paste_object(self, clipboard, selection_data, data):
"""Paste a program-specific object into buffer"""
if _g_clipboard_contents is None:
# do nothing
return
self._textbuffer.begin_user_action()
self._textbuffer.delete_selection(False, True)
self._textbuffer.insert_contents(_g_clipboard_contents)
self._textbuffer.end_user_action()
self.scroll_mark_onscreen(self._textbuffer.get_insert())
def _get_selection_data(self, clipboard, selection_data, info, data):
"""Callback for when Clipboard needs selection data"""
global _g_clipboard_contents
contents, text = data
_g_clipboard_contents = contents
if MIME_RICHTEXT in selection_data.target:
# set rich text
selection_data.set(MIME_RICHTEXT, 8, "<richtext>")
elif len([x for x in MIME_HTML
if x in selection_data.target]) > 0:
# set html
stream = StringIO.StringIO()
self._html_buffer.set_output(stream)
self._html_buffer.write(contents,
self._textbuffer.tag_table,
partial=True,
xhtml=False)
selection_data.set("text/html", 8, stream.getvalue())
elif len([x for x in MIME_IMAGES
if x in selection_data.target]) > 0:
# set image
image = contents[0][2][0]
selection_data.set_pixbuf(image.get_original_pixbuf())
else:
# set plain text
selection_data.set_text(text)
def _clear_selection_data(self, clipboard, data):
"""Callback for when Clipboard contents are reset"""
global _g_clipboard_contents
_g_clipboard_contents = None
#=============================================
# State
def is_modified(self):
"""Returns True if buffer is modified"""
if self._textbuffer:
return self._textbuffer.get_modified()
else:
return False
def _on_modified_changed(self, textbuffer):
"""Callback for when buffer is modified"""
# propagate modified signal to listeners of this textview
self.emit("modified", textbuffer.get_modified())
def enable(self):
self.set_sensitive(True)
def disable(self):
"""Disable TextView"""
if self._textbuffer:
self._textbuffer.handler_block(self._modified_id)
self._textbuffer.clear()
self._textbuffer.set_modified(False)
self._textbuffer.handler_unblock(self._modified_id)
self.set_sensitive(False)
"""
def serialize(self, register_buf, content_buf, start, end, data):
print "serialize", content_buf
self.a = u"SERIALIZED"
return self.a
def deserialize(self, register_buf, content_buf, it, data, create_tags, udata):
print "deserialize"
"""
#=====================================================
# Popup Menus
def on_popup(self, textview, menu):
"""Popup menu for RichTextView"""
self._popup_menu = menu
# insert "paste as plain text" after paste
item = gtk.ImageMenuItem(stock_id=gtk.STOCK_PASTE,
accel_group=None)
item.child.set_text(_("Paste As Plain Text"))
item.connect("activate", lambda item: self.paste_clipboard_as_text())
#item.add_accelerator("activate", self._accel_group, ord("V"),
# gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK,
# gtk.ACCEL_VISIBLE)
item.show()
menu.insert(item, 3)
menu.set_accel_path(self._accel_path)
if self._accel_group:
menu.set_accel_group(self._accel_group)
def _on_child_popup_menu(self, textbuffer, child, button, activate_time):
"""Callback for when child menu should appear"""
self._image_menu.set_child(child)
# popup menu based on child widget
if isinstance(child, RichTextImage):
# image menu
self._image_menu.popup(None, None, None, button, activate_time)
self._image_menu.show()
def get_image_menu(self):
"""Returns the image popup menu"""
return self._image_menu
def get_popup_menu(self):
"""Returns the popup menu"""
return self._popup_menu
#==========================================
# child events
def _on_child_added(self, textbuffer, child):
"""Callback when child added to buffer"""
self._add_children()
def _on_child_activated(self, textbuffer, child):
"""Callback for when child has been activated"""
self.emit("child-activated", child)
#===========================================================
# Actions
def _add_children(self):
"""Add all deferred children in textbuffer"""
self._textbuffer.add_deferred_anchors(self)
def indent(self):
"""Indents selection one more level"""
if self._textbuffer:
self._textbuffer.indent()
def unindent(self):
"""Unindents selection one more level"""
if self._textbuffer:
self._textbuffer.unindent()
def insert_image(self, image, filename="image.png"):
"""Inserts an image into the textbuffer"""
if self._textbuffer:
self._textbuffer.insert_image(image, filename)
def insert_image_from_file(self, imgfile, filename="image.png"):
"""Inserts an image from a file"""
pixbuf = gdk.pixbuf_new_from_file(imgfile)
img = RichTextImage()
img.set_from_pixbuf(pixbuf)
self.insert_image(img, filename)
def insert_hr(self):
"""Inserts a horizontal rule"""
if self._textbuffer:
self._textbuffer.insert_hr()
def insert_html(self, html):
"""Insert HTML content into Buffer"""
if not self._textbuffer:
return
contents = list(self._html_buffer.read(StringIO.StringIO(html),
partial=True,
ignore_errors=True))
# scan contents
for kind, pos, param in contents:
# download images included in html
if kind == "anchor" and isinstance(param[0], RichTextImage):
img = param[0]
filename = img.get_filename()
if filename and (filename.startswith("http:") or
filename.startswith("file:")):
try:
img.set_from_url(filename, "image.png")
except:
# Be robust to errors from loading from the web.
pass
# add to buffer
self._textbuffer.insert_contents(contents)
def get_link(self, it=None):
if self._textbuffer is None:
return None, None, None
return self._textbuffer.get_link(it)
def set_link(self, url="", start=None, end=None):
if self._textbuffer is None:
return
if start is None or end is None:
tagname = RichTextLinkTag.tag_name(url)
self._apply_tag(tagname)
return self._textbuffer.tag_table.lookup(tagname)
else:
return self._textbuffer.set_link(url, start, end)
#==========================================================
# Find/Replace
def forward_search(self, it, text, case_sensitive, wrap=True):
"""Finds next occurrence of 'text' searching forwards"""
it = it.copy()
if not case_sensitive:
text = text.lower()
textlen = len(text)
while True:
end = it.copy()
end.forward_chars(textlen)
text2 = it.get_slice(end)
if not case_sensitive:
text2 = text2.lower()
if text2 == text:
return it, end
if not it.forward_char():
if wrap:
return self.forward_search(self._textbuffer.get_start_iter(),
text, case_sensitive, False)
else:
return None
def backward_search(self, it, text, case_sensitive, wrap=True):
"""Finds next occurrence of 'text' searching backwards"""
it = it.copy()
it.backward_char()
if not case_sensitive:
text = text.lower()
textlen = len(text)
while True:
end = it.copy()
end.forward_chars(textlen)
text2 = it.get_slice(end)
if not case_sensitive:
text2 = text2.lower()
if text2 == text:
return it, end
if not it.backward_char():
if wrap:
return self.backward_search(self._textbuffer.get_end_iter(),
text, case_sensitive, False)
else:
return None
def find(self, text, case_sensitive=False, forward=True, next=True):
"""Finds next occurrence of 'text'"""
if not self._textbuffer:
return
it = self._textbuffer.get_iter_at_mark(self._textbuffer.get_insert())
if forward:
if next:
it.forward_char()
result = self.forward_search(it, text, case_sensitive)
else:
result = self.backward_search(it, text, case_sensitive)
if result:
self._textbuffer.select_range(result[0], result[1])
self.scroll_mark_onscreen(self._textbuffer.get_insert())
return result[0].get_offset()
else:
return -1
def replace(self, text, replace_text,
case_sensitive=False, forward=True, next=False):
"""Replaces next occurrence of 'text' with 'replace_text'"""
if not self._textbuffer:
return
pos = self.find(text, case_sensitive, forward, next)
if pos != -1:
self._textbuffer.begin_user_action()
self._textbuffer.delete_selection(True, self.get_editable())
self._textbuffer.insert_at_cursor(replace_text)
self._textbuffer.end_user_action()
return pos
def replace_all(self, text, replace_text,
case_sensitive=False, forward=True):
"""Replaces all occurrences of 'text' with 'replace_text'"""
if not self._textbuffer:
return
found = False
self._textbuffer.begin_user_action()
while self.replace(text, replace_text, case_sensitive, forward, False) != -1:
found = True
self._textbuffer.end_user_action()
return found
#===========================================================
# Spell check
def can_spell_check(self):
"""Returns True if spelling is available"""
return gtkspell is not None
def enable_spell_check(self, enabled=True):
"""Enables/disables spell check"""
if not self.can_spell_check():
return
if enabled:
if self._spell_checker is None:
try:
self._spell_checker = gtkspell.Spell(self)
except Exception:
# unable to initialize spellcheck, abort
self._spell_checker = None
else:
if self._spell_checker is not None:
self._spell_checker.detach()
self._spell_checker = None
def is_spell_check_enabled(self):
"""Returns True if spell check is enabled"""
return self._spell_checker is not None
#===========================================================
# font manipulation
def _apply_tag(self, tag_name):
if self._textbuffer:
self._textbuffer.apply_tag_selected(
self._textbuffer.tag_table.lookup(tag_name))
def toggle_font_mod(self, mod):
"""Toggle a font modification"""
if self._textbuffer:
self._textbuffer.toggle_tag_selected(
self._textbuffer.tag_table.lookup(RichTextModTag.tag_name(mod)))
def set_font_mod(self, mod):
"""Sets a font modification"""
self._apply_tag(RichTextModTag.tag_name(mod))
def toggle_link(self):
"""Toggles a link tag"""
tag, start, end = self.get_link()
if not tag:
tag = self._textbuffer.tag_table.lookup(
RichTextLinkTag.tag_name(""))
self._textbuffer.toggle_tag_selected(tag)
def set_font_family(self, family):
"""Sets the family font of the selection"""
self._apply_tag(RichTextFamilyTag.tag_name(family))
def set_font_size(self, size):
"""Sets the font size of the selection"""
self._apply_tag(RichTextSizeTag.tag_name(size))
def set_justify(self, justify):
"""Sets the text justification"""
self._apply_tag(RichTextJustifyTag.tag_name(justify))
def set_font_fg_color(self, color):
"""Sets the text foreground color"""
if self._textbuffer:
if color:
self._textbuffer.toggle_tag_selected(
self._textbuffer.tag_table.lookup(
RichTextFGColorTag.tag_name(color)))
else:
self._textbuffer.remove_tag_class_selected(
self._textbuffer.tag_table.lookup(
RichTextFGColorTag.tag_name("#000000")))
def set_font_bg_color(self, color):
"""Sets the text background color"""
if self._textbuffer:
if color:
self._textbuffer.toggle_tag_selected(
self._textbuffer.tag_table.lookup(
RichTextBGColorTag.tag_name(color)))
else:
self._textbuffer.remove_tag_class_selected(
self._textbuffer.tag_table.lookup(
RichTextBGColorTag.tag_name("#000000")))
def toggle_bullet(self, par_type=None):
"""Toggle state of a bullet list"""
if self._textbuffer:
self._textbuffer.toggle_bullet_list(par_type)
def set_font(self, font_name):
"""Font change from choose font widget"""
if not self._textbuffer:
return
family, mods, size = parse_font(font_name)
self._textbuffer.begin_user_action()
# apply family and size tags
self.set_font_family(family)
self.set_font_size(size)
# apply modifications
for mod in mods:
self.set_font_mod(mod)
# disable modifications not given
mod_class = self._textbuffer.tag_table.get_tag_class("mod")
for tag in mod_class.tags:
if tag.get_property("name") not in mods:
self._textbuffer.remove_tag_selected(tag)
self._textbuffer.end_user_action()
#==================================================================
# UI Updating from changing font under cursor
def _on_font_change(self, textbuffer, font):
"""Callback for when font under cursor changes"""
# forward signal along to listeners
self.emit("font-change", font)
def get_font(self):
"""Get the font under the cursor"""
if self._textbuffer:
return self._textbuffer.get_font()
else:
return self._blank_buffer.get_font()
def set_default_font(self, font):
"""Sets the default font of the textview"""
try:
# HACK: fix small font sizes on Mac
#PIXELS_PER_PANGO_UNIT = 1024
#native_size = self.get_default_attributes().font.get_size() // PIXELS_PER_PANGO_UNIT
#set_text_scale(native_size / 10.0)
f = pango.FontDescription(font)
f.set_size(int(f.get_size() * get_text_scale()))
self.modify_font(f)
except:
# TODO: think about how to handle this error
pass
#=========================================
# undo/redo methods
def undo(self):
"""Undo the last action in the RichTextView"""
if self._textbuffer:
self._textbuffer.undo()
self.scroll_mark_onscreen(self._textbuffer.get_insert())
def redo(self):
"""Redo the last action in the RichTextView"""
if self._textbuffer:
self._textbuffer.redo()
self.scroll_mark_onscreen(self._textbuffer.get_insert())
# register new signals
gobject.type_register(RichTextView)
gobject.signal_new("modified", RichTextView, gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE, (bool,))
gobject.signal_new("font-change", RichTextView, gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE, (object,))
gobject.signal_new("child-activated", RichTextView, gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE, (object,))
gobject.signal_new("visit-url", RichTextView, gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE, (str,))
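# Illustrative sketch (not part of the original module): a hypothetical caller
# could hook into the signals registered above roughly like this ("on_modified",
# "on_font_change" and "open_url" are assumed callbacks, not names defined here):
#
# view = RichTextView()
# view.connect("modified", lambda w, modified: on_modified(modified))
# view.connect("font-change", lambda w, font: on_font_change(font))
# view.connect("visit-url", lambda w, url: open_url(url))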
'''
def drop_pdf(self, data):
"""Drop a PDF into the TextView"""
if not self._textbuffer:
return
# NOTE: requires hardcoded convert
# TODO: generalize
self._textbuffer.begin_user_action()
try:
f, imgfile = tempfile.mkstemp(".png", "pdf")
os.close(f)
out = os.popen("convert - %s" % imgfile, "wb")
out.write(data)
out.close()
name, ext = os.path.splitext(imgfile)
imgfile2 = name + "-0" + ext
if os.path.exists(imgfile2):
i = 0
while True:
imgfile = name + "-" + str(i) + ext
if not os.path.exists(imgfile):
break
self.insert_image_from_file(imgfile)
os.remove(imgfile)
i += 1
elif os.path.exists(imgfile):
self.insert_image_from_file(imgfile)
os.remove(imgfile)
except:
if os.path.exists(imgfile):
os.remove(imgfile)
self._textbuffer.end_user_action()
'''
|
reshadh/Keepnote-LaTeX
|
keepnote/gui/richtext/__init__.py
|
Python
|
gpl-2.0
| 46,902
|
[
"VisIt"
] |
46055e730cdb2dfa406687b90cf63d721b7f8828a638aff9152b5a54fbe934ac
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class TestAllMaskBits(vtk.test.Testing.vtkTest):
def testAllMaskBits(self):
# This script tests the bitwise mask operations of vtkImageMaskBits on an image
renWin = vtk.vtkRenderWindow()
# Image pipeline
image1 = vtk.vtkTIFFReader()
image1.SetFileName(VTK_DATA_ROOT + "/Data/beach.tif")
# "beach.tif" image contains ORIENTATION tag which is
# ORIENTATION_TOPLEFT (row 0 top, col 0 lhs) type. The TIFF
# reader parses this tag and sets the internal TIFF image
# orientation accordingly. To overwrite this orientation with a vtk
# convention of ORIENTATION_BOTLEFT (row 0 bottom, col 0 lhs ), invoke
# SetOrientationType method with parameter value of 4.
image1.SetOrientationType(4)
shrink = vtk.vtkImageShrink3D()
shrink.SetInputConnection(image1.GetOutputPort())
shrink.SetShrinkFactors(2, 2, 1)
operators = ["ByPass", "And", "Nand", "Xor", "Or", "Nor"]
operator = dict()
mapper = dict()
actor = dict()
imager = dict()
for idx, op in enumerate(operators):
if op != "ByPass":
operator.update({idx: vtk.vtkImageMaskBits()})
operator[idx].SetInputConnection(shrink.GetOutputPort())
eval('operator[' + str(idx) + '].SetOperationTo' + op + '()')
operator[idx].SetMasks(255, 255, 0)
mapper.update({idx: vtk.vtkImageMapper()})
if op != "ByPass":
mapper[idx].SetInputConnection(operator[idx].GetOutputPort())
else:
mapper[idx].SetInputConnection(shrink.GetOutputPort())
mapper[idx].SetColorWindow(255)
mapper[idx].SetColorLevel(127.5)
actor.update({idx: vtk.vtkActor2D()})
actor[idx].SetMapper(mapper[idx])
imager.update({idx: vtk.vtkRenderer()})
imager[idx].AddActor2D(actor[idx])
renWin.AddRenderer(imager[idx])
column = 0
row = 0
deltaX = 1.0 / 3.0
deltaY = 1.0 / 2.0
for idx in range(len(operators)):
imager[idx].SetViewport(column * deltaX, row * deltaY, (column + 1) * deltaX, (row + 1) * deltaY)
column += 1
if column > 2:
column = 0
row += 1
renWin.SetSize(384, 256)
# render and interact with data
iRen = vtk.vtkRenderWindowInteractor()
iRen.SetRenderWindow(renWin)
renWin.Render()
img_file = "TestAllMaskBits.png"
vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(TestAllMaskBits, 'test')])
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Imaging/Core/Testing/Python/TestAllMaskBits.py
|
Python
|
gpl-3.0
| 3,568
|
[
"VTK"
] |
a6c0e881c9c7c73e76490ae6b697c9cbfcc375d03285b05797571998f4692ac4
|
import ssl
from sleekxmpp import ClientXMPP
from ShadowLogger import ShadowLogger
from Constants import Constants
class LoLChat(ClientXMPP):
users = []
def __init__(self, shadow, loldb, jid, password):
self.shadow = shadow
self.loldb = loldb
self.INTERCONTINENT = self.loldb.GetSetting("LoLRedbackID")
#Create the XMPP connection
ClientXMPP.__init__(self, jid + '@' + Constants.CHAT_SERVER + '/lolshadow', 'AIR_' + password)
#Automatically add new friends?
self.auto_authorize = None
self.auto_subscribe = True
self.ssl_version = ssl.PROTOCOL_SSLv3
# Add the event handlers
self.add_event_handler("session_start", self._session_start)
self.add_event_handler("message", self._message)
self.add_event_handler("got_online", self._got_online)
self.add_event_handler("got_offline", self._got_offline)
self.add_event_handler("presence_subscribe", self._presence_subscribe)
self.add_event_handler("presence_unsubscribe", self._presence_unsubscribe)
self.add_event_handler("disconnected", self._disconnected)
self.add_event_handler("failed_auth", self._failed_auth)
def Start(self):
address = (self._getChatAddress(), Constants.CHAT_ADDRESS_PORT)
ShadowLogger.ShadowInfo('Connecting to server...', self.shadow.model.SummonerName)
self.alive = True
self.connect(address, True, False, True)
self.process(block=False)
def Stop(self):
self.alive = False
self.disconnect(wait=False)
def SendMessage(self, target, message):
self.send_message(mto='sum'+target+'@'+Constants.CHAT_SERVER+'/xiff', mbody=message, mtype='chat')
def UnFriend(self, summoner_id):
self.sendPresence(pto=self._getToId(summoner_id), ptype='unsubscribed')
self.sendPresence(pto=self._getToId(summoner_id), ptype='unsubscribe')
def _session_start(self, event):
self.send_presence(-1, self._getPresenceString(self.shadow.model.Message))
self.get_roster()
def _message(self, msg):
if msg['type'] in ('chat', 'normal'):
ShadowLogger.ShadowInfo('Received message (%s): %s' % (str(msg['from']), str(msg['body'])), self.shadow.model.SummonerName)
msgResponse = self._processMessage(str(msg['body']), self._getSummonerId(str(msg['from'])))
if(msgResponse is not None):
msg.reply(msgResponse).send()
def _got_online(self, presence):
ShadowLogger.ShadowInfo('Friend Online: ' + str(presence['from']), self.shadow.model.SummonerName)
newUser = self._getSummonerId(str(presence['from']))
self.shadow.UserOn(newUser)
def _got_offline(self, presence):
ShadowLogger.ShadowInfo('Friend Offline: ' + str(presence['from']), self.shadow.model.SummonerName)
newUser = self._getSummonerId(str(presence['from']))
self.shadow.UserOff(newUser)
def _presence_subscribe(self, presence):
requestor = self._getSummonerId(str(presence['from']))
toAccept = self.loldb.CheckUserReserved(requestor, self.shadow.model.ID)
ShadowLogger.ShadowInfo('Friendship Requested: %s : %s' % (str(requestor), str(toAccept)), self.shadow.model.SummonerName)
if(toAccept):
self.sendPresence(pto=presence['from'], ptype='subscribed')
self.sendPresence(pto=presence['from'], ptype='subscribe')
self.loldb.AddShadowFriend(requestor, self.shadow.model.ID)
self.SendMessage(requestor, "Summoner successfully added. Welcome to LoLShadow!")
else:
self.sendPresence(pto=presence['from'], ptype='unsubscribed')
self.sendPresence(pto=presence['from'], ptype='unsubscribe')
def _presence_unsubscribe(self, presence):
requestor = self._getSummonerId(str(presence['from']))
self.loldb.RemoveShadowFriend(requestor, self.shadow.model.ID)
self.shadow.StopUser(requestor)
ShadowLogger.ShadowInfo('Deleted Friend: %s' % (str(presence['from'])), self.shadow.model.SummonerName)
def _disconnected(self):
if self.alive:
self.shadow.Restart()
def _failed_auth(self, error):
ShadowLogger.ShadowInfo('Failed to authenticate', self.shadow.model.SummonerName)
ShadowLogger.ShadowDebug(error, self.shadow.model.SummonerName)
def _getChatAddress(self):
chatCode = (self.loldb.GetShadowRegion(self.shadow.model.ID).RegionChat)
return (Constants.CHAT_ADDRESS % chatCode)
def _getSummonerId(self, fromID):
return fromID.split('@',1)[0].replace('sum','')
def _getToId(self, summoner_id):
return "sum"+summoner_id+"@pvp.net/xiff"
def _getPresenceString(self, message):
return '<body><profileIcon>668</profileIcon><level>30</level><wins>0</wins><leaves>0</leaves>'+\
'<queueType>RANKED_SOLO_5x5</queueType><rankedWins>5</rankedWins><rankedLosses>0</rankedLosses>'+\
'<rankedRating>0</rankedRating><statusMsg>'+message+\
'</statusMsg><gameStatus>outOfGame</gameStatus><tier>CHALLENGER</tier></body>'
def _processMessage(self, message_body, sender):
if 'help' in message_body:
return 'Need help? This is the LoLShadow bot run by Redback (or Intercontinent).'+\
' For more information, visit http://lolshadow.com/'
firstChar = message_body[0]
if(firstChar == '!'):
#Commands go here
split = message_body[1:].split(' ')
if(split[0].lower() == 'hello'):
return 'Hi!'
elif (split[0].lower() == 'info'):
return 'This is the LoLShadow bot run by Redback (or Intercontinent).'+\
' For more information, visit http://lolshadow.com/'
elif sender == self.INTERCONTINENT:
if split[0].lower() == 'message' and len(split) >= 3:
newSplit = message_body[1:].split(' ', 2)
self.SendMessage(newSplit[1], newSplit[2])
elif split[0].lower() == 'broadcast' and len(split) >= 2:
newSplit = message_body[1:].split(' ', 1)
self.shadow.Broadcast(newSplit[1])
elif split[0].lower() == 'online':
self.SendMessage(self.INTERCONTINENT, 'Users Online: ' + ', '.join(str(x) for x in self.shadow.GetOnline()))
else:
self.SendMessage(self.INTERCONTINENT, sender + ': '+message_body)
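# Illustrative summary of the chat commands handled above (not part of the
# original module; the summoner ID and message texts are made up):
# "!hello" -> bot replies "Hi!"
# "!info" -> bot describes LoLShadow
# "!message 12345 Hello there" -> (owner only) relays "Hello there" to sum12345
# "!broadcast Maintenance soon" -> (owner only) messages every online user
# "!online" -> (owner only) lists the online summoner IDs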
|
RedbackThomson/LoLShadow
|
LoLChat.py
|
Python
|
mit
| 5,800
|
[
"VisIt"
] |
fb05c4d267c4c212478e6a95e0e46ada065e3a1af2ba8b83f125abc7df2e3059
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Matcher classes to be used inside of the testtools assertThat framework."""
import pprint
from lxml import etree
import six
from testtools import content
class DictKeysMismatch(object):
def __init__(self, d1only, d2only):
self.d1only = d1only
self.d2only = d2only
def describe(self):
return ('Keys in d1 and not d2: %(d1only)s.'
' Keys in d2 and not d1: %(d2only)s' %
{'d1only': self.d1only, 'd2only': self.d2only})
def get_details(self):
return {}
class DictMismatch(object):
def __init__(self, key, d1_value, d2_value):
self.key = key
self.d1_value = d1_value
self.d2_value = d2_value
def describe(self):
return ("Dictionaries do not match at %(key)s."
" d1: %(d1_value)s d2: %(d2_value)s" %
{'key': self.key, 'd1_value': self.d1_value,
'd2_value': self.d2_value})
def get_details(self):
return {}
class DictMatches(object):
def __init__(self, d1, approx_equal=False, tolerance=0.001):
self.d1 = d1
self.approx_equal = approx_equal
self.tolerance = tolerance
def __str__(self):
return 'DictMatches(%s)' % (pprint.pformat(self.d1))
# Useful assertions
def match(self, d2):
"""Assert two dicts are equivalent.
This is a 'deep' match in the sense that it handles nested
dictionaries appropriately.
NOTE:
If you don't care (or don't know) a given value, you can specify
the string DONTCARE as the value. This will cause that dict-item
to be skipped.
"""
d1keys = set(self.d1.keys())
d2keys = set(d2.keys())
if d1keys != d2keys:
d1only = sorted(d1keys - d2keys)
d2only = sorted(d2keys - d1keys)
return DictKeysMismatch(d1only, d2only)
for key in d1keys:
d1value = self.d1[key]
d2value = d2[key]
try:
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= self.tolerance
except (ValueError, TypeError):
# If both values aren't convertible to float, just ignore
# ValueError if arg is a str, TypeError if it's something else
# (like None)
within_tolerance = False
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
matcher = DictMatches(d1value)
did_match = matcher.match(d2value)
if did_match is not None:
return did_match
elif 'DONTCARE' in (d1value, d2value):
continue
elif self.approx_equal and within_tolerance:
continue
elif d1value != d2value:
return DictMismatch(key, d1value, d2value)
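# Illustrative usage sketch (not part of the original module): inside a
# testtools-based test case the matcher above is typically handed to
# assertThat; the dict literals here are made-up example data.
#
# expected = {'id': 1, 'name': 'vm-1', 'updated_at': 'DONTCARE'}
# observed = {'id': 1, 'name': 'vm-1', 'updated_at': '2015-01-01T00:00:00Z'}
# self.assertThat(observed, DictMatches(expected))  # passes; DONTCARE is skipped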
class ListLengthMismatch(object):
def __init__(self, len1, len2):
self.len1 = len1
self.len2 = len2
def describe(self):
return ('Length mismatch: len(L1)=%(len1)d != '
'len(L2)=%(len2)d' % {'len1': self.len1, 'len2': self.len2})
def get_details(self):
return {}
class DictListMatches(object):
def __init__(self, l1, approx_equal=False, tolerance=0.001):
self.l1 = l1
self.approx_equal = approx_equal
self.tolerance = tolerance
def __str__(self):
return 'DictListMatches(%s)' % (pprint.pformat(self.l1))
# Useful assertions
def match(self, l2):
"""Assert a list of dicts are equivalent."""
l1count = len(self.l1)
l2count = len(l2)
if l1count != l2count:
return ListLengthMismatch(l1count, l2count)
for d1, d2 in zip(self.l1, l2):
matcher = DictMatches(d2,
approx_equal=self.approx_equal,
tolerance=self.tolerance)
did_match = matcher.match(d1)
if did_match:
return did_match
class SubDictMismatch(object):
def __init__(self,
key=None,
sub_value=None,
super_value=None,
keys=False):
self.key = key
self.sub_value = sub_value
self.super_value = super_value
self.keys = keys
def describe(self):
if self.keys:
return "Keys between dictionaries did not match"
else:
return("Dictionaries do not match at %s. d1: %s d2: %s"
% (self.key,
self.super_value,
self.sub_value))
def get_details(self):
return {}
class IsSubDictOf(object):
def __init__(self, super_dict):
self.super_dict = super_dict
def __str__(self):
return 'IsSubDictOf(%s)' % (self.super_dict)
def match(self, sub_dict):
"""Assert a sub_dict is subset of super_dict."""
if not set(sub_dict.keys()).issubset(set(self.super_dict.keys())):
return SubDictMismatch(keys=True)
for k, sub_value in sub_dict.items():
super_value = self.super_dict[k]
if isinstance(sub_value, dict):
matcher = IsSubDictOf(super_value)
did_match = matcher.match(sub_value)
if did_match is not None:
return did_match
elif 'DONTCARE' in (sub_value, super_value):
continue
else:
if sub_value != super_value:
return SubDictMismatch(k, sub_value, super_value)
class FunctionCallMatcher(object):
def __init__(self, expected_func_calls):
self.expected_func_calls = expected_func_calls
self.actual_func_calls = []
def call(self, *args, **kwargs):
func_call = {'args': args, 'kwargs': kwargs}
self.actual_func_calls.append(func_call)
def match(self):
dict_list_matcher = DictListMatches(self.expected_func_calls)
return dict_list_matcher.match(self.actual_func_calls)
class XMLMismatch(object):
"""Superclass for XML mismatch."""
def __init__(self, state):
self.path = str(state)
self.expected = state.expected
self.actual = state.actual
def describe(self):
return "%(path)s: XML does not match" % {'path': self.path}
def get_details(self):
return {
'expected': content.text_content(self.expected),
'actual': content.text_content(self.actual),
}
class XMLDocInfoMismatch(XMLMismatch):
"""XML version or encoding doesn't match."""
def __init__(self, state, expected_doc_info, actual_doc_info):
super(XMLDocInfoMismatch, self).__init__(state)
self.expected_doc_info = expected_doc_info
self.actual_doc_info = actual_doc_info
def describe(self):
return ("%(path)s: XML information mismatch(version, encoding) "
"expected version %(expected_version)s, "
"expected encoding %(expected_encoding)s; "
"actual version %(actual_version)s, "
"actual encoding %(actual_encoding)s" %
{'path': self.path,
'expected_version': self.expected_doc_info['version'],
'expected_encoding': self.expected_doc_info['encoding'],
'actual_version': self.actual_doc_info['version'],
'actual_encoding': self.actual_doc_info['encoding']})
class XMLTagMismatch(XMLMismatch):
"""XML tags don't match."""
def __init__(self, state, idx, expected_tag, actual_tag):
super(XMLTagMismatch, self).__init__(state)
self.idx = idx
self.expected_tag = expected_tag
self.actual_tag = actual_tag
def describe(self):
return ("%(path)s: XML tag mismatch at index %(idx)d: "
"expected tag <%(expected_tag)s>; "
"actual tag <%(actual_tag)s>" %
{'path': self.path, 'idx': self.idx,
'expected_tag': self.expected_tag,
'actual_tag': self.actual_tag})
class XMLAttrKeysMismatch(XMLMismatch):
"""XML attribute keys don't match."""
def __init__(self, state, expected_only, actual_only):
super(XMLAttrKeysMismatch, self).__init__(state)
self.expected_only = ', '.join(sorted(expected_only))
self.actual_only = ', '.join(sorted(actual_only))
def describe(self):
return ("%(path)s: XML attributes mismatch: "
"keys only in expected: %(expected_only)s; "
"keys only in actual: %(actual_only)s" %
{'path': self.path, 'expected_only': self.expected_only,
'actual_only': self.actual_only})
class XMLAttrValueMismatch(XMLMismatch):
"""XML attribute values don't match."""
def __init__(self, state, key, expected_value, actual_value):
super(XMLAttrValueMismatch, self).__init__(state)
self.key = key
self.expected_value = expected_value
self.actual_value = actual_value
def describe(self):
return ("%(path)s: XML attribute value mismatch: "
"expected value of attribute %(key)s: %(expected_value)r; "
"actual value: %(actual_value)r" %
{'path': self.path, 'key': self.key,
'expected_value': self.expected_value,
'actual_value': self.actual_value})
class XMLTextValueMismatch(XMLMismatch):
"""XML text values don't match."""
def __init__(self, state, expected_text, actual_text):
super(XMLTextValueMismatch, self).__init__(state)
self.expected_text = expected_text
self.actual_text = actual_text
def describe(self):
return ("%(path)s: XML text value mismatch: "
"expected text value: %(expected_text)r; "
"actual value: %(actual_text)r" %
{'path': self.path, 'expected_text': self.expected_text,
'actual_text': self.actual_text})
class XMLUnexpectedChild(XMLMismatch):
"""Unexpected child present in XML."""
def __init__(self, state, tag, idx):
super(XMLUnexpectedChild, self).__init__(state)
self.tag = tag
self.idx = idx
def describe(self):
return ("%(path)s: XML unexpected child element <%(tag)s> "
"present at index %(idx)d" %
{'path': self.path, 'tag': self.tag, 'idx': self.idx})
class XMLExpectedChild(XMLMismatch):
"""Expected child not present in XML."""
def __init__(self, state, tag, idx):
super(XMLExpectedChild, self).__init__(state)
self.tag = tag
self.idx = idx
def describe(self):
return ("%(path)s: XML expected child element <%(tag)s> "
"not present at index %(idx)d" %
{'path': self.path, 'tag': self.tag, 'idx': self.idx})
class XMLMatchState(object):
"""Maintain some state for matching.
Tracks the XML node path and saves the expected and actual full
XML text, for use by the XMLMismatch subclasses.
"""
def __init__(self, expected, actual):
self.path = []
self.expected = expected
self.actual = actual
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, exc_tb):
self.path.pop()
return False
def __str__(self):
return '/' + '/'.join(self.path)
def node(self, tag, idx):
"""Adds tag and index to the path; they will be popped off when
the corresponding 'with' statement exits.
:param tag: The element tag
:param idx: If not None, the integer index of the element
within its parent. Not included in the path
element if None.
"""
if idx is not None:
self.path.append("%s[%d]" % (tag, idx))
else:
self.path.append(tag)
return self
class XMLMatches(object):
"""Compare XML strings. More complete than string comparison."""
SKIP_TAGS = (etree.Comment, etree.ProcessingInstruction)
def __init__(self, expected, allow_mixed_nodes=False,
skip_empty_text_nodes=True, skip_values=('DONTCARE',)):
self.expected_xml = expected
self.expected = etree.parse(six.StringIO(expected))
self.allow_mixed_nodes = allow_mixed_nodes
self.skip_empty_text_nodes = skip_empty_text_nodes
self.skip_values = set(skip_values)
def __str__(self):
return 'XMLMatches(%r)' % self.expected_xml
def match(self, actual_xml):
actual = etree.parse(six.StringIO(actual_xml))
state = XMLMatchState(self.expected_xml, actual_xml)
expected_doc_info = self._get_xml_docinfo(self.expected)
actual_doc_info = self._get_xml_docinfo(actual)
if expected_doc_info != actual_doc_info:
return XMLDocInfoMismatch(state, expected_doc_info,
actual_doc_info)
result = self._compare_node(self.expected.getroot(),
actual.getroot(), state, None)
if result is False:
return XMLMismatch(state)
elif result is not True:
return result
@staticmethod
def _get_xml_docinfo(xml_document):
return {'version': xml_document.docinfo.xml_version,
'encoding': xml_document.docinfo.encoding}
def _compare_text_nodes(self, expected, actual, state):
expected_text = [expected.text]
expected_text.extend(child.tail for child in expected)
actual_text = [actual.text]
actual_text.extend(child.tail for child in actual)
if self.skip_empty_text_nodes:
expected_text = [text for text in expected_text
if text and not text.isspace()]
actual_text = [text for text in actual_text
if text and not text.isspace()]
if self.skip_values.intersection(
expected_text + actual_text):
return
if self.allow_mixed_nodes:
# lets sort text nodes because they can be mixed
expected_text = sorted(expected_text)
actual_text = sorted(actual_text)
if expected_text != actual_text:
return XMLTextValueMismatch(state, expected_text, actual_text)
def _compare_node(self, expected, actual, state, idx):
"""Recursively compares nodes within the XML tree."""
# Start by comparing the tags
if expected.tag != actual.tag:
return XMLTagMismatch(state, idx, expected.tag, actual.tag)
with state.node(expected.tag, idx):
# Compare the attribute keys
expected_attrs = set(expected.attrib.keys())
actual_attrs = set(actual.attrib.keys())
if expected_attrs != actual_attrs:
expected_only = expected_attrs - actual_attrs
actual_only = actual_attrs - expected_attrs
return XMLAttrKeysMismatch(state, expected_only, actual_only)
# Compare the attribute values
for key in expected_attrs:
expected_value = expected.attrib[key]
actual_value = actual.attrib[key]
if self.skip_values.intersection(
[expected_value, actual_value]):
continue
elif expected_value != actual_value:
return XMLAttrValueMismatch(state, key, expected_value,
actual_value)
# Compare text nodes
text_nodes_mismatch = self._compare_text_nodes(
expected, actual, state)
if text_nodes_mismatch:
return text_nodes_mismatch
# Compare the contents of the node
matched_actual_child_idxs = set()
# first_actual_child_idx - pointer to next actual child
# used with allow_mixed_nodes=False ONLY
# prevent to visit actual child nodes twice
first_actual_child_idx = 0
for expected_child in expected:
if expected_child.tag in self.SKIP_TAGS:
continue
related_actual_child_idx = None
if self.allow_mixed_nodes:
first_actual_child_idx = 0
for actual_child_idx in range(
first_actual_child_idx, len(actual)):
if actual[actual_child_idx].tag in self.SKIP_TAGS:
first_actual_child_idx += 1
continue
if actual_child_idx in matched_actual_child_idxs:
continue
# Compare the nodes
result = self._compare_node(expected_child,
actual[actual_child_idx],
state, actual_child_idx)
first_actual_child_idx += 1
if result is not True:
if self.allow_mixed_nodes:
continue
else:
return result
else: # nodes match
related_actual_child_idx = actual_child_idx
break
if related_actual_child_idx is not None:
matched_actual_child_idxs.add(actual_child_idx)
else:
return XMLExpectedChild(state, expected_child.tag,
actual_child_idx + 1)
# Make sure we consumed all nodes in actual
for actual_child_idx, actual_child in enumerate(actual):
if (actual_child.tag not in self.SKIP_TAGS and
actual_child_idx not in matched_actual_child_idxs):
return XMLUnexpectedChild(state, actual_child.tag,
actual_child_idx)
# The nodes match
return True
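# Illustrative usage sketch (not part of the original module): the XML strings
# below are made up; the DONTCARE attribute value is skipped during comparison.
#
# expected = '<server name="vm-1" id="DONTCARE"/>'
# actual = '<server name="vm-1" id="3f6c-42"/>'
# self.assertThat(actual, XMLMatches(expected))  # matches despite differing ids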
|
scripnichenko/nova
|
nova/tests/unit/matchers.py
|
Python
|
apache-2.0
| 18,988
|
[
"VisIt"
] |
0e2c6f32d21553062b257d9dc966a323d511cbf17380661f9e69384cccc69417
|
import os
import cPickle
from sys import argv
ATOMS = ["H", "C", "N", "O", "S", "P"]
def read_charges(filename):
""" Reads NPA charges from an outputfile containing NPA/NBO output
"""
f = open(filename, "r")
lines = f.readlines()
f.close()
start_line = 0
end_line = 0
for i, line in enumerate(lines):
if "Atom No Charge Core Valence Rydberg Total" in line:
start_line = i+2
if "* Total *" in line:
end_line = i - 1
data = lines[start_line:end_line]
print data
charges = []
types = []
for atom in ATOMS:
for entry in data:
if atom in entry:
tokens = entry.split()
charge = float(tokens[2])
charges.append(charge)
types.append(tokens[0])
return charges, types
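# Illustrative sketch of the NPA summary block this parser expects (not part of
# the original script; the numbers are made up). Parsing starts two lines below
# the header and stops just before the "* Total *" line; for each row, token 0
# is the element symbol and token 2 the natural charge:
#
# Atom No    Charge        Core      Valence    Rydberg      Total
# -----------------------------------------------------------------
# C  1      -0.23456      1.99900    4.21000    0.02556      6.23456
# H  2       0.23456      0.00000    0.75544    0.01000      0.76544
# ===================================================================
# * Total *  0.00000      ...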
if __name__ == "__main__":
path = "gaussian/"
npa_charges = dict()
npa_types = dict()
listing = os.listdir(path)
for filename in sorted(listing):
if filename.endswith(".log"):
# print filename
charges, types = read_charges(path + filename)
npa_charges[filename] = charges
npa_types[filename] = types
# for i in range(len(charges)):
# print types[i], i+1, charges[i]
exit(0)
print npa_charges["133_pyridinium[+1].log"]
listing = os.listdir(path)
for filename in sorted(listing):
if filename.endswith(".log"):
print filename, npa_charges[filename]
f = open("charges_gaussian.pickle","wb")
cPickle.dump(npa_charges, f, protocol=2)
f.close()
# f = open("types.pickle","wb")
# cPickle.dump(npa_types, f, protocol=2)
# f.close()
|
andersx/dftbfit
|
scripts/parse_orca.py
|
Python
|
bsd-2-clause
| 1,786
|
[
"Gaussian"
] |
00271c4cbacc9f65069a1931125d637a13855228b98872649988038ebd56c297
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Modified parameters file for the Hybrid LFP scheme, applying the methodology with
the model of:
Potjans, T. and Diesmann, M. "The Cell-Type Specific Cortical Microcircuit:
Relating Structure and Activity in a Full-Scale Spiking Network Model".
Cereb. Cortex (2014) 24 (3): 785-806.
doi: 10.1093/cercor/bhs358
'''
import numpy as np
import os
import json
from mpi4py import MPI # this is needed to initialize other classes correctly
import multiprocessing as mp # to facilitate OpenMP parallelization w. NEST
# if MPI.SIZE == 1
###################################
# Initialization of MPI stuff #
###################################
COMM = MPI.COMM_WORLD
SIZE = COMM.Get_size()
RANK = COMM.Get_rank()
'''
TODO: rename to simulation_and_model_params.py
'''
####################################
# HELPER FUNCTIONS #
####################################
def flattenlist(lst): return sum(sum(lst, []), [])
####################################
# SPATIAL CONNECTIVITY EXTRACTION #
####################################
'''
Include functions that extract information from binzegger.json here
'''
def get_F_y(fname='binzegger_connectivity_table.json', y=['p23']):
'''
Extract frequency of occurrences of those cell types that are modeled.
The data set contains cell types that are not modeled (TCs etc.)
The returned percentages are renormalized onto modeled cell-types, i.e. they sum up to 1
'''
# Load data from json dictionary
f = open(fname, 'r')
data = json.load(f)
f.close()
occurr = []
for cell_type in y:
occurr += [data['data'][cell_type]['occurrence']]
return list(np.array(occurr) / np.sum(occurr))
def get_L_yXL(fname, y, x_in_X, L):
'''
compute the layer specificity, defined as:
::
L_yXL = k_yXL / k_yX
'''
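# Worked example with illustrative numbers only: if a postsynaptic cell of
# type y receives k_yXL = 300 synapses from population X in layer 23, and
# k_yX = 1200 synapses from X summed over all layers, then
# L_yXL = 300 / 1200 = 0.25, i.e. a quarter of the X connections land in L23.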
def _get_L_yXL_per_yXL(fname, x_in_X, X_index,
y, layer):
# Load data from json dictionary
f = open(fname, 'r')
data = json.load(f)
f.close()
# Get number of synapses
if layer in [str(key)
for key in list(data['data'][y]['syn_dict'].keys())]:
# init variables
k_yXL = 0
k_yX = 0
for x in x_in_X[X_index]:
p_yxL = data['data'][y]['syn_dict'][layer][x] / 100.
k_yL = data['data'][y]['syn_dict'][layer]['number of synapses per neuron']
k_yXL += p_yxL * k_yL
for l in [str(key)
for key in list(data['data'][y]['syn_dict'].keys())]:
for x in x_in_X[X_index]:
p_yxL = data['data'][y]['syn_dict'][l][x] / 100.
k_yL = data['data'][y]['syn_dict'][l]['number of synapses per neuron']
k_yX += p_yxL * k_yL
if k_yXL != 0.:
return k_yXL / k_yX
else:
return 0.
else:
return 0.
# init dict
L_yXL = {}
# iterate over postsynaptic cell types
for y_value in y:
# container
data = np.zeros((len(L), len(x_in_X)))
# iterate over lamina
for i, Li in enumerate(L):
# iterate over presynapse population inds
for j in range(len(x_in_X)):
data[i][j] = _get_L_yXL_per_yXL(fname, x_in_X,
X_index=j,
y=y_value,
layer=Li)
L_yXL[y_value] = data
return L_yXL
def get_T_yX(fname, y, y_in_Y, x_in_X, F_y):
'''
compute the cell type specificity, defined as:
::
T_yX = K_yX / K_YX
= F_y * k_yX / sum_y(F_y*k_yX)
'''
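# Worked example with illustrative numbers only: if two excitatory cell types
# y1 and y2 make up population Y and their occurrence-weighted in-degrees from
# X are F_y1*k_y1X = 60 and F_y2*k_y2X = 40, then T_y1X = 60 / (60 + 40) = 0.6,
# i.e. cell type y1 receives 60% of the X->Y synapses.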
def _get_k_yX_mul_F_y(y, y_index, X_index):
# Load data from json dictionary
f = open(fname, 'r')
data = json.load(f)
f.close()
# init variables
k_yX = 0.
for l in [str(key)
for key in list(data['data'][y]['syn_dict'].keys())]:
for x in x_in_X[X_index]:
p_yxL = data['data'][y]['syn_dict'][l][x] / 100.
k_yL = data['data'][y]['syn_dict'][l]['number of synapses per neuron']
k_yX += p_yxL * k_yL
return k_yX * F_y[y_index]
# container
T_yX = np.zeros((len(y), len(x_in_X)))
# iterate over postsynaptic cell types
for i, y_value in enumerate(y):
# iterate over presynapse population inds
for j in range(len(x_in_X)):
k_yX_mul_F_y = 0
for k, yy in enumerate(sum(y_in_Y, [])):
if y_value in yy:
for yy_value in yy:
ii = np.where(np.array(y) == yy_value)[0][0]
k_yX_mul_F_y += _get_k_yX_mul_F_y(yy_value, ii, j)
if k_yX_mul_F_y != 0:
T_yX[i, j] = _get_k_yX_mul_F_y(y_value, i, j) / k_yX_mul_F_y
return T_yX
class general_params(object):
def __init__(self):
'''class defining general model parameters'''
####################################
# REASON FOR THIS SIMULATION #
####################################
self.reason = 'Default Potjans model with spontaneous activity'
####################################
# #
# #
# SIMULATION PARAMETERS #
# #
# #
####################################
####################################
# MAIN SIMULATION CONTROL #
####################################
# simulation step size
self.dt = 0.1
# simulation start
self.tstart = 0
# simulation stop
self.tstop = 5200
####################################
# OUTPUT LOCATIONS #
####################################
# folder for all simulation output and scripts
# using the cluster's dedicated SCRATCH area
if 'SCRATCH' in os.environ and os.path.isdir(
os.path.join(os.environ['SCRATCH'], os.environ['USER'])):
self.savefolder = os.path.join(
os.environ['SCRATCH'],
os.environ['USER'],
'hybrid_model',
'simulation_output_default')
# LOCALLY
else:
self.savefolder = 'simulation_output_default'
# folder for simulation scripts
self.sim_scripts_path = os.path.join(self.savefolder, 'sim_scripts')
# folder for each individual cell's output
self.cells_path = os.path.join(self.savefolder, 'cells')
# folder for figures
self.figures_path = os.path.join(self.savefolder, 'figures')
# folder for population resolved output signals
self.populations_path = os.path.join(self.savefolder, 'populations')
# folder for raw nest output files
self.raw_nest_output_path = os.path.join(self.savefolder,
'raw_nest_output')
# folder for processed nest output files
self.spike_output_path = os.path.join(self.savefolder,
'processed_nest_output')
####################################
# #
# #
# MODEL PARAMETERS #
# #
# #
####################################
####################################
# POPULATIONS #
####################################
# Number of populations
self.Npops = 9
# number of neurons in each population (unscaled)
self.full_scale_num_neurons = [[20683, # layer 23 e
5834], # layer 23 i
[21915, # layer 4 e
5479], # layer 4 i
[4850, # layer 5 e
1065], # layer 5 i
[14395, # layer 6 e
2948]] # layer 6 i
# Number of thalamic neurons/ point processes
self.n_thal = 902
# population names TODO: rename
self.X = [
'TC',
'L23E',
'L23I',
'L4E',
'L4I',
'L5E',
'L5I',
'L6E',
'L6I']
self.Y = self.X[1:]
# TC and cortical population sizes in one list TODO: rename
self.N_X = np.array([self.n_thal] +
flattenlist([self.full_scale_num_neurons]))
####################################
# CONNECTIVITY #
####################################
# intra-cortical connection probabilities between populations
# 23e 23i 4e 4i 5e 5i 6e 6i
self.conn_probs = np.array([[0.1009, 0.1689, 0.0437, 0.0818, 0.0323, 0., 0.0076, 0.], # 23e
[0.1346, 0.1371, 0.0316, 0.0515,
0.0755, 0., 0.0042, 0.], # 23i
[0.0077, 0.0059, 0.0497, 0.135,
0.0067, 0.0003, 0.0453, 0.], # 4e
[0.0691, 0.0029, 0.0794, 0.1597,
0.0033, 0., 0.1057, 0.], # 4i
[0.1004, 0.0622, 0.0505, 0.0057,
0.0831, 0.3726, 0.0204, 0.], # 5e
[0.0548, 0.0269, 0.0257, 0.0022,
0.06, 0.3158, 0.0086, 0.], # 5i
[0.0156, 0.0066, 0.0211, 0.0166, 0.0572,
0.0197, 0.0396, 0.2252], # 6e
[0.0364, 0.001, 0.0034, 0.0005, 0.0277, 0.008, 0.0658, 0.1443]]) # 6i
# connection probabilities for thalamic input
self.C_th = [[0.0, # layer 23 e
0.0], # layer 23 i
[0.0983, # layer 4 e
0.0619], # layer 4 i
[0.0, # layer 5 e
0.0], # layer 5 i
[0.0512, # layer 6 e
0.0196]] # layer 6 i
# full connection probabilities including TC connections
self.C_YX = np.c_[flattenlist([self.C_th]), self.conn_probs]
####################################
# CONNECTION PROPERTIES #
####################################
# mean EPSP amplitude (mV) for all connections except L4e->L23e
self.PSP_e = 0.15
# mean EPSP amplitude (mv) for L4e->L23e connections
# FIX POLISH NOTATION !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
self.PSP_23e_4e = self.PSP_e * 2
# standard deviation of PSC amplitudes relative to mean PSC amplitudes
# this is sigma/mu in probability distribution
# Gaussian (lognormal_weights = False): mu is mean, sigma is standard deviation
# Lognormal (lognormal_weights = True): mean and stdev can be
# calculated from mu and sigma
self.PSC_rel_sd = 0.1
# IPSP amplitude relative to EPSP amplitude
self.g = -4.
# L4i ->L4e stronger in order to get rid of 84 Hz peak
self.g_4e_4i = self.g
# Whether to use lognormal weights or not
self.lognormal_weights = False
# mean dendritic delays for excitatory and inhibitory transmission (ms)
self.delays = [1.5, 0.75]
# standard deviation relative to mean delays; former delay_rel
self.delay_rel_sd = 0.5
####################################
# CELL-TYPE PARAMETERS #
####################################
# Note that these parameters are only relevant for the point-neuron network in case
# one wants to calculate depth-resolved cell-type specific input
# currents
# point to .json connectivity table file
self.connectivity_table = 'binzegger_connectivity_table.json'
# list of cell type names used in this script
# names of every post-syn pop layer
self.y_in_Y = [
[['p23'], ['b23', 'nb23']],
[['p4', 'ss4(L23)', 'ss4(L4)'], ['b4', 'nb4']],
[['p5(L23)', 'p5(L56)'], ['b5', 'nb5']],
[['p6(L4)', 'p6(L56)'], ['b6', 'nb6']]]
self.y = flattenlist(self.y_in_Y)
# need presynaptic cell type to population mapping
self.x_in_X = [['TCs', 'TCn']] + sum(self.y_in_Y, [])
# map the pre-synaptic populations to the post-syn populations
self.mapping_Yy = list(zip(
['L23E', 'L23I', 'L23I',
'L4E', 'L4E', 'L4E', 'L4I', 'L4I',
'L5E', 'L5E', 'L5I', 'L5I',
'L6E', 'L6E', 'L6I', 'L6I'],
self.y))
# Frequency of occurrence of each cell type (F_y); 1-d array
self.F_y = get_F_y(fname=self.connectivity_table, y=self.y)
# Relative frequency of occurrence of each cell type within its
# population (F_{y,Y})
self.F_yY = [[get_F_y(fname=self.connectivity_table, y=y)
for y in Y] for Y in self.y_in_Y]
# Number of neurons of each cell type (N_y); 1-d array
self.N_y = np.array([self.full_scale_num_neurons[layer][pop] * self.F_yY[layer][pop][k]
for layer, array in enumerate(self.y_in_Y)
for pop, cell_types in enumerate(array)
for k, _ in enumerate(cell_types)]).astype(int)
# compute the number of synapses as in Potjans&Diesmann 2012
K_YX = np.zeros(self.C_YX.shape)
for i in range(K_YX.shape[1]):
K_YX[:, i] = (np.log(1. - self.C_YX[:, i]) /
np.log(1. - 1. / (self.N_X[1:] *
self.N_X[i])))
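# The expression above inverts C = 1 - (1 - 1/(N_pre*N_post))**K, i.e. the
# probability that at least one of K randomly drawn synapses connects a given
# pre/post pair. Illustrative numbers only: C = 0.1 with N_pre = N_post = 1000
# gives K = ln(0.9) / ln(1 - 1e-6), roughly 1.05e5 synapses in total between
# the two populations.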
# spatial connection probabilities on each subpopulation
# Each key must correspond to a subpopulation like 'L23E' used everywhere else,
# each array maps thalamic and intracortical connections.
# First column is thalamic connections, and the rest intracortical,
# ordered like 'L23E', 'L23I' etc., first row is normalised probability of
# connection within L1, L2, etc.;
self.L_yXL = get_L_yXL(fname=self.connectivity_table,
y=self.y,
x_in_X=self.x_in_X,
L=['1', '23', '4', '5', '6'])
# compute the cell type specificity
self.T_yX = get_T_yX(fname=self.connectivity_table, y=self.y,
y_in_Y=self.y_in_Y, x_in_X=self.x_in_X,
F_y=self.F_y)
Y, y = list(zip(*self.mapping_Yy))
# assess relative distribution of synapses for a given celltype
self.K_yXL = {}
#self.T_yX = {}
for i, (Y, y) in enumerate(self.mapping_Yy):
# fill in K_yXL (layer specific connectivity)
self.K_yXL[y] = (self.T_yX[i, ] *
K_YX[np.array(self.Y) == Y, ] *
self.L_yXL[y]).astype(int)
# number of incoming connections per cell type per layer per cell
self.k_yXL = {}
for y, N_y in zip(self.y, self.N_y):
self.k_yXL.update({y: (1. * self.K_yXL[y]).astype(int) // N_y})
# calculate corresponding connectivity to K_yXL
self.C_yXL = {}
for y, N_y in zip(self.y, self.N_y):
self.C_yXL.update(
{y: 1. - (1. - 1. / (N_y * self.N_X))**self.K_yXL[y]})
##########################################################################
class point_neuron_network_params(general_params):
def __init__(self):
'''class point-neuron network parameters'''
# inherit general params
general_params.__init__(self)
####################################
# #
# #
# SIMULATION PARAMETERS #
# #
# #
####################################
# use same number of threads as MPI COMM.size() for parallel jobs
# else the number of processors for serial jobs
if SIZE > 1:
self.total_num_virtual_procs = SIZE
else:
self.total_num_virtual_procs = mp.cpu_count()
####################################
# RNG PROPERTIES #
####################################
# offset for RNGs
self.seed_offset = 45
####################################
# RECORDING PARAMETERS #
####################################
self.overwrite_existing_files = True
# recording can either be done from a fraction of neurons in each
# population or from a fixed number
# whether to record spikes from a fixed fraction of neurons in each
# population.
self.record_fraction_neurons_spikes = True
if self.record_fraction_neurons_spikes:
self.frac_rec_spikes = 1.
else:
self.n_rec_spikes = 100
# whether to record membrane potentials from a fixed fraction of
# neurons in each population
self.record_fraction_neurons_voltage = False
if self.record_fraction_neurons_voltage:
self.frac_rec_voltage = 0.1
else:
self.n_rec_voltage = 50 # 100
# whether to record weighted input spikes from a fixed fraction of
# neurons in each population
self.record_fraction_neurons_input_spikes = False
if self.record_fraction_neurons_input_spikes:
self.frac_rec_input_spikes = 0.1
else:
self.n_rec_input_spikes = 20 # 100
# number of recorded neurons for depth resolved input currents
self.n_rec_depth_resolved_input = 0
# NESTio recording format
self.record_to = 'ascii'
# whether to record thalamic spikes
self.record_thalamic_spikes = True
# global ID file name
self.GID_filename = 'population_GIDs.dat'
# readout global ID file name
self.readout_GID_filename = 'readout_GIDs.dat'
# stem for spike detector file labels
self.spike_recorder_label = 'spikes_'
# stem for voltmeter file labels
self.voltmeter_label = 'voltages_'
# stem for thalamic spike detector file labels
self.th_spike_recorder_label = 'spikes_0'
# stem for in-degree file labels
self.in_degree_label = 'in_degrees_'
# stem for file labels for in-degree from thalamus
self.th_in_degree_label = 'in_degrees_th_'
# stem for weighted input spikes labels
self.weighted_input_spikes_label = 'weighted_input_spikes_'
####################################
# #
# #
# MODEL PARAMETERS #
# #
# #
####################################
####################################
# SCALING #
####################################
# scaling parameter for population sizes
self.area = 1.0
# preserve indegrees when downscaling
self.preserve_K = False
####################################
# SINGLE NEURON PARAMS #
####################################
# neuron model
self.neuron_model = '/iaf_psc_exp'
# mean of initial membrane potential (mV)
self.Vm0_mean = -58.0
# std of initial membrane potential (mV)
self.Vm0_std = 10.0
# mean of threshold potential (mV)
self.V_th_mean = -50.
# std of threshold potential (mV)
self.V_th_std = 1E-8 # nest::NormalParameter: std > 0 required.
self.model_params = {'tau_m': 10., # membrane time constant (ms)
# excitatory synaptic time constant (ms)
'tau_syn_ex': 0.5,
# inhibitory synaptic time constant (ms)
'tau_syn_in': 0.5,
# absolute refractory period (ms)
't_ref': 2.,
# resting membrane potential (mV)
'E_L': -65.,
'V_th': self.V_th_mean, # spike threshold (mV)
'C_m': 250., # membrane capacitance (pF)
'V_reset': -65. # reset potential (mV)
}
####################################
# EXTERNAL INPUTS #
####################################
# number of external inputs (Potjans-Diesmann model 2012)
self.K_bg = [[1600, # layer 23 e
1500], # layer 23 i
[2100, # layer 4 e
1900], # layer 4 i
[2000, # layer 5 e
1900], # layer 5 i
[2900, # layer 6 e
2100]] # layer 6 i
# rate of Poisson input at each external input synapse (spikes/s)
self.bg_rate = 0.
# rate of equivalent input used for DC amplitude calculation,
# set to zero if self.bg_rate > 0.
self.bg_rate_dc = 8.
# DC amplitude at each external input synapse (pA)
# to each neuron via 'dc_amplitude = tau_syn_ex/1000*bg_rate*PSC_ext'
self.dc_amplitude = self.model_params["tau_syn_ex"] * \
self.bg_rate_dc * self._compute_J()
# mean EPSP amplitude (mV) for thalamic and non-thalamic external input
# spikes
self.PSP_ext = 0.15
# mean delay of thalamic input (ms)
self.delay_th = 1.5
# standard deviation relative to mean delay of thalamic input
self.delay_th_rel_sd = 0.5
####################################
# THALAMIC INPUT VERSIONS #
####################################
# off-option for start of thalamic input versions
self.off = 100. * self.tstop
# poisson_generator (pure Poisson input)
self.th_poisson_start = self.off # onset (ms)
self.th_poisson_duration = 10. # duration (ms)
self.th_poisson_rate = 120. # rate (spikes/s)
# spike_generator
# Note: This can be used with a large Gaussian delay distribution in order to mimic a
# Gaussian pulse packet which is different for each thalamic neuron
self.th_spike_times = [self.off] # time of the thalamic pulses (ms)
# create n_thal spikegenerator nodes connected to each respective
# postsynaptic parrot_neuron. Expected format is a len(self.n_thal) list
# of lists of activation times.
# Turn activation off by setting it as [[] for i in range(self.n_thal)]
self.th_spike_generator_times = [[] for i in range(self.n_thal)]
# sinusoidal_poisson_generator (oscillatory Poisson input)
self.th_sin_start = self.off # onset (ms)
self.th_sin_duration = 5000. # duration (ms)
self.th_sin_mean_rate = 30. # mean rate (spikes/s)
# rate modulation amplitude (spikes/s)
self.th_sin_fluc_rate = 30.
# frequency of the rate modulation (Hz)
self.th_sin_freq = 15.
# phase of rate modulation (deg)
self.th_sin_phase = 0.
# Gaussian_pulse_packages
self.th_gauss_times = [self.off] # package center times
self.th_gauss_num_spikes_per_packet = 1 # number of spikes per packet
self.th_gauss_sd = 5. # std of Gaussian pulse packet (ms^2)
####################################
# SPATIAL ORGANIZATION #
####################################
# needed for spatially resolved input currents
# number of layers TODO: find a better solution for that
self.num_input_layers = 5
def _compute_J(self):
r'''
Compute the current amplitude corresponding to the exponential
synapse model PSP amplitude
Derivation using sympy:
::
from sympy import *
#define symbols
t, tm, Cm, ts, Is, Vmax = symbols('t tm Cm ts Is Vmax')
#assume zero delay, t >= 0
#using eq. 8.10 in Sterrat et al
V = tm*ts*Is*(exp(-t/tm) - exp(-t/ts)) / (tm-ts) / Cm
print 'V = %s' % V
#find time of V == Vmax
dVdt = diff(V, t)
print 'dVdt = %s' % dVdt
[t] = solve(dVdt, t)
print 't(t@dVdT==Vmax) = %s' % t
#solve for Is at time of maxima
V = tm*ts*Is*(exp(-t/tm) - exp(-t/ts)) / (tm-ts) / Cm
print 'V(%s) = %s' % (t, V)
[Is] = solve(V-Vmax, Is)
print 'Is = %s' % Is
resulting in:
::
Cm*Vmax*(-tm + ts)/(tm*ts*(exp(tm*log(ts/tm)/(tm - ts))
- exp(ts*log(ts/tm)/(tm - ts))))
Latex source:
::
J &= -\frac{C_\text{m} V_\text{PSP} (\tau_\text{m} - \tau_\text{syn})}{\tau_\text{m} \tau_\text{syn}(
\exp\frac{\tau_\text{m} \ln(\tau_\text{syn}/\tau_\text{m})}{\tau_\text{m} - \tau_\text{syn}}
-\exp\frac{\tau_\text{syn} \ln(\tau_\text{syn}/\tau_\text{m})}{\tau_\text{m} - \tau_\text{syn}})} \\
I^\text{ext} &= \tau_\text{syn} \nu^\text{ext} J \\
&= -\frac{\nu^\text{ext} C_\text{m} V_\text{PSP} (\tau_\text{m} - \tau_\text{syn})}{\tau_\text{m}(
\exp\frac{\tau_\text{m} \ln(\tau_\text{syn}/\tau_\text{m})}{\tau_\text{m} - \tau_\text{syn}}
-\exp\frac{\tau_\text{syn} \ln(\tau_\text{syn}/\tau_\text{m})}{\tau_\text{m} - \tau_\text{syn}})}
'''
# LIF params
tm = self.model_params['tau_m']
Cm = self.model_params['C_m']
# synapse
ts = self.model_params['tau_syn_ex']
Vmax = self.PSP_e
# max current amplitude
J = Cm * Vmax * (-tm + ts) / (tm * ts * (np.exp(tm * np.log(ts / tm) /
(tm - ts)) - np.exp(ts * np.log(ts / tm) / (tm - ts))))
# unit conversion pF*mV -> nA
J *= 1E-3
return J
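# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original parameter set and never
# called by the simulation code): the helper below re-evaluates the
# closed-form PSC amplitude derived in _compute_J() for the default
# constants used above (tau_m = 10 ms, C_m = 250 pF, tau_syn_ex = 0.5 ms)
# and an assumed PSP amplitude of 0.15 mV. For these values it returns
# roughly 0.088 nA (~87.8 pA).
# ----------------------------------------------------------------------
def _example_psc_amplitude(tau_m=10., C_m=250., tau_syn=0.5, PSP=0.15):
    '''Stand-alone numeric check of the _compute_J() expression (in nA).'''
    import numpy as np  # local import keeps the sketch self-contained
    J = C_m * PSP * (-tau_m + tau_syn) / (
        tau_m * tau_syn * (
            np.exp(tau_m * np.log(tau_syn / tau_m) / (tau_m - tau_syn))
            - np.exp(tau_syn * np.log(tau_syn / tau_m) / (tau_m - tau_syn))))
    return J * 1E-3  # pF*mV/ms -> nA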
class multicompartment_params(point_neuron_network_params):
'''
Inherited class defining additional attributes needed by, e.g., the
classes population.Population and population.DummyNetwork.
This class does not take any kwargs.
'''
def __init__(self):
'''
Inherited class defining additional attributes needed by, e.g., the
classes population.Population and population.DummyNetwork.
This class does not take any kwargs.
'''
# initialize parent classes
point_neuron_network_params.__init__(self)
####################################
# #
# #
# SIMULATION PARAMETERS #
# #
# #
####################################
#######################################
# PARAMETERS FOR LOADING NEST RESULTS #
#######################################
# parameters for class population.DummyNetwork class
self.networkSimParams = {
'simtime': self.tstop - self.tstart,
'dt': self.dt,
'spike_output_path': self.spike_output_path,
'label': 'population_spikes',
'ext': 'dat',
'GIDs': self.get_GIDs(),
'X': self.X,
'skiprows': 0,
}
####################################
# #
# #
# MODEL PARAMETERS #
# #
# #
####################################
####################################
# SCALING (VOLUME not density) #
####################################
self.SCALING = 1.0
####################################
# MORPHOLOGIES #
####################################
# list of morphology files with default location, testing = True
# will point to simplified morphologies
testing = False
if testing:
self.PATH_m_y = os.path.join('morphologies', 'ballnsticks')
self.m_y = [Y + '_' + y + '.hoc' for Y, y in self.mapping_Yy]
else:
self.PATH_m_y = os.path.join('morphologies', 'stretched')
self.m_y = [
'L23E_oi24rpy1.hoc',
'L23I_oi38lbc1.hoc',
'L23I_oi38lbc1.hoc',
'L4E_53rpy1.hoc',
'L4E_j7_L4stellate.hoc',
'L4E_j7_L4stellate.hoc',
'L4I_oi26rbc1.hoc',
'L4I_oi26rbc1.hoc',
'L5E_oi15rpy4.hoc',
'L5E_j4a.hoc',
'L5I_oi15rbc1.hoc',
'L5I_oi15rbc1.hoc',
'L6E_51-2a.CNG.hoc',
'L6E_oi15rpy4.hoc',
'L6I_oi15rbc1.hoc',
'L6I_oi15rbc1.hoc',
]
####################################
# CONNECTION WEIGHTS #
####################################
# compute the synapse weight from fundamentals of exp synapse LIF
# neuron
self.J = self._compute_J()
# set up matrix containing the synapse weights between any population X
# and population Y, including exceptions for certain connections
J_YX = np.zeros(self.C_YX.shape)
J_YX += self.J
J_YX[:, 2::2] *= self.g
if hasattr(self, 'PSP_23e_4e'):
J_YX[0, 3] *= self.PSP_23e_4e / self.PSP_e
if hasattr(self, 'g_4e_4i'):
J_YX[2, 4] *= self.g_4e_4i / self.g
# extrapolate weights between populations X and
# cell type y in population Y
self.J_yX = {}
for Y, y in self.mapping_Yy:
[i] = np.where(np.array(self.Y) == Y)[0]
self.J_yX.update({y: J_YX[i, ]})
####################################
# GEOMETRY OF CORTICAL COLUMN #
####################################
# set the boundaries of each layer, L1->L6,
# and mean depth of soma layers
self.layerBoundaries = np.array([[0.0, -81.6],
[-81.6, -587.1],
[-587.1, -922.2],
[-922.2, -1170.0],
[-1170.0, -1491.7]])
# assess the depth of each of the 16 subpopulations
self.depths = self._calcDepths()
# make a nice structure with data for each subpopulation
self.y_zip_list = list(zip(self.y, self.m_y,
self.depths, self.N_y))
##############################################################
# POPULATION PARAMS (cells, population, synapses, electrode) #
##############################################################
# Global LFPy.Cell-parameters, by default shared between populations
# Some passive parameters will not be fully consistent with LIF params
self.cellParams = {
'v_init': self.model_params['E_L'],
'cm': 1.0,
'Ra': 150,
'passive': True,
'passive_parameters': dict(g_pas=1. / (self.model_params['tau_m'] * 1E3), # assume cm=1
e_pas=self.model_params['E_L']),
'nsegs_method': 'lambda_f',
'lambda_f': 100,
'dt': self.dt,
'tstart': self.tstart,
'tstop': self.tstop,
'verbose': False,
}
# layer specific LFPy.Cell-parameters as nested dictionary
self.yCellParams = self._yCellParams()
# set the axis of which each cell type y is randomly rotated,
# SS types and INs are rotated around both x- and z-axis
# in the population class, while P-types are
# only rotated around the z-axis
self.rand_rot_axis = {}
for y, _, _, _ in self.y_zip_list:
# identify pyramidal cell populations:
if y.rfind('p') >= 0:
self.rand_rot_axis.update({y: ['z']})
else:
self.rand_rot_axis.update({y: ['x', 'z']})
# additional simulation kwargs, see LFPy.Cell.simulate() docstring
self.simulationParams = {'rec_imem': True}
# a dict setting the number of cells N_y and geometry
# of cell type population y
self.populationParams = {}
for y, _, depth, N_y in self.y_zip_list:
self.populationParams.update({
y: {
'number': int(N_y * self.SCALING),
'radius': np.sqrt(1000**2 / np.pi),
'z_min': depth - 25,
'z_max': depth + 25,
'min_cell_interdist': 1.,
'min_r': [[-1E199, -1600, -1550, 1E99], [0, 0, 10, 10]]
}
})
# Set up cell type specific synapse parameters in terms of synapse model
# and synapse locations
self.synParams = {}
for y in self.y:
if y.rfind('p') >= 0:
# pyramidal types have apical dendrites
section = ['apic', 'dend']
else:
# other cell types do not
section = ['dend']
self.synParams.update({
y: {
'syntype': 'ExpSynI', # current based exponential synapse
'section': section,
# 'tau' : self.model_params["tau_syn_ex"],
},
})
# set up dictionary of synapse time constants specific to each
# postsynaptic cell type and presynaptic population
self.tau_yX = {}
for y in self.y:
self.tau_yX.update({
y: [self.model_params["tau_syn_in"] if 'I' in X else
self.model_params["tau_syn_ex"] for X in self.X]
})
# synaptic delay parameters, loc and scale is mean and std for every
# network population, negative values will be removed
self.synDelayLoc, self.synDelayScale = self._synDelayParams()
# Define electrode geometry corresponding to a laminar electrode,
# where contact points have a radius r, surface normal vectors N,
# and LFP calculated as the average LFP in n random points on
# each contact. The recording electrode emulates a NeuroNexus array;
# contact 0 is superficial
self.electrodeParams = {
# contact locations:
'x': np.zeros(16),
'y': np.zeros(16),
'z': -np.mgrid[0:16] * 100,
# extracellular conductivity:
'sigma': 0.3,
# contact surface normals, radius, n-point averaging
'N': np.array([[1, 0, 0]] * 16),
'r': 7.5,
'n': 50,
'seedvalue': None,
# dendrite line sources, soma sphere source (Linden2014)
'method': 'root_as_point',
}
# parameters for LFPykit.LaminarCurrentSourceDensity
self.CSDParams = dict(
z=np.array([[-(i + 1) * 100, -i * 100] for i in range(16)]) + 50.,
r=np.ones(16) * np.sqrt(1000**2 / np.pi) # same as pop radius
)
# these cell attributes variables will be saved to file
self.savelist = []
#########################################
# MISC #
#########################################
# time resolution of downsampled data in ms
self.dt_output = 1.
# set the fraction of neurons per population for which the single-cell LFP contribution is stored
self.recordSingleContribFrac = 0.
def get_GIDs(self):
GIDs = {}
ind = 1
for i, (X, N_X) in enumerate(zip(self.X, self.N_X)):
GIDs[X] = [ind, N_X]
ind += N_X
return GIDs
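# Illustrative (hedged) example of the mapping returned by get_GIDs(); population
# names and sizes are made up for illustration:
#   {'L23E': [1, 20683], 'L23I': [20684, 5834], ...}
# i.e. each population X maps to [first GID, population size], with GIDs numbered contiguously.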
def _synDelayParams(self):
'''
set up the detailed synaptic delay parameters,
loc is mean delay,
scale is std with low bound cutoff,
assumes numpy.random.normal is used later
'''
delays = {}
# mean delays
loc = np.zeros((len(self.y), len(self.X)))
loc[:, 0] = self.delays[0]
loc[:, 1::2] = self.delays[0]
loc[:, 2::2] = self.delays[1]
# standard deviations
scale = loc * self.delay_rel_sd
# prepare output
delay_loc = {}
for i, y in enumerate(self.y):
delay_loc.update({y: loc[i]})
delay_scale = {}
for i, y in enumerate(self.y):
delay_scale.update({y: scale[i]})
return delay_loc, delay_scale
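# Illustrative sketch (assumption about downstream use): per-connection delays are
# presumably drawn later along the lines of
#   d = np.random.normal(loc=synDelayLoc[y][i], scale=synDelayScale[y][i], size=n)
#   d = d[d > 0.]   # negative delays removed, as stated in the docstring above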
def _calcDepths(self):
'''
return the cortical depth of each subpopulation
'''
depths = self.layerBoundaries.mean(axis=1)[1:]
depth_y = []
for y in self.y:
if y in ['p23', 'b23', 'nb23']:
depth_y = np.r_[depth_y, depths[0]]
elif y in ['p4', 'ss4(L23)', 'ss4(L4)', 'b4', 'nb4']:
depth_y = np.r_[depth_y, depths[1]]
elif y in ['p5(L23)', 'p5(L56)', 'b5', 'nb5']:
depth_y = np.r_[depth_y, depths[2]]
elif y in ['p6(L4)', 'p6(L56)', 'b6', 'nb6']:
depth_y = np.r_[depth_y, depths[3]]
else:
raise Exception('Error, revise parameters')
return depth_y
def _yCellParams(self):
'''
Return dict with parameters for each population.
The main operation is filling in cell type specific morphology
'''
# cell type specific parameters going into LFPy.Cell
yCellParams = {}
for layer, morpho, _, _ in self.y_zip_list:
yCellParams.update({layer: self.cellParams.copy()})
yCellParams[layer].update({
'morphology': os.path.join(self.PATH_m_y, morpho),
})
return yCellParams
if __name__ == '__main__':
params = multicompartment_params()
print(dir(params))
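    # Illustrative inspection sketch (hedged; 'p4' is just an example cell type):
    # print(params.J_yX['p4'])              # synaptic weights onto cell type p4 from each population X (nA)
    # print(params.populationParams['p4'])  # geometry and cell count for cell type p4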
|
espenhgn/hybridLFPy
|
examples/Hagen_et_al_2016_cercor/cellsim16popsParams_default.py
|
Python
|
gpl-3.0
| 39,405
|
[
"Gaussian",
"NEURON"
] |
a0159324ea7e2bcf223a2a03874ca9c7a4cec52c44a0bd6f7b734d0206548df7
|
import numpy as np
from matplotlib import pyplot
import rft1d
eps = np.finfo(float).eps #smallest float
#(0) Set parameters:
np.random.seed(0)
nResponses = 2000
nNodes = 101
FWHM = 10.0
interp = True
wrap = True
heights = [2.2, 2.4, 2.6, 2.8]
### generate data:
y = rft1d.randn1d(nResponses, nNodes, FWHM)
calc = rft1d.geom.ClusterMetricCalculator()
rftcalc = rft1d.prob.RFTCalculator(STAT='Z', nodes=nNodes, FWHM=FWHM)
#(1) Maximum region size:
K0 = np.linspace(eps, 15, 21)
K = np.array([[calc.max_cluster_extent(yy, h, interp, wrap) for yy in y] for h in heights])
P = np.array([(K>=k0).mean(axis=1) for k0 in K0]).T
P0 = np.array([[rftcalc.p.cluster(k0, h) for k0 in K0/FWHM] for h in heights])
#(2) Plot results:
pyplot.close('all')
colors = ['b', 'g', 'r', 'orange']
labels = ['u = %.1f'%h for h in heights]
ax = pyplot.axes()
for color,p,p0,label in zip(colors,P,P0,labels):
ax.plot(K0, p, 'o', color=color)
ax.plot(K0, p0, '-', color=color, label=label)
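### note: the two plots below at y=10 only create black legend handles for
### "Theoretical" (solid line) and "Simulated" (markers); with ylim=(0, 0.25)
### they never appear inside the visible axes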
ax.plot([0,1],[10,10], 'k-', label='Theoretical')
ax.plot([0,1],[10,10], 'ko-', label='Simulated')
ax.legend()
ax.set_xlabel('$x$', size=20)
ax.set_ylabel('$P(k_{max} > x)$', size=20)
ax.set_ylim(0, 0.25)
ax.set_title('Upcrossing extent validations (Gaussian fields)', size=20)
pyplot.show()
|
0todd0000/rft1d
|
rft1d/examples/val_upx_0_gauss_cluster.py
|
Python
|
gpl-3.0
| 1,372
|
[
"Gaussian"
] |
e084ff2721f39f08a28ff53e3bb83f2a799476ad7e6664500d23e9b898826b09
|
import struct, zlib, sys, re, os, gzip, random
import Bio.Align
#import Bio.Format.BamIndex as BamIndex
from Bio.Sequence import rc
from cStringIO import StringIO
from string import maketrans
from Bio.Range import GenomicRange
from subprocess import Popen, PIPE
_bam_ops = maketrans('012345678','MIDNSHP=X')
_bam_char = maketrans('abcdefghijklmnop','=ACMGRSVTWYHKDBN')
_bam_value_type = {'c':[1,'<b'],'C':[1,'<B'],'s':[2,'<h'],'S':[2,'<H'],'i':[4,'<i'],'I':[4,'<I']}
_sam_cigar_target_add = re.compile('[M=XDN]$')
# A sam entry
class SAM(Bio.Align.Alignment):
def __init__(self,line,reference=None,reference_lengths=None):
self._line = line.rstrip()
self._reference = reference
self._reference_lengths = None # reference would also cover this
self._target_range = None
self._private_values = SAM.PrivateValues()
self._parse_sam_line()
# Private values holds tags cigar and entries
self._alignment_ranges = None
self._set_alignment_ranges()
return
def __str__(self):
if self._line:
return self._line
return self.get_line()
# Get the length of the target sequence
def get_target_length(self):
if not self.is_aligned():
sys.stderr.write("ERROR no length for reference when not aligned\n")
sys.exit()
if self._reference_lengths:
if self.value('rname') in self._reference_lengths:
return self._reference_lengths[self.value('rname')]
elif self._reference:
return len(self._reference[self.value('rname')])
else:
sys.stderr.write("ERROR some reference needs to be set to go from psl to bam\n")
sys.exit()
sys.stderr.write("ERROR reference found\n")
sys.exit()
#Overrides Bio.Alignment.Align.get_query_sequence()
def get_query_sequence(self):
if self.value('seq') == '*': return None
if self.check_flag(0x10): return rc(self.value('seq'))
return self.value('seq')
#Overrides Bio.Alignment.Align.get_query_sequence()
def get_query_quality(self):
if not self.get_query_sequence(): return None
if self.value('qual') == '*': return None
if self.check_flag(0x10): return self.value('qual')[::-1]
return self.value('qual')
#Overrides Bio.Alignment.Align.get_query_length()
def get_query_length(self):
seq = self.value('seq')
if seq != '*': return len(self.value('seq'))
return sum([x[0] for x in self.get_cigar() if re.match('[MIS=X]',x[1])])
  # Similar to get_query_length, but it also includes
# hard clipped bases
# if there is no cigar, then default to trying the sequence
def get_original_query_length(self):
if not self.is_aligned():
return self.get_query_length()
if self.get_cigar() == '*':
return self.get_query_length()
return sum([x[0] for x in self.get_cigar() if re.match('[HMIS=X]',x[1])])
# This accounts for hard clipped bases
  # and a query sequence that hasn't been reverse complemented
def get_actual_original_query_range(self):
l = self.get_original_query_length()
a = self.get_alignment_ranges()
qname = a[0][1].chr
qstart = a[0][1].start
qend = a[-1][1].end
#rng = self.get_query_range()
start = qstart
end = qend
if self.get_strand() == '-':
end = l-(qstart-1)
start = 1+l-(qend)
return GenomicRange(qname,start,end,self.get_strand())
#Overrides Bio.Alignment.Align.get_strand()
#Which strand is the query aligned to
def get_strand(self):
if self.check_flag(0x10): return '-'
return '+'
#Overrides Bio.Alignment.Align.get_SAM()
def get_SAM(self):
return self
def get_tag(self,key):
return self._private_values.get_tags()[key]['value']
#Overrides Bio.Alignment.Align._set_alignment_ranges()
#[target, query]
def _set_alignment_ranges(self):
if not self.is_aligned():
self._alignment_ranges = None
return
self._alignment_ranges = []
cig = self.get_cigar()[:]
target_pos = self.value('pos')
query_pos = 1
while len(cig) > 0:
c = cig.pop(0)
      if re.match('[S]$',c[1]): # soft clipping (consumes query only)
query_pos += c[0]
elif re.match('[ND]$',c[1]): # deleted from reference
target_pos += c[0]
elif re.match('[I]$',c[1]): # insertion to the reference
query_pos += c[0]
elif re.match('[MI=X]$',c[1]): # keep it
t_start = target_pos
q_start = query_pos
target_pos += c[0]
query_pos += c[0]
t_end = target_pos-1
q_end = query_pos-1
self._alignment_ranges.append([GenomicRange(self.value('rname'),t_start,t_end),GenomicRange(self.value('qname'),q_start,q_end)])
return
def _parse_sam_line(self):
f = self._line.rstrip().split("\t")
self._private_values.set_entry('qname',f[0])
self._private_values.set_entry('flag',int(f[1]))
self._private_values.set_entry('rname',f[2])
if f[2] == '*':
self._private_values.set_entry('pos',0)
else:
self._private_values.set_entry('pos',int(f[3]))
self._private_values.set_entry('mapq',int(f[4]))
self._private_values.set_entry('cigar',f[5])
self._private_values.set_entry('rnext',f[6])
self._private_values.set_entry('pnext',int(f[7]))
self._private_values.set_entry('tlen',int(f[8]))
self._private_values.set_entry('seq',f[9])
self._private_values.set_entry('qual',f[10])
self._private_values.set_cigar([])
if self.value('cigar') != '*':
cig = [[int(m[0]),m[1]] for m in re.findall('([0-9]+)([MIDNSHP=X]+)',self.value('cigar'))]
self._private_values.set_cigar(cig)
tags = {}
if len(f) > 11:
for m in [[y.group(1),y.group(2),y.group(3)] for y in [re.match('([^:]{2,2}):([^:]):(.+)$',x) for x in f[11:]]]:
if m[1] == 'i': m[2] = int(m[2])
elif m[1] == 'f': m[2] = float(m[2])
tags[m[0]] = {'type':m[1],'value':m[2]}
self._private_values.set_tags(tags)
# Necessary function for doing a locus stream
# For the context of a SAM file we set this to be the target range
def get_range(self):
return self.get_target_range()
def get_target_range(self):
if not self.is_aligned(): return None
if self._target_range: return self._target_range
global _sam_cigar_target_add
tlen = sum([x[0] for x in self.get_cigar() if _sam_cigar_target_add.match(x[1])])
self._target_range = GenomicRange(self.value('rname'),self.value('pos'),self.value('pos')+tlen-1)
return self._target_range
def check_flag(self,inbit):
if self.value('flag') & inbit: return True
return False
def is_aligned(self):
return not self.check_flag(0x4)
#assemble the line if its not there yet
def get_line(self):
if not self._line:
chr = self.value('rname')
rnext = self.value('rnext')
if not self.is_aligned():
chr = '*'
rnext = '*'
self._line = self.value('qname')+"\t"+str(self.value('flag'))+"\t"+chr+"\t"+str(self.value('pos'))+"\t"+str(self.value('mapq'))+"\t"+self.value('cigar')+"\t"+rnext+"\t"+str(self.value('pnext'))+"\t"+str(self.value('tlen'))+"\t"+self.value('seq')+"\t"+self.value('qual')
if self.value('remainder'):
self._line += "\t"+self.value('remainder')
return self._line
def value(self,key):
return self._private_values.get_entry(key)
def get_tags(self):
return self._private_values.get_tags()
def get_cigar(self):
return self._private_values.get_cigar()
  #BAM entries need specific overrides of get_tags and get_cigar; accessing those
  # variables any other way would break other parts of the class.
  #Force tags and cigars to be hidden so we don't accidentally change them.
class PrivateValues:
def __init__(self):
self.__tags = None
self.__cigar = None
self.__entries = {}
def set_tags(self,tags): self.__tags=tags
def get_tags(self): return self.__tags
def set_cigar(self,cigar): self.__cigar=cigar
def get_cigar(self): return self.__cigar
def set_entries_dict(self,mydict): self.__entries = mydict # set the entire dictionary at once
def get_entry(self,key):
if key not in self.__entries:
sys.stderr.write("WARNING: key "+str(key)+"not in entries\n")
return None
return self.__entries[key]
def is_entry_key(self,key):
if key in self.__entries: return True
return False
def set_entry(self,key,value): self.__entries[key] = value
# Very much like a sam entry but optimized for access from a bam
# Slows down for accessing things that need more decoding like
# sequence, quality, cigar string, and tags
class BAM(SAM):
def __init__(self,bin_data,ref_names,fileName=None,blockStart=None,innerStart=None,ref_lengths=None,reference=None,line_number=None):
part_dict = _parse_bam_data_block(bin_data,ref_names)
#self._bamfileobj = bamfileobj #this is most like our parent
self._line = None
self._line_number = line_number # the line number in the bam file
self._reference = reference
self._target_range = None
self._alignment_ranges = None #should be accessed by method because of BAM
self._ref_lengths = ref_lengths
self._file_position = {'fileName':fileName,'blockStart':blockStart,'innerStart':innerStart} # The most special information about the bam
self._private_values = BAM.PrivateValues() # keep from accidently accessing some variables other than by methods
self._private_values.set_entries_dict(part_dict)
#self._set_alignment_ranges()
return
def get_alignment_ranges(self):
if not self._alignment_ranges:
self._set_alignment_ranges()
return self._alignment_ranges
def get_line_number(self):
return self._line_number
def get_target_length(self):
return self._ref_lengths[self.value('rname')]
def get_filename(self):
return self._file_position['fileName']
def get_coord(self):
return [self._file_position['blockStart'],self._file_position['innerStart']]
def get_block_start(self):
return self._file_position['blockStart']
def get_inner_start(self):
return self._file_position['innerStart']
def get_file_position_string(self):
return 'fileName: '+self._file_position['fileName']+" "\
'blockStart: '+str(self._file_position['blockStart'])+" "\
'innerStart: '+str(self._file_position['innerStart'])
def get_tag(self,key):
cur = self._private_values.get_tags()
if not cur:
v1,v2 = _bin_to_extra(self.value('extra_bytes'))
      self._private_values.set_tags(v1) #keep the tags dict in a special place
self._private_values.set_entry('remainder',v2)
return self._private_values.get_tags()[key]['value']
def get_cigar(self):
cur = self._private_values.get_cigar()
if not cur:
v1,v2 = _bin_to_cigar(self.value('cigar_bytes'))
      self._private_values.set_cigar(v1) #keep the cigar array in a special place
self._private_values.set_entry('cigar',v2)
return self._private_values.get_cigar()
#def indexed_as_primary_alignment(self):
# ind = self._bamfileobj.index
# if not ind:
# sys.stderr.write("ERROR: to access indexed as primary alignment, and index must have been loaded\n")
# sys.exit()
# e = self._bamfileobj.index.get_index_line(self._line_number)
# if e['flag'] & 2304: return False
# return True
def value(self,key):
if not self._private_values.is_entry_key(key):
if key == 'seq':
v = _bin_to_seq(self.value('seq_bytes'))
if not v: v = '*'
self._private_values.set_entry('seq',v)
return v
elif key == 'qual':
v = _bin_to_qual(self.value('qual_bytes'))
if not v: v = '*'
self._private_values.set_entry('qual',v)
return v
elif key == 'cigar':
v1,v2 = _bin_to_cigar(self.value('cigar_bytes'))
        self._private_values.set_cigar(v1) #keep the cigar array in a special place
self._private_values.set_entry('cigar',v2)
return v2
elif key == 'remainder':
v1,v2 = _bin_to_extra(self.value('extra_bytes'))
        self._private_values.set_tags(v1) #keep the tags dict in a special place
self._private_values.set_entry('remainder',v2)
return v2
return self._private_values.get_entry(key)
class SAMHeader:
def __init__(self,header_text):
self._text = header_text
self.tags = []
self._sequence_lengths = {}
for line in self._text.split("\n"):
if len(line) == 0: continue
tag = line[1:3]
rem = line[4:]
self.tags.append({'tag':tag,'info':{}})
for c in [{'field':x[0:2],'value':x[3:]} for x in rem.split("\t")]:
self.tags[-1]['info'][c['field']] = c['value']
for v in [x['info'] for x in self.tags if x['tag'] == 'SQ']:
self._sequence_lengths[v['SN']] = int(v['LN'])
return
def get_sequence_names(self):
return self._sequence_lengths.keys()
#dictionary to get sequence lengths
def get_sequence_lengths(self):
return self._sequence_lengths
def get_sequence_length(self,sname):
return self._sequence_lengths[sname]
# reference is a dict
class BAMFile:
#def __init__(self,filename,blockStart=None,innerStart=None,cnt=None,index_obj=None,index_file=None,reference=None):
def __init__(self,filename,blockStart=None,innerStart=None,cnt=None,reference=None):
self.path = filename
self._reference = reference # dict style accessable reference
self.fh = BGZF(filename)
self._line_number = 0 # entry line number ... after header. starts with 1
# start reading the bam file
self.header_text = None
self._header = None
self.n_ref = None
self._read_top_header()
self.ref_names = []
self.ref_lengths = {}
self._output_range = None
#self.index = index_obj
self._read_reference_information()
# prepare for specific work
if self.path and blockStart is not None and innerStart is not None:
self.fh.seek(blockStart,innerStart)
#if self.index:
# lnum = self.index.get_coord_line_number([blockStart,innerStart])
# if lnum:
# self._line_number = lnum-1 #make it zero indexed
def close(self):
self.fh.close()
# return a string that is the header
def get_header(self):
if not self._header:
self._header = SAMHeader(self.header_text)
return self._header
return self._header
#def has_index(self):
# if self.index: return True
# return False
# Index file is a gzipped TSV file with these fields:
# 1. qname
# 2. target range
# 3. bgzf file block start
# 4. bgzf inner block start
# 5. aligned base count
# 6. flag
#def write_index(self,index_file,verbose=False):
# BamIndex.write_index(self.path,index_file,verbose=verbose)
# #_write_index(self.path,index_file,verbose=verbose)
#def read_index(self,index_file=None):
# #prepare index
# if index_file:
# self.index = BamIndex.BAMIndex(index_file)
# return True
# elif os.path.exists(self.path+'.bgi'):
# self.index = BamIndex.BAMIndex(self.path+'.bgi')
# return True
# return False
def __iter__(self):
return self
def read_entry(self):
e = self.read_entry2()
#print e
if self._output_range: # check and see if we are past out put range
if not e.is_aligned():
e = None
else:
rng2 = e.get_target_range()
if self._output_range.chr != rng2.chr: e = None
if self._output_range.cmp(rng2) == 1: e = None
if not e:
return None
else: return e
def next(self):
e = self.read_entry()
if not e:
raise StopIteration
else: return e
def read_entry2(self):
bstart = self.fh.get_block_start()
innerstart = self.fh.get_inner_start()
b = self.fh.read(4) # get block size bytes
if not b: return None
block_size = struct.unpack('<i',b)[0]
#print 'block_size '+str(block_size)
self._line_number += 1
bam = BAM(self.fh.read(block_size),self.ref_names,fileName=self.path,blockStart=bstart,innerStart=innerstart,ref_lengths=self.ref_lengths,reference=self._reference,line_number = self._line_number)
return bam
def _set_output_range(self,rng):
self._output_range = rng
return
#def fetch_random(self):
# cnt = self.index.get_length()
# num = random.randint(0,cnt-1)
# iline = self.index.get_index_line(num+1)
# bf2 = BAMFile(self.path,blockStart=iline['filestart'],innerStart=iline['innerstart'])
# bam = bf2.read_entry()
# bf2.close()
# return bam
#def fetch_by_range(self,rng):
# coord = self.index.get_range_start_coord(rng)
# line_number = self.index.get_range_start_line_number(rng)
# if not coord: return None
# b2 = BAMFile(self.path,blockStart=coord[0],innerStart=coord[1],index_obj=self.index,reference=self._reference)
# b2._set_output_range(rng)
# return b2
# A special way to access via bam
#def fetch_by_query(self,name):
# bams = []
# for coord in self.index.get_coords_by_name(name):
# b2 = BAMFile(self.path,blockStart=coord[0],innerStart=coord[1],index_obj=self.index,reference=self._reference)
# bams.append(b2.read_entry())
# b2.close()
# return bams
# only get a single
def fetch_by_coord(self,coord):
#b2 = BAMFile(self.path,blockStart=coord[0],innerStart=coord[1],index_obj=self.index,reference=self._reference)
b2 = BAMFile(self.path,blockStart=coord[0],innerStart=coord[1],reference=self._reference)
bam = b2.read_entry()
b2.close()
b2 = None
return bam
def fetch_starting_at_coord(self,coord):
#b2 = BAMFile(self.path,blockStart=coord[0],innerStart=coord[1],index_obj=self.index,reference=self._reference)
b2 = BAMFile(self.path,blockStart=coord[0],innerStart=coord[1],reference=self._reference)
return b2
def _read_reference_information(self):
for n in range(self.n_ref):
l_name = struct.unpack('<i',self.fh.read(4))[0]
name = self.fh.read(l_name).rstrip('\0')
l_ref = struct.unpack('<i',self.fh.read(4))[0]
self.ref_lengths[name] = l_ref
self.ref_names.append(name)
def _read_top_header(self):
magic = self.fh.read(4)
l_text = struct.unpack('<i',self.fh.read(4))[0]
self.header_text = self.fh.read(l_text).rstrip('\0')
self.n_ref = struct.unpack('<i',self.fh.read(4))[0]
def _parse_bam_data_block(bin_in,ref_names):
v = {}
data = StringIO(bin_in)
rname_num = struct.unpack('<i',data.read(4))[0]
v['rname'] = ref_names[rname_num] #refID to check in ref names
v['pos'] = struct.unpack('<i',data.read(4))[0] + 1 #POS
bin_mq_nl = struct.unpack('<I',data.read(4))[0]
bin = bin_mq_nl >> 16
v['mapq'] = (bin_mq_nl & 0xFF00) >> 8 #mapq
l_read_name = bin_mq_nl & 0xFF #length of qname
flag_nc = struct.unpack('<I',data.read(4))[0] #flag and n_cigar_op
v['flag'] = flag_nc >> 16
n_cigar_op = flag_nc & 0xFFFF
l_seq = struct.unpack('<i',data.read(4))[0]
rnext_num = struct.unpack('<i',data.read(4))[0]
if rnext_num == -1:
v['rnext'] = '*'
else:
v['rnext'] = ref_names[rnext_num] #next_refID in ref_names
v['pnext'] = struct.unpack('<i',data.read(4))[0]+1 #pnext
tlen = struct.unpack('<i',data.read(4))[0]
v['tlen'] = tlen
v['qname'] = data.read(l_read_name).rstrip('\0') #read_name or qname
#print 'n_cigar_op '+str(n_cigar_op)
v['cigar_bytes'] = data.read(n_cigar_op*4)
#print 'cigar bytes '+str(len(v['cigar_bytes']))
v['seq_bytes'] = data.read((l_seq+1)/2)
v['qual_bytes'] = data.read(l_seq)
v['extra_bytes'] = data.read()
#last second tweak
if v['rnext'] == v['rname']: v['rnext'] = '='
return v
def _bin_to_qual(qual_bytes):
if len(qual_bytes) == 0: return '*'
if struct.unpack('<B',qual_bytes[0])[0] == 0xFF: return '*'
#print qual_bytes
#try:
qual = ''.join([chr(struct.unpack('<B',x)[0]+33) for x in qual_bytes])
#except:
# return '*'
return qual
def _bin_to_seq(seq_bytes):
if len(seq_bytes) == 0: return None
global _bam_char
#print len(seq_bytes)
seq = ''.join([''.join([''.join([chr(z+97).translate(_bam_char) for z in [y>>4,y&0xF]]) for y in struct.unpack('<B',x)]) for x in seq_bytes]).rstrip('=')
return seq
def _bin_to_cigar(cigar_bytes):
global _bam_ops
if len(cigar_bytes) == 0: return [[],'*']
cigar_packed = [struct.unpack('<I',x)[0] for x in \
[cigar_bytes[i:i+4] for i in range(0,len(cigar_bytes),4)]]
cigar_array = [[c >> 4, str(c &0xF).translate(_bam_ops)] for c in cigar_packed]
cigar_seq = ''.join([''.join([str(x[0]),x[1]]) for x in cigar_array])
return [cigar_array,cigar_seq]
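# Illustrative (hedged) example of the decoding above, packing single ops with struct:
#   _bin_to_cigar(struct.pack('<I', (10 << 4) | 0))  ->  [[[10, 'M']], '10M']
#   _bin_to_cigar(struct.pack('<I', (5 << 4) | 4))   ->  [[[5, 'S']], '5S']
# i.e. the high 28 bits hold the op length and the low 4 bits index into 'MIDNSHP=X'.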
#Pre: all the remaining bytes of an entry
#Post an array of
# 1. A dict keyed by Tag with {'type':,'value':} where value is a string unless type is i
# 2. A string of the remainder
def _bin_to_extra(extra_bytes):
global _bam_value_type
extra = StringIO(extra_bytes)
tags = {}
rem = ''
while extra.tell() < len(extra_bytes):
tag = extra.read(2)
val_type = extra.read(1)
if val_type == 'Z':
rem += tag+':'
rem += val_type+':'
p = re.compile('([!-~])')
m = p.match(extra.read(1))
vre = ''
while m:
vre += m.group(1)
c = extra.read(1)
#print c
m = p.match(c)
rem += vre+"\t"
tags[tag] = {'type':val_type,'value':vre}
elif val_type == 'A':
rem += tag+':'
rem += val_type+':'
vre = extra.read(1)
rem += vre+"\t"
tags[tag] = {'type':val_type,'value':vre}
elif val_type in _bam_value_type:
rem += tag+':'
rem += 'i'+':'
val = struct.unpack(_bam_value_type[val_type][1],extra.read(_bam_value_type[val_type][0]))[0]
rem += str(val)+"\t"
tags[tag] = {'type':val_type,'value':val}
elif val_type == 'B':
      sys.stderr.write("WARNING B-type array tags not implemented\n")
continue
rem += tag+':'
rem += val_type+':'
array_type = _bam_value_type[extra.read(1)]
element_count = struct.unpack('<I',extra.read(4))[0]
array_bytes = extra.read(element_count*_bam_value_type[array_type][0])
for by in [array_bytes[i:i+_bam_value_type[array_type][0]] for i in range(0,len(array_bytes),_bam_value_type[array_type][0])]:
aval = struct.unpack(_bam_value_type[array_type][1],by)
return [tags,rem.rstrip("\t")]
class BGZF:
# Methods adapted from biopython's bgzf.py
def __init__(self,filename,blockStart=None,innerStart=None):
self.path = filename
self.fh = open(filename,'rb')
if blockStart: self.fh.seek(blockStart)
self._block_start = 0
#self.pointer = 0
#holds block_size and data
self._buffer = self._load_block()
self._buffer_pos = 0
if innerStart: self._buffer_pos = innerStart
def close(self):
self.fh.close()
def get_block_start(self):
return self._block_start
def get_inner_start(self):
return self._buffer_pos
def seek(self,blockStart,innerStart):
self.fh.seek(blockStart)
self._buffer_pos = 0
self._buffer = self._load_block()
self._buffer_pos = innerStart
def read(self,size):
done = 0 #number of bytes that have been read so far
v = ''
while True:
if size-done < len(self._buffer['data']) - self._buffer_pos:
v += self._buffer['data'][self._buffer_pos:self._buffer_pos+(size-done)]
self._buffer_pos += (size-done)
#self.pointer += size
return v
else: # we need more buffer
vpart = self._buffer['data'][self._buffer_pos:]
self._buffer = self._load_block()
v += vpart
self._buffer_pos = 0
if len(self._buffer['data'])==0: return v
done += len(vpart)
def _load_block(self):
#pointer_start = self.fh.tell()
if not self.fh: return {'block_size':0,'data':''}
self._block_start = self.fh.tell()
magic = self.fh.read(4)
if len(magic) < 4:
#print 'end?'
#print len(self.fh.read())
return {'block_size':0,'data':''}
gzip_mod_time, gzip_extra_flags, gzip_os,extra_len = struct.unpack("<LBBH",self.fh.read(8))
pos = 0
block_size = None
#get block_size
while pos < extra_len:
subfield_id = self.fh.read(2)
subfield_len = struct.unpack("<H",self.fh.read(2))[0]
subfield_data = self.fh.read(subfield_len)
pos += subfield_len+4
if subfield_id == 'BC':
block_size = struct.unpack("<H",subfield_data)[0]+1
#block_size is determined
deflate_size = block_size - 1 - extra_len - 19
d = zlib.decompressobj(-15)
data = d.decompress(self.fh.read(deflate_size))+d.flush()
expected_crc = self.fh.read(4)
expected_size = struct.unpack("<I",self.fh.read(4))[0]
if expected_size != len(data):
sys.stderr.write("ERROR unexpected size\n")
sys.exit()
crc = zlib.crc32(data)
if crc < 0: crc = struct.pack("<i",crc)
else: crc = struct.pack("<I",crc)
if crc != expected_crc:
sys.stderr.write("ERROR crc fail\n")
sys.exit()
return {'block_size':block_size, 'data':data}
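# ------------------------------------------------------------------
# Illustrative sketch (never called; the path argument is only a
# placeholder): minimal example of how the classes above fit together.
# BGZF handles the block-compressed stream and BAMFile decodes the
# header and yields BAM entries; here we collect (qname, chr, start,
# end) for the first few aligned reads.
# ------------------------------------------------------------------
def _example_iterate_bam(path='/path/to/alignments.bam', max_entries=5):
  out = []
  bf = BAMFile(path)
  for e in bf:                      # BAMFile implements __iter__/next
    if not e.is_aligned(): continue
    rng = e.get_target_range()      # GenomicRange on the reference
    out.append((e.value('qname'), rng.chr, rng.start, rng.end))
    if len(out) >= max_entries: break
  bf.close()
  return out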
class SamStream:
# minimum_intron_size greater than zero will only show sam entries with introns (junctions)
# minimum_overhang greater than zero will require some minimal edge support to consider an intron (junction)
def __init__(self,fh=None,minimum_intron_size=0,minimum_overhang=0,reference=None):
self.previous_line = None
self.in_header = True
self._reference = reference
self.minimum_intron_size = minimum_intron_size
self.minimum_overhang = minimum_overhang
if minimum_intron_size <= 0:
self.junction_only = False
else:
self.junction_only = True
self.minimum_intron_size = minimum_intron_size
self._header = None
self.header_text = ''
if fh:
self.fh = fh
self.assign_handle(fh)
self.get_header()
# return a string that is the header
def get_header(self):
if not self._header:
self._header = SAMHeader(self.header_text)
return self._header
return self._header
def set_junction_only(self,mybool=True):
self.junction_only = mybool
def assign_handle(self,fh):
if self.in_header:
while True:
self.previous_line = fh.readline()
if is_header(self.previous_line):
self.header_text += self.previous_line
#self.header.append(self.previous_line)
else:
self.in_header = False
self.previous_line = self.previous_line
break
    # make sure our first line is a junction line when junction_only is set
if self.junction_only:
while True:
if not self.previous_line: break
if is_junction_line(self.previous_line,self.minimum_intron_size,self.minimum_overhang): break
self.previous_line = self.fh.readline()
def __iter__(self):
return self
def next(self):
r = self.read_entry()
if not r:
raise StopIteration
else:
return r
def read_entry(self):
if not self.previous_line: return False
out = self.previous_line
self.previous_line = self.fh.readline()
if self.junction_only:
while True:
if not self.previous_line: break
if is_junction_line(self.previous_line,self.minimum_intron_size,self.minimum_overhang): break
self.previous_line = self.fh.readline()
if out:
s = SAM(out,reference=self._reference)
s.get_range()
return s
return None
def is_junction_line(line,minlen=68,minoverhang=0):
prog = re.compile('([0-9]+)([NMX=])')
f = line.rstrip().split("\t")
v = prog.findall(f[5])
#get the indecies of introns
ns = [i for i in range(0,len(v)) if v[i][1]=='N' and int(v[i][0]) >= minlen]
if len(ns) == 0: return False
if minoverhang==0: return True
good_enough = False
for intron_index in ns:
left = sum([int(x[0]) for x in v[0:intron_index] if x[1] != 'N'])
right = sum([int(x[0]) for x in v[intron_index+1:] if x[1] != 'N'])
worst = min(left,right)
if worst >= minoverhang: good_enough = True
if good_enough: return True
return False
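# Illustrative (hedged) example: for a SAM line whose CIGAR field is '30M1000N40M',
# is_junction_line(line) finds one 'N' gap of 1000 >= minlen; with minoverhang=10 the
# flanking matches give min(30, 40) = 30 >= 10, so the line is reported as a junction.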
#pre: a flag from a sam file, in integer format
#     and a bit to check, given as a hex number e.g. 0x10
#post: returns true if the flag bit is set (see check_flag() at the bottom of this module)
def is_header(line):
if re.match('^@',line):
f = line.rstrip().split("\t")
if(len(f) > 9):
return False
return True
return False
class SamtoolsBAMStream(SamStream):
def __init__(self,path,minimum_intron_size=0,minimum_overhang=0,reference=None):
self.previous_line = None
self.in_header = True
self._reference = reference
self.minimum_intron_size = minimum_intron_size
self.minimum_overhang = minimum_overhang
if minimum_intron_size <= 0:
self.junction_only = False
else:
self.junction_only = True
self.minimum_intron_size = minimum_intron_size
self.header_text = ''
self._header = None
self.path = path
cmd = 'samtools view -h '+self.path
self.fh_orig = Popen(cmd.split(),stdout=PIPE)
self.fh = self.fh_orig.stdout
self.assign_handle(self.fh)
self.get_header()
def close(self):
self.fh_orig.communicate()
#def write_index(self,opath,verbose=False):
# BamIndex.write_index(self.path,opath,verbose=verbose,samtools=True)
# #_write_index(self.path,opath,verbose=verbose,samtools=True)
def sort_header(header_text):
#sort the chromosomes in a header text
lines = header_text.rstrip().split("\n")
rlens = {}
for ln in lines:
m = re.match('@SQ\tSN:(\S+)\tLN:(\S+)',ln)
if m:
rlens[m.group(1)] = m.group(2)
output = ''
done_lens = False
for ln in lines:
if re.match('@SQ\tSN:',ln):
if not done_lens:
done_lens = True
for chr in sorted(rlens.keys()):
output += "@SQ\tSN:"+chr+"\tLN:"+str(rlens[chr])+"\n"
else:
output += ln.rstrip("\n")+"\n"
return output
def check_flag(flag,inbit):
if flag & inbit: return True
return False
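# Illustrative (hedged) example: SAM flag 99 = 0x1|0x2|0x20|0x40 (paired, properly paired,
# mate on reverse strand, first in pair), so check_flag(99, 0x2) is True while
# check_flag(99, 0x10) is False (the read itself is not on the reverse strand).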
|
jason-weirather/Au-public
|
iron/pythonlib/Bio/Format/Sam.py
|
Python
|
apache-2.0
| 29,728
|
[
"Biopython"
] |
280839cea3b1c48f639db57149fcc354f1155e6e75e8b36db05b44a954c5598c
|
# - Coding UTF8 -
#
# Networked Decision Making
# Development Sites (source code):
# http://code.google.com/p/global-decision-making-system/
# http://github.com/NewGlobalStrategy/NetDecisionMaking
#
# Demo Sites (Google App Engine)
# http://netdecisionmaking.appspot.com
# http://globaldecisionmaking.appspot.com
#
# License Code: MIT
# License Content: Creative Commons Attribution 3.0
#
# Also visit: www.web2py.com
# or Groups: http://groups.google.com/group/web2py
# For details on the web framework used for this development
#
# Developed by Russ King ([email protected]
# Russ also blogs occasionally to pass the time at:
# http://proudofyourplanent.blogspot.com
# His general thinking on why this project is very important is available at
# http://www.scribd.com/doc/98216626/New-Global-Strategy
# With thanks to Guido, Massimo and many other that make this sort of thing
# much easier than it used to be
# This controller has 3 functions:
# my_questions for reviewing progress on questions you have asked
# my_answers for reviewing your answers
# resolved for reviewing resolved questions
#paper.on('cell:pointerdown',
# function(cellView, evt, x, y) {
# alert('cell view ' + cellView.model.id + ' was clicked');
# }
#);
"""
This controller provides the following functions:
new_event - for creating events
accept_event - when event submitted
my_events - for creating, updating and deleting events
index - for a list of events
eventqry - a loadable query for events - typically split by upcoming, future and past
eventbar - a single column list of events for the sidebar
viewevent - the main detailed page on events which will mainly be accessed from event or the sidebars
and load functions
link - Ajax for linking and unlinking questions from events
move - Ajax for moving event questions around
"""
import datetime
from netx2py import getpositions
from ndsfunctions import getwraptext
from jointjs2py import colourcode, textcolour
@auth.requires_login()
def new_event():
#This allows creation of an event
fields = ['event_name', 'locationid', 'startdatetime', 'enddatetime',
'description', 'shared']
form = SQLFORM(db.event, fields=fields, formstyle='table3cols')
form.vars.locationid = db(db.location.location_name =='Unspecified').select(db.location.id, cache=(cache.ram,3600), cacheable=True).first().id
if form.validate():
form.vars.id = db.event.insert(**dict(form.vars))
#response.flash = 'form accepted'
session.event_name = form.vars.id
redirect(URL('accept_event', args=[form.vars.id]))
#redirect(URL('accept_question',args=[form.vars.qtype]))
elif form.errors:
response.flash = 'form has errors'
else:
response.flash = 'please fill out the form'
return dict(form=form)
def accept_event():
response.flash = "Event Created"
eventid = 0
if len(request.args) > 0:
eventid = request.args(0)
else:
redirect(URL('new_event'))
return dict(eventid=eventid)
@auth.requires_login()
def my_events():
query1 = db.event.owner == auth.user.id
myfilter = dict(event=query1)
grid = SQLFORM.smartgrid(db.event, formstyle=SQLFORM.formstyles.bootstrap3, constraints=myfilter, searchable=False)
return locals()
def index():
if len(request.args):
scope = request.args[0]
else:
scope = 'Unspecified'
#if scope == 'My':
# query = (db.event.auth_userid == auth.user.id)
#else:
query = (db.event.id > 0)
datenow = datetime.datetime.utcnow()
#start_date = end_date - datetime.timedelta(days=8)
#difference_in_days = abs((end_date - start_date).days)
#print difference_in_days
#this fails on gae as too many inequalities
if len(request.args) < 2 or request.args[1] == 'Upcoming':
#query = query & (db.event.startdatetime > datenow) & ((db.event.startdatetime - datenow) < 8.0)
query = (db.event.startdatetime > datenow)
elif request.args[1] == 'Future':
#query = query & (db.event.startdatetime > datenow) & ((db.event.startdatetime - datenow) >= 8.0)
query = (db.event.startdatetime > datenow)
if scope == 'My':
query = (db.event.auth_userid == auth.user.id)
events = db(query).select(db.event.id, db.event.event_name, db.event.description,
db.event.startdatetime, db.event.enddatetime, db.event.locationid, db.event.owner,
orderby=[db.event.startdatetime], cache=(cache.ram, 1200), cacheable=True)
return dict(events=events)
def eventqry():
if len(request.args):
scope = request.args[0]
else:
scope = 'Unspecified'
datenow = datetime.datetime.utcnow()
if len(request.args) < 2 or request.args[1] == 'Upcoming':
#query = query & (db.event.startdatetime > datenow) & ((db.event.startdatetime - datenow) < 8.0)
query = (db.event.startdatetime > datenow)
elif request.args[1] == 'Future':
#query = query & (db.event.startdatetime > datenow) & ((db.event.startdatetime - datenow) >= 8.0)
query = (db.event.startdatetime > datenow)
if scope == 'My':
query = (db.event.auth_userid == auth.user.id)
orderby = [db.event.startdatetime]
events = db(query).select(db.event.id, db.event.event_name, db.event.description,
db.event.startdatetime, db.event.enddatetime, db.event.locationid, db.event.owner,
orderby=orderby, cache=(cache.ram, 1200), cacheable=True)
return dict(events=events)
def eventbar():
datenow = datetime.datetime.utcnow()
#line below fails on gae for some reason and limitby may be fine instead to not get too many
#query = (db.event.startdatetime > datenow) & ((db.event.startdatetime - datenow) < 8.0)
query = (db.event.startdatetime > datenow)
orderby = [db.event.startdatetime]
events = db(query).select(db.event.id, db.event.event_name, db.event.description,
db.event.startdatetime, db.event.enddatetime, db.event.locationid, db.event.owner,
orderby=orderby, cache=(cache.ram, 1200), cacheable=True)
return dict(events=events)
def viewevent():
#This is a non-network view of events - think this will be removed
#just use vieweventmap instead
eventid = 0
if len(request.args):
eventid = int(request.args[0])
else:
redirect(URL('index'))
eventrow = db(db.event.id == eventid).select(db.event.id, db.event.event_name, db.event.description,
db.event.startdatetime,
db.event.enddatetime, db.event.locationid,
db.event.owner, db.event.shared, cache=(cache.ram, 1200),
cacheable=True).first()
session.eventid = eventid
return dict(eventrow=eventrow, eventid=eventid)
@auth.requires_login()
def eventaddquests():
#Think this is a non-network view of events
page = 0
eventid = 0
if len(request.args):
eventid = int(request.args[0])
if len(request.args) > 1:
page = int(request.args[1])
else:
redirect(URL('index'))
eventrow = db(db.event.id == eventid).select(db.event.id, db.event.event_name, db.event.description,
db.event.startdatetime,
db.event.enddatetime, db.event.locationid,
db.event.owner, db.event.shared).first()
session.event_name = eventrow.event_name
unspecevent = db(db.event.event_name == 'Unspecified').select(db.event.id).first().id
    #The plan is to list items not yet linked to the event - for now this will be a full view on a page,
    #as that will be easier to manage for the view and the enquiry types. We probably also want a
    #"remove questions from event" option - both will be ajax calls for now, returning to a div.
query = (db.question.eventid == eventid) & (db.question.qtype == 'quest')
sortby = ~db.question.createdate
items_per_page = 20
limitby = (page * items_per_page, (page + 1) * items_per_page + 1)
quests = db(query).select(
db.question.id, db.question.status, db.question.questiontext, db.question.duedate,
db.question.responsible, db.question.priority, db.question.achieved, db.question.level,
db.question.correctanstext, db.question.numagree, db.question.numdisagree,
db.question.activescope, db.question.category, db.question.continent,
db.question.country, db.question.subdivision, db.question.scopetext, db.question.priority,
orderby=sortby, limitby=limitby)
query = (db.question.eventid == eventid) & (db.question.qtype == 'action')
actions = db(query).select(
db.question.id, db.question.status, db.question.questiontext, db.question.duedate,
db.question.responsible, db.question.priority, db.question.achieved, db.question.level,
db.question.correctanstext, db.question.numagree, db.question.numdisagree,
db.question.activescope, db.question.category, db.question.continent,
db.question.country, db.question.subdivision, db.question.scopetext,
orderby=sortby, limitby=limitby)
query = (db.question.eventid == unspecevent) & (db.question.qtype == 'quest')
othquests = db(query).select(
db.question.id, db.question.status, db.question.questiontext, db.question.duedate,
db.question.responsible, db.question.priority, db.question.achieved, db.question.level,
db.question.correctanstext, db.question.numagree, db.question.numdisagree,
db.question.activescope, db.question.category, db.question.continent,
db.question.country, db.question.subdivision, db.question.scopetext, db.question.priority,
orderby=sortby, limitby=limitby)
query = (db.question.eventid == unspecevent) & (db.question.qtype == 'action')
othactions = db(query).select(
db.question.id, db.question.status, db.question.questiontext, db.question.duedate,
db.question.responsible, db.question.priority, db.question.achieved, db.question.level,
db.question.correctanstext, db.question.numagree, db.question.numdisagree,
db.question.activescope, db.question.category, db.question.continent,
db.question.country, db.question.subdivision, db.question.scopetext,
orderby=sortby, limitby=limitby)
return dict(eventrow=eventrow, eventid=eventid, quests=quests, actions=actions, othquests=othquests,
othactions=othactions,
page=page, items_per_page=items_per_page)
def vieweventmap():
    #This now has a load option and works fine when events are set up. However, the redirect is a
    #problem if there are no events, since the page then loads with another layout html and fails badly;
    #for now it is better to simply return a message when there is no selection.
grwidth = 800
grheight = 600
FIXWIDTH = 800
FIXHEIGHT = 600
resultstring = ''
gotevent=True
if len(request.args) and int(request.args[0]) > 0:
eventid = int(request.args[0])
else:
datenow = datetime.datetime.utcnow()
#query = (db.event.startdatetime > datenow) & (db.event.event_name != 'Unspecified') fails on gae 2 inequalities
query = (db.event.startdatetime > datenow)
events = db(query).select(db.event.id, orderby=[db.event.startdatetime]).first()
if events:
eventid = events.id
else:
response.view = 'noevent.load'
return dict(resultstring='No Event')
if len(request.args) > 2:
grwidth = int(request.args[1])
grheight = int(request.args[2])
eventrow = db(db.event.id == eventid).select().first()
eventmap = db(db.eventmap.eventid == eventid).select()
query = db.question.eventid == eventid
# quests=db(db.question.id.belongs([4,8,10])).select(db.question.id, db.question.questiontext,
# db.question.correctanstext, db.question.status, db.question.level)
quests = db(query).select(db.question.id, db.question.questiontext, db.question.correctanstext, db.question.status,
db.question.level, db.question.qtype, db.question.category, db.question.priority,
cache=(cache.ram, 120), cacheable=True)
questlist = [x.id for x in quests]
if not questlist:
response.view = 'noevent.load'
return dict(resultstring='No Event')
parentlist = questlist
childlist = questlist
#removed for gae for now
#intquery = (db.questlink.targetid.belongs(questlist)) & (db.questlink.status == 'Active') & (
#db.questlink.sourceid.belongs(questlist))
#this fails on gae as two inequalities
#intlinks = db(intquery).select(db.questlink.id, db.questlink.sourceid, db.questlink.targetid,
# db.questlink.createcount, db.questlink.deletecount)
intquery = (db.questlink.status == 'Active') & (db.questlink.sourceid.belongs(questlist))
intlinks = db(intquery).select(db.questlink.id, db.questlink.sourceid, db.questlink.targetid,
db.questlink.createcount, db.questlink.deletecount,cache=(cache.ram, 120), cacheable=True)
links = [x.sourceid for x in intlinks]
if links:
linklist = [(x.sourceid, x.targetid) for x in intlinks]
else:
linklist = []
    # The idea is to put the event as a node at the top of the graph, so everything else may be offset
    # by say 200 to leave that space. The first question, if it exists, should be at a fixed position,
    # but let's add that later; given the query doesn't seem to work, it may be better to add the event
    # as a node, although that causes some issues as well. Let's also remove the ports for now in the
    # view and see how that goes.
    # OK, so now we have the questions, but we also need the list of links to draw the graph - same
    # approach with a rows object. This whole first-question piece doesn't appear to work, so revert to
    # standard for now and don't really set a first question either; spring weights might be more
    # important in due course.
if not eventmap and quests:
nodepositions = getpositions(questlist, linklist)
#think we insert them into the eventmap here and then run the query and may need to re-run if get wrong
#number because of gae
for key in nodepositions:
recid = db.eventmap.insert(eventid=eventid, questid=key, xpos=(nodepositions[key][0] * FIXWIDTH), ypos=(nodepositions[key][1] * FIXHEIGHT))
#Make sure everything picked up
eventmap = db(db.eventmap.eventid == eventid).select()
#so could then emerge here always with an eventmap established (probably as a dictionary rather than node positions
if eventmap is None:
redirect(URL('index'))
#thinking about doing a similar thing for parent child view - but not sure that's practical
#insert from viewquest to go through - so this may be made into a separate routine
questmap = {}
qlink = {}
keys = '['
for x in quests:
if x['qtype'] == 'action':
width = 200
height = 140
wraplength = 30
else:
width = 160
height = 200
wraplength = 25
qtext = getwraptext(x.questiontext, x.correctanstext, wraplength)
rectcolour = colourcode(x.qtype, x.status, x.priority)
colourtext = textcolour(x.qtype, x.status, x.priority)
strobj = 'Nod' + str(x.id)
#questmap[strobj] = [nodepositions[x.id][0] * grwidth, 200 + nodepositions[x.id][1] * grheight, qtext,
# rectcolour, 12, 'lr', width, height]
questmap[strobj] = [0, 0, qtext, rectcolour, 12, 'tb', width, height, colourtext]
keys += strobj
keys += ','
if eventmap is not None:
for row in eventmap:
strobj = 'Nod' + str(row.questid)
questmap[strobj][0] = row.xpos
questmap[strobj][1] = row.ypos
        #If we have siblings and partners and the layout is directionless, we may need to look at joining
        #to the best port or locating the ports at the best places on the shape. Most questions will only
        #have one or two connections, so two ports may well be enough; we just need to figure out where
        #the ports should be and then link to the appropriate one, which probably means iterating through
        #quests and links for each question. We should perhaps move back to the idea of an in and out
        #port and then position them, possibly by rotation, on the document - work in progress.
        #This graph will ultimately NOT use ports, as it is view-only and we would like html to work;
        #the links can perhaps be the same as the standard ones once the graph is created.
for x in intlinks:
strlink = 'Lnk' + str(x.id)
strsource = 'Nod' + str(x.sourceid)
strtarget = 'Nod' + str(x.targetid)
if questmap[strtarget][1] > questmap[strsource][1]:
sourceport = 'b'
targetport = 't'
else:
sourceport = 't'
targetport = 'b'
if x.createcount - x.deletecount > 1:
dasharray = False
linethickness = min(3 + x.createcount, 7)
else:
dasharray = True
linethickness = 3
qlink[strlink] = [strsource, strtarget, sourceport, targetport, dasharray, linethickness]
keys += strlink
keys += ','
keys = keys[:-1] + ']'
#This may now be a questmap - will need to come back to fixing the position and adding in the link to the event
session.networklist = [x.id for x in quests]
session.eventid = eventid
return dict(eventrow=eventrow, quests=quests, links=links, resultstring=resultstring, eventmap=eventmap,
questmap=questmap, keys=keys, qlink=qlink, eventid=eventid)
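# Illustrative (hedged) sketch of the structures handed to the view above; the ids are made up:
#   questmap['Nod42'] = [xpos, ypos, wrapped_text, rectcolour, 12, 'tb', width, height, colourtext]
#   qlink['Lnk7'] = ['Nod42', 'Nod43', sourceport, targetport, dasharray, linethickness]
#   keys = '[Nod42,Nod43,Lnk7]'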
def link():
# This allows linking questions to an event via ajax
eventid = request.args[0]
chquestid = request.args[1]
action = request.args[2]
eventmapexists = 'T' # Change to request.args[3] presently
fixedx = 600
fixedy = 500
if auth.user is None:
        responsetext = 'You must be logged in to link or unlink questions'
else:
#questrows = db(db.question.id == chquestid).select()
#quest = questrows.first()
quest = db(db.question.id == chquestid).select(db.question.id, db.question.eventid,
db.question.auth_userid).first()
unspecevent = db(db.event.event_name == 'Unspecified').select(db.event.id, cache=(cache.ram, 3600),).first()
#Think about where this is secured - should probably be here
event = db(db.event.id == eventid).select(db.event.id, db.event.event_name, db.event.owner,
db.event.shared).first()
if event.shared or (event.owner == auth.user.id) or (quest.auth_userid == auth.user.id):
if action == 'unlink':
db(db.question.id == chquestid).update(eventid=unspecevent.id)
responsetext = 'Question unlinked'
else:
db(db.question.id == chquestid).update(eventid=eventid)
responsetext = 'Question linked to event'
#Then if there was an eventmap it should require to be linked to
#to the eventmap but if not it shouldn't - this may need to be an arg
if eventmapexists == 'T':
db.eventmap.insert(eventid=eventid, questid=chquestid, xpos=fixedx, ypos=fixedy)
else:
responsetext = 'Not allowed - This event and you are not the owner'
return responsetext
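    # Illustrative (hedged) call pattern from the client-side javascript; ids are made up:
    #   ajax('{{=URL("event", "link")}}/' + eventid + '/' + questid + '/link', [], ':eval')
    # i.e. request.args = [eventid, chquestid, action], with action either 'link' or 'unlink'.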
def move():
# This will allow moving the position of questions on an eventmap - but not on a general map at present
# as no obvious way to save them - however think we will comment out the code if not authorised
eventid = request.args[0]
chquestid = request.args[1]
newxpos = request.args[2]
newypos = request.args[3]
questid = int(chquestid[3:])
if auth.user is None:
responsetext = 'You must be logged in to save movements'
else:
#questrows = db(db.question.id == chquestid).select()
#quest = questrows.first()
#eventpos = db(db.eventmap.questid == chquestid).select().first()
event = db(db.event.id == eventid).select().first()
if event.shared or (event.owner == auth.user.id):
db(db.eventmap.questid == questid).update(xpos=newxpos, ypos=newypos)
responsetext = 'Element moved'
else:
            responsetext = 'Moves not saved - you must be owner of ' + event.event_name + ' to save changes'
return responsetext
|
NewGlobalStrategy/NetDecisionMaking
|
controllers/event.py
|
Python
|
mit
| 21,172
|
[
"VisIt"
] |
344ae084912904dee568726ba9fe4f719c38573e60a03acaaac67645645080c9
|
# ==============================================================================
# Copyright 2015 The Paragon Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#=====================================================================================================
__main__ = '''
The Paragon framework, made in Python, C++, Prolog, Fortran, and C.
External libraries are not owned by me, CubeCorps, or any individual currently
associated with CubeCorps or the Paragon project. All rights remain with their
respective copyright owners.
Simulated Artificially Intelligent Companion
-= Author: Klaminite & Blue =-
-= Project Name: The Paragon Project =-
About:
Simulates intelligence using external libraries and internal code to parse data,
graph and predict the modeled equation. On top of the neural networks here, we also have an interface.
'''
#=====================================================================================================
__about__ = '''Simulates intelligence using external libraries and internal code to parse data,
graph and predict the modeled equation. On top of the recurrent neural networks, we have perceptrons, SVMs, Bayes' theorem, and
more.'''
__version__ = '1.3.2'
'''
Initialize the system-wide variables
here
'''
from mpi4py import MPI # needed for the COMM_WORLD rank used below
comm = MPI.COMM_WORLD
rank = comm.Get_rank() #Applies computer ranking for the backend servers
null_error = '//NULL//ERROR'
"""
#debug later
class Intelligence():
'''
All the machine learning goes on here, scipy classifaction, etc.
'''
#variables
x = 0
def lingTran(word):
#=====================
import textblob, nltk
#=====================
'''
Translate any language to english, or to any other
language
'''
def txtSum():
#=====================
import nltk
from textblob import TextBlob
#=====================
trs_message = TextBlob(message)
def science():
None
def sIC():
'''
Still Image Recognizer/classifier using the imagenet model,
trained off of keras, only trains once, and prints form in tuples.
'''
#Not for use with the webcam, although that might be a neat idea
model = ResNet50(weights='imagenet')
img_path = 'elephant.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
# decode the results into a list of tuples (class, description, probability)
# (one such list for each sample in the batch)
print('Predicted:', decode_predictions(preds, top=3)[0])
# Predicted: [(u'n02504013', u'Indian_elephant', 0.82658225), (u'n01871265', u'tusker', 0.1122357), (u'n02504458', u'African_elephant', 0.061040461)]
class AngularGyrus():
'''
Must call a mathematical function
'''
#variables
x = 0
def Mach_FFT(x):
#Compute a fast fourier transform on a seperate computer to ease loads
x1 = arange(x)
return fft(x1)
def Matr_Det(x):
#Computes the determinate of matrice X
answer = sp.det(x)
return answer
def digitRecon():
'''
useful for when your handwriting becomes sloppy
'''
# Import the modules
import cv2
from sklearn.externals import joblib
from skimage.feature import hog
import numpy as np
# Load the classifier
clf = joblib.load("digits_cls.pkl")
# Read the input image
im = cv2.imread("photo_1.jpg")
# Convert to grayscale and apply Gaussian filtering
im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
im_gray = cv2.GaussianBlur(im_gray, (5, 5), 0)
# Threshold the image
ret, im_th = cv2.threshold(im_gray, 90, 255, cv2.THRESH_BINARY_INV)
# Find contours in the image
ctrs, hier = cv2.findContours(im_th.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Get rectangles contains each contour
rects = [cv2.boundingRect(ctr) for ctr in ctrs]
# For each rectangular region, calculate HOG features and predict
# the digit using Linear SVM.
for rect in rects:
# Draw the rectangles
cv2.rectangle(im, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (0, 255, 0), 3)
# Make the rectangular region around the digit
leng = int(rect[3] * 1.6)
pt1 = int(rect[1] + rect[3] // 2 - leng // 2)
pt2 = int(rect[0] + rect[2] // 2 - leng // 2)
roi = im_th[pt1:pt1+leng, pt2:pt2+leng]
# Resize the image
roi = cv2.resize(roi, (28, 28), interpolation=cv2.INTER_AREA)
roi = cv2.dilate(roi, (3, 3))
# Calculate the HOG features
roi_hog_fd = hog(roi, orientations=9, pixels_per_cell=(14, 14), cells_per_block=(1, 1), visualise=False)
nbr = clf.predict(np.array([roi_hog_fd], 'float64'))
cv2.putText(im, str(int(nbr[0])), (rect[0], rect[1]),cv2.FONT_HERSHEY_DUPLEX, 2, (0, 255, 255), 3)
cv2.imshow("Resulting Image with Rectangular ROIs", im)
cv2.waitKey()
"""
class spn():
'''
This contains all the classes and functions that is utilized by the master node; also used as the "core" of the software.
'''
def main():
#Start by loading all libraries
sys.path.append('./Paragon/Drivers')
print("\033[0;31m[System]" + "\033[0;32m | Importing all modules from system;")
#===============================================================================================
#import all of the needed files here, note they all are imported via importance.
try:
import os, subprocess, signal, pexpect, time, datetime, random, Speech, protocols, pyaudio, pprint, json, nltk, scipy, math, textblob, webbrowser, keras
from Drivers.Speech import SpeechDriver as sr
from pygame import mixer
import yahoo_finance as fc
from time import strftime
import requests, pywapi, feedparser
import tensorflow as tf
import numpy as np
from multiprocessing import Process
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
import googlemaps as gmaps
except ImportError:
print("It appears as if you do not have all the packages!")
#===============================================================================================
'''
Load all of the files needed for
basic operation into memory
'''
print("\033[0;31m[System]" + "\033[0;32m | Loading files into memory;")
datafile = json.loads(open('./Paragon/Data/Databases/Data/data.json').read())
'''
After this, we can safely start up the system.
'''
#Globals
#sp.init_printing()
n = 0
word_to_number_mapping = {}
#My client access token.
print("VER: " + __version__)
#Start other processes within the script.
#subprocess.call("ipython3 ./Paragon/Protocols/Pitch.py &", shell=True)
#subprocess.call("ipython3 ./Paragon/Protocols/Pitch.py &", shell=True)
'''
If the webcam isn't already prioritized, then it needs to be set manually, prompting the
user for a password if they aren't dropped to root.
'''
#Checks if there is an external camera, if so, it'll use it.
'''if os.path.isdir("/dev/video1") == False:
subprocess.call("python ./Paragon/Startups/Startup_Extern_Webcam", shell=True)
subprocess.call("python ./Paragon/Protocols/vInter.py &", shell=True)
else:
subprocess.call("python ./Paragon/Protocols/vInter.py &", shell=True)
'''
print(__about__)
print(__main__)
print("//STRT//EVS//GO//VER//" + __version__)
ct = strftime("%I:%M, %p")
rand = ["Hello" + (datafile["Identity"][0]["nameFirst"]) + ", welcome back. The current time is" + repr(ct)] #go ahead and welcome whatever you set this to.
Speech.say(rand,n,mixer)
class start():
'''
The main class, other classes might be related to this or not, really
classes are just used in this program as a case around any other systems or infrastructures.
'''
def Interface():
'''
The audio version, and the primary version of the interface.
'''
doss = os.getcwd()
i=0
n=0
while (i<1):
r = sr.Recognizer()
with sr.Microphone() as source:
audio = r.adjust_for_ambient_noise(source)
n = (n+1)
audio = r.listen(source)
subprocess.call("sensors", shell=True)
'''
This uses the driver that is installed on the system
'''
try:
s = (r.recognize_google(audio))
print(s)
message = (s.lower())
# Paragon's main interface.
'''
                        Most of where this started was from a rather small github repo, in which I amassed this MONSTER code.
'''
if ('wikipedia') in message:
message = message.replace("wikipedia", "")
message = message.replace(" ", "_")
message = message.capitalize()
proxies = {
}
headers = {
"User-Agent": "Definitions/1.0"
}
params = {
'action':'query',
'prop':'extracts',
'format':'json',
'exintro':1,
'explaintext':1,
'generator':'search',
                                'gsrsearch':message,
'gsrlimit':1,
'continue':''
}
r = requests.get('http://en.wikipedia.org/w/api.php',
params=params,
headers=headers,
proxies=proxies)
                            json1 = r.json()
result = list(json1["query"]["pages"].items())[0][1]["extract"]
print(result)
rand = [(result) + '.']
Chrome = ("google-chrome %s")
webbrowser.get(Chrome)
webbrowser.open('https://en.wikipedia.org/wiki/' + message, new=2, autoraise=True)
Speech.say(rand,n,mixer)
if ('goodbye') in message:
rand = ['Goodbye ' + (datafile["Identity"][0]["pronouns"]), 'Paragon powering off']
Speech.say(rand,n,mixer)
break
if ('evening') in message:
rand = ['Good evening ' + (datafile["Identity"][0]["pronouns"])]
Speech.say(rand,n,mixer)
if ('morning') in message:
mTime = time.strftime('%B:%d:%Y')
rand = ['Good morning ' + (datafile["Identity"][0]["pronouns"]) + ', I grabbed the news for,' + mTime]
Chrome = ("google-chrome %s")
Speech.say(rand,n,mixer)
webbrowser.get(Chrome)
webbrowser.open('https://www.sciencenews.org/topic/math-technology', new=2, autoraise=True)
print ('')
                        if message == ('paragon'):
rand = ['Yes Sir?', 'What can I, do for you ' + (datafile["Identity"][0]["pronouns"])]
Speech.say(rand,n,mixer)
if ('are we connected') in message:
REMOTE_SERVER = "www.google.com"
Speech.wifi()
rand = ['We are connected']
Speech.say(rand,n,mixer)
if ('.com') in message :
rand = ['Opening' + message]
Chrome = ("google-chrome %s")
Speech.say(rand,n,mixer)
webbrowser.get(Chrome).open('http://www.'+message)
print ('')
if ('.net') in message :
rand = ['Opening' + message]
Chrome = ("google-chrome %s")
Speech.say(rand,n,mixer)
webbrowser.get(Chrome).open('http://www.'+message)
print ('')
if ('.org') in message :
rand = ['Opening' + message]
Chrome = ("google-chrome %s")
Speech.say(rand,n,mixer)
webbrowser.get(Chrome).open('http://www.'+ message)
print ('')
if ('what is the time') in message or ('what time is it') in message or ('can you get me the current time') in message or ('can you tell me the time') in message:
lTime = time.strftime('%I:%M')
rand = ['the time is,' + lTime + ',sir.']
Speech.say(rand,n,mixer)
if ('what day is it') in message or ('what is the date') in message or ('date please') in message:
tDate = time.strftime('%B:%d:%Y')
rand = ['Today is,' + tDate + (datafile["Identity"][0]["pronouns"])]
Speech.say(rand,n,mixer)
if ('Paragon can you get me the weather') in message or ('can you get the weather') in message or ('Paragon weather please') in message or ('weather please') in message:
noaa_result = pywapi.get_weather_from_noaa('KPWT')
rand = ["I've fetched the weather for you." + "It is currently" + noaa_result['weather'] + '\n' + 'Current Temperature is: ' + noaa_result['temp_f'] + 'Degrees.'+ '\n' + 'Information grabbed from' + noaa_result['location']]
Speech.say(rand,n,mixer)
if ('can you get the news') in message or ('get the news please') in message or ('Paragon get the news please') in message:
rand = ['Fetching todays headlines, sir, please wait.']
Speech.say(rand,n,mixer)
time.sleep(5)
d = feedparser.parse('http://rss.nytimes.com/services/xml/rss/nyt/Science.xml')
rand = [d.feed['title'] + d.feed['description']]
Speech.say(rand,n,mixer)
if ('night mode') in message:
rand = ['Ok, sir, turning on your nightmode settings.']
Speech.say(rand,n,mixer)
subprocess.call("xbacklight -time 5000 -set 5", shell=True)
time.sleep(4)
rand = ['Ok sir, night mode is active.']
Speech.say(rand,n,mixer)
if ('day mode') in message:
rand = ['Ok,sir, turning on your daytime settings.']
Speech.say(rand,n,mixer)
subprocess.call("xbacklight -time 5000 -set 100", shell=True)
time.sleep(3)
rand = ['Ok sir, daytime mode is now active.']
Speech.say(rand,n,mixer)
if ('sleep mode') in message:
subprocess.call("xbacklight -time 5000 -set 0", shell=True)
if ('mute computer') in message or ('mute please') in message or ('mute') in message:
subprocess.call("pactl set-sink-mute 2 1", shell=True)
if ('unmute computer') in message or ('unmute please') in message or ('unmute') in message:
subprocess.call("pactl set-sink-mute 2 0", shell=True)
if ('mute all') in message or ('please mute all') in message:
subprocess.call("Scripts/muteall.sh", shell=True)
if ('Paragon log out') in message or ('log off') in message or ('log out protocol') in message or ('initiate logout protocol') in message:
rand = ['Logging out']
Speech.say(rand,n,mixer)
time.sleep(3)
subprocess.call("gnome-session-quit --no-prompt", shell=True)
if ('clean up your folder') in message or ("clean up protocol") in message or ('initiate cleanup protocol') in message:
rand = ['Ok sir, cleaning up my folders.']
Speech.say(rand,n,mixer)
subprocess.call("find . -name './Paragon/*.mp3' -delete", shell=True)
if ('monitor protocol') in message:
rand = ['Monitoring system functions, sir.']
Speech.say(rand,n,mixer)
time.sleep(1)
protocols.monitor_protocol()
if ('where is') in message:
rand = ['Searching for' + message + ', please wait.']
LocSrch_Message = message.replace("where is", "")
Chrome = ("google-chrome %s")
Speech.say(rand,n,mixer)
webbrowser.get(Chrome).open('http://www.'+ message)
if ('what is a') in message or ("what is an") in message:
if "an" in message:
message = message.replace("an ","")
if "a" in message:
message = message.replace("a ","")
                            spoken_def = textblob.Word(message.strip()).definitions
                            colist = str(len(spoken_def))
                            rand = ['Sir, there are ' + colist + ' entries, reading the first one: ' + str(spoken_def[0] if spoken_def else 'none found')]
                            Speech.say(rand,n,mixer)
                            Chrome = ("google-chrome %s")
                            webbrowser.get(Chrome)
                            webbrowser.open("http://www.dictionary.com/browse/" + message)
if ('medical') in message:
#Searches the entire medical dictionary for a term of definition
term = message.replace("medical","")
import nltk.corpus
medical = open('./Paragon/Data/Databases/Medical_Dictionary.txt')
#Converts the text file into an actual corpus, the reason it isn't converted or loaded above,
#is because it would consume to many resources if it were constantly loaded.
text = medical.read()
text1 = text.split()
                            conc_term = nltk.Text(text1)
med_term = conc_term.concordance(term)
rand = [med_term]
Speech.say(rand,n,mixer)
if ('stock opening') in message:
message = message.replace("stock opening", "")
                            #Search the stock database (df, a pandas DataFrame of company names and symbols assumed to be loaded elsewhere) for the given company name
                            x = df[df['Name'].str.contains(message)]
                            #Further break down the stock table, and find the first hit.
                            TableDi = x.iloc[-1]['Symbol']
                            #Now it's easy sailing.
                            TableConv = fc.Share(TableDi)
                            rand = ['The opening value of ' + message + ' is: ' + TableConv.get_open()]
Speech.say(rand,n,mixer)
if ('stock price') in message:
message = message.replace("stock price", "")
                            #Search the stock database (df, as above) for the given company name
                            x = df[df['Name'].str.contains(message)]
                            #Further break down the stock table, and find the first hit.
                            TableDi = x.iloc[-1]['Symbol']
                            #Now it's easy sailing. x2
                            TableConv = fc.Share(TableDi)
                            rand = ['The price value of ' + message + ' is: ' + TableConv.get_price()]
Speech.say(rand,n,mixer)
                        if ('image scan' in message) or ('scan this image' in message) or ('scan image' in message):
#Send start command to the still image recognizer class
Intelligence.sIC()
wrdl1 = ['what city', 'address']
                        if any(w in message for w in wrdl1):
                            client = gmaps.Client(key='register for a google key')
                            locaord = gmaps.geolocation.geolocate(client, consider_ip=True)
                            lat = (locaord['location']['lat']);lng = (locaord['location']['lng'])
                            rgr = client.reverse_geocode((lat, lng))
#Handle the index out of range error, since it will be thrown if there is not exactly
#ten completed iterations
try:
#There may be more then 10 results to returned, but the chances
#of them containing the correct result past that point is very low, the machine doesn't
#need to be concerned with accounting for them at that point.
for i in range(10):
                                    locsay = rgr[2]['address_components'][i]['long_name']
rand = [locsay]
Speech.say(rand,n,mixer)
except IndexError:
print("")
else:
print(null_error)
#write message to a text file
#have the computer read that text file by checking for updated files, either by using time sleep and forcing an updated
#print that file readout here
#repeat using the else method
#exceptions
except (KeyboardInterrupt,SystemExit):
print("Goodbye, Paragon powering down now")
break
except sr.UnknownValueError:
print("error")
except sr.RequestError as e:
print("Error, no internet found.")
if __name__ == '__main__':
start.Interface()
if __name__ == '__main__':
spn.main()
|
Klaminite1337/Paragon
|
WindowsBoot.py
|
Python
|
mit
| 25,298
|
[
"Gaussian"
] |
30c3863ec1796fc688e68739caff31c6ba4ddb059f2d481a1cb63a2808637ab4
|
#!/usr/bin/python
# Code to read in years of hourly high resolution ERA5 t, td and land_sea_mask data
# Converts to q, RH, e, tw and DPD
# Aggregates to daily average
# Regrids to 1by1 degree and shifts to -179.5 to 179.5 and 180 lats from 89.5 to -89.5 (was 181 lats!!!)
# Outputs as netCDF
# Later code will read in and convert to pentads, monthlies, anomalies etc and combine to make complete record or
# append the latest year
# This can also spin through all years or cope with annual updates
#*******************************************
# START
#*******************************************
import os
import datetime as dt
import calendar
import numpy as np
import sys
import time
import pdb
import iris
import iris.coord_categorisation
from iris.coords import DimCoord
from iris.cube import Cube
import cf_units
import CalcHums as ch
#import utils
#sys.path.append('/data/users/rdunn/reanalyses/code/era5/cdsapi-0.1.4')
#sys.path.append('/data/users/hadkw/WORKING_HADISDH/UPDATE2019/PROGS/PYTHON/cdsapi-0.1.4')
sys.path.append('/home/h04/hadkw/HadISDH_Code/HADISDH_BUILD/cdsapi-0.1.4')
import cdsapi
# Set up directory
#DataLoc = '/data/users/hadkw/WORKING_HADISDH/UPDATE2019/OTHERDATA/ERA5/'
DataLoc = '/scratch/hadkw/UPDATE2020/OTHERDATA/ERA5/'
print(DataLoc)
"""
Butchered from
http://fcm1.metoffice.com/projects/utils/browser/CM_ML/trunk/NAO_Precip_Regr/get_era5_uwind.py
and /data/users/rdunn/reanalysis/era5/get_era.py
"""
#****************************************
def retrieve(year, variable, month, ndays):
'''
Use ECMWF API to get the data
4.5GB per month --> 55GB per year, 50mins per month of processing
'''
if variable == "2m_temperature":
varlist = ["2m_temperature"]
# varlist = ["2m_temperature", "land_sea_mask"] # if you want to download both at once
elif variable == "2m_dewpoint_temperature":
varlist = ["2m_dewpoint_temperature"]
# varlist = ["2m_dewpoint_temperature", "land_sea_mask"]
elif variable == "surface_pressure":
varlist = ["surface_pressure"]
elif variable == "land_sea_mask":
varlist = ["land_sea_mask"]
else:
print("please provide correct variable to download")
return
days = ["{:2d}".format(d+1) for d in range(ndays)]
c = cdsapi.Client()
c.retrieve(
'reanalysis-era5-single-levels',
{
'product_type':'reanalysis',
'format':'netcdf',
'variable':varlist,
'year':"{}".format(year),
'month':"{:02d}".format(month),
'day':days,
'time':[
'00:00','01:00','02:00',
'03:00','04:00','05:00',
'06:00','07:00','08:00',
'09:00','10:00','11:00',
'12:00','13:00','14:00',
'15:00','16:00','17:00',
'18:00','19:00','20:00',
'21:00','22:00','23:00',
]
},
os.path.join(DataLoc, "{}{:02d}_hourly_{}.nc".format(year, month, variable))
)
# time.sleep(5) # to allow any writing process to finish up.
# # make a "success" file
# with open(os.path.join(DataLoc, "{}{:02d}_{}_success.txt".format(year, month, variable)), "w") as outfile:
#
# outfile.write("Success {}".format(dt.datetime.now()))
    return # retrieve
#****************************************
def check_files(year, variable, month, ndays):
    ''' This reads in the t, td and p files and checks for a full download.
    If the last hour field has identical values for every lat and lon box then the download has failed.
    A failed file is removed and, depending on the stage in the process, it is either re-retrieved or the program aborts.
    The program will then need to be restarted. '''
    action = 'continue' # output if file is ok
test_cube = iris.load(os.path.join(DataLoc, "{}{:02d}_hourly_{}.nc".format(year, month, variable)))
# convert from list to cube
test_cube = test_cube[0]
test_data = test_cube.data[-1,:,:]
# Are all values the same - np.unique() has a length of 1 if so
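    # (A truncated CDS download typically leaves the trailing hourly fields filled with a single
    #  constant value, so a final field with only one unique value is treated as a failed file.)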
if (len(np.unique(test_cube.data[-1,:,:])) == 1):
# remove failed files
os.remove(os.path.join(DataLoc, "{}{:02d}_hourly_{}.nc".format(year, month, variable)))
action = 'retrieve' # either retry download or exit code depending on stage in process
## exit the program
#sys.exit('Incomplete file download')
return action
#****************************************
def convert(year, month, ndays, remove=False):
"""
Now need to:
- convert to q, RH , e, tw, DPD
- aggregate to daily averages
- regrid to 1by1 gridboxes
"""
MDI = -999.
# Set up null_cube with desired gridding format to use as a template
# Does this have to have the same time dimensions?
# ndays = np.int(p_cube.data[:,0,0] / 24)
time = DimCoord(np.arange(ndays*24),
standard_name = 'time',
units = 'hours')
latitude = DimCoord(np.linspace(89.5, -89.5, 180),
# latitude = DimCoord(np.linspace(90, -90, 181),
standard_name = 'latitude',
long_name = 'gridbox centre latitude',
units = 'degrees_north')
longitude = DimCoord(np.linspace(-179.5, 179.5, 360),
# longitude = DimCoord(np.linspace(0, 359, 360),
standard_name='longitude',
long_name = 'gridbox centre longitude',
units = 'degrees_east')
null_cube = Cube(np.zeros((ndays*24, 180, 360), np.float32),
dim_coords_and_dims=[(time, 0),
(latitude, 1),
(longitude, 2)])
print('Check null_cube for new grid')
# pdb.set_trace()
## START OF LSM************************************************
# # read in land_sea_mask
# variable = "land_sea_mask"
# lsm_cube = iris.load(os.path.join(DataLoc, "{}{:02d}_hourly_{}.nc".format(year, month, variable)))
# #pdb.set_trace()
# # convert from list to cube
# lsm_cube = lsm_cube[0]#
#
## REgrid to 1by1 degree - cash the source, template, gridding type for later use - faster
# regridder = iris.analysis.Linear().regridder(lsm_cube, null_cube)
# lsm_cube_1by1 = regridder(lsm_cube)
# print('Check lsm_cube_1by1 for new grid')
## pdb.set_trace()#
#
# # remove old cube
# lsm_cube = 0
#
# lsm_cube_1by1 = lsm_cube_1by1[0,:,:]
## lsm_cube_1by1_field = lsm_cube_1by1.extract(iris.Constraint(time=0))
# lsm_cube_1by1.units = "1"
# print(lsm_cube_1by1)
# print('Check lsm_cube_1by1 for 2m_temperature')
# #pdb.set_trace()
#
## output
# iris.save(lsm_cube_1by1, os.path.join(DataLoc, "{}{:02d}_{}.nc".format(year, month, variable)), zlib=True)
# print('Check lsm_cube_1by1 output')
# pdb.set_trace()
## END OF LSM************************************************************
# read in t, td and sp (may be VERY LARGE
variable = "2m_temperature"
t_cube = iris.load(os.path.join(DataLoc, "{}{:02d}_hourly_{}.nc".format(year, month, variable)))
#pdb.set_trace()
# convert from list to cube
t_cube = t_cube[0]
# REgrid to 1by1 degree - cash the source, template, gridding type for later use - faster
regridder = iris.analysis.Linear().regridder(t_cube, null_cube)
t_cube_1by1 = regridder(t_cube)
print('Check t_cube_1by1 for new grid')
# pdb.set_trace()
# remove old cube
t_cube = 0
t_cube_1by1.data -= 273.15 # convert to C
t_cube_1by1.units = "degreesC"
print('Check t_cube_1by1 for 2m_temperature')
#pdb.set_trace()
variable = "2m_dewpoint_temperature"
td_cube = iris.load(os.path.join(DataLoc, "{}{:02d}_hourly_{}.nc".format(year, month, variable)))
# convert from list to cube
td_cube = td_cube[0]
# REgrid to 1by1 degree - cash the source, template, gridding type for later use - faster
td_cube_1by1 = regridder(td_cube)
print('Check td_cube_1by1 for new grid')
# pdb.set_trace()
# remove old cube
td_cube = 0
td_cube_1by1.data -= 273.15 # convert to C
td_cube_1by1.units = "degreesC"
print('Check td_cube_1by1 for 2m_dewpoint_temperature')
# pdb.set_trace()
variable = "surface_pressure"
p_cube = iris.load(os.path.join(DataLoc, "{}{:02d}_hourly_{}.nc".format(year, month, variable)))
# convert from list to cube
p_cube = p_cube[0]
# REgrid to 1by1 degree - cash the source, template, gridding type for later use - faster
p_cube_1by1 = regridder(p_cube)
print('Check p_cube_1by1 for new grid')
# pdb.set_trace()
# remove old cube
p_cube = 0
    p_cube_1by1.data /= 100. # convert to hPa
p_cube_1by1.units = "hPa"
print('Check p_cube_1by1 for surface_pressure')
# pdb.set_trace()
# # if it contains 2 cubes where we have downloaded mask and wish to mask to land or sea....
# if len(p_cubelist) == 2:
# # extract both cubes
# pcube1 = p_cubelist[0]
# pcube2 = p_cubelist[1]#
#
# masked1, = np.where(pcube1.data.mask[:, 0, 0] == True)
# masked2, = np.where(pcube2.data.mask[:, 0, 0] == True)
#
# # use locations of masks to overwrite
# tp_cube = pcube1[:]
# tp_cube.data[masked1] = pcube2.data[masked1]
# tp_cube.var_name = "tp"
#
# # else it's just a single cube, so easier to deal with
# elif len(p_cubelist) == 1:#
#
# tp_cube = p_cubelist[0]
# tp_cube.var_name = "tp"
# No masking internally within this code...
# Process q
# Copy the t_cube and then change some of the fields?
variable = 'specific_humidity'
q_cube = t_cube_1by1.copy()
q_cube.fill_value = MDI # not sure whether we're doing -999 yet if saving as integer
q_cube.units = cf_units.Unit("g kg-2")
q_cube.var_name = "q2m"
q_cube.long_name = "2 metre specific humidity"
# Populate the q data
q_cube.data = ch.sh(td_cube_1by1.data,t_cube_1by1.data,p_cube_1by1.data,roundit=False)
print('Check q_cube for new data')
# pdb.set_trace()
## mask all regions which are 100% ocean
#cube.data[lsm.data == 0] = utils.MDI
#cube.data = np.ma.masked_where(lsm.data == 0, cube.data)
#cube.data.fill_value = utils.MDI
# Aggregate to daily
# add a "day" indicator to allow aggregation
iris.coord_categorisation.add_day_of_month(q_cube, "time", name="day_of_month")
q_cube_day = q_cube.aggregated_by(["day_of_month"], iris.analysis.MEAN)
q_cube = 0
q_cube_day.remove_coord("day_of_month")
q_cube_day.units = cf_units.Unit("g kg-2")
print('Check q_cube for daily averages')
# pdb.set_trace()
# output
iris.save(q_cube_day, os.path.join(DataLoc, "{}{:02d}_daily_{}.nc".format(year, month, variable)), zlib=True)
q_cube_day=0
print('Check q_cube_1by1 output')
# pdb.set_trace()
# Process RH
# Copy the t_cube and then change some of the fields?
variable = 'relative_humidity'
rh_cube = t_cube_1by1.copy()
rh_cube.fill_value = MDI # not sure whether we're doing -999 yet if saving as integer
rh_cube.units = cf_units.Unit("%")
rh_cube.var_name = "rh2m"
rh_cube.long_name = "2 metre relative humidity"
# Populate the q data
rh_cube.data = ch.rh(td_cube_1by1.data,t_cube_1by1.data,p_cube_1by1.data,roundit=False)
print('Check rh_cube for new data')
# pdb.set_trace()
## mask all regions which are 100% ocean
#cube.data[lsm.data == 0] = utils.MDI
#cube.data = np.ma.masked_where(lsm.data == 0, cube.data)
#cube.data.fill_value = utils.MDI
# Aggregate to daily
# add a "day" indicator to allow aggregation
iris.coord_categorisation.add_day_of_month(rh_cube, "time", name="day_of_month")
rh_cube_day = rh_cube.aggregated_by(["day_of_month"], iris.analysis.MEAN)
rh_cube = 0
rh_cube_day.remove_coord("day_of_month")
rh_cube_day.units = cf_units.Unit("%")
print('Check rh_cube for daily averages')
# pdb.set_trace()
# output
iris.save(rh_cube_day, os.path.join(DataLoc, "{}{:02d}_daily_{}.nc".format(year, month, variable)), zlib=True)
rh_cube_day=0
print('Check rh_cube_1by1 output')
#pdb.set_trace()
# Process e
# Copy the t_cube and then change some of the fields?
variable = 'vapour_pressure'
e_cube = t_cube_1by1.copy()
e_cube.fill_value = MDI # not sure whether we're doing -999 yet if saving as integer
e_cube.units = cf_units.Unit("hPa")
e_cube.var_name = "e2m"
e_cube.long_name = "2 metre vapour pressure"
# Populate the q data
e_cube.data = ch.vap(td_cube_1by1.data,t_cube_1by1.data,p_cube_1by1.data,roundit=False)
print('Check e_cube for new data')
# pdb.set_trace()
## mask all regions which are 100% ocean
#cube.data[lsm.data == 0] = utils.MDI
#cube.data = np.ma.masked_where(lsm.data == 0, cube.data)
#cube.data.fill_value = utils.MDI
# Aggregate to daily
# add a "day" indicator to allow aggregation
iris.coord_categorisation.add_day_of_month(e_cube, "time", name="day_of_month")
e_cube_day = e_cube.aggregated_by(["day_of_month"], iris.analysis.MEAN)
e_cube = 0
e_cube_day.remove_coord("day_of_month")
e_cube_day.units = cf_units.Unit("hPa")
print('Check e_cube for daily averages')
# pdb.set_trace()
# output
iris.save(e_cube_day, os.path.join(DataLoc, "{}{:02d}_daily_{}.nc".format(year, month, variable)), zlib=True)
e_cube_day=0
print('Check e_cube_1by1 output')
# pdb.set_trace()
# Process tw
# Copy the t_cube and then change some of the fields?
variable = 'wetbulb_temperature'
tw_cube = t_cube_1by1.copy()
tw_cube.fill_value = MDI # not sure whether we're doing -999 yet if saving as integer
tw_cube.units = cf_units.Unit("degrees C")
tw_cube.var_name = "tw2m"
tw_cube.long_name = "2 metre wetbulb temperature"
# Populate the q data
tw_cube.data = ch.wb(td_cube_1by1.data,t_cube_1by1.data,p_cube_1by1.data,roundit=False)
print('Check tw_cube for new data')
# pdb.set_trace()
## mask all regions which are 100% ocean
#cube.data[lsm.data == 0] = utils.MDI
#cube.data = np.ma.masked_where(lsm.data == 0, cube.data)
#cube.data.fill_value = utils.MDI
# Aggregate to daily
# add a "day" indicator to allow aggregation
iris.coord_categorisation.add_day_of_month(tw_cube, "time", name="day_of_month")
tw_cube_day = tw_cube.aggregated_by(["day_of_month"], iris.analysis.MEAN)
tw_cube = 0
tw_cube_day.remove_coord("day_of_month")
tw_cube_day.units = cf_units.Unit("degrees C")
print('Check tw_cube for daily averages')
# pdb.set_trace()
# output
iris.save(tw_cube_day, os.path.join(DataLoc, "{}{:02d}_daily_{}.nc".format(year, month, variable)), zlib=True)
tw_cube_day=0
print('Check tw_cube_1by1 output')
# pdb.set_trace()
# Process dpd
# Copy the t_cube and then change some of the fields?
variable = 'dewpoint_depression'
dpd_cube = t_cube_1by1.copy()
dpd_cube.fill_value = MDI # not sure whether we're doing -999 yet if saving as integer
dpd_cube.units = cf_units.Unit("degrees C")
dpd_cube.var_name = "dpd2m"
dpd_cube.long_name = "2 metre dewpoint depression"
# Populate the q data
dpd_cube.data = ch.dpd(td_cube_1by1.data,t_cube_1by1.data,roundit=False)
print('Check dpd_cube for new data')
# pdb.set_trace()
## mask all regions which are 100% ocean
#cube.data[lsm.data == 0] = utils.MDI
#cube.data = np.ma.masked_where(lsm.data == 0, cube.data)
#cube.data.fill_value = utils.MDI
# Aggregate to daily
# add a "day" indicator to allow aggregation
iris.coord_categorisation.add_day_of_month(dpd_cube, "time", name="day_of_month")
dpd_cube_day = dpd_cube.aggregated_by(["day_of_month"], iris.analysis.MEAN)
dpd_cube = 0
dpd_cube_day.remove_coord("day_of_month")
dpd_cube_day.units = cf_units.Unit("degrees C")
print('Check dpd_cube for daily averages')
# pdb.set_trace()
# output
iris.save(dpd_cube_day, os.path.join(DataLoc, "{}{:02d}_daily_{}.nc".format(year, month, variable)), zlib=True)
dpd_cube_day=0
print('Check dpd_cube_1by1 output')
# pdb.set_trace()
# Process Td
variable = '2m_dewpoint_temperature'
# Aggregate to daily
# add a "day" indicator to allow aggregation
iris.coord_categorisation.add_day_of_month(td_cube_1by1, "time", name="day_of_month")
td_cube_day = td_cube_1by1.aggregated_by(["day_of_month"], iris.analysis.MEAN)
td_cube_1by1 = 0
td_cube_day.remove_coord("day_of_month")
td_cube_day.units = cf_units.Unit("degrees C")
td_cube_day.var_name = "td2m"
print('Check td_cube for daily averages')
# pdb.set_trace()
# output
iris.save(td_cube_day, os.path.join(DataLoc, "{}{:02d}_daily_{}.nc".format(year, month, variable)), zlib=True)
td_cube_day=0
print('Check td_cube_1by1 output')
# pdb.set_trace()
# Process T
variable = '2m_temperature'
# Aggregate to daily
# add a "day" indicator to allow aggregation
iris.coord_categorisation.add_day_of_month(t_cube_1by1, "time", name="day_of_month")
t_cube_day = t_cube_1by1.aggregated_by(["day_of_month"], iris.analysis.MEAN)
t_cube_1by1 = 0
t_cube_day.remove_coord("day_of_month")
t_cube_day.units = cf_units.Unit("degrees C")
t_cube_day.var_name = "t2m"
print('Check t_cube for daily averages')
# pdb.set_trace()
# output
iris.save(t_cube_day, os.path.join(DataLoc, "{}{:02d}_daily_{}.nc".format(year, month, variable)), zlib=True)
t_cube_day=0
print('Check t_cube_1by1 output')
# pdb.set_trace()
# Process P
variable = 'surface_pressure'
# Aggregate to daily
# add a "day" indicator to allow aggregation
iris.coord_categorisation.add_day_of_month(p_cube_1by1, "time", name="day_of_month")
p_cube_day = p_cube_1by1.aggregated_by(["day_of_month"], iris.analysis.MEAN)
p_cube_1by1 = 0
p_cube_day.remove_coord("day_of_month")
p_cube_day.units = cf_units.Unit("hPa")
p_cube_day.var_name = "p2m"
print('Check p_cube for daily averages')
# pdb.set_trace()
# output
iris.save(p_cube_day, os.path.join(DataLoc, "{}{:02d}_daily_{}.nc".format(year, month, variable)), zlib=True)
p_cube_day=0
print('Check p_cube_1by1 output')
# pdb.set_trace()
# # append precipitation cube to temperature one
# cubelist += [tp_cube]
# remove input files
if remove:
for variable in ["2m_temperature", "2m_dewpoint_temperature", "surface_pressure"]:
# for variable in ["2m_temperature", "2m_dewpoint_temperature", "surface_pressure", "land_sea_mask"]:
os.remove(os.path.join(DataLoc, "{}{:02d}_hourly_{}.nc".format(year, month, variable)))
return # combine
#****************************************
if __name__ == "__main__":
import argparse
# # Set up directory
# DataLoc = '/data/users/hadkw/WORKING_HADISDH/UPDATE2019/OTHERDATA/ERA5/'
# print(DataLoc)
# set up keyword arguments
parser = argparse.ArgumentParser()
parser.add_argument('--start', dest='start', action='store', default=1979, type=int,
help='Start year [1979]')
parser.add_argument('--end', dest='end', action='store', default=2019, type=int,
help='End year [2019]')
parser.add_argument('--remove', dest='remove', action='store_true', default=False,
help='Remove hourly and monthly files, default = False')
args = parser.parse_args()
print(args)
for year in np.arange(args.start, args.end+1):
print(year)
if not os.path.exists(os.path.join(DataLoc, "{}12_daily_surface_pressure.nc".format(year))):
for month in np.arange(1, 13):
print("{} - {}".format(year, month))
# get number of days
ndays = calendar.monthrange(year, month)[1]
if not os.path.exists(os.path.join(DataLoc, "{}{:02d}_daily_surface_pressure.nc".format(year, month))):
for variable in ["2m_temperature", "2m_dewpoint_temperature", "surface_pressure"]:
# If there is a file present then check it first
action = 'retrieve' # instruction as to whether to retrieve (exit code if second try doesn't work), not bother (carry on)
if os.path.exists(os.path.join(DataLoc, "{}{:02d}_hourly_{}.nc".format(year, month, variable))):
print("{} - {} - {} already downloaded so testing".format(year, month, variable))
action = check_files(year, variable, month, ndays)
print('Code to', action)
if (action == 'retrieve'):
retrieve(year, variable, month, ndays)
action = 'continue'
action = check_files(year, variable, month, ndays)
if (action == 'retrieve'): # then the download has failed for some reason so stop the code and restart
sys.exit('Failed download from CDS')
convert(year, month, ndays, remove = args.remove)
else:
print("{} - {} already downloaded".format(year, month))
else:
print("{} already downloaded".format(year))
#*******************************************
# END
#*******************************************
|
Kate-Willett/Climate_Explorer
|
PYTHON/get_era5.py
|
Python
|
cc0-1.0
| 21,719
|
[
"NetCDF"
] |
d714684b152e01bd7850ecf52a83730cf4b73bf3c19c89524097f3cf1253bab0
|
import sys, string
import export
import math
import random
import copy
import os
import os.path
import unique
R_present=True
try:
### If file is present use this location
loc = unique.filepath('Config/R_location.txt')
s = open(loc,'r')
useStaticLocation=s.read()
#print useStaticLocation
#print 'Using the Config designated location'
except Exception:
#print 'NOT using the Config designated location'
useStaticLocation = False
try:
    forceError ### This doesn't currently work with the compiled version of AltAnalyze
import rpy2.robjects as robjects
r = robjects.r
print "\n---------Using RPY2---------\n"
except Exception:
from pyper import *
#print "\n---------Using PypeR---------\n"
### Running the wrong one once is fine, but multiple times causes it to stall in a single session
try:
try:
if 'Xdarwin' in sys.platform:
#print 'Using AltAnalyze local version of R'
#print 'A'
path = unique.filepath("AltDatabase/tools/R/Mac/R")
r = R(RCMD=path,use_numpy=True)
elif os.name == 'nt':
path = unique.filepath("AltDatabase/tools/R/PC/bin/x64/R.exe")
r = R(RCMD=path,use_numpy=True)
else:
#print 'B'
if useStaticLocation == False or useStaticLocation=='no':
print 'NOT using static location'
r = R(use_numpy=True)
else:
print 'Using static location'
path = '/usr/local/bin/R'
if os.path.exists(path): pass
else:
path = '/usr/bin/R'
if os.path.exists(path):
print 'Using the R path:',path
r = R(RCMD=path,use_numpy=True)
else:
r = None
R_present=False
print 'R does not appear to be installed... Please install first.'
except Exception:
#print 'C'
r = R(use_numpy=True)
except Exception:
#print traceback.format_exc()
r = None
R_present=False
pass
LegacyMode = True
### Create a Directory for R packages in the AltAnalyze program directory (in non-existant)
r_package_path = string.replace(os.getcwd()+'/Config/R','\\','/') ### R doesn't like \\
r_package_path = unique.filepath(r_package_path) ### Remove the AltAnalyze.app location
try: os.mkdir(r_package_path)
except Exception: None
if R_present:
### Set an R-package installation path
command = '.libPaths("'+r_package_path+'")'; r(command) ### doesn't work with %s for some reason
#print_out = r('.libPaths()');print print_out; sys.exit()
def remoteMonocle(input_file,expPercent,pval,numGroups):
#input_file="Altanalyze"
setWorkingDirectory(findParentDir(input_file)[:-1])
try: os.mkdir(findParentDir(input_file)[:-1])
except Exception: None
z = RScripts(input_file)
setWorkingDirectory(input_file)
z.Monocle(input_file,expPercent,pval,numGroups)
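    # Illustrative call only (the path and thresholds below are hypothetical placeholders,
    # not values used elsewhere in this module):
    #   remoteMonocle('/path/to/ICGS/expressionFile.txt', expPercent=25, pval=0.01, numGroups=4)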
def remoteHopach(input_file,cluster_method,metric_gene,metric_array):
""" Run Hopach via a call from an external clustering and visualizaiton module """
#input_file = input_file[1:] #not sure why, but the '\' needs to be there while reading initally but not while accessing the file late
force_array = ''
force_gene = ''
row_order = []
column_order = []
input_file = checkForDuplicateIDs(input_file) ### Duplicate IDs will cause R to exit when creating the data matrix
z = RScripts(input_file)
setWorkingDirectory(input_file)
z.Hopach(cluster_method,metric_gene,force_gene,metric_array,force_array)
if cluster_method == 'both' or cluster_method == 'gene':
filename = findParentDir(input_file)+'/hopach/rows.'+findFileName(input_file)
row_order = importHopachOutput(filename)
if cluster_method == 'both' or cluster_method == 'array':
filename = findParentDir(input_file)+'/hopach/columns.'+findFileName(input_file)
column_order = importHopachOutput(filename)
#print row_order; sys.exit()
return input_file, row_order, column_order
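    # Illustrative call only (the path and metric names below are hypothetical placeholders):
    #   ordered_file, row_order, column_order = remoteHopach(
    #       '/path/to/exp.matrix.txt', 'both', 'cosangle', 'euclid')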
def remoteAffyNormalization(input_file,normalization_method,probe_level,batch_effects):
### Input file is the path of the expression output from normalization
setWorkingDirectory(findParentDir(input_file)[:-1])
try: os.mkdir(findParentDir(input_file)[:-1])
except Exception: None #Already exists
z = RScripts(input_file)
z.AffyNormalization(normalization_method,probe_level,batch_effects)
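    # Illustrative call only (hypothetical expression-output path and options):
    #   remoteAffyNormalization('/path/to/ExpressionInput/exp.myarray.txt', 'rma', False, '')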
def checkForDuplicateIDs(input_file):
first_row = True
key_db={}
key_list=[]
fn=filepath(input_file)
offset=0
nonNumericsPresent=False
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if first_row == True:
if ('row_clusters-flat' in t and 'row_clusters-flat' not in t[0]):
headers = string.join(['uid']+t[2:],'\t')+'\n'
offset = 1
elif '-filtered.txt' in fn and ".R2." in t[1] and LegacyMode:
headers = string.join(['uid']+t[2:],'\t')+'\n'
offset = 1
else:
headers = line
first_row = False
else:
key = t[0]
try:
                k1,k2 = string.split(key,' ')
print [k1, k2],
if k1==k2: key = k1
print key
except Exception: pass
if key!='column_clusters-flat':
key_list.append(key)
try: s = map(float,t[offset+1:])
except Exception:
nonNumericsPresent=True
key_db[key]=t
if nonNumericsPresent:
import numpy
for key in key_db:
t = key_db[key]
s=[key]
if offset ==1: s.append('')
temp=[]
for value in t[offset+1:]:
try: temp.append(float(value))
except Exception: pass
avg=numpy.mean(temp)
for value in t[offset+1:]:
try: s.append(str(float(value)-avg))
except Exception: s.append('0.000101')
key_db[key]=s
if len(key_db) != len(key_list) or offset>0 or nonNumericsPresent:
print 'Writing a cleaned-up version of the input file:'
### Duplicate IDs present
input_file = input_file[:-4]+'-clean.txt'
export_text = export.ExportFile(input_file) ### create a new input file
export_text.write(headers) ### Header is the same for each file
for key in key_db:
t = key_db[key]
if offset > 0:
t = [t[0]]+t[1+offset:]
export_text.write(string.join(t,'\t')+'\n') ### Write z-score values and row names
export_text.close()
print 'File written...'
return input_file
def importHopachOutput(filename):
print filename
""" Import the ID order information """
db={} ### Used to store the cluster data
hopach_clusters=[]
cluster_level=[]
cluster_level2=[]
cluster_level3=[]
hopach_db={}
cluster_db={}
level2_level1={}
firstLine = True
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if firstLine: firstLine = False
else:
t = string.split(data,'\t')
final_level_order = int(t[-1])
index, uid, cluster_number, cluster_label, cluster_level_order, final_label, final_level_order = string.split(data,'\t')
try: l2 = str(int(round(float(cluster_label),0)))[:2]
except Exception: l2 = int(cluster_label[0])
try: l3 = str(int(round(float(cluster_label),0)))[:3]
except Exception: l3 = int(cluster_label[0])
hopach_clusters.append((int(final_level_order),int(index)-1)) ### Need to order according to the original index, sorted by the clustered order
cluster_level.append(int(cluster_label[0])) ### This is the root cluster number
cluster_level2.append(l2) ### Additional cluster levels
cluster_level3.append(l3)
hopach_db[uid] = cluster_label
level2_level1[l2] = int(cluster_label[0])
level2_level1[l3] = int(cluster_label[0])
try: cluster_db[int(float(cluster_label[0]))].append(uid)
except Exception: cluster_db[int(cluster_label[0])] = [uid]
try: cluster_db[l2].append(uid)
except Exception: cluster_db[l2] = [uid]
try: cluster_db[l3].append(uid)
except Exception: cluster_db[l3] = [uid]
split_cluster=[]
if 'column' in fn:
cluster_limit = 50 ### typically less columns than rows
else:
cluster_limit = 75
for cluster in cluster_db:
#print cluster,len(cluster_db[cluster]),(float(len(cluster_db[cluster]))/len(hopach_db))
if len(cluster_db[cluster])>cluster_limit and (float(len(cluster_db[cluster]))/len(hopach_db))>0.2:
#print cluster
if cluster<10:
split_cluster.append(cluster)
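    # Only root-level (single-digit) clusters holding more than cluster_limit members and more
    # than 20% of all IDs are queued for splitting into their level-2/level-3 sub-clusters below.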
import unique
levels1 = unique.unique(cluster_level)
already_split={}
updated_indexes={}
if len(split_cluster)>0:
print 'Splitting large hopach clusters:',split_cluster
i=0
for l2 in cluster_level2:
l1 = level2_level1[l2]
if l1 in split_cluster:
cluster_level[i] = l2
try:
l2_db = already_split[l1]
l2_db[l2]=[]
except Exception: already_split[l1] = {l2:[]}
i+=1
### Check and see if the l1 was split or not (might need 3 levels)
i=0
for l3 in cluster_level3:
l1 = level2_level1[l3]
if l1 in already_split:
#l1_members = len(cluster_db[l1])
l2_members = len(already_split[l1])
#print l1, l3, l1_members, l2_members
if l2_members == 1: ### Thus, not split
cluster_level[i] = l3
#print l1, l3, 'split'
i+=1
else:
if len(cluster_level) > 50: ### Decide to use different hopach levels
if len(levels1)<3:
cluster_level = cluster_level2
if len(cluster_level) > 200:
if len(levels1)<4:
cluster_level = cluster_level2
hopach_clusters.sort()
hopach_clusters = map(lambda x: x[1], hopach_clusters) ### Store the original file indexes in order based the cluster final order
### Change the cluster_levels from non-integers to integers for ICGS comparison group simplicity and better coloring of the color bar
cluster_level2 = []
### Rename the sorted cluster IDs as integers
cluster_level_sort = []
for i in cluster_level:
if str(i) not in cluster_level_sort:
cluster_level_sort.append(str(i))
cluster_level2.append(str(i))
cluster_level_sort.sort()
cluster_level = cluster_level2
cluster_level2=[]
i=1; cluster_conversion={}
for c in cluster_level_sort:
cluster_conversion[str(c)] = str(i)
i+=1
for c in cluster_level:
cluster_level2.append(cluster_conversion[c])
#print string.join(map(str,cluster_level2),'\t');sys.exit()
db['leaves'] = hopach_clusters ### This mimics Scipy's cluster output data structure
db['level'] = cluster_level2
return db
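    # e.g. db['leaves'] -> original row/column indexes re-ordered by the final hopach ordering,
    #      db['level']  -> the re-numbered cluster label for each entry, as strings ('1','2',...)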
class RScripts:
def __init__(self,file):
self._file = file
def format_value_for_R(self,value):
value = '"'+value+'"'
return value
def File(self):
filename = self._file
filename_list = string.split(filename,'/')
filename = filename_list[-1]
filename = self.format_value_for_R(filename)
#root_dir = string.join(filename_list[:-1],'/')
return filename
def Monocle(self,samplelogfile,expPercent,p_val,numGroups):
#samplelogfile='C:/Users/venz6v/Documents/Altanalyze R/data.txt'
#grp_list="C:/Users/venz6v/Documents/Altanalyze R/grous.txt"
#gene_list="C:/Users/venz6v/Documents/Altanalyze R/gene.txt"
filename=self.File()
samplelogfile=findParentDir(filename)+'Monocle/expressionFile.txt"'
grp_list=findParentDir(filename)+'Monocle/sampleGroups.txt"'
gene_list=findParentDir(filename)+'Monocle/geneAnnotations.txt"'
pseudo_tree=findParentDir(filename)+'Monocle/monoclePseudotime.pdf"'
pseudo_txt=findParentDir(filename)+'Monocle/monoclePseudotime.txt"'
#try: os.mkdir(findParentDir(samplelogfile)) ### create "hopach" dir if not present
#except Exception: None
#try: os.mkdir(findParentDir(grp_list)) ### create "hopach" dir if not present
#except Exception: None
#try: os.mkdir(findParentDir(gene_list)) ### create "hopach" dir if not present
#except Exception: None
#self._file = samplelogfile
#samplelogfile = self.File()
#self._file = grp_list
#grp_list = self.File()
#self._file = gene_list
#gene_list = self.File()
print 'Loading monocle package in R'
print_out = r('library("monocle")')
if "Error" in print_out:
print 'Installing the R package "monocle" in Config/R'
print_out = r('source("http://bioconductor.org/biocLite.R"); biocLite("monocle")')
print print_out
print_out = r('library("monocle")')
if "Error" in print_out: print 'unable to download the package "monocle"';
print_out = r('library("monocle")')
print "Reading Monocle data..."
data_import = 'fpkm_matrix<-read.delim(%s,row.names=1,check.names=FALSE)' % samplelogfile
#print [data_import]
print_out = r(data_import);
print print_out
data_import = 'sample_sheet<-read.delim(%s,row.names=1,check.names=FALSE)' % grp_list
#print [data_import]
print_out = r(data_import);
print print_out
data_import = 'gene_ann<-read.delim(%s,row.names=1,check.names=FALSE)' % gene_list
#print [data_import]
print_out = r(data_import);
print print_out
print_out= r('pd <- new("AnnotatedDataFrame",data=sample_sheet)');
print_out=r('fd <- new("AnnotatedDataFrame",data=gene_ann)');
print_out=r('URMM <- newCellDataSet(as.matrix(fpkm_matrix),phenoData = pd,featureData =fd)');
print print_out
#colname(a) == colname(b)
print_out=r('URMM<- detectGenes(URMM, min_expr = 0)')
gene_exp='expressed_genes <- row.names(subset(fData(URMM), num_cells_expressed >=%s ))'% expPercent
#print [gene_exp]
try:print_out = r(gene_exp)
except Exception:
print "expression genes"
print_out=r('length(expressed_genes)')
print print_out
# specify the grouping column for finding differential genes
import multiprocessing
cores = multiprocessing.cpu_count()
print 'using', cores, 'cores'
k = 'diff_test_res <- differentialGeneTest(URMM[expressed_genes, ], fullModelFormulaStr = "expression~Group",cores=%s)' % cores
print [k]
print_out=r(k)
print print_out
gene_ord='ordering_genes <- row.names(subset(diff_test_res, pval < %s))' %p_val
print_out=r(gene_ord); print print_out
print_out=r('write.table(ordering_genes,file="ordering_genes.txt")') ### Writing out the informative genes used
print print_out
print_out=r('length(ordering_genes)'); print print_out
print_out=r('ordering_genes <- intersect(ordering_genes, expressed_genes)'); print print_out
print_out=r('URMM <- setOrderingFilter(URMM, ordering_genes)'); print print_out
print_out=r('URMM <- reduceDimension(URMM, use_irlba = F)'); print print_out
for i in range(numGroups,1,-1):
span='URMM <- orderCells(URMM, num_paths = %s, reverse = F)'% i;
print_out=r(span);
print print_out
if "Error" in print_out:
continue
else:
print_out=r(span);print i
print print_out
break
print_out=r('png("Monocle/monoclePseudotime.png")');
print print_out
print_out=r('plot_spanning_tree(URMM)'); print print_out
print_out=r('dev.off()')
print_out=r('pdf("Monocle/monoclePseudotime.pdf")');
print print_out
print_out=r('plot_spanning_tree(URMM)'); print print_out
print_out=r('dev.off()')
print_out=r('pdf("Monocle/monoclePseudotimeOriginalGroups.pdf")');
print print_out
        print_out=r('plot_spanning_tree(URMM, color_by = "originalGroups")'); print print_out
print_out=r('dev.off()')
print_out=r('write.table(pData(URMM),file="Monocle/monoclePseudotime.txt")')
print " completed"
def AffyNormalization(self,normalization_method,probe_level,batch_effects):
print 'Loading affy package in R'
print_out = r('library("affy")')
if "Error" in print_out:
#print_out = r('install.packages("ggplot2", repos="http://cran.us.r-project.org")')
print 'Installing the R package "affy" in Config/R'
print_out = r('source("http://bioconductor.org/biocLite.R"); biocLite("affy")')
if "Error" in print_out: print 'unable to download the package "affy"'; forceError
print_out = r('library("affy")')
if 'gcrma' in normalization_method:
print 'Loading gcrma package in R'
print_out = r('library("gcrma")')
if "Error" in print_out:
print 'Installing the R package "gcrma" in Config/R'
print_out = r('source("http://bioconductor.org/biocLite.R"); biocLite("gcrma")')
if "Error" in print_out: print 'unable to download the package "gcrma"'; forceError
print_out = r('library("gcrma")')
if batch_effects == 'remove':
### Import or download support for SVA/Combat
print 'Loading sva package in R'
print_out = r('library("sva")')
if "Error" in print_out:
print 'Installing the R package "sva" in Config/R'
print_out = r('source("http://bioconductor.org/biocLite.R"); biocLite("sva")')
if "Error" in print_out: print 'unable to download the package "sva"'; forceError
print_out = r('library("sva")')
print "Reading Affy files..."
print_out = r('rawdata<-ReadAffy()')
print print_out
r('setwd("ExpressionInput")')
        if probe_level: ### normalize at the level of probes rather than probeset (e.g., alt.exon analysis of 3' array)
print_out = r('PM<-probes(rawdata,which="pm")'); print print_out
print_out = r('AffyInfo<-dimnames(PM)[[1]]'); print print_out
print_out = r('cutpos<-regexpr("\\d+$",AffyInfo,perl=T)'); print print_out
print_out = r('AffyID<-substr(AffyInfo,1,cutpos-1)'); print print_out
print_out = r('probe<-as.numeric(substr(AffyInfo,cutpos,nchar(AffyInfo)))'); print print_out
print_out = r('data.bgc<-bg.correct(rawdata,method="rma")'); print print_out
print_out = r('data.bgc.q<-normalize.AffyBatch.quantiles(data.bgc,type="pmonly")'); print print_out
print_out = r('pm.bgc.q<-probes(data.bgc.q,which="pm")'); print print_out
print_out = r('normalized<-cbind(AffyID,probe,pm.bgc.q)'); print print_out
command = 'write.table(normalized,file='+self.File()+',sep="\t",row.names=FALSE, quote=FALSE)'
print_out = r(command)
print print_out
print 'probe-level normalization complete'
else:
print "Begining %s normalization (will install array annotations if needed)... be patient" % normalization_method
print_out = r('normalized<-%s(rawdata)') % normalization_method
print print_out
command = 'write.exprs(normalized,'+self.File()+')'; print_out = r(command)
print print_out
print self.File(), 'written...'
if batch_effects == 'remove':
### Import data
command = 'mod = model.matrix(~as.factor(cancer) + age, data=pheno)'
print_out = r(command)
command = 'cdata = ComBat(dat=normalized, batch=as.factor(pheno$batch), mod=mod, numCov=match("age", colnames(mod)))'
print_out = r(command)
command = 'write.table(cdata,file='+self.File()+',sep="\t",row.names=FALSE, quote=FALSE)'
print_out = r(command)
output_file = string.replace(self.File(),'exp.','stats.')
print_out = r('calls<-mas5calls(rawdata)')
#print_out = r('pvals<-se.exprs(calls)') ### outdated?
print_out = r('pvals<-assayData(calls)[["se.exprs"]]')
command = 'write.table(pvals,'+output_file+',sep = "\t", col.names = NA)'; print_out = r(command)
print output_file, 'written...'
def Limma(self,test_type):
r('library("limma")')
filename = self.File()
try: output_file = string.replace(filename,'input','output-'+test_type)
except ValueError: output_file = filename[0:-4]+'-output.txt'
print "Begining to process",filename
data_import = 'data<-read.table(%s,sep="\t",header=T,row.names=1,as.is=T)' % filename
print_out = r(data_import)
design_matrix_file = string.replace(filename,'input','design')
design_import = 'design<-read.table(%s,sep="\t",header=T,row.names=1,as.is=T)' % design_matrix_file
design_matrix = r(design_import)
print_out = r('fit<-lmFit(data,design)')
fit_data = r['fit']
print_out = r('fit<-eBayes(fit)')
fit_data = r['fit']
contrast_matrix_file = string.replace(filename,'input','contrast')
contrast_import = 'contrast<-read.table(%s,sep="\t",header=T,row.names=1,as.is=T)' % contrast_matrix_file
print_out = r(contrast_import)
contrast_matrix = r['contrast']
r('contrast<-as.matrix(contrast)')
r('fit.contrast<-contrasts.fit(fit,contrast)')
r('fit.contrast<-eBayes(fit.contrast)')
r('nonadj<-fit.contrast$F.p.value')
if test_type == 'fdr':
print_out = r('results<-p.adjust(fit.contrast$F.p.value,method="fdr")')
else:
print_out = r('results<-nonadj')
result = r['results']
print 'test_type=',test_type
        print_out = r('nsig<-sum(results<0.05)')
        summary = r['nsig']
        print "Number of probesets with a p<0.05",summary,"using",test_type
r('output<-cbind(data,results)')
output = 'write.table(output,%s,sep="\t")' % output_file
print_out = r(output)
print output_file, 'written...'
def Multtest(self,test_type):
r('library("multtest")')
filename = self.File()
try: output_file = string.replace(filename,'input','output')
except ValueError: output_file = filename[0:-4]+'-output.txt'
print "Begining to process",filename
parse_line = 'job<-read.table(%s,sep="\t", row.names=1, as.is=T)' % filename
print_out = r(parse_line)
print_out = r('matrix_size<-dim(job)')
print_out = r('label<-job[1,2:matrix_size[2]]')
print_out = r('jobdata<-job[2:matrix_size[1],2:matrix_size[2]]')
if test_type == "f":
print_out = r('ttest<-mt.maxT(jobdata,label, test="f", B=50000)')
if test_type == "t":
print_out = r('ttest<-mt.maxT(jobdata,label)')
print_out = r('ttest2<-ttest[order(ttest[,1]),]')
write_file = 'write.table(ttest2,%s,sep="\t")' % output_file
print_out = r(write_file)
print "Results written to:",output_file
def check_hopach_file_type(self):
if 'hopach.input' in self.File():
return 'continue'
else: return 'break'
def check_multtest_file_type(self):
if 'output' not in self.File():
return 'continue'
else: return 'break'
def check_limma_file_type(self):
if 'input' in self.File():
return 'continue'
else: return 'break'
def Hopach(self,cluster_method,metric_gene,force_gene,metric_array,force_array):
if R_present==False:
rNotPresent
print_out = r('library("Biobase")')
if "Error" in print_out:
print 'Installing the R package "Biobase" in Config/R'
print_out = r('source("http://bioconductor.org/biocLite.R"); biocLite("Biobase")')
if "Error" in print_out: print 'unable to download the package "Biobase"'; forceError
print_out = r('library("Biobase")')
print_out = r('library("hopach")')
if "Error" in print_out:
print 'Installing the R package "hopach" in Config/R'
print_out = r('source("http://bioconductor.org/biocLite.R"); biocLite("hopach")')
if "Error" in print_out: print 'unable to download the package "hopach"'; forceError
print_out = r('library("hopach")')
filename = self.File()
#r('memory.limit(2000)')
print "Begining to process",filename,"using HOPACH"
metric_g = self.format_value_for_R(metric_gene)
metric_a = self.format_value_for_R(metric_array)
parse_line = 'data<-read.table(%s,sep="\t",as.is=T,row.names=1,header=T)' % filename
checklinelengths(self._file)
print_out = r(parse_line)
dat = r['data']
#print "Number of columns in input file:",len(dat)
print_out = r('data<-as.matrix(data)')
dat = r['data']
#print "Number of columns in matrix:",len(dat)
force1=''; force2=''; hopg='NULL'; hopa='NULL'; distmatg='NULL'; distmata = 'NULL' ### defaults for tree export
if force_gene != '' and force_gene != 0: force1=',kmax='+str(force_gene)+', khigh='+str(force_gene)
if force_array != '' and force_array != 0: force2=',kmax='+str(force_array)+', khigh='+str(force_array)
if cluster_method == 'both' or cluster_method == 'gene':
distance_matrix_line = 'distmatg<-distancematrix(data,d=%s)' % metric_g
#print distance_matrix_line
if len(dat) > 1:
print_out1 = r(distance_matrix_line)
print_out2 = r('hopg<-hopach(data,dmat=distmatg,ord="own"'+force1+')')
#print 'hopg<-hopach(data,dmat=distmatg,ord="own"'+force1+')'
try: hopach_run = r['hopg']
except Exception:
print print_out1
print print_out2
hopg = 'hopg'
distmatg = 'distmatg'
gene_output = self.HopachGeneOutputFilename(metric_gene,str(force_gene))
output = 'out<-makeoutput(data,hopg,file=%s)' % gene_output
#print output
print_out = r(output)
output_file = r['out']
status = 'stop'
if 'clustering' in hopach_run:
if 'order' in hopach_run['clustering']:
try:
if len(hopach_run['clustering']['order']) > 10: status = 'continue'
except TypeError:
error = 'file: '+filename+": Hopach returned the array of cluster orders as blank while clustering GENES... can not process cluster... continuing with other files"
print error; errors.append(error)
if status == 'continue':
r(output_file); print 'hopach output written'
else:
error = 'file: '+filename+" Hopach returned data-matrix length zero...ARRAY clusters can not be generated"
print error; errors.append(error)
if cluster_method == 'both' or cluster_method == 'array':
distance_matrix_line = 'distmata<-distancematrix(t(data),d=%s)' % metric_a
if len(dat) > 1:
dist = r(distance_matrix_line)
#print distance_matrix_line
print_out = r('hopa<-hopach(t(data),dmat=distmata,ord="own"'+force2+')')
#print 'hopa<-hopach(t(data),dmat=distmata,ord="own"'+force2+')'
hopach_run = r['hopa']
hopa = 'hopa'
distmata = 'distmata'
array_output = self.HopachArrayOutputFilename(metric_array,str(force_array))
output = 'out<-makeoutput(t(data),hopa,file=%s)' % array_output
#print output
print_out = r(output)
output_file = r['out']
status = 'stop'
if 'clustering' in hopach_run:
if 'order' in hopach_run['clustering']:
try:
if len(hopach_run['clustering']['order']) > 10: status = 'continue'
except TypeError:
error = 'file: '+filename+": Hopach returned the array of cluster orders as blank while clustering ARRAYS... can not process cluster"
print error; errors.append(error)
if status == 'continue':
r(output_file); print 'hopach output written'
else:
error = 'file: '+filename+"data-matrix length zero...ARRAY clusters can not be generated...continuing analysis"
print error; errors.append(error)
if len(metric_g)==0: metric_g = 'NULL'
if len(metric_a)==0: metric_a = 'NULL'
try:
output_filename = string.replace(gene_output,'rows.','')
cdt_output_line = 'hopach2tree(data, file = %s, hopach.genes = %s, hopach.arrays = %s, dist.genes = %s, dist.arrays = %s, d.genes = %s, d.arrays = %s, gene.wts = NULL, array.wts = NULL, gene.names = NULL)' % (output_filename,hopg,hopa,distmatg,distmata,metric_g,metric_a) ###7 values
except Exception: None
make_tree_line = 'makeTree(labels, ord, medoids, dist, side = "GENE")' ### Used internally by HOPACH
#print cdt_output_line
try: print_out = r(cdt_output_line)
except Exception: None
#print print_out
def HopachGeneOutputFilename(self,value,force):
filename = self.File() ### Relative to the set working directory
if 'hopach.input' in filename: ### When running this module on its own (requires known filetypes)
new_filename = string.replace(filename,'hopach.input','hopach.output')
if len(value)>1: new_filename = string.replace(new_filename,'.txt','-'+value+'.txt')
if len(force)>0: new_filename = string.replace(new_filename,'.txt','-'+'force_'+str(force)+'c.txt')
else: ### When called from an external heatmap visualization module
filename = self._file ### full path
new_filename = findParentDir(filename)+'/hopach/rows.'+findFileName(filename)
try: os.mkdir(findParentDir(new_filename)) ### create "hopach" dir if not present
except Exception: None
new_filename = '"'+new_filename+'"'
return new_filename
def HopachArrayOutputFilename(self,value,force):
filename = self.File()
if 'hopach.input' in filename: ### When running this module on its own (requires known filetypes)
new_filename = string.replace(filename,'hopach.input','arrays.output')
if len(value)>1: new_filename = string.replace(new_filename,'.txt','-'+value+'.txt')
if len(force)>0: new_filename = string.replace(new_filename,'.txt','-'+'force_'+str(force)+'c.txt')
else:
filename = self._file ### full path
new_filename = findParentDir(filename)+'/hopach/columns.'+findFileName(filename)
try: os.mkdir(findParentDir(new_filename)) ### create "hopach" dir if not present
except Exception: None
new_filename = '"'+new_filename+'"'
return new_filename
def display(self):
print self.data
class FormatData:
def setdata(self,value):
self.data = value
def transform(self):
self.data = checktype(self.data)
def display(self):
print self.data
def returndata(self):
return self.data
def checktype(object):
###Checks to see if item is a list or dictionary. If dictionary, convert to list
import types
if type(object) is types.DictType:
object = converttolist(object)
elif type(object) is types.ListType:
object = object
elif type(object) is types.TupleType:
object = list(object)
elif type(object) is types.StringType:
object = importtable(object)
return object
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def checklinelengths(filename):
fn=filepath(filename); first_row='yes'; line_number=0
for line in open(fn,'rU').xreadlines():
try: data = cleanUpLine(line)
except Exception: print 'error parsing the line:',[line], line_number
t = string.split(data,'\t')
if first_row == 'yes':
elements = len(t)
first_row = 'no'
else:
if len(t) != elements:
print "Line number", line_number, "contains",len(t),"elements, when",elements,"expected...kill program"
print filename; kill
line_number+=1
def converttolist(dictionary):
###Converts dictionary to list by appending the dictionary key as the first item in the list
converted_lists=[]
for key in dictionary:
dictionary_list = dictionary[key]
dictionary_list.reverse(); dictionary_list.append(key); dictionary_list.reverse()
converted_lists.append(dictionary_list)
return converted_lists
############ IMPORT FILES BEGIN ############
def importtable(filename):
fn=filepath(filename); tab_db = []
for line in open(fn,'rU').readlines():
data,null = string.split(line,'\n')
t = string.split(data,'\t')
tab_db.append(t)
return tab_db
def filepath(filename):
dir=os.path.dirname(__file__) #directory file is input as a variable
status = verifyFile(filename)
if status:
fn = filename
else:
fn=os.path.join(dir,filename)
return fn
def verifyFile(filename):
status = False
try:
fn=filepath(filename)
for line in open(fn,'rU').xreadlines(): status = True;break
except Exception: status = False
return status
def findFileName(filename):
filename = string.replace(filename,'\\','/')
dataset_name = string.split(filename,'/')[-1]
return dataset_name
def findParentDir(filename):
filename = string.replace(filename,'//','/')
filename = string.replace(filename,'\\','/')
x = string.find(filename[::-1],'/')*-1
return filename[:x]
def setWorkingDirectory(filename):
### Set R's working directory when calling this module remotely
working_dir = findParentDir(filename)
setwd = 'setwd("%s")' % working_dir
r(setwd)
def read_directory(sub_dir):
dir=os.path.dirname(__file__)
#print "Working Directory:", r('getwd()')
working_dir = dir+'/'+sub_dir[1:]
setwd = 'setwd("%s")' % working_dir
r(setwd)
#print "Working Directory:", r('getwd()')
dir_list = os.listdir(dir +'/'+ sub_dir[1:]); dir_list2 = []
for entry in dir_list: #add in code to prevent folder names from being included
if entry[-4:] == ".txt" or entry[-4:] == ".csv": dir_list2.append(entry)
return dir_list2
def CreateFilesMonocle(filename,rawExpressionFile,species='Hs'):
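### Writes the three Monocle input files (expressionFile.txt, geneAnnotations.txt, sampleGroups.txt)
### into a 'Monocle' subdirectory; expression values are shifted by the per-row minimum so the
### exported matrix is non-negative, with non-numeric entries replaced by a small placeholder.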
first_row = True
key_db={}
key_list=[]
fn=filepath(filename)
offset=0
nonNumericsPresent=False
try:
import gene_associations
gene_to_symbol = gene_associations.getGeneToUid(species,('hide','Ensembl-Symbol'))
except Exception:
print "gene_symbols present"
gene_to_symbol={}
setWorkingDirectory(findParentDir(filename)[:-1])
try: os.mkdir(findParentDir(filename)+'/Monocle')
except Exception: None
#filename=self.File()
x = 0
data_name=findParentDir(filename)+'/Monocle/expressionFile.txt'
gene_name=findParentDir(filename)+'/Monocle/geneAnnotations.txt'
sample_name=findParentDir(filename)+'/Monocle/sampleGroups.txt'
gene_names = [];
gene_list=[];
dat=[];
export_cdt = open(sample_name,'w')
export_gene=open(gene_name,'w')
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if first_row == True:
if 'row_clusters-flat' in t and 'row_clusters-flat' not in t[0]:
headers = string.join(t[2:],'\t')+'\n'
offset = 1
else:
headers = string.join(t[1:],'\t')+'\n'
first_row = False
else:
key = t[0]
if key!='column_clusters-flat':
key_list.append(key)
try: s = map(float,t[offset+1:])
except Exception:
nonNumericsPresent=True
key_db[key]=t
else:
clusters = map(str,t[offset+1:])
for key in key_list:
t = key_db[key]
s=[key]
if offset ==1: s.append('')
temp=[]
for value in t[offset+1:]:
try: temp.append(float(value))
except Exception: pass
min1=min(temp)
for value in t[offset+1:]:
try: s.append(str(float(value)-min1))
except Exception: s.append('0.000101')
key_db[key]=s
export_object = open(data_name,'w')
export_object.write(''+'\t'+headers) ### Header is the same for each file
for key in key_list:
t = key_db[key]
if offset > 0:
t = [t[0]]+t[1+offset:]
export_object.write(string.join(t,'\t')+'\n') ### Write z-score values and row names
export_object.close()
print 'File written...'
#return input_file
array_names = []; array_linker_db = {}; d = 0; i = 0
for entry in headers.split('\t'):
entry=cleanUpLine(entry)
if '::' in entry:
a = (entry.split("::"))
elif ':' in entry:
a = (entry.split(":"))
else:
a = (clusters[i],entry)
#entry=string.join(a,'.')
ent=entry+'\t'+a[0];
#if(ent[0].isdigit()):
# ent='X'+ent[0:]
#if '-' in ent:
# ent=string.replace(ent,'-','.')
#if '+' in ent:
# ent=string.replace(ent,'+','.')
#print j
array_names.append(ent);
i+=1
i=0
eheader = string.join(['']+['Group'],'\t')+'\n' ### format column-flat-clusters for export
export_cdt.write(eheader)
for row in array_names:
export_cdt.write(row+'\n')
i+=1
export_cdt.close()
gheader = string.join(['']+ ['gene_short_name'],'\t')+'\n' ### format column-flat-clusters for export
export_gene.write(gheader)
for key in key_list:
proceed=False
### The commented out code just introduces errors and is not needed - re-evaluate in the future if needed
"""
if key in gene_to_symbol:
symbol = gene_to_symbol[key][0]
if symbol in gene_list:
nid = symbol
proceed = True
if proceed:
k=gene_list.index(nid)
export_object.write(line)
export_gene.write(key+'\n')
else:
export_gene.write(key+'\t'+key+'\n')"""
export_gene.write(key+'\t'+key+'\n')
export_object.close()
export_gene.close()
def reformatHeatmapFile(input_file):
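### Converts an AltAnalyze 'Clustering-' heatmap text file into an 'Input-' file whose column
### headers are prefixed with their cluster label ('cluster::sample'); returns the new file path
### and the number of unique column clusters.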
import unique
export_file=string.replace(input_file,'Clustering-','Input-')
eo = export.ExportFile(export_file)
first_row = True
fn=filepath(input_file)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if first_row == True:
if 'column_clusters-flat' not in t:
array_names = []
for i in t[2:]:
array_names.append(string.replace(i,':','-'))
#print array_names;sys.exit()
#array_names.append(i)
elif 'column_clusters-flat' in t:
array_clusters = t[2:]
unique_clusters = unique.unique(array_clusters)
ind=0; headers=[]
for c in array_clusters:
headers.append(c+'::'+array_names[ind])
ind+=1
headers = string.join(['uid']+headers,'\t')+'\n'
eo.write(headers)
first_row = False
else:
values = string.join([t[0]]+t[2:],'\t')+'\n'
eo.write(values)
return export_file, len(unique_clusters)
def run_JTKcycle(expFile,annotFile,Time_range1, Time_range2,No_of_Timepoints,No_of_replicates,timepoint_difference):
print 'Loading JTK-Cycle package in R'
path='"'+r_package_path+'/JTK_CYCLE.R"'
#print [path]
line = 'source(%s)' % path
print_out = r(line)
"""
if "Error" in print_out:
print 'Installing the R package "JTK_CYCLE.R" in Config/R'
print_out = r('install.packages("devtools")')
print print_out
print_out = r('library(devtools)')
print print_out
print_out = r('install_github("mfcovington/jtk-cycle")')
#print_out = r('source("http://bioconductor.org/biocLite.R"); biocLite("jtk-cycle")')
print print_out
print_out = r('library("JTK_CYCLE.R")')
sys,exit()
print_out = r('source("/Users/ram5ge/Desktop/Krithika/JTK_Cycle/JTK_CYCLE.R")');print print_out
if "Error" in print_out: print "JTK_CYCLE.R is missing"
else: print 'Loading JTK Cycle'
"""
print_out = r('project <- "JTK_output"')
print_out = r('options(stringsAsFactors=FALSE)');print print_out
a = '"'+annotFile+'"'
read_annot = 'annot <- read.delim(%s)' % a
print [read_annot]
print_out = r(read_annot);#print print_out
v = '"'+expFile+'"'
read_data = 'input_data <- read.delim(%s)' % v
print [read_data]
print_out = r(read_data);#print print_out
print_out = r('rownames(input_data) <- input_data[,1]');#print print_out
print_out = r('input_data <- input_data[,-1]');#print print_out
#dist_calc = r('jtkdist(24,1)')
dist_calc = 'jtkdist(%s,%s)' % (str(No_of_Timepoints), str(No_of_replicates))
print [dist_calc]
print_out = r(dist_calc);#print print_out
period_calc = 'periods <- %s:%s' %(str(Time_range1), str(Time_range2))
print [period_calc]
print_out = r(period_calc);#print print_out
j = str(timepoint_difference)
jtk_calc = 'jtk.init(periods,%s)' % j
print [jtk_calc]
print_out = r(jtk_calc);#print print_out
v = 'cat("JTK analysis started on",date(),"\n")'
print [v]
print_out = r(v);#print print_out
print_out = r('flush.console()');#print print_out
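### Note: the apply() call assembled below operates on an R object named 'data', whereas the
### expression table above was read into 'input_data'; 'data' is assumed to be supplied (or
### renamed) by JTK_CYCLE.R, otherwise the two names need to be reconciled.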
v = 'st <- system.time({res <- apply(data,1,function(z)'
v+= ' {jtkx(z); c(JTK.ADJP,JTK.PERIOD,JTK.LAG,JTK.AMP)});'
v+= ' res <- as.data.frame(t(res)); bhq <- p.adjust(unlist(res[,1]),"BH");'
v+= ' res <- cbind(bhq,res); colnames(res) <- c("BH.Q","ADJ.P","PER","LAG","AMP");'
v+= ' results <- cbind(annot,res,data); results <- results[order(res$ADJ.P,-res$AMP),]})'
print [v]
print_out = r(v); print print_out
#print_out = r('dim(X)');print print_out
print_out = r('print(st)');print #print_out
print_out = r('save(results,file=paste("JTK",project,"rda",sep="."))');#print print_out
print_out = r('write.table(results,file=paste("JTK",project,"txt",sep="."),row.names=F,col.names=T,quote=F,sep="\t")');#print print_out
def performMonocleAnalysisFromHeatmap(species,heatmap_output_dir,rawExpressionFile):
numGroups=10
if 'Clustering-' in heatmap_output_dir:
export_file,numGroups = reformatHeatmapFile(heatmap_output_dir)
#else:
export_file = heatmap_output_dir;
CreateFilesMonocle(export_file,rawExpressionFile,species=species)
print 'Looking for',numGroups, 'Monocle groups in the input expression file.'
remoteMonocle(export_file,expPercent=5,pval=0.05,numGroups=numGroups)
if __name__ == '__main__':
expFile = '/Users/saljh8/Downloads/Liver_Smoothed_exp_steady_state.txt'
annotFile = '/Users/saljh8/Downloads/Liver_annot.txt'
Time_range1 = '10'
Time_range2 = '12'
No_of_Timepoints = '24'
No_of_replicates = '1'
timepoint_difference = '2'
run_JTKcycle(expFile,annotFile,Time_range1, Time_range2,No_of_Timepoints,No_of_replicates,timepoint_difference);sys.exit()
errors = []
cluster_method='array';metric_gene="";force_gene='';metric_array="euclid";force_array=''
analysis_method='hopach'; multtest_type = 'f'
#Sample log File
#Input-exp.MixedEffectsThanneer-DPF3%20DMRT3%20FOXA1%20SMAD6%20TBX3%20amplify%20monocle-hierarchical_cosine_correlated.txt
filename='/Users/saljh8/Desktop/cardiacRNASeq/DataPlots/Clustering-additionalExpressionSingleCell-annotated-hierarchical_cosine_cosine2.txt'
rawExpressionFile = filename
#filename = "/Volumes/SEQ-DATA/Eric/embryonic_singlecell_kidney/ExpressionOutput/Clustering/SampleLogFolds-Kidney.txt"
#filename = "/Volumes/SEQ-DATA/SingleCell-Churko/Filtered/Unsupervised-AllExons/NewCardiacMarkers1/FullDataset/ExpressionOutput/Clustering/SampleLogFolds-CM.txt"
#rawExpressionFile = '/Volumes/SEQ-DATA/SingleCell-Churko/Filtered/Unsupervised-AllExons/NewCardiacMarkers1/FullDataset/ExpressionInput/exp.CM-steady-state.txt'
#filename = '/Users/saljh8/Desktop/Stanford/ExpressionInput/amplify/DataPlots/Clustering-exp.EB-SingleCell-GPCR-hierarchical_cosine_correlation.txt'
#rawExpressionFile = '/Users/saljh8/Desktop/Stanford/ExpressionInput/exp.EB-SingleCell.txt'
performMonocleAnalysisFromHeatmap('Hs',filename,rawExpressionFile);sys.exit()
CreateFilesMonocle(filename,rawExpressionFile)
remoteMonocle(filename,expPercent=0,pval=0.01,numGroups=5);sys.exit()
filename = '/Users/nsalomonis/Downloads/GSE9440_RAW/ExpressionInput/exp.differentiation.txt'
remoteAffyNormalization(filename,'rma',True,'remove'); sys.exit()
print "******Analysis Method*******"
print "Options:"
print "1) Multtest (permutation ftest/ttest)"
print "2) HOPACH clustering"
print "3) limma 2-way ANOVA"
inp = sys.stdin.readline(); inp = inp.strip()
analysis_method_val = int(inp)
if analysis_method_val == 1: analysis_method = "multtest"
if analysis_method_val == 2: analysis_method = "hopach"
if analysis_method_val == 3: analysis_method = "limma"
if analysis_method == "hopach":
print "******Analysis Options*******"
print "Cluster type:"
print "1) genes only (cluster rows)"
print "2) arrays only (cluster columns)"
print "3) both"
inp = sys.stdin.readline(); inp = inp.strip()
cluster_type_call = int(inp)
if cluster_type_call == 1: cluster_method = "gene"
if cluster_type_call == 2: cluster_method = "array"
if cluster_type_call == 3: cluster_method = "both"
if cluster_method == "array" or cluster_method == "both":
print "******Analysis Options For Array Clustering*******"
print "Cluster metrics:"
print "1) euclidian distance (sensitive to magnitude)"
print "2) cosine angle distance (not sensitive to magnitude)"
print "3) correlation distance"
inp = sys.stdin.readline(); inp = inp.strip()
if cluster_method == "array" or cluster_method == "both":
metric_array_call = int(inp)
if metric_array_call == 1: metric_array = "euclid"
if metric_array_call == 2: metric_array = "cosangle"
if metric_array_call == 3: metric_array = "cor"
if cluster_method == "gene" or cluster_method == "both":
print "******Analysis Options For Gene Clustering*******"
print "Cluster metrics:"
print "1) euclidian distance (sensitive to magnitude)"
print "2) cosine angle distance (not sensitive to magnitude)"
print "3) correlation distance"
inp = sys.stdin.readline(); inp = inp.strip()
if cluster_method == "gene" or cluster_method == "both":
try: metric_gene_call = int(inp)
except ValueError: print [inp], 'not a valid option'; sys.exit()
if metric_gene_call == 1: metric_gene = "euclid"
if metric_gene_call == 2: metric_gene = "cosangle"
if metric_gene_call == 3: metric_gene = "cor"
if metric_gene == "cosangle":
print "******Analysis Options*******"
print "Absolute Clustering:"
print "1) yes"
print "2) no"
inp = sys.stdin.readline(); inp = inp.strip()
if inp == "1": metric_gene = "abscosangle"
print "Force Cluster Number for Arrays:"
print "Enter 'n' if you don't want to "
print "Enter number of clusters of arrays if you do"
inp = sys.stdin.readline(); inp = inp.strip()
if inp == 'n' or inp == 'N': force_array = ''
else:force_array = int(inp)
working_dir = '/hopach_input'
if analysis_method == "multtest":
print "******Analysis Options*******"
print "Statistical test:"
print "1) ftest (for multiple groups)"
print "2) ttest (for two groups)"
inp = sys.stdin.readline(); inp = inp.strip()
multtest_type_call = int(inp)
if multtest_type_call == 1: multtest_type = "f"
if multtest_type_call == 2: multtest_type = "t"
working_dir = '/multtest_input'
if analysis_method == "limma":
working_dir = '/limma_input'
print "******Analysis Options*******"
print "Statistical test:"
print "1) Non-adjusted"
print "2) FDR"
inp = sys.stdin.readline(); inp = inp.strip()
limma_type_call = int(inp)
if limma_type_call == 1: limma_type = "nonadj"
if limma_type_call == 2: limma_type = "fdr"
dir_list = read_directory(working_dir)
for input in dir_list: #loop through each file in the directory to output results
input_file = working_dir + "/"+ input
input_file = input_file[1:] #not sure why, but the '\' needs to be there while reading initially but not while accessing the file later
z = RScripts(input_file)
if analysis_method == "hopach":
status = z.check_hopach_file_type()
if status == 'continue':
z.Hopach(cluster_method,metric_gene,force_gene,metric_array,force_array)
if analysis_method == "multtest":
status = z.check_multtest_file_type()
if status == 'continue':
z.Multtest(multtest_type)
if analysis_method == "limma":
status = z.check_limma_file_type()
if status == 'continue':
design_matrix_file = string.replace(input,'input','design')
contrast_matrix_file = string.replace(input,'input','contrast')
if design_matrix_file in dir_list and contrast_matrix_file in dir_list:
z.Limma(limma_type)
if analysis_method == "hopach":
if len(errors)>0:
print "**************ALL ERRORS**************"
for entry in errors:
print entry
else: print 'Execution complete... check outputs for verification'
|
wuxue/altanalyze
|
R_interface.py
|
Python
|
apache-2.0
| 52,813
|
[
"Bioconductor"
] |
bd17639fb37642be3e554259acb58c0bde169feb488a6450b7bdd83df218f929
|
#!/usr/bin/env python
import vtk
# arrow.py adapted from the C++ vtk examples and translated to python.
def main():
colors = vtk.vtkNamedColors()
arrowSource = vtk.vtkArrowSource()
# arrowSource.SetShaftRadius(0.01)
# arrowSource.SetTipLength(.9)
# Create a mapper and actor
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(arrowSource.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Visualize
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetWindowName("Arrow")
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderer.AddActor(actor)
renderer.SetBackground(colors.GetColor3d("MidnightBlue"))
renderWindow.Render()
renderWindowInteractor.Start()
if __name__ == "__main__":
main()
|
lorensen/VTKExamples
|
src/Python/GeometricObjects/Arrow.py
|
Python
|
apache-2.0
| 934
|
[
"VTK"
] |
7dc22f8acb834110373427db46aec19416eeab361c0f4e0e86ab527a635b3c09
|
# Copyright (C) 2012 Mathias Brodala
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from __future__ import division
import cairo
from collections import namedtuple
import glib
import gobject
import gtk
import sys
from math import pi
from xl import (
event,
player,
settings
)
from xl.nls import gettext as _
from xl.player.adapters import PlaybackAdapter
from xlgui.widgets import info
import migration
from alphacolor import alphacolor_parse
import osd_preferences
OSDWINDOW = None
def enable(exaile):
"""
Enables the on screen display plugin
"""
migration.migrate_settings()
global OSDWINDOW
OSDWINDOW = OSDWindow()
def disable(exaile):
"""
Disables the on screen display plugin
"""
global OSDWINDOW
OSDWINDOW.destroy()
OSDWINDOW = None
def get_preferences_pane():
return osd_preferences
Point = namedtuple('Point', 'x y')
class OSDWindow(gtk.Window, PlaybackAdapter):
"""
A popup window showing information
of the currently playing track
"""
autohide = gobject.property(
type=gobject.TYPE_BOOLEAN,
nick='autohide',
blurb='Whether to automatically hide the window after some time',
default=True,
flags=gobject.PARAM_READWRITE
)
__gsignals__ = {}
def __init__(self):
"""
Initializes the window
"""
gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
# for whatever reason, calling set_opacity seems
# to crash on Windows when using PyGTK that comes with
# the GStreamer SDK. Since this plugin is enabled by
# default, just don't fade in/out on windows
#
# https://bugs.freedesktop.org/show_bug.cgi?id=54682
self.use_fade = True
if sys.platform == 'win32':
self.use_fade = False
self.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_NOTIFICATION)
self.set_title('Exaile OSD')
self.set_decorated(False)
self.set_keep_above(True)
self.set_skip_pager_hint(True)
self.set_skip_taskbar_hint(True)
self.set_resizable(True)
self.set_app_paintable(True)
self.stick()
self.add_events(gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK | gtk.gdk.POINTER_MOTION_MASK)
# Cached option values
self.__options = {
'background': None,
'display_duration': None,
'border_radius': None
}
self.info_area = info.TrackInfoPane(player.PLAYER)
self.info_area.set_default_text('')
self.info_area.set_auto_update(True)
self.add(self.info_area)
event.add_callback(self.on_track_tags_changed, 'track_tags_changed')
event.add_callback(self.on_option_set, 'plugin_osd_option_set')
# Trigger initial setup through options
for option in ('format', 'background', 'display_duration',
'show_progress', 'position', 'width', 'height',
'border_radius'):
self.on_option_set('plugin_osd_option_set', settings,
'plugin/osd/{option}'.format(option=option))
# Trigger color map update
self.emit('screen-changed', self.get_screen())
PlaybackAdapter.__init__(self, player.PLAYER)
def destroy(self):
"""
Cleanups
"""
event.remove_callback(self.on_option_set, 'plugin_osd_option_set')
event.remove_callback(self.on_track_tags_changed, 'track_tags_changed')
gtk.Window.destroy(self)
def hide(self):
"""
Starts fadeout of the window
"""
if not self.use_fade:
gtk.Window.hide(self)
return
if self.get_data('fadeout-id') is None:
self.set_data('fadeout-id', glib.timeout_add(50, self.__fade_out))
def show(self):
"""
Stops fadeout and immediately shows the window
"""
if self.use_fade:
try:
glib.source_remove(self.get_data('fadeout-id'))
except:
pass
self.set_data('fadeout-id', None)
self.set_opacity(1)
gtk.Window.show_all(self)
def __fade_out(self):
"""
Constantly decreases the opacity to fade out the window
"""
opacity = self.get_opacity()
if opacity == 0:
glib.source_remove(self.get_data('fadeout-id'))
self.set_data('fadeout-id', None)
gtk.Window.hide(self)
return False
self.set_opacity(opacity - 0.1)
return True
def do_notify(self, parameter):
"""
Triggers hiding if autohide is enabled
"""
if parameter.name == 'autohide':
if self.props.autohide:
self.hide()
def do_expose_event(self, event):
"""
Draws the background of the window
"""
context = self.props.window.cairo_create()
context.rectangle(event.area.x, event.area.y,
event.area.width, event.area.height)
context.clip()
context.set_source_rgba(
self.__options['background'].red_float,
self.__options['background'].green_float,
self.__options['background'].blue_float,
self.__options['background'].alpha_float
)
context.set_operator(cairo.OPERATOR_SOURCE)
context.paint()
gtk.Window.do_expose_event(self, event)
def do_screen_changed(self, screen):
"""
Updates the used colormap
"""
colormap = screen.get_rgba_colormap() or \
screen.get_rgb_colormap()
self.unrealize()
self.set_colormap(colormap)
self.realize()
def do_size_allocate(self, allocation):
"""
Applies the non-rectangular shape
"""
width, height = allocation.width, allocation.height
mask = gtk.gdk.Pixmap(None, width, height, 1)
context = mask.cairo_create()
context.set_source_rgb(0, 0, 0)
context.set_operator(cairo.OPERATOR_CLEAR)
context.paint()
radius = self.__options['border_radius']
inner = gtk.gdk.Rectangle(radius, radius, width - radius, height - radius)
context.set_source_rgb(1, 1, 1)
context.set_operator(cairo.OPERATOR_SOURCE)
# Top left corner
context.arc(inner.x, inner.y, radius, 1.0 * pi, 1.5 * pi)
# Top right corner
context.arc(inner.width, inner.y, radius, 1.5 * pi, 2.0 * pi)
# Bottom right corner
context.arc(inner.width, inner.height, radius, 0.0 * pi, 0.5 * pi)
# Bottom left corner
context.arc(inner.x, inner.height, radius, 0.5 * pi, 1.0 * pi)
context.fill()
self.shape_combine_mask(mask, 0, 0)
gtk.Window.do_size_allocate(self, allocation)
def do_configure_event(self, e):
"""
Stores the window size
"""
width, height = self.get_size()
settings.set_option('plugin/osd/width', width)
settings.set_option('plugin/osd/height', height)
gtk.Window.do_configure_event(self, e)
def do_button_press_event(self, e):
"""
Starts the dragging process
"""
if e.button == 1:
self.set_data('drag-origin', Point(e.x, e.y))
self.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
return True
elif e.button == 3 and e.state & gtk.gdk.MOD1_MASK:
self.begin_resize_drag(gtk.gdk.WINDOW_EDGE_SOUTH_EAST, 3, int(e.x_root), int(e.y_root), e.time)
def do_button_release_event(self, e):
"""
Finishes the dragging process and
saves the window position
"""
if e.button == 1:
settings.set_option('plugin/osd/position', list(self.get_position()))
self.set_data('drag-origin', None)
self.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
return True
def do_motion_notify_event(self, e):
"""
Moves the window while dragging, makes sure
the window is always visible upon mouse hover
"""
drag_origin = self.get_data('drag-origin')
if drag_origin is not None:
position = Point(e.x_root, e.y_root)
self.move(
int(position.x - drag_origin.x),
int(position.y - drag_origin.y)
)
try:
glib.source_remove(self.get_data('hide-id'))
except:
pass
self.show()
def do_leave_notify_event(self, e):
"""
Hides the window upon mouse leave
"""
try:
glib.source_remove(self.get_data('hide-id'))
except:
pass
if self.props.autohide:
self.set_data('hide-id', glib.timeout_add_seconds(
self.__options['display_duration'], self.hide))
gtk.Window.do_leave_notify_event(self, e)
def on_track_tags_changed(self, e, track, tag):
if not tag.startswith('__') and track == player.PLAYER.current:
self.on_playback_track_start(e, player.PLAYER, track)
def on_playback_track_start(self, e, player, track):
"""
Shows the OSD upon track change
"""
glib.idle_add(self.show)
try:
glib.source_remove(self.get_data('hide-id'))
except:
pass
if self.props.autohide:
self.set_data('hide-id', glib.timeout_add_seconds(
self.__options['display_duration'], self.hide))
def on_playback_toggle_pause(self, e, player, track):
"""
Shows the OSD after resuming playback
"""
if not player.is_playing(): return
glib.idle_add(self.show)
try:
glib.source_remove(self.get_data('hide-id'))
except:
pass
if self.props.autohide:
self.set_data('hide-id', glib.timeout_add_seconds(
self.__options['display_duration'], self.hide))
def on_playback_player_end(self, e, player, track):
"""
Hides the OSD upon playback end
"""
if self.props.autohide:
self.set_data('hide-id', glib.timeout_add_seconds(
self.__options['display_duration'], self.hide))
def on_option_set(self, event, settings, option):
"""
Updates appearance on setting change
"""
if option == 'plugin/osd/format':
self.info_area.set_info_format(settings.get_option(option,
_('<span font_desc="Sans 11" foreground="#fff"><b>$title</b></span>\n'
'by $artist\n'
'from $album')
))
if option == 'plugin/osd/background':
self.__options['background'] = alphacolor_parse(settings.get_option(option, '#333333cc'))
glib.idle_add(self.queue_draw)
elif option == 'plugin/osd/display_duration':
self.__options['display_duration'] = int(settings.get_option(option, 4))
elif option == 'plugin/osd/show_progress':
self.info_area.set_display_progress(settings.get_option(option, True))
elif option == 'plugin/osd/position':
position = Point._make(settings.get_option(option, [20, 20]))
glib.idle_add(self.move, position.x, position.y)
elif option == 'plugin/osd/border_radius':
value = settings.get_option(option, 10)
self.set_border_width(max(6, int(value / 2)))
self.__options['border_radius'] = value
self.emit('size-allocate', self.get_allocation())
|
eri-trabiccolo/exaile
|
plugins/osd/__init__.py
|
Python
|
gpl-2.0
| 12,493
|
[
"FLEUR"
] |
b6be84be918145dede23ff25988addfe06e12f0127e84e53370abac07ebc4b19
|
# test_efield.py ---
#
# Filename: test_efield.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Wed Sep 28 12:31:43 2011 (+0530)
# Version:
# Last-Updated: Wed Sep 28 16:10:11 2011 (+0530)
# By: Subhasis Ray
# Update #: 118
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
# Code:
import unittest
import uuid
import os
import numpy
import moose
comp_pos = [[ 1.28151389e-06, 3.28516806e-05, 4.34033507e-05],
[ 3.97458434e-05, 3.13696550e-05, 4.78284191e-06],
[ 7.81225903e-05, 5.52200047e-05, 1.14471225e-05],
[ 1.51851428e-05, 2.90013683e-05, 5.59742276e-05],
[ 6.27772622e-05, 3.17341615e-05, 1.88991378e-05],
[ 6.71202581e-05, 4.73622441e-05, 1.89676926e-05],
[ 8.93229757e-05, 1.98005883e-05, 5.21311088e-05],
[ 1.59775209e-05, 1.90757203e-06, 5.81280477e-05],
[ 9.37027485e-05, 1.05817903e-05, 8.62110626e-05],
[ 2.14710708e-05, 1.81916032e-05, 5.72403065e-05]]
def create_compartment(path, dia=1e-6, length=1e-6, specific_raxial=2.5, specific_conductance=0.2, Em=-65e-3, specific_cm=0.009):
comp = moose.Compartment(path)
comp.Rm = 1.0/(specific_conductance * length * dia * numpy.pi)
comp.Ra = specific_raxial / (dia * dia * numpy.pi/4.0)
comp.Cm = specific_cm * (length * dia * numpy.pi)
comp.length = length
comp.diameter = dia
comp.Em = Em
comp.initVm = Em
return comp
def create_pulsegen(path,
firstLevel=100e-12,
firstWidth=20e-3,
firstDelay=20e-3,
secondLevel=100e-12,
secondWidth=1e9,
secondDelay=1e9):
pulsegen = moose.PulseGen(path)
pulsegen.firstLevel = firstLevel
pulsegen.firstWidth = firstWidth
pulsegen.firstDelay = firstDelay
pulsegen.secondLevel = secondLevel
pulsegen.secondWidth = secondWidth
pulsegen.secondDelay = secondDelay
return pulsegen
class TestEfield(unittest.TestCase):
def __init__(self, *args):
unittest.TestCase.__init__(self, *args)
self.simdt = 1e-5
self.simtime = 0.5
self.test_id = None
self.model_container = moose.Neutral('/test')
self.data_container = moose.Neutral('/data')
self.data_dir = 'efield_data'
if not os.access(self.data_dir, os.W_OK):
os.mkdir(self.data_dir)
def setUp(self):
self.test_id = uuid.uuid4().int
self.efield = moose.Efield('electrode%d' % (self.test_id), self.model_container)
self.efield.scale = -3.33e4
self.efield.x = 100e-6
self.efield.y = 0.0
self.efield.z = 0.0
self.lfp_table = moose.Table('lfp%d' % (self.test_id), self.data_container)
self.lfp_table.stepMode = 3
self.efield.connect('potential', self.lfp_table, 'inputRequest')
def testMultiCompartments(self):
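# Builds ten passive compartments at the fixed 3-D positions above, injects a staggered current
# pulse into each, and records both the extracellular potential reported by the Efield electrode
# and the individual membrane potentials.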
global comp_pos
vm_tabs = []
comps = []
pulsegens = []
numcomps = 10
for ii in range(numcomps):
comps.append(create_compartment('%s/test%d_comp_%d' % (self.model_container.path, self.test_id, ii)))
comps[-1].x = comp_pos[ii][0]
comps[-1].y = comp_pos[ii][1]
comps[-1].z = comp_pos[ii][2]
comps[-1].connect('ImSrc', self.efield, 'currentDest')
pulsegens.append(create_pulsegen('%s/test%d_pulse_%d' % (self.model_container.path, self.test_id, ii), firstDelay=ii*20e-3))
pulsegens[-1].connect('outputSrc', comps[-1], 'injectMsg')
vm_tabs.append(moose.Table('%s/vm%d_%d' % (self.data_container.path, self.test_id, ii)))
vm_tabs[-1].stepMode = 3
comps[-1].connect('Vm', vm_tabs[-1], 'inputRequest')
moose.context.setClock(0, self.simdt)
moose.context.setClock(1, self.simdt)
moose.context.setClock(2, self.simdt)
moose.context.setClock(3, self.simdt)
moose.context.reset()
moose.context.step(self.simtime)
self.lfp_table.dumpFile('%s/%s.dat' % (self.data_dir, self.lfp_table.name))
for tab in vm_tabs:
tab.dumpFile('%s/%s.dat' % (self.data_dir, tab.name))
print 'LFP saved in %s' % (self.lfp_table.name + '.dat')
if __name__ == '__main__':
unittest.main()
#
# test_efield.py ends here
|
BhallaLab/moose-thalamocortical
|
TESTS/pymoose/test_efield.py
|
Python
|
lgpl-2.1
| 4,572
|
[
"MOOSE"
] |
565ea0db78a678b0ab7fef3ed844ca77b06db10d36848710d0393e423ca813b9
|
"""
=======================================
Signal processing (:mod:`scipy.signal`)
=======================================
Convolution
===========
.. autosummary::
:toctree: generated/
convolve -- N-dimensional convolution.
correlate -- N-dimensional correlation.
fftconvolve -- N-dimensional convolution using the FFT.
convolve2d -- 2-dimensional convolution (more options).
correlate2d -- 2-dimensional correlation (more options).
sepfir2d -- Convolve with a 2-D separable FIR filter.
choose_conv_method -- Chooses faster of FFT and direct convolution methods.
B-splines
=========
.. autosummary::
:toctree: generated/
bspline -- B-spline basis function of order n.
cubic -- B-spline basis function of order 3.
quadratic -- B-spline basis function of order 2.
gauss_spline -- Gaussian approximation to the B-spline basis function.
cspline1d -- Coefficients for 1-D cubic (3rd order) B-spline.
qspline1d -- Coefficients for 1-D quadratic (2nd order) B-spline.
cspline2d -- Coefficients for 2-D cubic (3rd order) B-spline.
qspline2d -- Coefficients for 2-D quadratic (2nd order) B-spline.
cspline1d_eval -- Evaluate a cubic spline at the given points.
qspline1d_eval -- Evaluate a quadratic spline at the given points.
spline_filter -- Smoothing spline (cubic) filtering of a rank-2 array.
Filtering
=========
.. autosummary::
:toctree: generated/
order_filter -- N-dimensional order filter.
medfilt -- N-dimensional median filter.
medfilt2d -- 2-dimensional median filter (faster).
wiener -- N-dimensional wiener filter.
symiirorder1 -- 2nd-order IIR filter (cascade of first-order systems).
symiirorder2 -- 4th-order IIR filter (cascade of second-order systems).
lfilter -- 1-dimensional FIR and IIR digital linear filtering.
lfiltic -- Construct initial conditions for `lfilter`.
lfilter_zi -- Compute an initial state zi for the lfilter function that
-- corresponds to the steady state of the step response.
filtfilt -- A forward-backward filter.
savgol_filter -- Filter a signal using the Savitzky-Golay filter.
deconvolve -- 1-d deconvolution using lfilter.
sosfilt -- 1-dimensional IIR digital linear filtering using
-- a second-order sections filter representation.
sosfilt_zi -- Compute an initial state zi for the sosfilt function that
-- corresponds to the steady state of the step response.
sosfiltfilt -- A forward-backward filter for second-order sections.
hilbert -- Compute 1-D analytic signal, using the Hilbert transform.
hilbert2 -- Compute 2-D analytic signal, using the Hilbert transform.
decimate -- Downsample a signal.
detrend -- Remove linear and/or constant trends from data.
resample -- Resample using Fourier method.
resample_poly -- Resample using polyphase filtering method.
upfirdn -- Upsample, apply FIR filter, downsample.
Filter design
=============
.. autosummary::
:toctree: generated/
bilinear -- Digital filter from an analog filter using
-- the bilinear transform.
findfreqs -- Find array of frequencies for computing filter response.
firls -- FIR filter design using least-squares error minimization.
firwin -- Windowed FIR filter design, with frequency response
-- defined as pass and stop bands.
firwin2 -- Windowed FIR filter design, with arbitrary frequency
-- response.
freqs -- Analog filter frequency response.
freqz -- Digital filter frequency response.
sosfreqz -- Digital filter frequency response for SOS format filter.
group_delay -- Digital filter group delay.
iirdesign -- IIR filter design given bands and gains.
iirfilter -- IIR filter design given order and critical frequencies.
kaiser_atten -- Compute the attenuation of a Kaiser FIR filter, given
-- the number of taps and the transition width at
-- discontinuities in the frequency response.
kaiser_beta -- Compute the Kaiser parameter beta, given the desired
-- FIR filter attenuation.
kaiserord -- Design a Kaiser window to limit ripple and width of
-- transition region.
minimum_phase -- Convert a linear phase FIR filter to minimum phase.
savgol_coeffs -- Compute the FIR filter coefficients for a Savitzky-Golay
-- filter.
remez -- Optimal FIR filter design.
unique_roots -- Unique roots and their multiplicities.
residue -- Partial fraction expansion of b(s) / a(s).
residuez -- Partial fraction expansion of b(z) / a(z).
invres -- Inverse partial fraction expansion for analog filter.
invresz -- Inverse partial fraction expansion for digital filter.
BadCoefficients -- Warning on badly conditioned filter coefficients
Lower-level filter design functions:
.. autosummary::
:toctree: generated/
abcd_normalize -- Check state-space matrices and ensure they are rank-2.
band_stop_obj -- Band Stop Objective Function for order minimization.
besselap -- Return (z,p,k) for analog prototype of Bessel filter.
buttap -- Return (z,p,k) for analog prototype of Butterworth filter.
cheb1ap -- Return (z,p,k) for type I Chebyshev filter.
cheb2ap -- Return (z,p,k) for type II Chebyshev filter.
cmplx_sort -- Sort roots based on magnitude.
ellipap -- Return (z,p,k) for analog prototype of elliptic filter.
lp2bp -- Transform a lowpass filter prototype to a bandpass filter.
lp2bs -- Transform a lowpass filter prototype to a bandstop filter.
lp2hp -- Transform a lowpass filter prototype to a highpass filter.
lp2lp -- Transform a lowpass filter prototype to a lowpass filter.
normalize -- Normalize polynomial representation of a transfer function.
Matlab-style IIR filter design
==============================
.. autosummary::
:toctree: generated/
butter -- Butterworth
buttord
cheby1 -- Chebyshev Type I
cheb1ord
cheby2 -- Chebyshev Type II
cheb2ord
ellip -- Elliptic (Cauer)
ellipord
bessel -- Bessel (no order selection available -- try buttord)
iirnotch -- Design second-order IIR notch digital filter.
iirpeak -- Design second-order IIR peak (resonant) digital filter.
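A minimal usage sketch (not part of the upstream listing), assuming a 1-D
array ``x`` holding the sampled signal:
>>> from scipy.signal import butter, filtfilt
>>> b, a = butter(4, 0.2) # 4th-order lowpass, cutoff at 0.2 * Nyquist
>>> y = filtfilt(b, a, x) # zero-phase (forward-backward) filtering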
Continuous-Time Linear Systems
==============================
.. autosummary::
:toctree: generated/
lti -- Continuous-time linear time invariant system base class.
StateSpace -- Linear time invariant system in state space form.
TransferFunction -- Linear time invariant system in transfer function form.
ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
lsim -- continuous-time simulation of output to linear system.
lsim2 -- like lsim, but `scipy.integrate.odeint` is used.
impulse -- impulse response of linear, time-invariant (LTI) system.
impulse2 -- like impulse, but `scipy.integrate.odeint` is used.
step -- step response of continuous-time LTI system.
step2 -- like step, but `scipy.integrate.odeint` is used.
freqresp -- frequency response of a continuous-time LTI system.
bode -- Bode magnitude and phase data (continuous-time LTI).
Discrete-Time Linear Systems
============================
.. autosummary::
:toctree: generated/
dlti -- Discrete-time linear time invariant system base class.
StateSpace -- Linear time invariant system in state space form.
TransferFunction -- Linear time invariant system in transfer function form.
ZerosPolesGain -- Linear time invariant system in zeros, poles, gain form.
dlsim -- simulation of output to a discrete-time linear system.
dimpulse -- impulse response of a discrete-time LTI system.
dstep -- step response of a discrete-time LTI system.
dfreqresp -- frequency response of a discrete-time LTI system.
dbode -- Bode magnitude and phase data (discrete-time LTI).
LTI Representations
===================
.. autosummary::
:toctree: generated/
tf2zpk -- transfer function to zero-pole-gain.
tf2sos -- transfer function to second-order sections.
tf2ss -- transfer function to state-space.
zpk2tf -- zero-pole-gain to transfer function.
zpk2sos -- zero-pole-gain to second-order sections.
zpk2ss -- zero-pole-gain to state-space.
ss2tf -- state-space to transfer function.
ss2zpk -- state-space to pole-zero-gain.
sos2zpk -- second-order sections to zero-pole-gain.
sos2tf -- second-order sections to transfer function.
cont2discrete -- continuous-time to discrete-time LTI conversion.
place_poles -- pole placement.
Waveforms
=========
.. autosummary::
:toctree: generated/
chirp -- Frequency swept cosine signal, with several freq functions.
gausspulse -- Gaussian modulated sinusoid
max_len_seq -- Maximum length sequence
sawtooth -- Periodic sawtooth
square -- Square wave
sweep_poly -- Frequency swept cosine signal; freq is arbitrary polynomial
unit_impulse -- Discrete unit impulse
Window functions
================
.. autosummary::
:toctree: generated/
get_window -- Return a window of a given length and type.
barthann -- Bartlett-Hann window
bartlett -- Bartlett window
blackman -- Blackman window
blackmanharris -- Minimum 4-term Blackman-Harris window
bohman -- Bohman window
boxcar -- Boxcar window
chebwin -- Dolph-Chebyshev window
cosine -- Cosine window
exponential -- Exponential window
flattop -- Flat top window
gaussian -- Gaussian window
general_gaussian -- Generalized Gaussian window
hamming -- Hamming window
hann -- Hann window
hanning -- Hann window
kaiser -- Kaiser window
nuttall -- Nuttall's minimum 4-term Blackman-Harris window
parzen -- Parzen window
slepian -- Slepian window
triang -- Triangular window
tukey -- Tukey window
Wavelets
========
.. autosummary::
:toctree: generated/
cascade -- compute scaling function and wavelet from coefficients
daub -- return low-pass
morlet -- Complex Morlet wavelet.
qmf -- return quadrature mirror filter from low-pass
ricker -- return ricker wavelet
cwt -- perform continuous wavelet transform
Peak finding
============
.. autosummary::
:toctree: generated/
find_peaks_cwt -- Attempt to find the peaks in the given 1-D array
argrelmin -- Calculate the relative minima of data
argrelmax -- Calculate the relative maxima of data
argrelextrema -- Calculate the relative extrema of data
Spectral Analysis
=================
.. autosummary::
:toctree: generated/
periodogram -- Compute a (modified) periodogram
welch -- Compute a periodogram using Welch's method
csd -- Compute the cross spectral density, using Welch's method
coherence -- Compute the magnitude squared coherence, using Welch's method
spectrogram -- Compute the spectrogram
lombscargle -- Computes the Lomb-Scargle periodogram
vectorstrength -- Computes the vector strength
"""
from __future__ import division, print_function, absolute_import
from . import sigtools
from .waveforms import *
from ._max_len_seq import max_len_seq
from ._upfirdn import upfirdn
# The spline module (a C extension) provides:
# cspline2d, qspline2d, sepfir2d, symiirord1, symiirord2
from .spline import *
from .bsplines import *
from .filter_design import *
from .fir_filter_design import *
from .ltisys import *
from .lti_conversion import *
from .windows import *
from .signaltools import *
from ._savitzky_golay import savgol_coeffs, savgol_filter
from .spectral import *
from .wavelets import *
from ._peak_finding import *
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
|
bkendzior/scipy
|
scipy/signal/__init__.py
|
Python
|
bsd-3-clause
| 12,650
|
[
"Gaussian"
] |
030a40a7e98de0d6a9b04202e62895c096b766d65f7b96eabd2305bcc8b95ba6
|
#!/usr/bin/env python
#
# Author: Qiming Sun <[email protected]>
#
'''
The force from QM region acting on the background MM particles.
'''
from functools import reduce
import numpy
from pyscf import gto, scf, mp, qmmm
mol = gto.M(atom='''
C 1.1879 -0.3829 0.0000
C 0.0000 0.5526 0.0000
O -1.1867 -0.2472 0.0000
H -1.9237 0.3850 0.0000
H 2.0985 0.2306 0.0000
H 1.1184 -1.0093 0.8869
H 1.1184 -1.0093 -0.8869
H -0.0227 1.1812 0.8852
H -0.0227 1.1812 -0.8852
''',
basis='3-21g')
numpy.random.seed(1)
coords = numpy.random.random((5,3)) * 10
charges = (numpy.arange(5) + 1.) * -.1
def force(dm):
# The interaction between QM atoms and MM particles
# \sum_K d/dR (1/|r_K-R|) = \sum_K (r_K-R)/|r_K-R|^3
qm_coords = mol.atom_coords()
qm_charges = mol.atom_charges()
dr = qm_coords[:,None,:] - coords
r = numpy.linalg.norm(dr, axis=2)
g = numpy.einsum('r,R,rRx,rR->Rx', qm_charges, charges, dr, r**-3)
# The interaction between electron density and MM particles
# d/dR <i| (1/|r-R|) |j> = <i| d/dR (1/|r-R|) |j> = <i| -d/dr (1/|r-R|) |j>
# = <d/dr i| (1/|r-R|) |j> + <i| (1/|r-R|) |d/dr j>
for i, q in enumerate(charges):
with mol.with_rinv_origin(coords[i]):
v = mol.intor('int1e_iprinv')
f =(numpy.einsum('ij,xji->x', dm, v) +
numpy.einsum('ij,xij->x', dm, v.conj())) * -q
g[i] += f
# Force = -d/dR
return -g
# The force from HF electron density
# Be careful with the unit of the MM particle coordinates. The gradients are
# computed in atomic units.
mf = qmmm.mm_charge(scf.RHF(mol), coords, charges, unit='Bohr').run()
e1_mf = mf.e_tot
dm = mf.make_rdm1()
mm_force_mf = force(dm)
print('HF force:')
print(mm_force_mf)
# Verify HF force
coords[0,0] += 1e-3
mf = qmmm.mm_charge(scf.RHF(mol), coords, charges, unit='Bohr').run()
e2_mf = mf.e_tot
print(-(e2_mf-e1_mf)/1e-3, '==', mm_force_mf[0,0])
#
# For post-HF methods, the response of HF orbitals needs to be considered in
# the analytical gradients. It is similar to the gradients code implemented in
# the module pyscf.grad.
#
# Below we use MP2 gradients as example to demonstrate how to include the
# orbital response effects in the force for MM particles.
#
# Based on the grad_elec function in pyscf.grad.mp2
def make_rdm1_with_orbital_response(mp):
import time
from pyscf import lib
from pyscf.grad.mp2 import _response_dm1, _index_frozen_active, _shell_prange
from pyscf.mp import mp2
from pyscf.ao2mo import _ao2mo
log = lib.logger.new_logger(mp)
time0 = time.clock(), time.time()
mol = mp.mol
log.debug('Build mp2 rdm1 intermediates')
d1 = mp2._gamma1_intermediates(mp, mp.t2)
doo, dvv = d1
time1 = log.timer_debug1('rdm1 intermediates', *time0)
with_frozen = not (mp.frozen is None or mp.frozen is 0)
OA, VA, OF, VF = _index_frozen_active(mp.get_frozen_mask(), mp.mo_occ)
orbo = mp.mo_coeff[:,OA]
orbv = mp.mo_coeff[:,VA]
nao, nocc = orbo.shape
nvir = orbv.shape[1]
# Partially transform MP2 density matrix and hold it in memory
# The rest transformation are applied during the contraction to ERI integrals
part_dm2 = _ao2mo.nr_e2(mp.t2.reshape(nocc**2,nvir**2),
numpy.asarray(orbv.T, order='F'), (0,nao,0,nao),
's1', 's1').reshape(nocc,nocc,nao,nao)
part_dm2 = (part_dm2.transpose(0,2,3,1) * 4 -
part_dm2.transpose(0,3,2,1) * 2)
offsetdic = mol.offset_nr_by_atom()
diagidx = numpy.arange(nao)
diagidx = diagidx*(diagidx+1)//2 + diagidx
Imat = numpy.zeros((nao,nao))
# 2e AO integrals dot 2pdm
max_memory = max(0, mp.max_memory - lib.current_memory()[0])
blksize = max(1, int(max_memory*.9e6/8/(nao**3*2.5)))
for ia in range(mol.natm):
shl0, shl1, p0, p1 = offsetdic[ia]
ip1 = p0
for b0, b1, nf in _shell_prange(mol, shl0, shl1, blksize):
ip0, ip1 = ip1, ip1 + nf
dm2buf = lib.einsum('pi,iqrj->pqrj', orbo[ip0:ip1], part_dm2)
dm2buf+= lib.einsum('qi,iprj->pqrj', orbo, part_dm2[:,ip0:ip1])
dm2buf = lib.einsum('pqrj,sj->pqrs', dm2buf, orbo)
dm2buf = dm2buf + dm2buf.transpose(0,1,3,2)
dm2buf = lib.pack_tril(dm2buf.reshape(-1,nao,nao)).reshape(nf,nao,-1)
dm2buf[:,:,diagidx] *= .5
shls_slice = (b0,b1,0,mol.nbas,0,mol.nbas,0,mol.nbas)
eri0 = mol.intor('int2e', aosym='s2kl', shls_slice=shls_slice)
Imat += lib.einsum('ipx,iqx->pq', eri0.reshape(nf,nao,-1), dm2buf)
eri0 = None
dm2buf = None
time1 = log.timer_debug1('2e-part grad of atom %d'%ia, *time1)
# Recompute nocc, nvir to include the frozen orbitals and make contraction for
# the 1-particle quantities, see also the kernel function in ccsd_grad module.
mo_coeff = mp.mo_coeff
mo_energy = mp._scf.mo_energy
nao, nmo = mo_coeff.shape
nocc = numpy.count_nonzero(mp.mo_occ > 0)
Imat = reduce(numpy.dot, (mo_coeff.T, Imat, mp._scf.get_ovlp(), mo_coeff)) * -1
dm1mo = numpy.zeros((nmo,nmo))
if with_frozen:
dco = Imat[OF[:,None],OA] / (mo_energy[OF,None] - mo_energy[OA])
dfv = Imat[VF[:,None],VA] / (mo_energy[VF,None] - mo_energy[VA])
dm1mo[OA[:,None],OA] = doo + doo.T
dm1mo[OF[:,None],OA] = dco
dm1mo[OA[:,None],OF] = dco.T
dm1mo[VA[:,None],VA] = dvv + dvv.T
dm1mo[VF[:,None],VA] = dfv
dm1mo[VA[:,None],VF] = dfv.T
else:
dm1mo[:nocc,:nocc] = doo + doo.T
dm1mo[nocc:,nocc:] = dvv + dvv.T
dm1 = reduce(numpy.dot, (mo_coeff, dm1mo, mo_coeff.T))
vhf = mp._scf.get_veff(mp.mol, dm1) * 2
Xvo = reduce(numpy.dot, (mo_coeff[:,nocc:].T, vhf, mo_coeff[:,:nocc]))
Xvo+= Imat[:nocc,nocc:].T - Imat[nocc:,:nocc]
dm1mo += _response_dm1(mp, Xvo)
# Transform to AO basis
dm1 = reduce(numpy.dot, (mo_coeff, dm1mo, mo_coeff.T))
dm1 += mp._scf.make_rdm1(mp.mo_coeff, mp.mo_occ)
return dm1
# The force from MP2 electron density (including orbital response)
m = mp.MP2(mf).run()
e1_mp2 = m.e_tot
dm = make_rdm1_with_orbital_response(m)
mm_force_mp2 = force(dm)
print('MP2 force:')
print(mm_force_mp2)
# Verify MP2 force
coords[0,0] += 1e-3
mf = qmmm.mm_charge(scf.RHF(mol), coords, charges, unit='Bohr').run()
m = mp.MP2(mf).run()
e2_mp2 = m.e_tot
print(-(e2_mp2-e1_mp2)/1e-3, '==', mm_force_mp2[0,0])
|
sunqm/pyscf
|
examples/qmmm/30-force_on_mm_particles.py
|
Python
|
apache-2.0
| 6,522
|
[
"PySCF"
] |
f8009fe5a86e259e088efde319ec262d284dbdcc5cce5123139509a567f53eb1
|
from rest_framework import viewsets, permissions, mixins
from rest_framework.exceptions import PermissionDenied
from timetable.models import WorkingHour
from users.models import Doctor, Patient, User
from .models import Visit
from .serializers import VisitSerializer
class VisitViewSet(viewsets.GenericViewSet,
mixins.RetrieveModelMixin,
mixins.ListModelMixin):
serializer_class = VisitSerializer
permission_classes = (permissions.IsAuthenticated,)
def get_queryset(self):
"""
:return: visits queryset according to user.role.
Patients and doctors can view only their own visits.
"""
user = self.request.user
if (user.role == 'doctor' and
user.doctor_set.first().is_chief_doctor):
return Visit.objects.all()
if user.role == 'doctor':
return user.doctor_set.first().visit_set.all()
if user.role == 'patient':
return user.patient_set.first().visit_set.all()
raise PermissionDenied(
'Only patients and their doctors can view visits')
|
vechnoe/clinic
|
src/apps/reception_office/views.py
|
Python
|
mit
| 1,116
|
[
"VisIt"
] |
75453aaf228a21d7264f56f933dfc3d3b78d27001958d0d415f625618a4d2491
|
import pytest
from ceph_deploy.cli import get_parser
COMP_FLAGS = [
'mon', 'mds', 'rgw', 'osd', 'common', 'all'
]
class TestParserInstall(object):
def setup(self):
self.parser = get_parser()
def test_install_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('install --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy install' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
def test_install_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('install'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_install_one_host(self):
args = self.parser.parse_args('install host1'.split())
assert args.host == ['host1']
def test_install_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args(['install'] + hostnames)
assert frozenset(args.host) == frozenset(hostnames)
def test_install_release_default_is_none(self):
args = self.parser.parse_args('install host1'.split())
assert args.release is None
assert args.version_kind == "stable"
def test_install_release(self):
args = self.parser.parse_args('install --release hammer host1'.split())
assert args.release == "hammer"
assert args.version_kind == "stable"
@pytest.mark.skipif(reason="No release name sanity checking yet")
def test_install_release_bad_codename(self):
args = self.parser.parse_args('install --release cephalopod host1'.split())
assert args.release != "cephalopod"
def test_install_testing_default_is_none(self):
args = self.parser.parse_args('install host1'.split())
assert args.testing is None
assert args.version_kind == "stable"
def test_install_testing_true(self):
args = self.parser.parse_args('install --testing host1'.split())
assert len(args.testing) == 0
assert args.version_kind == "testing"
def test_install_dev_disabled_by_default(self):
args = self.parser.parse_args('install host1'.split())
# dev defaults to master, but version_kind nullifies it
assert args.dev == "master"
assert args.version_kind == "stable"
def test_install_dev_custom_version(self):
args = self.parser.parse_args('install --dev v0.80.8 host1'.split())
assert args.dev == "v0.80.8"
assert args.version_kind == "dev"
@pytest.mark.skipif(reason="test reflects desire, but not code reality")
def test_install_dev_option_default_is_master(self):
# I don't think this is the way argparse works.
args = self.parser.parse_args('install --dev host1'.split())
assert args.dev == "master"
assert args.version_kind == "dev"
def test_install_release_testing_mutex(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('install --release hammer --testing host1'.split())
out, err = capsys.readouterr()
assert 'not allowed with argument' in err
def test_install_release_dev_mutex(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('install --release hammer --dev master host1'.split())
out, err = capsys.readouterr()
assert 'not allowed with argument' in err
def test_install_testing_dev_mutex(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('install --testing --dev master host1'.split())
out, err = capsys.readouterr()
assert 'not allowed with argument' in err
@pytest.mark.parametrize('comp', COMP_FLAGS)
def test_install_component_default_is_false(self, comp):
args = self.parser.parse_args('install host1'.split())
assert getattr(args, 'install_%s' % comp) is False
@pytest.mark.parametrize('comp', COMP_FLAGS)
def test_install_component_true(self, comp):
args = self.parser.parse_args(('install --%s host1' % comp).split())
assert getattr(args, 'install_%s' % comp) is True
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12147")
def test_install_multi_component(self):
args = self.parser.parse_args(('install --mon --rgw host1').split())
assert args.install_mon
assert args.install_rgw
def test_install_adjust_repos_default_is_true(self):
args = self.parser.parse_args('install host1'.split())
assert args.adjust_repos
def test_install_adjust_repos_false(self):
args = self.parser.parse_args('install --no-adjust-repos host1'.split())
assert not args.adjust_repos
@pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12147")
def test_install_adjust_repos_false_with_custom_release(self):
args = self.parser.parse_args('install --release firefly --no-adjust-repos host1'.split())
assert args.release == "firefly"
assert not args.adjust_repos
def test_install_repo_default_is_false(self):
args = self.parser.parse_args('install host1'.split())
assert not args.repo
def test_install_repo_true(self):
args = self.parser.parse_args('install --repo host1'.split())
assert args.repo
def test_install_local_mirror_default_is_none(self):
args = self.parser.parse_args('install host1'.split())
assert args.local_mirror is None
def test_install_local_mirror_custom_path(self):
args = self.parser.parse_args('install --local-mirror /mnt/mymirror host1'.split())
assert args.local_mirror == "/mnt/mymirror"
def test_install_repo_url_default_is_none(self):
args = self.parser.parse_args('install host1'.split())
assert args.repo_url is None
def test_install_repo_url_custom_path(self):
args = self.parser.parse_args('install --repo-url https://ceph.com host1'.split())
assert args.repo_url == "https://ceph.com"
def test_install_gpg_url_default_is_none(self):
args = self.parser.parse_args('install host1'.split())
assert args.gpg_url is None
def test_install_gpg_url_custom_path(self):
args = self.parser.parse_args('install --gpg-url https://ceph.com/key host1'.split())
assert args.gpg_url == "https://ceph.com/key"
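# Illustrative sketch (not part of ceph-deploy): the "not allowed with argument"
# failures asserted above are plain argparse behaviour for a mutually exclusive
# group, which get_parser() is expected to set up for --release/--testing/--dev.
# The options below are simplified stand-ins:
#
# import argparse
# p = argparse.ArgumentParser()
# g = p.add_mutually_exclusive_group()
# g.add_argument('--release')
# g.add_argument('--testing', action='store_true')
# p.parse_args('--release hammer --testing'.split())  # SystemExit: not allowed with argument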
|
SUSE/ceph-deploy-to-be-deleted
|
ceph_deploy/tests/parser/test_install.py
|
Python
|
mit
| 6,476
|
[
"Firefly"
] |
bfb67847a2924ff1a29523598fab4924e6ebc1e77cb9b58175c909adeda1b6a7
|
#!/usr/local/bin/python -i
# preceding line should have the path for Python on your machine
# simple.py
# Purpose: mimic operation of couple/simple/simple.cpp via Python
# Syntax: simple.py in.lammps
# in.lammps = LAMMPS input script
import sys
# parse command line
argv = sys.argv
if len(argv) != 2:
print "Syntax: simple.py in.lammps"
sys.exit()
infile = sys.argv[1]
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile one line at a time
lines = open(infile,'r').readlines()
for line in lines: lmp.command(line)
# run 10 more steps
# get coords from LAMMPS
# change coords of 1st atom
# put coords back into LAMMPS
# run a single step with changed coords
lmp.command("run 10")
x = lmp.gather_atoms("x",1,3)
epsilon = 0.1
x[0] += epsilon
lmp.scatter_atoms("x",1,3,x)
lmp.command("run 1");
# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
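# A follow-on sketch (not part of the original example): the same gather/scatter
# round trip applied to every atom. It assumes the lmp instance above and uses
# get_natoms(), which the LAMMPS python wrapper also provides.
natoms = lmp.get_natoms()
x = lmp.gather_atoms("x",1,3)
for i in xrange(3*natoms): x[i] += epsilon   # rigid shift of all coordinates
lmp.scatter_atoms("x",1,3,x)
lmp.command("run 1")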
|
jcarlson23/lammps
|
python/examples/simple.py
|
Python
|
gpl-2.0
| 1,043
|
[
"LAMMPS"
] |
78d5a6ad27180ee89d51fa45f37ea5ffc4583cc7a88e0b7d73bcbfb69ad07a9b
|
# Copyright 2012, 2013 The GalSim developers:
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
#
# GalSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GalSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalSim. If not, see <http://www.gnu.org/licenses/>
#
import galsim
valid_value_types = {
# The values are tuples with:
# - the build function to call
# - a list of types for which the type is valid
'List' : ('_GenerateFromList',
[ float, int, bool, str, galsim.Angle, galsim.Shear, galsim.PositionD ]),
'Eval' : ('_GenerateFromEval',
[ float, int, bool, str, galsim.Angle, galsim.Shear, galsim.PositionD ]),
'Catalog' : ('_GenerateFromCatalog', [ float, int, bool, str ]),
'Dict' : ('_GenerateFromDict', [ float, int, bool, str ]),
'FitsHeader' : ('_GenerateFromFitsHeader', [ float, int, bool, str ]),
'Sequence' : ('_GenerateFromSequence', [ float, int, bool ]),
'Random' : ('_GenerateFromRandom', [ float, int, bool, galsim.Angle ]),
'RandomGaussian' : ('_GenerateFromRandomGaussian', [ float ]),
'RandomDistribution' : ('_GenerateFromRandomDistribution', [ float ]),
'RandomCircle' : ('_GenerateFromRandomCircle', [ galsim.PositionD ]),
'NumberedFile' : ('_GenerateFromNumberedFile', [ str ]),
'FormattedStr' : ('_GenerateFromFormattedStr', [ str ]),
'Rad' : ('_GenerateFromRad', [ galsim.Angle ]),
'Radians' : ('_GenerateFromRad', [ galsim.Angle ]),
'Deg' : ('_GenerateFromDeg', [ galsim.Angle ]),
'Degrees' : ('_GenerateFromDeg', [ galsim.Angle ]),
'E1E2' : ('_GenerateFromE1E2', [ galsim.Shear ]),
'EBeta' : ('_GenerateFromEBeta', [ galsim.Shear ]),
'G1G2' : ('_GenerateFromG1G2', [ galsim.Shear ]),
'GBeta' : ('_GenerateFromGBeta', [ galsim.Shear ]),
'Eta1Eta2' : ('_GenerateFromEta1Eta2', [ galsim.Shear ]),
'EtaBeta' : ('_GenerateFromEtaBeta', [ galsim.Shear ]),
'QBeta' : ('_GenerateFromQBeta', [ galsim.Shear ]),
'XY' : ('_GenerateFromXY', [ galsim.PositionD ]),
'RTheta' : ('_GenerateFromRTheta', [ galsim.PositionD ]),
'NFWHaloShear' : ('_GenerateFromNFWHaloShear', [ galsim.Shear ]),
'NFWHaloMagnification' : ('_GenerateFromNFWHaloMagnification', [ float ]),
'PowerSpectrumShear' : ('_GenerateFromPowerSpectrumShear', [ galsim.Shear ]),
'PowerSpectrumMagnification' : ('_GenerateFromPowerSpectrumMagnification', [ float ]),
}
def ParseValue(config, param_name, base, value_type):
"""@brief Read or generate a parameter value from config.
@return value, safe
"""
param = config[param_name]
#print 'ParseValue for param_name = ',param_name,', value_type = ',str(value_type)
#print 'param = ',param
# First see if we can assign by param by a direct constant value
if isinstance(param, value_type):
#print param_name,' = ',param
return param, True
elif not isinstance(param, dict):
if value_type is galsim.Angle:
# Angle is a special case. Angles are specified with a final string to
# declare what unit to use.
val = _GetAngleValue(param, param_name)
elif value_type is bool:
# For bool, we allow a few special string conversions
val = _GetBoolValue(param, param_name)
else:
# Make sure strings are converted to float (or other type) if necessary.
# In particular things like 1.e6 aren't converted to float automatically
# by the yaml reader. (Although I think this is a bug.)
val = value_type(param)
# Save the converted type for next time.
config[param_name] = val
#print param_name,' = ',val
return val, True
elif 'type' not in param:
raise AttributeError(
"%s.type attribute required in config for non-constant parameter %s."%(
param_name,param_name))
else:
# Otherwise, we need to generate the value according to its type
# (See valid_value_types defined at the top of the file.)
type = param['type']
#print 'type = ',type
# First check if the value_type is valid.
if type not in valid_value_types:
raise AttributeError(
"Unrecognized type = %s specified for parameter %s"%(type,param_name))
if value_type not in valid_value_types[type][1]:
raise AttributeError(
"Invalid value_type = %s specified for parameter %s with type = %s."%(
value_type, param_name, type))
generate_func = eval(valid_value_types[type][0])
#print 'generate_func = ',generate_func
val, safe = generate_func(param, param_name, base, value_type)
#print 'returned val, safe = ',val,safe
# Make sure we really got the right type back. (Just in case...)
if not isinstance(val,value_type):
val = value_type(val)
param['current_val'] = val
#print param_name,' = ',val
return val, safe
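# Illustrative sketch (not part of GalSim): a plain constant is returned as-is,
# while a dict with a 'type' key is dispatched to the matching _GenerateFrom*
# function. The config dict can double as base here, since G1G2 needs no rng:
#
#     config = { 'scale' : 0.2,
#                'shear' : { 'type' : 'G1G2', 'g1' : 0.01, 'g2' : 0.02 } }
#     scale, safe = ParseValue(config, 'scale', config, float)         # (0.2, True)
#     shear, safe = ParseValue(config, 'shear', config, galsim.Shear)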
def _GetAngleValue(param, param_name):
""" @brief Convert a string consisting of a value and an angle unit into an Angle.
"""
try :
value, unit = param.rsplit(None,1)
value = float(value)
unit = galsim.angle.get_angle_unit(unit)
return galsim.Angle(value, unit)
except Exception as e:
raise AttributeError("Unable to parse %s param = %s as an Angle."%(param_name,param))
def _GetPositionValue(param, param_name):
""" @brief Convert a string that looks like "a,b" into a galsim.PositionD.
"""
try :
x, y = param.split(',')
        x = float(x.strip())
        y = float(y.strip())
return galsim.PositionD(x,y)
except :
raise AttributeError("Unable to parse %s param = %s as a PositionD."%(param_name,param))
def _GetBoolValue(param, param_name):
""" @brief Convert a string to a bool
"""
#print 'GetBoolValue: param = ',param
if isinstance(param,str):
#print 'param.strip.upper = ',param.strip().upper()
if param.strip().upper() in [ 'TRUE', 'YES' ]:
return True
elif param.strip().upper() in [ 'FALSE', 'NO' ]:
return False
else:
try:
val = bool(int(param))
return val
except:
raise AttributeError("Unable to parse %s param = %s as a bool."%(param_name,param))
else:
try:
val = bool(param)
return val
except:
raise AttributeError("Unable to parse %s param = %s as a bool."%(param_name,param))
def CheckAllParams(param, param_name, req={}, opt={}, single=[], ignore=[]):
"""@brief Check that the parameters for a particular item are all valid
@return a dict, get, with get[key] = value_type for all keys to get
"""
get = {}
valid_keys = req.keys() + opt.keys()
# Check required items:
for (key, value_type) in req.items():
if key in param:
get[key] = value_type
else:
raise AttributeError(
"Attribute %s is required for %s.type = %s"%(key,param_name,param['type']))
# Check optional items:
for (key, value_type) in opt.items():
if key in param:
get[key] = value_type
    # Check items for which exactly 1 should be defined:
for s in single:
if not s: # If no items in list, don't require one of them to be present.
break
valid_keys += s.keys()
count = 0
for (key, value_type) in s.items():
if key in param:
count += 1
if count > 1:
raise AttributeError(
"Only one of the attributes %s is allowed for %s.type = %s"%(
s.keys(),param_name,param['type']))
get[key] = value_type
if count == 0:
raise AttributeError(
"One of the attributes %s is required for %s.type = %s"%(
s.keys(),param_name,param['type']))
# Check that there aren't any extra keys in param:
valid_keys += ignore
valid_keys += [ 'type', 'current_val' ] # These might be there, and it's ok.
    valid_keys += [ '#' ] # When we read in json files, these represent comments
for key in param.keys():
if key not in valid_keys:
raise AttributeError(
"Unexpected attribute %s found for parameter %s"%(key,param_name))
return get
def GetAllParams(param, param_name, base, req={}, opt={}, single=[], ignore=[]):
"""@brief Check and get all the parameters for a particular item
@return kwargs, safe
"""
get = CheckAllParams(param,param_name,req,opt,single,ignore)
kwargs = {}
safe = True
for (key, value_type) in sorted(get.items()):
val, safe1 = ParseValue(param, key, base, value_type)
safe = safe and safe1
kwargs[key] = val
# Just in case there are unicode strings. python 2.6 doesn't like them in kwargs.
kwargs = dict([(k.encode('utf-8'), v) for k,v in kwargs.iteritems()])
return kwargs, safe
def GetCurrentValue(config, param_name):
"""@brief Return the current value of a parameter (either stored or a simple value)
"""
param = config[param_name]
if isinstance(param, dict):
return param['current_val']
else:
return param
#
# Now all the GenerateFrom functions:
#
def _GenerateFromG1G2(param, param_name, base, value_type):
"""@brief Return a Shear constructed from given (g1, g2)
"""
req = { 'g1' : float, 'g2' : float }
kwargs, safe = GetAllParams(param, param_name, base, req=req)
#print 'Generate from G1G2: kwargs = ',kwargs
return galsim.Shear(**kwargs), safe
def _GenerateFromE1E2(param, param_name, base, value_type):
"""@brief Return a Shear constructed from given (e1, e2)
"""
req = { 'e1' : float, 'e2' : float }
kwargs, safe = GetAllParams(param, param_name, base, req=req)
return galsim.Shear(**kwargs), safe
def _GenerateFromEta1Eta2(param, param_name, base, value_type):
"""@brief Return a Shear constructed from given (eta1, eta2)
"""
req = { 'eta1' : float, 'eta2' : float }
kwargs, safe = GetAllParams(param, param_name, base, req=req)
return galsim.Shear(**kwargs), safe
def _GenerateFromGBeta(param, param_name, base, value_type):
"""@brief Return a Shear constructed from given (g, beta)
"""
req = { 'g' : float, 'beta' : galsim.Angle }
kwargs, safe = GetAllParams(param, param_name, base, req=req)
return galsim.Shear(**kwargs), safe
def _GenerateFromEBeta(param, param_name, base, value_type):
"""@brief Return a Shear constructed from given (e, beta)
"""
req = { 'e' : float, 'beta' : galsim.Angle }
kwargs, safe = GetAllParams(param, param_name, base, req=req)
return galsim.Shear(**kwargs), safe
def _GenerateFromEtaBeta(param, param_name, base, value_type):
"""@brief Return a Shear constructed from given (eta, beta)
"""
req = { 'eta' : float, 'beta' : galsim.Angle }
kwargs, safe = GetAllParams(param, param_name, base, req=req)
return galsim.Shear(**kwargs), safe
def _GenerateFromQBeta(param, param_name, base, value_type):
"""@brief Return a Shear constructed from given (q, beta)
"""
req = { 'q' : float, 'beta' : galsim.Angle }
kwargs, safe = GetAllParams(param, param_name, base, req=req)
return galsim.Shear(**kwargs), safe
def _GenerateFromXY(param, param_name, base, value_type):
"""@brief Return a PositionD constructed from given (x,y)
"""
req = { 'x' : float, 'y' : float }
kwargs, safe = GetAllParams(param, param_name, base, req=req)
return galsim.PositionD(**kwargs), safe
def _GenerateFromRTheta(param, param_name, base, value_type):
"""@brief Return a PositionD constructed from given (r,theta)
"""
req = { 'r' : float, 'theta' : galsim.Angle }
kwargs, safe = GetAllParams(param, param_name, base, req=req)
r = kwargs['r']
theta = kwargs['theta']
import math
return galsim.PositionD(r*math.cos(theta.rad()), r*math.sin(theta.rad())), safe
def _GenerateFromRad(param, param_name, base, value_type):
"""@brief Return an Angle constructed from given theta in radians
"""
req = { 'theta' : float }
kwargs, safe = GetAllParams(param, param_name, base, req=req)
return kwargs['theta'] * galsim.radians, safe
def _GenerateFromDeg(param, param_name, base, value_type):
"""@brief Return an Angle constructed from given theta in degrees
"""
req = { 'theta' : float }
kwargs, safe = GetAllParams(param, param_name, base, req=req)
return kwargs['theta'] * galsim.degrees, safe
def _GenerateFromCatalog(param, param_name, base, value_type):
"""@brief Return a value read from an input catalog
"""
if 'catalog' not in base:
raise ValueError("No input catalog available for %s.type = Catalog"%param_name)
if 'num' in param:
num, safe = ParseValue(param, 'num', base, int)
else:
num, safe = (0, True)
if num < 0:
raise ValueError("Invalid num < 0 supplied for Catalog: num = %d"%num)
if num >= len(base['catalog']):
raise ValueError("Invalid num supplied for Catalog (too large): num = %d"%num)
input_cat = base['catalog'][num]
# Setup the indexing sequence if it hasn't been specified.
# The normal thing with a Catalog is to just use each object in order,
# so we don't require the user to specify that by hand. We can do it for them.
SetDefaultIndex(param, input_cat.nobjects)
# Coding note: the and/or bit is equivalent to a C ternary operator:
# input_cat.isfits ? str : int
# which of course doesn't exist in python. This does the same thing (so long as the
# middle item evaluates to true).
req = { 'col' : input_cat.isfits and str or int , 'index' : int }
kwargs, safe1 = GetAllParams(param, param_name, base, req=req, ignore=['num'])
safe = safe and safe1
if value_type is str:
val = input_cat.get(**kwargs)
elif value_type is float:
val = input_cat.getFloat(**kwargs)
elif value_type is int:
val = input_cat.getInt(**kwargs)
elif value_type is bool:
val = _GetBoolValue(input_cat.get(**kwargs),param_name)
#print 'Catalog: ',val
return val, safe
def _GenerateFromDict(param, param_name, base, value_type):
"""@brief Return a value read from an input dict.
"""
if 'dict' not in base:
raise ValueError("No input dict available for %s.type = Dict"%param_name)
req = { 'key' : str }
opt = { 'num' : int }
kwargs, safe = GetAllParams(param, param_name, base, req=req, opt=opt)
key = kwargs['key']
num = kwargs.get('num',0)
if num < 0:
raise ValueError("Invalid num < 0 supplied for Dict: num = %d"%num)
if num >= len(base['dict']):
raise ValueError("Invalid num supplied for Dict (too large): num = %d"%num)
d = base['dict'][num]
return d.get(key), safe
def _GenerateFromFitsHeader(param, param_name, base, value_type):
"""@brief Return a value read from a FITS header
"""
if 'fits_header' not in base:
raise ValueError("No fits header available for %s.type = FitsHeader"%param_name)
req = { 'key' : str }
opt = { 'num' : int }
    kwargs, safe = GetAllParams(param, param_name, base, req=req, opt=opt)
key = kwargs['key']
num = kwargs.get('num',0)
if num < 0:
raise ValueError("Invalid num < 0 supplied for FitsHeader: num = %d"%num)
if num >= len(base['fits_header']):
raise ValueError("Invalid num supplied for FitsHeader (too large): num = %d"%num)
header = base['fits_header'][num]
if key not in header.keys():
raise ValueError("key %s not found in the FITS header in %s"%(key,kwargs['file_name']))
return header[key], safe
def _GenerateFromRandom(param, param_name, base, value_type):
"""@brief Return a random value drawn from a uniform distribution
"""
if 'rng' not in base:
raise ValueError("No base['rng'] available for %s.type = Random"%param_name)
rng = base['rng']
ud = galsim.UniformDeviate(rng)
# Each value_type works a bit differently:
if value_type is galsim.Angle:
import math
CheckAllParams(param, param_name)
val = ud() * 2 * math.pi * galsim.radians
#print 'Random angle = ',val
return val, False
elif value_type is bool:
CheckAllParams(param, param_name)
val = ud() < 0.5
#print 'Random bool = ',val
return val, False
else:
req = { 'min' : value_type , 'max' : value_type }
kwargs, safe = GetAllParams(param, param_name, base, req=req)
min = kwargs['min']
max = kwargs['max']
if value_type is int:
import math
val = int(math.floor(ud() * (max-min+1))) + min
# In case ud() == 1
if val > max: val = max
else:
val = ud() * (max-min) + min
#print 'Random = ',val
return val, False
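# Illustrative sketch (not part of GalSim): Random values need an rng in base;
# min/max are required for float and int, but not for bool or Angle. The seed
# below is arbitrary:
#
#     base = { 'rng' : galsim.BaseDeviate(1234) }
#     config = { 'flux' : { 'type' : 'Random', 'min' : 10., 'max' : 100. } }
#     flux, safe = ParseValue(config, 'flux', base, float)   # safe is always False here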
def _GenerateFromRandomGaussian(param, param_name, base, value_type):
"""@brief Return a random value drawn from a Gaussian distribution
"""
if 'rng' not in base:
raise ValueError("No base['rng'] available for %s.type = RandomGaussian"%param_name)
rng = base['rng']
req = { 'sigma' : float }
opt = { 'mean' : float, 'min' : float, 'max' : float }
kwargs, safe = GetAllParams(param, param_name, base, req=req, opt=opt)
sigma = kwargs['sigma']
if 'gd' in base:
# Minor subtlety here. GaussianDeviate requires two random numbers to
# generate a single Gaussian deviate. But then it gets a second
# deviate for free. So it's more efficient to store gd than to make
# a new one each time. So check if we did that.
gd = base['gd']
if base['current_gdsigma'] != sigma:
gd.setSigma(sigma)
base['current_gdsigma'] = sigma
else:
# Otherwise, just go ahead and make a new one.
gd = galsim.GaussianDeviate(rng,sigma=sigma)
base['gd'] = gd
base['current_gdsigma'] = sigma
if 'min' in kwargs or 'max' in kwargs:
# Clip at min/max.
# However, special cases if min == mean or max == mean
# -- can use fabs to double the chances of falling in the range.
mean = kwargs.get('mean',0.)
min = kwargs.get('min',-float('inf'))
max = kwargs.get('max',float('inf'))
do_abs = False
do_neg = False
if min == mean:
do_abs = True
max -= mean
min = -max
elif max == mean:
do_abs = True
do_neg = True
min -= mean
max = -min
else:
min -= mean
max -= mean
# Emulate a do-while loop
#print 'sigma = ',sigma
import math
while True:
val = gd()
#print 'val = ',val
if do_abs: val = math.fabs(val)
if val >= min and val <= max: break
if do_neg: val = -val
val += mean
else:
val = gd()
if 'mean' in kwargs: val += kwargs['mean']
#print 'RandomGaussian: ',val
return val, False
def _GenerateFromRandomDistribution(param, param_name, base, value_type):
"""@brief Return a random value drawn from a user-defined probability distribution
"""
if 'rng' not in base:
raise ValueError("No rng available for %s.type = RandomDistribution"%param_name)
rng = base['rng']
opt = {'function' : str, 'interpolant' : str, 'npoints' : int,
'x_min' : float, 'x_max' : float }
kwargs, safe = GetAllParams(param, param_name, base, opt=opt)
if 'distdev' in base:
# The overhead for making a DistDeviate is large enough that we'd rather not do it every
# time, so first check if we've already made one:
distdev = base['distdev']
if base['distdev_kwargs'] != kwargs:
distdev=galsim.DistDeviate(rng,**kwargs)
base['distdev'] = distdev
base['distdev_kwargs'] = kwargs
else:
# Otherwise, just go ahead and make a new one.
distdev=galsim.DistDeviate(rng,**kwargs)
base['distdev'] = distdev
base['distdev_kwargs'] = kwargs
# Typically, the rng will change between successive calls to this, so reset the
# seed. (The other internal calculations don't need to be redone unless the rest of the
# kwargs have been changed.)
distdev.reset(rng)
val = distdev()
#print 'distdev = ',val
return val, False
def _GenerateFromRandomCircle(param, param_name, base, value_type):
"""@brief Return a PositionD drawn from a circular top hat distribution.
"""
if 'rng' not in base:
raise ValueError("No base['rng'] available for %s.type = RandomCircle"%param_name)
rng = base['rng']
req = { 'radius' : float }
opt = { 'inner_radius' : float, 'center' : galsim.PositionD }
kwargs, safe = GetAllParams(param, param_name, base, req=req, opt=opt)
radius = kwargs['radius']
ud = galsim.UniformDeviate(rng)
max_rsq = radius**2
if 'inner_radius' in kwargs:
inner_radius = kwargs['inner_radius']
min_rsq = inner_radius**2
else:
min_rsq = 0.
# Emulate a do-while loop
while True:
x = (2*ud()-1) * radius
y = (2*ud()-1) * radius
#print 'x,y = ',x,y
rsq = x**2 + y**2
if rsq >= min_rsq and rsq <= max_rsq: break
pos = galsim.PositionD(x,y)
if 'center' in kwargs:
pos += kwargs['center']
#print 'RandomCircle: ',pos
return pos, False
def _GenerateFromSequence(param, param_name, base, value_type):
"""@brief Return next in a sequence of integers
"""
#print 'Start Sequence for ',param_name,' -- param = ',param
opt = { 'first' : value_type, 'last' : value_type, 'step' : value_type,
'repeat' : int, 'nitems' : int }
kwargs, safe = GetAllParams(param, param_name, base, opt=opt)
step = kwargs.get('step',1)
first = kwargs.get('first',0)
repeat = kwargs.get('repeat',1)
last = kwargs.get('last',None)
nitems = kwargs.get('nitems',None)
#print 'first, step, last, repeat, nitems = ',first,step,last,repeat,nitems
if repeat <= 0:
raise ValueError(
"Invalid repeat=%d (must be > 0) for %s.type = Sequence"%(repeat,param_name))
if last is not None and nitems is not None:
raise AttributeError(
"At most one of the attributes last and nitems is allowed for %s.type = Sequence"%(
param_name))
if value_type is bool:
# Then there are only really two valid sequences: Either 010101... or 101010...
# Aside from the repeat value of course.
if first:
first = 1
step = -1
nitems = 2
else:
first = 0
step = 1
nitems = 2
#print 'bool sequence: first, step, repeat, n => ',first,step,repeat,nitems
elif value_type is float:
if last is not None:
nitems = int( (last-first)/step + 0.5 ) + 1
#print 'float sequence: first, step, repeat, n => ',first,step,repeat,nitems
else:
if last is not None:
nitems = (last - first)/step + 1
#print 'int sequence: first, step, repeat, n => ',first,step,repeat,nitems
k = base['seq_index']
#print 'k = ',k
k = k / repeat
#print 'k/repeat = ',k
if nitems is not None and nitems > 0:
#print 'nitems = ',nitems
k = k % nitems
#print 'k%nitems = ',k
index = first + k*step
#print 'first + k*step = ',index
return index, False
def _GenerateFromNumberedFile(param, param_name, base, value_type):
"""@brief Return a file_name using a root, a number, and an extension
"""
#print 'Start NumberedFile for ',param_name,' -- param = ',param
if 'num' not in param:
param['num'] = { 'type' : 'Sequence' }
req = { 'root' : str , 'num' : int }
opt = { 'ext' : str , 'digits' : int }
kwargs, safe = GetAllParams(param, param_name, base, req=req, opt=opt)
template = kwargs['root']
if 'digits' in kwargs:
template += '%%0%dd'%kwargs['digits']
else:
template += '%d'
if 'ext' in kwargs:
template += kwargs['ext']
#print 'template = ',template
s = eval("'%s'%%%d"%(template,kwargs['num']))
#print 'num = ',kwargs['num']
#print 's = ',s
return s, safe
def _GenerateFromFormattedStr(param, param_name, base, value_type):
"""@brief Create a string from a format string
"""
#print 'Start FormattedStr for ',param_name,' -- param = ',param
req = { 'format' : str }
# Ignore items for now, we'll deal with it differently.
ignore = [ 'items' ]
params, safe = GetAllParams(param, param_name, base, req=req, ignore=ignore)
#print 'params = ',params
format = params['format']
#print 'format = ',format
# Check that items is present and is a list.
if 'items' not in param:
raise AttributeError("Attribute items is required for %s.type = FormattedStr"%param_name)
items = param['items']
if not isinstance(items,list):
raise AttributeError("items entry for parameter %s is not a list."%param_name)
# Figure out what types we are expecting for the list elements:
tokens = format.split('%')
val_types = []
skip = False
for token in tokens[1:]: # skip first one.
# It we have set skip, then skip this one.
if skip:
skip = False
continue
# If token == '', then this is a %% in the original string. Skip this and the next token.
if len(token) == 0:
skip = True
continue
token = token.lstrip('0123456789lLh') # ignore field size, and long/short specification
if len(token) == 0:
raise ValueError("Unable to parse '%s' as a valid format string"%format)
if token[0].lower() in 'diouxX':
val_types.append(int)
elif token[0].lower() in 'eEfFgG':
val_types.append(float)
elif token[0].lower() in 'rs':
val_types.append(str)
else:
raise ValueError("Unable to parse '%s' as a valid format string"%format)
#print 'val_types = ',val_types
if len(val_types) != len(items):
raise ValueError(
"Number of items for FormatStr (%d) does not match number expected from "%len(items)+
"format string (%d)"%len(val_types))
vals = []
for index in range(len(items)):
#print 'index = ',index,', val_type = ',val_types[index]
val, safe1 = ParseValue(items, index, base, val_types[index])
#print 'val = ',val
safe = safe and safe1
vals.append(val)
#print 'vals = ',vals
final_str = format%tuple(vals)
#print 'final_str = ',final_str
return final_str, safe
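# Illustrative sketch (not part of GalSim): the conversions in the format string
# ('%s' and '%04d' below) determine the types expected for the items list:
#
#     config = { 'file_name' : { 'type' : 'FormattedStr',
#                                'format' : '%s_%04d.fits',
#                                'items' : [ 'image', 3 ] } }
#     name, safe = ParseValue(config, 'file_name', config, str)   # 'image_0003.fits'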
def _GenerateFromNFWHaloShear(param, param_name, base, value_type):
"""@brief Return a shear calculated from an NFWHalo object.
"""
if 'sky_pos' not in base:
raise ValueError("NFWHaloShear requested, but no position defined.")
pos = base['sky_pos']
#print 'nfw pos = ',pos
if 'gal' not in base or 'redshift' not in base['gal']:
raise ValueError("NFWHaloShear requested, but no gal.redshift defined.")
redshift = GetCurrentValue(base['gal'],'redshift')
if 'nfw_halo' not in base:
raise ValueError("NFWHaloShear requested, but no input.nfw_halo defined.")
opt = { 'num' : int }
kwargs = GetAllParams(param, param_name, base, opt=opt)[0]
num = kwargs.get('num',0)
if num < 0:
raise ValueError("Invalid num < 0 supplied for NFWHalowShear: num = %d"%num)
if num >= len(base['nfw_halo']):
raise ValueError("Invalid num supplied for NFWHaloShear (too large): num = %d"%num)
nfw_halo = base['nfw_halo'][num]
#print 'NFWHaloShear: pos = ',pos,' z = ',redshift
try:
g1,g2 = nfw_halo.getShear(pos,redshift)
#print 'g1,g2 = ',g1,g2
shear = galsim.Shear(g1=g1,g2=g2)
except Exception as e:
#print e
import warnings
warnings.warn("Warning: NFWHalo shear is invalid -- probably strong lensing! " +
"Using shear = 0.")
shear = galsim.Shear(g1=0,g2=0)
#print 'shear = ',shear
return shear, False
def _GenerateFromNFWHaloMagnification(param, param_name, base, value_type):
"""@brief Return a magnification calculated from an NFWHalo object.
"""
if 'sky_pos' not in base:
raise ValueError("NFWHaloMagnification requested, but no position defined.")
pos = base['sky_pos']
#print 'nfw pos = ',pos
if 'gal' not in base or 'redshift' not in base['gal']:
raise ValueError("NFWHaloMagnification requested, but no gal.redshift defined.")
redshift = GetCurrentValue(base['gal'],'redshift')
if 'nfw_halo' not in base:
raise ValueError("NFWHaloMagnification requested, but no input.nfw_halo defined.")
opt = { 'max_mu' : float, 'num' : int }
kwargs = GetAllParams(param, param_name, base, opt=opt)[0]
num = kwargs.get('num',0)
if num < 0:
raise ValueError("Invalid num < 0 supplied for NFWHaloMagnification: num = %d"%num)
if num >= len(base['nfw_halo']):
raise ValueError("Invalid num supplied for NFWHaloMagnification (too large): num = %d"%num)
nfw_halo = base['nfw_halo'][num]
#print 'NFWHaloMagnification: pos = ',pos,' z = ',redshift
mu = nfw_halo.getMagnification(pos,redshift)
max_mu = kwargs.get('max_mu', 25.)
if not max_mu > 0.:
raise ValueError(
"Invalid max_mu=%f (must be > 0) for %s.type = NFWHaloMagnification"%(
max_mu,param_name))
if mu < 0 or mu > max_mu:
#print 'mu = ',mu
import warnings
warnings.warn("Warning: NFWHalo mu = %f means strong lensing! Using mu=%f"%(mu,max_mu))
mu = max_mu
#print 'mu = ',mu
return mu, False
def _GenerateFromPowerSpectrumShear(param, param_name, base, value_type):
"""@brief Return a shear calculated from a PowerSpectrum object.
"""
if 'sky_pos' not in base:
raise ValueError("PowerSpectrumShear requested, but no position defined.")
pos = base['sky_pos']
if 'power_spectrum' not in base:
raise ValueError("PowerSpectrumShear requested, but no input.power_spectrum defined.")
opt = { 'num' : int }
kwargs = GetAllParams(param, param_name, base, opt=opt)[0]
num = kwargs.get('num',0)
if num < 0:
raise ValueError("Invalid num < 0 supplied for PowerSpectrumShear: num = %d"%num)
if num >= len(base['power_spectrum']):
raise ValueError("Invalid num supplied for PowerSpectrumShear (too large): num = %d"%num)
power_spectrum = base['power_spectrum'][num]
#print 'PowerSpectrumShear: pos = ',pos
try:
g1,g2 = power_spectrum.getShear(pos)
#print 'g1,g2 = ',g1,g2
shear = galsim.Shear(g1=g1,g2=g2)
except Exception as e:
#print e
import warnings
warnings.warn("Warning: PowerSpectrum shear is invalid -- probably strong lensing! " +
"Using shear = 0.")
shear = galsim.Shear(g1=0,g2=0)
#print 'shear = ',shear
return shear, False
def _GenerateFromPowerSpectrumMagnification(param, param_name, base, value_type):
"""@brief Return a magnification calculated from a PowerSpectrum object.
"""
if 'sky_pos' not in base:
raise ValueError("PowerSpectrumMagnification requested, but no position defined.")
pos = base['sky_pos']
if 'power_spectrum' not in base:
raise ValueError("PowerSpectrumMagnification requested, but no input.power_spectrum "
"defined.")
opt = { 'max_mu' : float, 'num' : int }
kwargs = GetAllParams(param, param_name, base, opt=opt)[0]
num = kwargs.get('num',0)
if num < 0:
raise ValueError("Invalid num < 0 supplied for PowerSpectrumMagnification: num = %d"%num)
if num >= len(base['power_spectrum']):
raise ValueError(
"Invalid num supplied for PowerSpectrumMagnification (too large): num = %d"%num)
power_spectrum = base['power_spectrum'][num]
mu = power_spectrum.getMagnification(pos)
max_mu = kwargs.get('max_mu', 25.)
if not max_mu > 0.:
raise ValueError(
"Invalid max_mu=%f (must be > 0) for %s.type = PowerSpectrumMagnification"%(
max_mu,param_name))
if mu < 0 or mu > max_mu:
#print 'mu = ',mu
import warnings
warnings.warn("Warning: PowerSpectrum mu = %f means strong lensing! Using mu=%f"%(
mu,max_mu))
mu = max_mu
return mu, False
def _GenerateFromList(param, param_name, base, value_type):
"""@brief Return next item from a provided list
"""
req = { 'items' : list }
opt = { 'index' : int }
# Only Check, not Get. We need to handle items a bit differently, since it's a list.
CheckAllParams(param, param_name, req=req, opt=opt)
items = param['items']
if not isinstance(items,list):
raise AttributeError("items entry for parameter %s is not a list."%param_name)
# Setup the indexing sequence if it hasn't been specified using the length of items.
SetDefaultIndex(param, len(items))
index, safe = ParseValue(param, 'index', base, int)
if index < 0 or index >= len(items):
raise AttributeError("index %d out of bounds for parameter %s"%(index,param_name))
val, safe1 = ParseValue(items, index, base, value_type)
safe = safe and safe1
return val, safe
def _type_by_letter(key):
if len(key) < 2:
raise AttributeError("Invalid user-defined variable %r"%key)
if key[0] == 'f':
return float
elif key[0] == 'i':
return int
elif key[0] == 'b':
return bool
elif key[0] == 's':
return str
elif key[0] == 'a':
return galsim.Angle
elif key[0] == 'p':
return galsim.PositionD
elif key[0] == 'g':
return galsim.Shear
else:
raise AttributeError("Invalid Eval variable: %s (starts with an invalid letter)"%key)
def _GenerateFromEval(param, param_name, base, value_type):
"""@brief Evaluate a string as the provided type
"""
#print 'Start Eval for ',param_name
req = { 'str' : str }
opt = {}
ignore = [ 'type' , 'current_val' ]
for key in param.keys():
if key not in (ignore + req.keys()):
opt[key] = _type_by_letter(key)
#print 'opt = ',opt
#print 'base has ',base.keys()
params, safe = GetAllParams(param, param_name, base, req=req, opt=opt, ignore=ignore)
#print 'params = ',params
string = params['str']
#print 'string = ',string
# Bring the user-defined variables into scope.
for key in opt.keys():
exec(key[1:] + ' = params[key]')
#print key[1:],'=',eval(key[1:])
# Also bring in any top level eval_variables
if 'eval_variables' in base:
#print 'found eval_variables = ',base['eval_variables']
if not isinstance(base['eval_variables'],dict):
raise AttributeError("eval_variables must be a dict")
opt = {}
for key in base['eval_variables'].keys():
if key not in ignore:
opt[key] = _type_by_letter(key)
#print 'opt = ',opt
params, safe1 = GetAllParams(base['eval_variables'], 'eval_variables', base, opt=opt,
ignore=ignore)
#print 'params = ',params
safe = safe and safe1
for key in opt.keys():
exec(key[1:] + ' = params[key]')
#print key[1:],'=',eval(key[1:])
# Also, we allow the use of math functions
import math
import numpy
import os
# Try evaluating the string as is.
try:
val = value_type(eval(string))
#print 'Simple success: val = ',val
return val, safe
except:
pass
# Then try bringing in the allowed variables to see if that works:
if 'image_pos' in base:
image_pos = base['image_pos']
if 'sky_pos' in base:
sky_pos = base['sky_pos']
if 'rng' in base:
rng = base['rng']
for key in galsim.config.valid_input_types.keys():
if key in base:
exec(key + ' = base[key]')
try:
val = value_type(eval(string))
#print 'Needed pos: val = ',val
return val, False
except:
raise ValueError("Unable to evaluate string %r as a %s for %s"%(
string,value_type,param_name))
def SetDefaultIndex(config, num):
"""
When the number of items in a list is known, we allow the user to omit some of
the parameters of a Sequence or Random and set them automatically based on the
size of the list, catalog, etc.
"""
if 'index' not in config:
config['index'] = { 'type' : 'Sequence', 'nitems' : num }
elif isinstance(config['index'],dict) and 'type' in config['index'] :
index = config['index']
type = index['type']
if ( type == 'Sequence' and
('step' not in index or (isinstance(index['step'],int) and index['step'] > 0) ) and
'last' not in index and 'nitems' not in index ):
index['last'] = num-1
elif ( type == 'Sequence' and
('step' in index and (isinstance(index['step'],int) and index['step'] < 0) ) ):
if 'first' not in index:
index['first'] = num-1
if 'last' not in index and 'nitems' not in index:
index['last'] = 0
elif type == 'Random':
if 'max' not in index:
index['max'] = num-1
if 'min' not in index:
index['min'] = 0
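# Illustrative sketch (not part of GalSim): a List with no explicit index picks up
# a default Sequence index from SetDefaultIndex, driven by base['seq_index']:
#
#     base = { 'seq_index' : 1 }
#     config = { 'sigma' : { 'type' : 'List', 'items' : [ 1.0, 1.5, 2.0 ] } }
#     sigma, safe = ParseValue(config, 'sigma', base, float)   # 1.5 for seq_index 1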
|
mardom/GalSim
|
galsim/config/value.py
|
Python
|
gpl-3.0
| 38,704
|
[
"Galaxy",
"Gaussian"
] |
cd0f46bbc9cbb8f63c0a24d4c48e14c522f21a26c901e60cc7c4b55f9084117d
|
#pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import vtk
from Options import Options
def get_options():
"""
    Returns options for vtkAxis objects.
"""
opt = Options()
opt.add('num_ticks', 5, "The number of tick marks to place on the axis.", vtype=int)
opt.add('lim', "The axis extents.", vtype=list)
opt.add('color', [1, 1, 1], "The color of the axis, ticks, and labels.")
opt.add('title', "The axis label.", vtype=str)
opt.add('font_size', "The axis title and label font sizes, in points.", vtype=int)
opt.add('title_font_size', "The axis title font size, in points.", vtype=int)
opt.add('tick_font_size', "The axis tick label font size, in points.", vtype=int)
opt.add('grid', True, "Show/hide the grid lines for this axis.")
opt.add('grid_color', [0.25, 0.25, 0.25], "The color for the grid lines.")
opt.add('precision', 3, "The axis numeric precision.", vtype=int)
opt.add('notation', "The type of notation, leave empty to let VTK decide", vtype=str,
allow=['standard', 'scientific', 'fixed', 'printf'])
opt.add('ticks_visible', True, "Control visibility of tickmarks on colorbar axis.")
opt.add('axis_visible', True, "Control visibility of axis line on colorbar axis.")
opt.add('labels_visible', True, "Control visibility of the numeric labels.")
opt.add('axis_position', 'left', "Set the axis position (left, right, top, bottom)", vtype=str,
allow=['left', 'right', 'top', 'bottom'])
opt.add('axis_point1', [0, 0], 'Starting location of axis, in absolute viewport coordinates.')
opt.add('axis_point2', [0, 0], 'Ending location of axis, in absolute viewport coordinates.')
return opt
def set_options(vtkaxis, opt):
"""
Set the options for vtkAxis object.
"""
# Visibility
vtkaxis.SetTicksVisible(opt['ticks_visible'])
vtkaxis.SetAxisVisible(opt['axis_visible'])
vtkaxis.SetLabelsVisible(opt['labels_visible'])
# Ticks
if opt.isOptionValid('num_ticks'):
vtkaxis.SetNumberOfTicks(opt['num_ticks'])
# Limits
if opt.isOptionValid('lim'):
vtkaxis.SetBehavior(vtk.vtkAxis.FIXED)
vtkaxis.SetRange(*opt['lim'])
vtkaxis.RecalculateTickSpacing()
else:
vtkaxis.SetBehavior(vtk.vtkAxis.AUTO)
# Color
if opt.isOptionValid('color'):
clr = opt['color']
vtkaxis.GetTitleProperties().SetColor(*clr)
vtkaxis.GetLabelProperties().SetColor(*clr)
vtkaxis.GetPen().SetColorF(*clr)
# Axis title
if opt.isOptionValid('title'):
vtkaxis.SetTitle(opt['title'])
# Font sizes
if opt.isOptionValid('font_size'):
vtkaxis.GetTitleProperties().SetFontSize(opt['font_size'])
vtkaxis.GetLabelProperties().SetFontSize(opt['font_size'])
if opt.isOptionValid('title_font_size'):
vtkaxis.GetTitleProperties().SetFontSize(opt['title_font_size'])
if opt.isOptionValid('tick_font_size'):
vtkaxis.GetLabelProperties().SetFontSize(opt['tick_font_size'])
# Precision/notation
vtkaxis.SetPrecision(opt['precision'])
if opt.isOptionValid('notation'):
notation = opt['notation'].upper()
vtk_notation = getattr(vtk.vtkAxis, notation + '_NOTATION')
vtkaxis.SetNotation(vtk_notation)
# Grid lines
vtkaxis.SetGridVisible(opt['grid'])
vtkaxis.GetGridPen().SetColorF(opt['grid_color'])
# Set the position and points
if opt.isOptionValid('axis_position'):
pos = {'left':vtk.vtkAxis.LEFT, 'right':vtk.vtkAxis.RIGHT, 'top':vtk.vtkAxis.TOP,
'bottom':vtk.vtkAxis.BOTTOM}
vtkaxis.SetPosition(pos[opt['axis_position']])
if opt.isOptionValid('axis_point1'):
vtkaxis.SetPoint1(*opt['axis_point1'])
if opt.isOptionValid('axis_point2'):
vtkaxis.SetPoint2(*opt['axis_point2'])
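if __name__ == '__main__':
    # Minimal demonstration sketch (not part of chigger): build the default option
    # set and apply it to a fresh vtkAxis. In normal use the calling chigger object
    # fills in values such as 'title' or 'lim' before set_options is invoked.
    demo_axis = vtk.vtkAxis()
    demo_options = get_options()
    set_options(demo_axis, demo_options)
    print('Applied default axis options to ' + demo_axis.GetClassName())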
|
backmari/moose
|
python/chigger/utils/AxisOptions.py
|
Python
|
lgpl-2.1
| 4,689
|
[
"MOOSE",
"VTK"
] |
81833a0caaf94ac2822ecb7c106c57d96db39a88213c339bd47d35756e279ab5
|
"""
dmath v0.9.1
Python math module for Decimal numbers. All functions should return Decimal
numbers. Probably only works with real numbers.
pi, exp, cos, sin from Decimal recipes:
http://docs.python.org/lib/decimal-recipes.html
float_to_decimal from Decimal FAQ:
http://docs.python.org/lib/decimal-faq.html
Copyright (c) 2006 Brian Beck <[email protected]>,
Christopher Hesse <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# TODO all inputs should be converted using convert_other to Decimal, and all results should be returned as Decimal (don't bother matching input types)
# TODO context should be taken as an argument when appropriate, especially when throwing an error, look at decimal.py for hints
# TODO should use custom convert_other that has the option of converting floats (using float_to_decimal) if an option is set in advance (just not by default)
# TODO try implementing something, say pi, in pyrex to compare the speed
import math
import decimal
from decimal import Decimal, Context, Inexact, getcontext, setcontext, _convert_other
D = Decimal
context = getcontext()
#
# utility functions
#
def float_to_decimal(f):
"""Convert a floating point number to a Decimal with no loss of information.
"""
# Transform (exactly) a float to a mantissa (0.5 <= abs(m) < 1.0) and an
# exponent. Double the mantissa until it is an integer. Use the integer
# mantissa and exponent to compute an equivalent Decimal. If this cannot
# be done exactly, then retry with more precision.
mantissa, exponent = math.frexp(f)
while mantissa != int(mantissa):
mantissa *= 2.0
exponent -= 1
mantissa = int(mantissa)
oldcontext = getcontext()
setcontext(Context(traps=[Inexact]))
try:
while True:
try:
return mantissa * Decimal(2) ** exponent
except Inexact:
getcontext().prec += 1
finally:
setcontext(oldcontext)
#
# constants
#
def pi(context=None):
"""Compute Pi to the current precision."""
getcontext().prec += 2
lasts = 0; t = D(3); s = 3; n = 1; na = 0; d = 0; da = 24
while s != lasts:
lasts = s
n, na = n + na, na + 8
d, da = d + da, da + 32
t = (t * n) / d
s += t
getcontext().prec -= 2
return +s
def e():
"""Compute the base of the natural logarithm to the current precision."""
return exp(D(1))
def golden_ratio():
"""Calculate the golden ratio to the current precision."""
return (1 + D(5).sqrt()) / 2
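# Example values at the default context precision (prec=28), matching the decimal
# recipes this module borrows from:
#   pi()  ->  3.141592653589793238462643383
#   e()   ->  2.718281828459045235360287471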
#
# transcendental functions
#
def exp(x, context=None):
"""Return e raised to the power of x."""
if context is None:
context = getcontext()
context.prec += 2
i = 0; lasts = 0; s = 1; fact = 1; num = 1
while s != lasts:
lasts = s
i += 1
fact *= i
num *= x
s += num / fact
context.prec -= 2
return +s
def log(x, base=None, context=None):
"""Return the logarithm of x to the given base.
If the base not specified, return the natural logarithm (base e) of x.
"""
if context is None:
context = getcontext()
if x < 0:
return D('NaN', context=context)
elif base == 1:
raise ValueError("Base was 1!")
elif x == base:
return D(1, context=context)
elif x == 0:
return D('-Inf', context=context)
context.prec += 2
if base is None:
log_base = 1
approx = math.log(x)
else:
log_base = log(base, context=context)
approx = math.log(x, base)
lasts, s = 0, D(repr(approx), context=context)
while lasts != s:
lasts = s
s -= 1 - x / exp(s, context=context)
s /= log_base
context.prec -= 2
return +s
def log10(x):
"""Return the base 10 logarithm of x."""
return log(x, D(10))
#
# trigonometric functions
#
def sin(x, context=None):
"""Return the sine of x in radians."""
if context is None:
context = getcontext()
# Uses the series definition of cos, see:
# http://en.wikipedia.org/wiki/Trigonometric_function#Series_definitions
context.prec += 2
i, lasts, s, fact, num, sign = 1, 0, x, 1, x, 1
while s != lasts:
lasts = s
i += 2
fact *= i * (i - 1)
num *= x * x
sign *= -1
s += num / fact * sign
context.prec -= 2
return +s
def cos(x, context=None):
"""Return the cosine of x in radians."""
if context is None:
context = getcontext()
# Uses the series definition of cos, see:
# http://en.wikipedia.org/wiki/Trigonometric_function#Series_definitions
context.prec += 2
i = 0; lasts = 0; s = 1; fact = 1; num = 1; sign = 1
while s != lasts:
lasts = s
i += 2
fact *= i * (i - 1)
num *= x * x
sign = -sign
s += num / fact * sign
context.prec -= 2
return +s
def tan(x, context=None):
"""Return the tangent of x in radians."""
if context is None:
context = getcontext()
context.prec += 2
t = sin(x, context=context) / cos(x, context=context)
context.prec -= 2
return +t
#
# inverse trigonometric functions
#
# The version below is actually overwritten by the version using atan2 below
# it, since it is much faster. If possible, I'd like to write a fast version
# independent of atan2.
#def asin(x):
# """Return the arc sine (measured in radians) of Decimal x."""
# if abs(x) > 1:
# raise ValueError("Domain error: asin accepts -1 <= x <= 1")
#
# if x == -1:
# return pi() / -2
# elif x == 0:
# return D(0)
# elif x == 1:
# return pi() / 2
#
# getcontext().prec += 2
# one_half = D('0.5')
# i, lasts, s, gamma, fact, num = D(0), 0, x, 1, 1, x
# while s != lasts:
# lasts = s
# i += 1
# fact *= i
# num *= x * x
# gamma *= i - one_half
# coeff = gamma / ((2 * i + 1) * fact)
# s += coeff * num
# getcontext().prec -= 2
# return +s
# This is way faster, I wonder if there's a downside?
def asin(x, context=None):
"""Return the arcsine of x in radians."""
if abs(x) > 1:
raise ValueError("Domain error: asin accepts -1 <= x <= 1")
if context is None:
context = getcontext()
if x == -1:
return pi(context=context) / -2
elif x == 0:
return D(0, context=context)
elif x == 1:
return pi(context=context) / 2
return atan2(x, D.sqrt(1 - x ** 2), context=context)
# The version below is actually overwritten by the version using atan2 below
# it, since it is much faster. If possible, I'd like to write a fast version
# independent of atan2.
#def acos(x):
# """Return the arc cosine (measured in radians) of Decimal x."""
# if abs(x) > 1:
# raise ValueError("Domain error: acos accepts -1 <= x <= 1")
#
# if x == -1:
# return pi()
# elif x == 0:
# return pi() / 2
# elif x == 1:
# return D(0)
#
# getcontext().prec += 2
# one_half = D('0.5')
# i, lasts, s, gamma, fact, num = D(0), 0, pi() / 2 - x, 1, 1, x
# while s != lasts:
# lasts = s
# i += 1
# fact *= i
# num *= x * x
# gamma *= i - one_half
# coeff = gamma / ((2 * i + 1) * fact)
# s -= coeff * num
# getcontext().prec -= 2
# return +s
# This is way faster, I wonder if there's a downside?
def acos(x, context=None):
"""Return the arccosine of x in radians."""
if abs(x) > 1:
raise ValueError("Domain error: acos accepts -1 <= x <= 1")
if context is None:
context = getcontext()
if x == 1:
return D(0, context=context)
else:
PI = pi(context=context)
if x == -1:
return PI
elif x == 0:
return PI / 2
return PI / 2 - atan2(x, sqrt(1 - x ** 2, context=context), context=context)
def atan(x, context=None):
"""Return the arctangent of x in radians."""
if context is None:
context = getcontext()
c = 0
if x == 0:
return D(0, context=context)
elif abs(x) > 1:
PI = pi(context=context)
x_is_inf = x._isinfinity()
if x_is_inf:
return PI / D((x._sign, (2,), 0), context=context)
else:
c = PI / D((x._sign, (2,), 0), context=context)
x = 1 / x
context.prec += 2
x_squared = x ** 2
y = x_squared / (1 + x_squared)
y_over_x = y / x
i = D(0); lasts = 0; s = y_over_x; coeff = 1; num = y_over_x
while s != lasts:
lasts = s
i += 2
coeff *= i / (i + 1)
num *= y
s += coeff * num
if c:
s = c - s
context.prec -= 2
return +s
def atan2(y, x, context=context):
"""Return the arctangent of y/x in radians.
Unlike atan(y/x), the signs of both x and y are considered.
"""
# TODO check the sign function make sure this still works
# decimal zero has a sign
abs_y = abs(y)
abs_x = abs(x)
y_is_real = not x._isinfinity()
if x != 0:
if y_is_real:
a = y and atan(y / x, context=context) or D(0)
if x < 0:
a += D((y._sign, (1,), 0)) * pi(context=context)
return a
elif abs_y == abs_x:
x = D((x._sign, (1,), 0))
y = D((y._sign, (1,), 0))
return pi(context=context) * (2 - x) / (4 * y)
if y != 0:
return atan(D((y._sign, (0,), 'F')))
elif x < 0:
return D((y._sign, (1,), 0)) * pi()
else:
return D(0)
#
# hyperbolic trigonometric functions
#
def sinh(x):
"""Return the hyperbolic sine of x."""
if x == 0:
return D(0)
# Uses the taylor series expansion of sinh, see:
# http://en.wikipedia.org/wiki/Hyperbolic_function#Taylor_series_expressions
getcontext().prec += 2
i, lasts, s, fact, num = 1, 0, x, 1, x
while s != lasts:
lasts = s
i += 2
num *= x * x
fact *= i * (i - 1)
s += num / fact
getcontext().prec -= 2
return +s
def cosh(x):
"""Return the hyperbolic cosine of x."""
if x == 0:
return D(1)
# Uses the taylor series expansion of cosh, see:
# http://en.wikipedia.org/wiki/Hyperbolic_function#Taylor_series_expressions
getcontext().prec += 2
i, lasts, s, fact, num = 0, 0, 1, 1, 1
while s != lasts:
lasts = s
i += 2
num *= x * x
fact *= i * (i - 1)
s += num / fact
getcontext().prec -= 2
return +s
def tanh(x):
"""Return the hyperbolic tangent of x."""
return +(sinh(x) / cosh(x))
#
# miscellaneous functions
#
def sgn(x):
"""Return -1 for negative numbers, 1 for positive numbers and 0 for zero."""
# the signum function, see:
# http://en.wikipedia.org/wiki/Sign_function
if x > 0:
return D(1)
elif x < 0:
return D(-1)
else:
return D(0)
def degrees(x):
"""Return angle x converted from radians to degrees."""
return x * 180 / pi()
def radians(x):
"""Return angle x converted from degrees to radians."""
return x * pi() / 180
def ceil(x):
"""Return the smallest integral value >= x."""
return x.to_integral(rounding=decimal.ROUND_CEILING)
def floor(x):
"""Return the largest integral value <= x."""
return x.to_integral(rounding=decimal.ROUND_FLOOR)
def hypot(x, y):
"""Return the Euclidean distance, sqrt(x**2 + y**2)."""
return sqrt(x * x + y * y)
def modf(x):
"""Return the fractional and integer parts of x."""
int_part = x.to_integral(rounding=decimal.ROUND_FLOOR)
frac_part = x-int_part
return frac_part,int_part
def ldexp(s, e):
"""Return s*(10**e), the value of a decimal floating point number with
significand s and exponent e.
This function is the inverse of frexp. Note that this is different from
math.ldexp, which uses 2**e instead of 10**e.
"""
return s*(10**e)
def frexp(x):
"""Return s and e where s*(10**e) == x.
s and e are the significand and exponent, respectively of x.
This function is the inverse of ldexp. Note that this is different from
math.frexp, which uses 2**e instead of 10**e.
"""
e = D(x.adjusted())
s = D(10)**(-x.adjusted())*x
return s, e
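# Worked example: for x = D('123.45'), x.adjusted() is 2, so
#   frexp(D('123.45')) -> (Decimal('1.2345'), Decimal('2'))
# and ldexp of that pair recovers the value 123.45.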
def pow(x, y, context=None):
"""Returns x**y (x to the power of y).
x cannot be negative if y is fractional.
"""
context, x, y = _initialize(context, x, y)
# if y is an integer, just call regular pow
if y._isinteger():
return x**y
# if x is negative, the result is complex
if x < 0:
return context._raise_error(decimal.InvalidOperation, 'x (negative) ** y (fractional)')
return exp(y * log(x))
def tetrate(x, y, context=None):
"""Return x recursively raised to the power of x, y times. ;)
y must be a natural number.
"""
context, x, y = _initialize(context, x, y)
if not y._isinteger():
return context._raise_error(decimal.InvalidOperation, 'x *** (non-integer)')
def _tetrate(x,y):
if y == -1:
return D(-1)
if y == 0:
return D(1)
if y == 1:
return x
return x**_tetrate(x,y-1)
return _tetrate(x,y)
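# Worked example: tetrate(D(2), D(3)) evaluates 2 ** (2 ** 2) = 16, and
# tetrate(x, D(1)) is x itself; y must be an integer-valued Decimal (see above).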
#
# internal functions
#
def _initialize(context, *args):
if context is None:
context = getcontext()
r = [context]
for arg in args:
# TODO should something else be seeing NotImplemented?
e = _convert_other(arg)
if e is NotImplemented:
raise TypeError("unsupported operand type: '%s'" \
"(if it's a float, try the float_to_decimal function)" % (type(e).__name__,))
r.append(e)
return r
def _sign(x):
"""Return -1 for negative numbers and 1 for positive numbers."""
# brian's sign function
if x._sign == 0:
return D(1)
else:
return D(-1)
sqrt = D.sqrt
fabs = abs
fmod = D.__mod__
__all__ = ['acos', 'asin', 'atan', 'atan2', 'ceil', 'cos', 'cosh', 'degrees',
'e', 'exp', 'fabs', 'floor', 'fmod', 'frexp', 'golden_ratio',
'hypot', 'ldexp', 'log', 'log10', 'modf', 'pi', 'pow', 'radians',
'sgn', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'tetrate']
if __name__ == '__main__':
# TODO put some test functions down here
pass
|
twright/C1000-Intelligent-Calculator
|
dmath.py
|
Python
|
gpl-3.0
| 15,548
|
[
"Brian"
] |
a7b7ee8be038b998a6aaaf6dcdcf2e9a1521bf6781810a72c31596e9f0ce2c1e
|
"""
------------
PyMine 1.0.1
------------
PyMine lets you integrate and visualize biological data used for drug discovery using PyMOL.
------------
REQUIREMENTS
------------
1) Ubuntu 11.04 or above OR Mac OS X 10.7 or above
2) Pymol 1.7 or above
3) PyMine 1.0.1
------------
INSTALLATION
------------
1) Download and install pymol. http://sourceforge.net/projects/pymol/
2) Download and unzip PyMine. https://github.com/rrchaudhari/PyMine
3) Open PyMol. Install PyMine: Plugins -> Manage Plugins -> Install -> (locate pymine.py file).
4) Restart PyMol
Using MacPyMOL
1) Rename the "MacPyMOL.app" to "PyMOLX11Hybrid.app" in Applications folder.
2) install XQuartz found at http://xquartz.macosforge.org/landing/
3) Follow the installation procedure of the plugin mentioned above.
-------
History
-------
- v1.0.0: Initial public release
-------------
Licence (MIT)
-------------
Copyright (c) 2015 Rajan Chaudhari and Zhijun Li
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import sys
import fileinput
import Tkinter
import tkMessageBox
import urllib2
import pymol
from Tkinter import PhotoImage as PI
import xml.etree.ElementTree as ET
import webbrowser
import tkFileDialog
#initialize pymol plugin
def __init__(self):
self.menuBar.addmenuitem('Plugin', 'command',
'Gather information',
label = 'PyMine',
command = main)
class PyMine(Tkinter.Tk):
def __init__(self,parent):
Tkinter.Tk.__init__(self,parent)
self.parent = parent
self.grid()
self.createGUI()
# INSTANCE VARIABLES ARE DEFINED HERE SO THAT THEY CAN BE USED IN ANY METHOD
self.flag=0
self.pdb_id=''
self.pdb_chain_id=''
self.pdb_file=''
self.smiles=''
self.name=list()
self.summary=list()
self.symbol=list()
self.uniprot=list()
self.binding_sites=list()
self.ppi_bs_residues=''
self.lig_bs_residues=''
self.dna_bs_residues=''
self.rna_bs_residues=''
self.ion_bs_residues=''
self.pep_bs_residues=''
self.ki_comps=list()
self.ec50_comps=list()
self.ic50_comps=list()
self.pathways=list()
self.saps=list()
self.ligands=list()
self.ligand_chemblid=list()
self.target_chemblID=''
self.approved_drugs=list()
self.ligand_images=list()
self.kegg_info=''
self.userpdbfile=None
#self.label4.config(text=None)
self.entryVariable5.set(None)
def createGUI(self):
## Create Frame
self.frame1=Tkinter.Frame(self)
self.frame1.grid(sticky='nswe')
## INPUT
self.label1=Tkinter.Label(self.frame1, text="Enter PDB ID") #LABEL
self.label1.grid(row=1, column=0, sticky=Tkinter.W)
self.entryVariable1=Tkinter.StringVar(master=self.frame1) #INPUT Variable
self.entryVariable1.set('1RTK')
self.entry1=Tkinter.Entry(self.frame1, textvariable=self.entryVariable1, width=4) #INPUT Box
self.entry1.grid(row=1, column=1, sticky=Tkinter.W)
self.label1_1=Tkinter.Label(self.frame1, text="Enter Chain ID") #LABEL
self.label1_1.grid(row=1, column=2, sticky=Tkinter.W)
self.entryVariable2=Tkinter.StringVar(master=self.frame1) #Input Variable 2
self.entryVariable2.set('A')
self.entry2=Tkinter.Entry(self.frame1, textvariable=self.entryVariable2, width=2) #input box 2
self.entry2.grid(row=1, column=3, sticky=Tkinter.W)
self.label2_2=Tkinter.Label(self.frame1, text="OR") #LABEL
self.label2_2.grid(row=1, column=4, sticky=Tkinter.W)
#self.button1=Tkinter.Button(self.frame1, text="Submit", command=self.get_results) #Button1
#self.button1.grid(row=1, column=4, sticky=Tkinter.W)
self.button2=Tkinter.Button(self.frame1, text="Clear", command=self.clear) #Button2
self.button2.grid(row=4, column=4, sticky=Tkinter.W)
self.label3=Tkinter.Label(self.frame1, text="Select PDB File") #LABEL
self.label3.grid(row=2, column=0, sticky=Tkinter.W)
self.button1_1=Tkinter.Button(self.frame1, text="Browse", command=self.file_upload) #Button2
self.button1_1.grid(row=2, column=1, sticky=Tkinter.W)
self.label5=Tkinter.Label(self.frame1, text="Enter Uniprot ID") #LABEL
self.label5.grid(row=2, column=2, sticky=Tkinter.W)
self.entryVariable5=Tkinter.StringVar(master=self.frame1) #Input Variable 3
self.entryVariable5.set('')
self.entry5=Tkinter.Entry(self.frame1, textvariable=self.entryVariable5, width=6) #input box 3
self.entry5.grid(row=2, column=3, sticky=Tkinter.W)
self.button2_2=Tkinter.Button(self.frame1, text="Submit", command=self.get_results) #Button1
self.button2_2.grid(row=2, column=4, sticky=Tkinter.W)
self.label4=Tkinter.Label(self.frame1, width=10, anchor=Tkinter.W, justify=Tkinter.LEFT) #LABEL
self.label4.grid(row=3, column=1, sticky=Tkinter.W)
self.label2=Tkinter.Label(self.frame1, text="Enter SMILES String") #LABEL
self.label2.grid(row=4, column=0, sticky=Tkinter.W)
self.entryVariable3=Tkinter.StringVar(master=self.frame1) #Input Variable 3
self.entryVariable3.set('')
self.entry3=Tkinter.Entry(self.frame1, textvariable=self.entryVariable3, width=10) #input box 3
self.entry3.grid(row=4, column=1, columnspan=2, sticky=Tkinter.W)
self.button3=Tkinter.Button(self.frame1, text="Find Similar Ligands", command=self.get_similar_ligands) #Button2
self.button3.grid(row=4, column=2, sticky=Tkinter.W)
self.button11=Tkinter.Button(self.frame1, text="?", command=self.smiles_help)
self.button11.grid(row=4, column=3, sticky=Tkinter.W)
## OUTPUT
self.rframe=Tkinter.LabelFrame(master=self.frame1, text="Data Panel")
self.rframe.grid(row=6, columnspan=6, sticky='nswe')
self.button5=Tkinter.Button(self.rframe, text="Protein", command=self.lift_prot_info)
self.button5.grid(row=0, column=0)
self.button6=Tkinter.Button(self.rframe, text="Ligands", command=self.lift_lig_info)
self.button6.grid(row=0, column=1)
self.button7=Tkinter.Button(self.rframe, text="PDB", command=self.lift_pdb_file)
self.button7.grid(row=0, column=3)
self.button8=Tkinter.Button(self.rframe, text="Uniprot", command=self.lift_uniprot_file)
self.button8.grid(row=0, column=2)
self.button9=Tkinter.Button(self.rframe, text="Pathways", command=self.lift_kegg_info)
self.button9.grid(row=0, column=4)
self.button10=Tkinter.Button(self.rframe, text="Similar Ligands", command=self.lift_ligss_info)
self.button10.grid(row=0, column=5)
self.text1=Tkinter.Text(master=self.rframe, wrap=Tkinter.WORD)
self.text1.grid(row=5, column=0, columnspan=10, stick='ns')
scrollbar1=Tkinter.Scrollbar(self.rframe, command=self.text1.yview)
scrollbar1.grid(row=5, column=11, sticky='nswe')
self.text1.configure(yscrollcommand=scrollbar1.set)
self.text1.lower()
self.text2=Tkinter.Text(master=self.rframe, wrap=Tkinter.WORD)
self.text2.grid(row=5, column=0, columnspan=10, stick='ns')
self.text2.lower()
self.text3=Tkinter.Text(master=self.rframe, wrap=Tkinter.WORD)
self.text3.grid(row=5, column=0, columnspan=10, stick='ns')
self.text3.lower()
self.text4=Tkinter.Text(master=self.rframe, wrap=Tkinter.WORD)
self.text4.grid(row=5, column=0, columnspan=10, stick='ns')
self.text4.lower()
self.text5=Tkinter.Text(master=self.rframe, wrap=Tkinter.WORD)
self.text5.grid(row=5, column=0, columnspan=10, stick='ns')
self.text5.lower()
self.text6=Tkinter.Text(master=self.rframe, wrap=Tkinter.WORD)
self.text6.grid(row=5, column=0, columnspan=10, stick='ns')
self.text6.lower()
def lift_prot_info(self):
self.text1.lift()
scrollbar1=Tkinter.Scrollbar(self.rframe, command=self.text1.yview)
scrollbar1.grid(row=5, column=11, sticky='nswe')
self.text1.configure(yscrollcommand=scrollbar1.set)
self.text2.lower()
self.text3.lower()
self.text4.lower()
self.text5.lower()
self.text6.lower()
def lift_lig_info(self):
self.text1.lower()
self.text2.lift()
scrollbar2=Tkinter.Scrollbar(self.rframe, command=self.text2.yview)
scrollbar2.grid(row=5, column=11, sticky='nswe')
self.text2.configure(yscrollcommand=scrollbar2.set)
self.text3.lower()
self.text4.lower()
self.text5.lower()
self.text6.lower()
def lift_pdb_file(self):
self.text3.lift()
scrollbar3=Tkinter.Scrollbar(self.rframe, command=self.text3.yview)
scrollbar3.grid(row=5, column=11, sticky='nswe')
self.text3.configure(yscrollcommand=scrollbar3.set)
self.text1.lower()
self.text2.lower()
self.text4.lower()
self.text5.lower()
self.text6.lower()
def lift_uniprot_file(self):
self.text4.lift()
scrollbar4=Tkinter.Scrollbar(self.rframe, command=self.text4.yview)
scrollbar4.grid(row=5, column=11, sticky='nswe')
self.text4.configure(yscrollcommand=scrollbar4.set)
self.text1.lower()
self.text2.lower()
self.text3.lower()
self.text5.lower()
self.text6.lower()
def lift_kegg_info(self):
self.text5.lift()
scrollbar5=Tkinter.Scrollbar(self.rframe, command=self.text5.yview)
scrollbar5.grid(row=5, column=11, sticky='nswe')
self.text5.configure(yscrollcommand=scrollbar5.set)
self.text1.lower()
self.text2.lower()
self.text3.lower()
self.text4.lower()
self.text6.lower()
def lift_ligss_info(self):
self.text6.lift()
scrollbar6=Tkinter.Scrollbar(self.rframe, command=self.text6.yview)
scrollbar6.grid(row=5, column=11, sticky='nswe')
self.text6.configure(yscrollcommand=scrollbar6.set)
self.text1.lower()
self.text2.lower()
self.text3.lower()
self.text4.lower()
self.text5.lower()
def file_upload(self):
toplevel1 = Tkinter.Toplevel()
toplevel1.withdraw()
self.userpdbfile = tkFileDialog.askopenfile(parent=toplevel1,mode='rb',title='Choose a file')
self.userpdbfile_path=self.userpdbfile.name
print self.userpdbfile_path
self.userpdb_filename=os.path.basename(self.userpdbfile_path)
self.userpdb_filename_noext=self.userpdb_filename.split('.')[0]
if self.userpdbfile != None:
self.label4.config(text=self.userpdb_filename)
def smiles_help(self):
# Show a short help dialog explaining how to search for similar ligands.
tkMessageBox.showinfo(title = 'Smiles', message = "To find similar ligands, paste your SMILES string in the entry box and click the Find Similar Ligands button. \n On Mac use Command+C to copy from the Data Panel and use Control+V to paste in the entry box")
def showLink(self, event, arg):
# Open the given URL in the default web browser.
webbrowser.open_new(arg)
def show_pathway(self, path):
toplevel = Tkinter.Toplevel()
#toplevel.grid(sticky='nswe')
toplevel.columnconfigure(0, weight=1)
toplevel.rowconfigure(0, weight=1)
Tframe=Tkinter.Frame(toplevel)
Tframe.grid(sticky='nswe')
Tframe.columnconfigure(0, weight=1)
Tframe.rowconfigure(0, weight=1)
PathwayImage=Tkinter.PhotoImage(file=path)
PhotoImage=Tkinter.Text(Tframe)
PhotoImage.image_create(Tkinter.END, image=PathwayImage)
PhotoImage.img=PathwayImage
PhotoImage.grid(row = 0, column=0, sticky='nswe')
scrollbar1=Tkinter.Scrollbar(Tframe, command=PhotoImage.yview)
scrollbar1.grid(row=0, column=1, sticky='nswe')
scrollbar2=Tkinter.Scrollbar(Tframe, orient=Tkinter.HORIZONTAL, command=PhotoImage.xview)
scrollbar2.grid(row=1, column=0, sticky='nswe')
PhotoImage.columnconfigure(0, weight=1)
PhotoImage.rowconfigure(0, weight=1)
PhotoImage.configure(yscrollcommand=scrollbar1.set)
PhotoImage.configure(xscrollcommand=scrollbar2.set)
PhotoImage.lift()
def get_similar_ligands(self):
self.ligssdir=os.path.join(self.outdir, "Similar_Ligands")
if os.path.exists(self.ligssdir):
os.chdir(self.ligssdir)
else:
os.mkdir(self.ligssdir)
os.chdir(self.ligssdir)
#print "Aquiring similar ligands...."
self.smiles=self.entryVariable3.get()
#print self.smiles
self.lift_ligss_info()
url="https://www.ebi.ac.uk/chemblws/compounds/similarity/"+self.smiles+"/70"
#print url
try:
self.text6.config(state=Tkinter.NORMAL)
self.text6.delete(1.0, Tkinter.END)
response_assay_xml=urllib2.urlopen(url).read()
root=ET.fromstring(response_assay_xml)
self.text6.insert(Tkinter.INSERT, "Similar Ligands: "+"\n\n")
self.text6.insert(Tkinter.INSERT, "ChemblID \t Similarity \t smiles \n\n")
fileh=open("Similar_Ligands.smi", "w")
idh=open("Similar_ligands.txt", "w")
idh.write("smiles \t ChemblID \t Similarity \n")
for i in root:
self.text6.insert(Tkinter.INSERT, i[1].text+"\t"+i[4].text+"\t"+i[0].text+"\n\n")
fileh.write(i[0].text+"\n")
idh.write(i[0].text+"\t"+i[1].text+"\t"+i[4].text+"\n")
fileh.close()
idh.close()
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for similar ligands!"
elif err.code == 403:
print "Access denied for similar ligands!"
else:
print "Something happened in similar ligands! Error code", err.code
def get_smiles(self, chembl_id):
print "Aquiring smiles....\n THIS COULD TAKE LONG TIME DEPENDING ON NUMBER OF MOLECULES THAT MATCHES CRITERION!!"
#tkMessageBox.showinfo(title="Aquiring smiles...", message="THIS COULD TAKE LONG TIME DEPENDING ON NUMBER OF MOLECULES THAT MATCHES CRITERION!!")
ids=chembl_id
smiles=list()
for i in ids:
url="http://www.ebi.ac.uk/chemblws/compounds/"+str(i)
try:
response_ligsmi_xml=urllib2.urlopen(url).read()
root=ET.fromstring(response_ligsmi_xml)
smiles.append(root[0].text)
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for smiles!"
elif err.code == 403:
print "Access denied for smiles!"
else:
print "Something else happened in smiles! Error code", err.code
return smiles
def get_info(self):
#print "1 Aquiring uniprot id from pdb id...."
self.pdb_id=self.entryVariable1.get().upper()
self.pdb_chain_id=self.entryVariable2.get().upper()
cwd = os.path.expanduser("~/Desktop/")
self.outdir = os.path.join(cwd, 'PyMine_Output_'+str(self.pdb_id))
if not os.path.exists(self.outdir):
os.mkdir(self.outdir)
os.chdir(self.outdir)
for line in urllib2.urlopen('http://www.uniprot.org/docs/pdbtosp.txt'):
if len(line.split())>1:
if self.pdb_id == str(line.split()[0]):
self.uniprot.append(str(line.split()[5])[1:-1])
self.text1.insert(Tkinter.INSERT, "PDB ID: "+self.pdb_id+ "\n\n")
if self.uniprot:
self.text1.insert(Tkinter.END, "Uniprot: " +', '.join(map(str, self.uniprot))+"\n\n")
else:
self.text1.insert(Tkinter.END, "Uniprot id not found " +"\n\n")
def get_user_info(self):
#print "1 Aquiring uniprot id from pdb id...."
self.pdb_id=self.userpdb_filename_noext
print self.pdb_id
#self.pdb_chain_id=self.entryVariable2.get().upper()
cwd = os.path.expanduser("~/Desktop/")
self.outdir = os.path.join(cwd, 'PyMine_Output_'+str(self.pdb_id))
if not os.path.exists(self.outdir):
os.mkdir(self.outdir)
os.chdir(self.outdir)
self.uniprot.append(self.entryVariable5.get().upper())
self.text1.insert(Tkinter.INSERT, "PDB File: "+self.pdb_id+ "\n\n")
if self.uniprot:
self.text1.insert(Tkinter.END, "Uniprot: " +', '.join(map(str, self.uniprot))+"\n\n")
else:
self.text1.insert(Tkinter.END, "Uniprot id not found " +"\n\n")
def show_pdb(self):
#print "2 Importing 3d structure...."
pymol.cmd.cd(self.outdir)
#print pymol.cmd.pwd()
current_pdb=self.pdb_id
#pymol.finish_launching()
if self.flag==1:
pymol.cmd.load(self.userpdbfile_path)
else:
pymol.cmd.fetch(current_pdb) #pymol.cmd.load("/Users/rrc/Desktop/pymol_plugin/2RH1.pdb",current_pdb)
pdbfilename=str(self.pdb_id)+".pdb"
#pymol.cmd.save(pdbfilename, current_pdb)
pymol.cmd.hide('everything', current_pdb)
#pymol.cmd.select("selection", current_pdb)
pymol.cmd.show('cartoon')
pymol.cmd.select('ligand', 'organic')
def get_pdb_file(self):
#print "3 Aquiring pdb and uniprot file...."
if self.flag==1:
pdbfile=open(self.userpdbfile_path, "r")
for line in pdbfile:
self.text3.insert(Tkinter.END, line)
else:
filename=str(self.pdb_id.lower())+".pdb"
pdbfile=open(filename, "r")
for line in pdbfile:
self.text3.insert(Tkinter.END, line)
def get_uniprot_file(self):
#print self.uniprot[0]
if self.uniprot:
fh=open(self.uniprot[0]+".txt", "w")
for line in urllib2.urlopen('http://www.uniprot.org/uniprot/'+self.uniprot[0]+'.txt'):
self.text4.insert(Tkinter.END, line)
fh.write(line)
fh.close()
else:
print "Error in uniprot id"
def get_ligands(self):
#print "4 Aquiring pdb ligands...."
try:
url="http://www.rcsb.org/pdb/rest/ligandInfo?structureId="+self.pdb_id
response_xml = urllib2.urlopen(url).read()
root=ET.fromstring(response_xml)
for i in root[0]:
chemid = i.attrib['chemicalID']
for j in i:
if j.tag=="smiles":
smiles=j.text
if j.tag=="chemicalName":
chem_name=j.text
self.ligands.append([chemid, chem_name, smiles])
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for pdb ligands!"
elif err.code == 403:
print "Access denied for pdb ligands!"
else:
print "Something else happened in pdb ligands! Error code", err.code
if self.ligands:
self.text2.insert(Tkinter.END, "Ligands in PDB: \n\n")
for i in self.ligands:
self.text2.insert(Tkinter.END, ' '.join(map(str, i))+"\n\n")
else:
self.text2.insert(Tkinter.END, "Ligands not found\n\n")
def get_ligand_images(self):
#print "5 Aquiring pdb ligand images...."
self.ligdir=os.path.join(self.outdir, "Ligands")
if not os.path.exists(self.ligdir):
os.mkdir(self.ligdir)
os.chdir(self.ligdir)
if self.ligands:
for i in self.ligands:
chid=i[0]
#print "Working on "+ str(chid)
try:
url="http://www.ebi.ac.uk/chemblws/compounds/smiles/"+i[2]
#print url
response_xml_chemblids=urllib2.urlopen(url).read()
root=ET.fromstring(response_xml_chemblids)
if len(root)>0:
lig_chemblID=root[0].find("chemblId").text
self.ligand_chemblid.append([chid, lig_chemblID])
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for ligand images!"
elif err.code == 403:
print "Access denied for ligand images!"
else:
print "Ignored smiles retrieval for ions!"
else:
print "Ligands not present"
if self.ligand_chemblid:
for i in self.ligand_chemblid:
url="http://www.ebi.ac.uk/chemblws/compounds/"+i[1]+"/image"
#print url
imgRequest = urllib2.Request(url)
imgData=urllib2.urlopen(imgRequest).read()
self.ligand_images.append(imgData)
fh=open(str(i[1])+".gif", "w")
fh.write(imgData)
fh.close()
else:
print "Ligand chembl id not found"
def get_target_chembl_id(self):
#print "6 Aquiring target chembl id...."
if self.uniprot:
url="http://www.ebi.ac.uk/chemblws/targets/uniprot/"+str(self.uniprot[0])
#print url
try:
response_assay_xml=urllib2.urlopen(url).read()
root=ET.fromstring(response_assay_xml)
for i in root:
#print i.tag
if i.tag =="preferredName":
self.common_name=str(i.text)
if i.tag =="organism":
self.organism=str(i.text)
if i.tag=="chemblId":
self.target_chemblID=i.text
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for target chembl id!"
elif err.code == 403:
print "Access denied for target chembl id!"
else:
print "Something happened in target chembl id! Error code", err.code
else:
print "Error in uniprot id!"
self.text2.insert(Tkinter.END, "Could not retrieve assay information because uniprot id missing\n\n")
if self.target_chemblID:
#print self.target_chemblID
self.get_assay_info()
if self.ec50_comps:
#print "EC50 data available"
self.text2.insert(Tkinter.END, "Compounds with EC50 values <=100 nM:"+"\n\n")
self.text2.insert(Tkinter.END, ' '.join(map(str, self.ec50_comps))+"\n\n")
else:
self.text2.insert(Tkinter.END, "EC50 data not available"+"\n\n")
if self.ic50_comps:
#print "IC50 data available"
self.text2.insert(Tkinter.END, "Compounds with IC50 values <=100 nM:"+"\n\n")
self.text2.insert(Tkinter.END, ' '.join(map(str, self.ic50_comps))+"\n\n")
else:
self.text2.insert(Tkinter.END, "IC50 data not avaialble"+"\n\n")
if self.ki_comps:
#print "KI data available"
self.text2.insert(Tkinter.END, "Compounds with Ki values <=100 nM:"+"\n\n")
self.text2.insert(Tkinter.END, ' '.join(map(str, self.ki_comps))+"\n\n")
else:
self.text2.insert(Tkinter.END, "Ki data not available"+"\n\n")
else:
self.text2.insert(Tkinter.END, "Assay data not available"+"\n\n")
def get_approved_drugs(self):
#print "7 Aquiring approved drugs...."
try:
url="http://www.ebi.ac.uk/chemblws/targets/"+self.target_chemblID+"/approvedDrug"
response_approved_xml=urllib2.urlopen(url).read()
root=ET.fromstring(response_approved_xml)
for i in root:
molecule =list()
if len(i)==0:
break
else:
for j in i:
molecule.append([j.tag, j.text])
self.approved_drugs.append(molecule)
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for approved drugs!"
elif err.code == 403:
print "Access denied for approved drugs!"
else:
print "Something happened in aquiring approved_drugs! Error code", err.code
def show_lig_info(self):
#print "8 Showing approved drug information...."
os.chdir(self.ligdir)
self.agonist=list()
self.antagonist=list()
if not self.approved_drugs:
self.text2.insert(Tkinter.END, "No Approved Drugs found for this receptor"+"\n\n")
else:
self.text2.insert(Tkinter.END, "Approved Drugs: \n\n")
for i in self.approved_drugs:
#print i[2][1].split()[-1]
if i[2][1].split()[-1] == "agonist":
self.agonist.append([i])
if i[2][1].split()[-1] =="antagonist":
self.antagonist.append([i])
#self.text2.insert(Tkinter.END, ''.join(map(str, self.approved_drugs))+"\n\n")
if self.agonist:
self.text2.insert(Tkinter.END, "Agonists: \n\n")
for i in self.agonist:
for j in i:
for k in j:
self.text2.insert(Tkinter.END, ' '.join(map(str, k))+"\n")
self.text2.insert(Tkinter.END, "\n\n")
if self.antagonist:
self.text2.insert(Tkinter.END, "Antagonists: \n\n")
for i in self.antagonist:
for j in i:
for k in j:
self.text2.insert(Tkinter.END, ' '.join(map(str, k))+"\n")
self.text2.insert(Tkinter.END, "\n\n")
if self.agonist:
fh=open("Approved_agonist.txt", "w")
for i in self.agonist:
for j in i:
fh.write(str(j[0][1]+"\n"))
fh.close()
if (self.antagonist):
fh=open("Approved_antagonist.txt", "w")
for i in self.antagonist:
for j in i:
fh.write(str(j[0][1]+"\n"))
fh.close()
def get_saps(self):
#print "9 Aquiring saps...."
for line in urllib2.urlopen('http://www.uniprot.org/docs/humsavar.txt'):
if len(line.split())>1:
if str(self.uniprot[0]) == str(line.split()[1]):
gene_name=line.split()[0]
mutation=line.split()[3][2:]
origres=mutation[:3]
num=mutation[3:-3]
changedres=mutation[-3:]
disease=line.split()[6:]
self.saps.append([origres, num, changedres, disease])
#print gene_name, mutation, origres, num, changedres
if self.saps:
self.show_saps()
self.text1.insert(Tkinter.END, "Single Amino Acid Polymoprphism:\n\n"+ '\n'.join(map(str, self.saps))+"\n\n")
else:
print "SAPs not found"
self.text1.insert(Tkinter.END, "Single Amino Acid Polymoprphism not found"+"\n\n")
def show_saps(self):
#print "10 Showing SAPS...."
sap_residues=list()
sap_res_str=''
for i in self.saps:
if i[1] not in sap_residues:
sap_residues.append(i[1])
for i in sap_residues:
sap_res_str="resi " + str(i)
#print sap_res_str
pymol.cmd.select("SAPs", sap_res_str)
pymol.cmd.show("spheres", sap_res_str)
pymol.cmd.deselect()
def get_bs(self):
#print "11 Aquiring binding site information...."
lig_bs=list()
ppi_bs=list()
dna_bs=list()
rna_bs=list()
ion_bs=list()
pep_bs=list()
try:
for line in urllib2.urlopen("https://dl.dropboxusercontent.com/u/61033253/ibisdown/"+self.pdb_id[1:-1]+"/"+self.pdb_id+".txt"):
spline=line.split(":") #Query:Interaction_type:Mmdb_Residue_No:PDB_Residue_No:Binding_Site_Residues:Binding_Site_Conservation:Avg_PercentID:Singleton:PISA_validation:Biol_Chemical_validation:Site_CDD_Annotation:Interaction_Partner:PDB_Evidence:Is_Observed:Ranking_Score:Query_Domain
if spline[1]=="LIG" and spline[0][-1:]==self.pdb_chain_id:
lig_bs.append([spline[1], spline[0], spline[3], spline[11], spline[12]])
if spline[1]=="PPI" and spline[0][-1:]==self.pdb_chain_id:
ppi_bs.append([spline[1], spline[0], spline[3], spline[11], spline[12]])
if spline[1]=="DNA" and spline[0][-1:]==self.pdb_chain_id:
dna_bs.append([spline[1], spline[0], spline[3], spline[11], spline[12]])
if spline[1]=="RNA" and spline[0][-1:]==self.pdb_chain_id:
rna_bs.append([spline[1], spline[0], spline[3], spline[11], spline[12]])
if spline[1]=="ION" and spline[0][-1:]==self.pdb_chain_id:
ion_bs.append([spline[1], spline[0], spline[3], spline[11], spline[12]])
if spline[1]=="PEP" and spline[0][-1:]==self.pdb_chain_id:
pep_bs.append([spline[1], spline[0], spline[3], spline[11], spline[12]])
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for binding sites!"
elif err.code == 403:
print "Access denied for binding sites!"
else:
print "Something else happened in getting binding site information! Error code", err.code
self.binding_sites=[lig_bs, ppi_bs, dna_bs, rna_bs, ion_bs, pep_bs]
def show_bs(self):
#print "12 Showing binding sites...."
counter=0
for i in self.binding_sites[0]:
counter+=1
self.lig_bs_residues="resi "+ ','.join(map(str, i[2].lstrip().split(' ')))
pymol.cmd.select("lig_bs"+str(counter), self.lig_bs_residues)
pymol.cmd.deselect()
pymol.cmd.group("Ligand_Binding_Sites", "lig_bs*")
counter=0
for i in self.binding_sites[1]:
counter+=1
self.ppi_bs_residues="resi "+ ','.join(map(str, i[2].lstrip().split(' ')))
pymol.cmd.select("ppi_bs"+str(counter), self.ppi_bs_residues) #pymol.cmd.create() would create objects instead of selection for coloring
pymol.cmd.deselect()
pymol.cmd.group("PPI_Sites", "ppi_bs*")
counter=0
for i in self.binding_sites[2]:
counter+=1
self.dna_bs_residues="resi "+ ','.join(map(str, i[2].lstrip().split(' ')))
pymol.cmd.select("dna_bs"+str(counter), self.dna_bs_residues) #pymol.cmd.create() would create objects instead of selection for coloring
pymol.cmd.deselect()
pymol.cmd.group("DNA_Binding_Sites", "dna_bs*")
counter=0
for i in self.binding_sites[3]:
counter+=1
self.rna_bs_residues="resi "+ ','.join(map(str, i[2].lstrip().split(' ')))
pymol.cmd.select("rna_bs"+str(counter), self.rna_bs_residues) #pymol.cmd.create() would create objects instead of selection for coloring
pymol.cmd.deselect()
pymol.cmd.group("RNA_Binding_Sites", "rna_bs*")
counter=0
for i in self.binding_sites[4]:
counter+=1
#print counter
self.ion_bs_residues="resi "+ ','.join(map(str, i[2].lstrip().split(' ')))
pymol.cmd.select("ion_bs"+str(counter), self.ion_bs_residues) #pymol.cmd.create() would create objects instead of selection for coloring
pymol.cmd.deselect()
pymol.cmd.group("ION_Bindins_Sites", "ion_bs*")
counter=0
for i in self.binding_sites[5]:
counter+=1
self.pep_bs_residues="resi "+ ','.join(map(str, i[2].lstrip().split(' ')))
pymol.cmd.select("pep_bs"+str(counter), self.pep_bs_residues) #pymol.cmd.create() would create objects instead of selection for coloring
pymol.cmd.deselect()
pymol.cmd.group("PEP_Binding_Sites", "pep_bs*")
if len(self.binding_sites[0])==0 and len(self.binding_sites[1])==0 and len(self.binding_sites[2])==0 and len(self.binding_sites[3])==0 and len(self.binding_sites[4])==0 and len(self.binding_sites[5])==0:
self.text1.insert(Tkinter.END, "Binding site data not found\n")
else:
self.text1.insert(Tkinter.END, "Binding Sites/Similar Binding Sites: \n\n")
for i in self.binding_sites:
for j in i:
self.text1.insert(Tkinter.END, '\t'.join(map(str, j))+"\n\n")
def get_assay_info(self):
#print "13 Aquiring assay information...."
self.ligdir=os.path.join(self.outdir, "Ligands")
if not os.path.exists(self.ligdir):
os.mkdir(self.ligdir)
os.chdir(self.ligdir)
#os.chdir(self.ligdir)
url="http://www.ebi.ac.uk/chemblws/targets/"+self.target_chemblID+"/bioactivities"
try:
response_xml_chemblids=urllib2.urlopen(url).read()
root=ET.fromstring(response_xml_chemblids)
for i in root:
if i[6].text=="EC50" and i[13].text=="=" and i[12].text!="Unspecified" and float(i[12].text)<=10 and i[9].text=="nM":
self.ec50_comps.append(i[4].text)
elif i[6].text=="IC50" and i[13].text=="=" and i[12].text!="Unspecified" and float(i[12].text)<=10 and i[9].text=="nM":
self.ic50_comps.append(i[4].text)
elif i[6].text=="Ki" and i[13].text=="=" and i[12].text!="Unspecified" and float(i[12].text)<=10 and i[9].text=="nM":
self.ki_comps.append(i[4].text)
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for assay data!"
elif err.code == 403:
print "Access denied for assay data!"
else:
print "Something else happened in get_assay_info! Error code", err.code
if self.ec50_comps:
#print self.ec50_comps
#print "EC50 data available"
ec50_fh=open("EC50.txt", "w")
for i in self.ec50_comps:
ec50_fh.write(str(i)+"\n")
ec50_fh.close()
ec50_smi=self.get_smiles(self.ec50_comps)
if ec50_smi:
ec50smi_fh=open("EC50.smi", "w")
ec50smi_fh.write('\n'.join(map(str, ec50_smi)))
ec50smi_fh.close()
else:
print "EC50 data not available"
if self.ic50_comps:
#print "IC50 data available"
ic50_fh=open("IC50.txt", "w")
for i in self.ic50_comps:
ic50_fh.write(str(i)+"\n")
ic50_fh.close()
ic50_smi=self.get_smiles(self.ic50_comps)
if ic50_smi:
ic50smi_fh=open("IC50.smi", "w")
ic50smi_fh.write('\n'.join(map(str, ic50_smi)))
ic50smi_fh.close()
else:
print "IC50 data not available"
if self.ki_comps:
#print "Ki data available"
ki_fh=open("KI.txt", "w")
for i in self.ki_comps:
ki_fh.write(str(i)+"\n")
ki_fh.close()
ki_smi=self.get_smiles(self.ki_comps)
if ki_smi:
ki_smi_fh=open("KI.smi", "w")
ki_smi_fh.write('\n'.join(map(str, ki_smi)))
ki_smi_fh.close()
else:
print "Ki data not available"
def get_kegg_info(self):
#print "15 Aquiring pathway information...."
#print os.getcwd()
url = 'http://rest.genome.jp/link/genes/uniprot:'+self.uniprot[0]+'/original'
#print "Aquiring genes...."
self.kegg_genes=list()
try:
response = urllib2.urlopen(url)
for line in response:
self.kegg_genes.append(line.split()[1])
"""
for i in self.kegg_genes:
self.text5.insert(Tkinter.INSERT, str(i)+ "\n\n")
"""
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for genes!"
elif err.code == 403:
print "Access denied for genes!"
else:
print "Something happened in Kegg genes! Error code", err.code
#### genes to pathway ids
if self.kegg_genes:
for i in self.kegg_genes:
url = 'http://rest.genome.jp/link/path/'+i+'/original'
#print "Aquiring kegg pathaway id...."
self.kegg_pathway_ids=list()
try:
response = urllib2.urlopen(url)
for line in response:
self.kegg_pathway_ids.append(line.split()[1])
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for pathway ids!"
elif err.code == 403:
print "Access denied!"
else:
print "Something happened in Kegg pathway ids! Error code", err.code
### get pathway information
if self.kegg_pathway_ids:
for i in self.kegg_pathway_ids:
url= 'http://rest.kegg.jp/get/'+i
#print "Aquiring Kegg Pathways...."
try:
response = urllib2.urlopen(url)
for line in response:
if line.startswith('CLASS'):
break
self.text5.insert(Tkinter.INSERT, line +"\n")
# For the pathway information hyperlink
self.text5.insert(Tkinter.INSERT, url+"\n\n", ('link'))
self.text5.tag_config('link', foreground="blue", underline=1)
self.text5.tag_bind('link', '<Button-1>', lambda event, arg=url: self.showLink(event, arg))
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for kegg pathways!"
elif err.code == 403:
print "Access denied for kegg pathways!"
else:
print "Something happened in Kegg pathways! Error code", err.code
### Get pathway images
url= 'http://rest.kegg.jp/get/'+i+'/image'
#print "Aquiring pathaway images...."
try:
imgRequest = urllib2.Request(url)
imgData=urllib2.urlopen(imgRequest).read()
self.pathwaydir=os.path.join(self.outdir, "Pathways")
if os.path.exists(self.pathwaydir):
os.chdir(self.pathwaydir)
else:
os.mkdir(self.pathwaydir)
os.chdir(self.pathwaydir)
filename=i.split(':')[1]
fh=open(filename+".gif", "w")
fh.write(imgData)
fh.close()
path_image=self.pathwaydir+"/"+filename+".gif"
#print path_image
ButtonImage=Tkinter.PhotoImage(file=path_image)
#print ButtonImage
path_button=Tkinter.Button(self.text5, text="Pathway Image", command=lambda j=path_image: self.show_pathway(j))
#path_button.img=ButtonImage
self.text5.window_create(Tkinter.INSERT, window=path_button)
self.text5.insert(Tkinter.INSERT, "\n----------------------------X--------------------------\n\n\n")
except urllib2.HTTPError, err:
if err.code == 404:
print "Page not found for pathway images!"
elif err.code == 403:
print "Access denied for pathway images!"
else:
print "Something happened in Kegg pathway images! Error code", err.code
else:
print "Kegg pathway ids not found"
self.text5.insert(Tkinter.INSERT, "Kegg data not found.\n\n")
else:
print "Kegg gene not available"
self.text5.insert(Tkinter.INSERT, "Kegg data not found.\n\n")
#print os.getcwd()
def get_results(self):
self.text1.lift()
self.text1.config(state=Tkinter.NORMAL)
self.text1.delete(1.0, Tkinter.END)
self.text2.config(state=Tkinter.NORMAL)
self.text2.delete(1.0, Tkinter.END)
self.text3.config(state=Tkinter.NORMAL)
self.text3.delete(1.0, Tkinter.END)
self.text4.config(state=Tkinter.NORMAL)
self.text4.delete(1.0, Tkinter.END)
self.text5.config(state=Tkinter.NORMAL)
self.text5.delete(1.0, Tkinter.END)
self.text6.config(state=Tkinter.NORMAL)
self.text6.delete(1.0, Tkinter.END)
if self.userpdbfile!=None and self.entryVariable5.get()!=None:
self.flag=1
self.get_user_info()
self.show_pdb()
self.get_pdb_file()
self.get_uniprot_file()
self.get_target_chembl_id()
self.get_approved_drugs()
self.show_lig_info()
self.get_saps()
#self.get_bs()
#self.show_bs()
self.get_kegg_info()
else:
self.get_info()
self.show_pdb()
self.get_pdb_file()
self.get_uniprot_file()
self.get_ligands()
self.get_ligand_images()
self.get_target_chembl_id()
self.get_approved_drugs()
self.show_lig_info()
self.get_saps()
self.get_bs()
self.show_bs()
self.get_kegg_info()
self.text1.config(state=Tkinter.DISABLED)
self.text2.config(state=Tkinter.DISABLED)
self.text3.config(state=Tkinter.DISABLED)
self.text4.config(state=Tkinter.DISABLED)
self.text5.config(state=Tkinter.DISABLED)
self.text6.config(state=Tkinter.DISABLED)
def clear(self):
#Clear All the variables so that if we change the pdbid it will recreate the screen.
self.text1.config(state=Tkinter.NORMAL)
self.text1.delete(1.0, Tkinter.END)
self.text2.config(state=Tkinter.NORMAL)
self.text2.delete(1.0, Tkinter.END)
self.text3.config(state=Tkinter.NORMAL)
self.text3.delete(1.0, Tkinter.END)
self.text4.config(state=Tkinter.NORMAL)
self.text4.delete(1.0, Tkinter.END)
self.text5.config(state=Tkinter.NORMAL)
self.text5.delete(1.0, Tkinter.END)
self.text6.config(state=Tkinter.NORMAL)
self.text6.delete(1.0, Tkinter.END)
self.flag=0
self.pdb_id=''
self.pdb_chain_id=''
self.entryVariable1.set('')
self.entryVariable2.set('')
self.entryVariable3.set('')
self.entryVariable5.set(None)
self.userpdbfile=None
self.userpdbfile_path=''
self.userpdb_filename=''
self.userpdb_filename_noext=''
self.label4.config(text='')
cwd=os.path.expanduser("~/Desktop/")
self.pdb_file=''
self.smiles=''
self.name=list()
self.summary=list()
self.symbol=list()
self.uniprot=list()
self.binding_sites=list()
self.ppi_bs_residues=''
self.lig_bs_residues=''
self.dna_bs_residues=''
self.rna_bs_residues=''
self.ion_bs_residues=''
self.pep_bs_residues=''
self.pathways=list()
self.saps=list()
self.ligands=list()
self.ligand_chemblid=list()
self.ligand_images=list()
self.agonist=list()
self.antagonist=list()
self.ki_comps=list()
self.ec50_comps=list()
self.ic50_comps=list()
self.outdir=None
pdbfile=None
pymol.cmd.delete('all')
pymol.cmd.reinitialize()
self.text1.lift()
def main():
app = PyMine(None)
app.title('PyMine Data Integration')
app.mainloop()
if __name__ == "__main__":
main()
|
rrchaudhari/PyMine
|
pymine.py
|
Python
|
mit
| 46,677
|
[
"PyMOL"
] |
a895ab1e73e3dd63b011a4222a188605dcf0b78d471951073964845dcd66f265
|
from ase.ga.startgenerator import StartGenerator
from ase.ga.utilities import closest_distances_generator
from ase.ga.standardmutations import RattleMutation, PermutationMutation
import numpy as np
from ase.lattice.surface import fcc111
from ase.constraints import FixAtoms
# first create two random starting candidates
slab = fcc111('Au', size=(4, 4, 2), vacuum=10.0, orthogonal=True)
slab.set_constraint(FixAtoms(mask=slab.positions[:, 2] <= 10.))
pos = slab.get_positions()
cell = slab.get_cell()
p0 = np.array([0., 0., max(pos[:, 2]) + 2.])
v1 = cell[0, :] * 0.8
v2 = cell[1, :] * 0.8
v3 = cell[2, :]
v3[2] = 3.
cd = closest_distances_generator(atom_numbers=[47, 79],
ratio_of_covalent_radii=0.7)
atom_numbers = 2 * [47] + 2 * [79]
n_top = len(atom_numbers)
sg = StartGenerator(slab=slab,
atom_numbers=atom_numbers,
closest_allowed_distances=cd,
box_to_place_in=[p0, [v1, v2, v3]])
c1 = sg.get_new_candidate()
c1.info['confid'] = 1
# first verify that the rattle mutation works
rmut = RattleMutation(cd, n_top, rattle_strength=0.8, rattle_prop=0.4)
c2, desc = rmut.get_new_individual([c1])
assert np.all(c1.numbers == c2.numbers)
top1 = c1[-n_top:]
top2 = c2[-n_top:]
slab2 = c2[0:(len(c1) - n_top)]
assert len(slab) == len(slab2)
assert np.all(slab.get_positions() == slab2.get_positions())
dp = np.sum((top2.get_positions() - top1.get_positions())**2, axis=1)**0.5
# check that all displacements are smaller than the rattle strength; we
# cannot check whether 40 % of the atoms have actually been rattled, since
# the mutation is probabilistic and the effective probability is lower if
# two atoms would get too close
for p in dp:
assert p < 0.8 * 3**0.5
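# (The sqrt(3) bound presumably comes from each Cartesian component being
# displaced by at most rattle_strength, so the total displacement is at most
# rattle_strength * sqrt(3).)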
# now we check the permutation mutation
mmut = PermutationMutation(n_top, probability=0.5)
c3, desc = mmut.get_new_individual([c1])
assert np.all(c1.numbers == c3.numbers)
top1 = c1[-n_top:]
top2 = c3[-n_top:]
slab2 = c3[0:(len(c1) - n_top)]
assert len(slab) == len(slab2)
assert np.all(slab.get_positions() == slab2.get_positions())
dp = np.sum((top2.get_positions() - top1.get_positions())**2, axis=1)**0.5
# verify that two positions have been changed
assert len(dp[dp > 0]) == 2
|
askhl/ase
|
ase/test/ga/mutations.py
|
Python
|
gpl-2.0
| 2,235
|
[
"ASE"
] |
0c758b708a382c49f6b380e6f8051d2b2e5c64ffd3227aa9752937819ed5a311
|
#!/usr/bin/env python
""" toggle between two images by pressing "t"
The basic idea is to load two images (they can be different shapes) and plot
them to the same axes with hold "on". Then, toggle the visible property of
them using keypress event handling
If you want two images with different shapes to be plotted with the same
extent, they must have the same "extent" property
As usual, we'll define some random images for demo. Real data is much more
exciting!
Note: on the wx backend on some platforms (e.g. Linux), you have to
first click on the figure before the keypress events are activated.
If you know how to fix this, please email us!
"""
from pylab import *
# two images x1 is initially visible, x2 is not
x1 = rand(100, 100)
x2 = rand(150, 175)
# arbitrary extent - both images must have same extent if you want
# them to be resampled into the same axes space
extent = (0,1,0,1)
im1 = imshow(x1, extent=extent)
im2 = imshow(x2, extent=extent, hold=True)
im2.set_visible(False)
def toggle_images(event):
'toggle the visible state of the two images'
if event.key != 't': return
b1 = im1.get_visible()
b2 = im2.get_visible()
im1.set_visible(not b1)
im2.set_visible(not b2)
draw()
connect('key_press_event', toggle_images)
show()
|
lthurlow/Network-Grapher
|
proj/external/matplotlib-1.2.1/lib/mpl_examples/pylab_examples/toggle_images.py
|
Python
|
mit
| 1,277
|
[
"exciting"
] |
b8c20b4b89dabf322952a0e8c6ec30efa5aa78c4fb5c1ce68c01a98ea9a5f063
|
# Filtering BLAST table file (arg1) by coverage & identity (arg2 & arg3)
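# Example invocation (hypothetical file name; the table is assumed to use the
# custom column layout indexed below: query length in column 3, alignment
# start/end in columns 5-6, percent identity in column 9):
#   python filterBlastByCl.py hits.blast 80 90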
import sys
inFile = open(sys.argv[1])
thCov = float(sys.argv[2])
thId = float(sys.argv[3])
for line in inFile:
tabs = line.rstrip('\n').split('\t')
cov = 100*(float(tabs[5])-float(tabs[4])) / float(tabs[2])
hid = float(tabs[8])
if hid > thId and cov > thCov:
print '\t'.join(tabs[0:8])+'\t'+str(cov)+'\t'+'\t'.join(tabs[8:])
# print '\t'.join(tabs)
|
HaseloffLab/MarpoDB
|
scripts/filterBlastByCl.py
|
Python
|
mit
| 434
|
[
"BLAST"
] |
43e387ea0a1097e8bc5559f855904166bdc71a40ad5c7944a29b0dee00501596
|
from paraview.simple import *
from paraview import coprocessing
#--------------------------------------------------------------
# Code generated from cpstate.py to create the CoProcessor.
# ParaView 4.2.0-15-g46ac001 64 bits
# ----------------------- CoProcessor definition -----------------------
def CreateCoProcessor():
def _CreatePipeline(coprocessor, datadescription):
class Pipeline:
# state file generated using paraview version 4.2.0-15-g46ac001
# ----------------------------------------------------------------
# setup the data processing pipelines
# ----------------------------------------------------------------
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
# writer for 2D rectilinear grid
h02D = coprocessor.CreateProducer(datadescription, 'input')
h02DWriter = servermanager.writers.XMLPUnstructuredGridWriter(Input=h02D)
h02DWriter.FileName = 'h02D_%t.pvtu'
coprocessor.RegisterWriter(h02DWriter, filename=h02DWriter.FileName, freq=48)
# writer for 3D rectilinear grid
h03D = coprocessor.CreateProducer(datadescription, 'input3D')
h03DWriter = servermanager.writers.XMLPUnstructuredGridWriter(Input=h03D)
h03DWriter.FileName = 'h03D_%t.pvtu'
coprocessor.RegisterWriter(h03DWriter, filename=h03DWriter.FileName, freq=48)
return Pipeline()
class CoProcessor(coprocessing.CoProcessor):
def CreatePipeline(self, datadescription):
self.Pipeline = _CreatePipeline(self, datadescription)
coprocessor = CoProcessor()
# these are the frequencies at which the coprocessor updates.
freqs = {'input': [24],
'input3D': [24]}
coprocessor.SetUpdateFrequencies(freqs)
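# Assumed reading of these settings: the adaptor is asked for data every 24th
# timestep, while the writers registered above only write every 48th timestep.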
return coprocessor
#--------------------------------------------------------------
# Global variables that will hold the pipeline for each timestep
# Creating the CoProcessor object doesn't actually create the ParaView pipeline.
# It will be set up automatically the first time coprocessor.UpdateProducers()
# is called.
coprocessor = CreateCoProcessor()
#--------------------------------------------------------------
# Enable live visualization with ParaView
coprocessor.EnableLiveVisualization(True, 1)
# ---------------------- Data Selection method ----------------------
def RequestDataDescription(datadescription):
"Callback to populate the request for current timestep"
global coprocessor
if datadescription.GetForceOutput() == True:
# We are just going to request all fields and meshes from the simulation
# code/adaptor.
for i in range(datadescription.GetNumberOfInputDescriptions()):
print 'input dname ', datadescription.GetInputDescriptionName(i)
datadescription.GetInputDescription(i).AllFieldsOn()
datadescription.GetInputDescription(i).GenerateMeshOn()
return
# setup requests for all inputs based on the requirements of the
# pipeline.
coprocessor.LoadRequestedData(datadescription)
# ------------------------ Processing method ------------------------
def DoCoProcessing(datadescription):
"Callback to do co-processing for current timestep"
global coprocessor
# Update the coprocessor by providing it the newly generated simulation data.
# If the pipeline hasn't been setup yet, this will setup the pipeline.
coprocessor.UpdateProducers(datadescription)
# Write output data, if appropriate.
coprocessor.WriteData(datadescription);
# Write image capture (Last arg: rescale lookup table), if appropriate.
coprocessor.WriteImages(datadescription, rescale_lookuptable=False)
# Live Visualization, if enabled.
coprocessor.DoLiveVisualization(datadescription, "localhost", 22222)
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/CoProcessing/Adaptors/CamAdaptor/se_coprocess.py
|
Python
|
gpl-3.0
| 3,813
|
[
"ParaView"
] |
99b3875f0bc019818a66cc8f8a6ee3751638cfb79de5b5cd27957e88668e68c3
|
"""
Flask-Tus
-------------
Implements the tus.io server-side file-upload protocol
visit http://tus.io for more information
"""
from setuptools import setup
setup(
name='Flask-Tus',
version='0.6.1',
url='http://github.com/matthoskins1980/Flask-Tus/',
license='MIT',
author='Matt Hoskins',
author_email='[email protected]',
description='TUS protocol implementation',
long_description=__doc__,
py_modules=['flask_tus'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask',
'Redis'
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
matthoskins1980/Flask-Tus
|
setup.py
|
Python
|
mit
| 920
|
[
"VisIt"
] |
5d2fd86aff6101def5097f9cb9e01c0c5ebbab83adae5dcd3b95d510440a0d32
|
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is intended for use as a GYP_GENERATOR. It takes as input (by way of
the generator flag config_path) the path of a json file that dictates the files
and targets to search for. The following keys are supported:
files: list of paths (relative) of the files to search for.
test_targets: unqualified target names to search for. Any target in this list
that depends upon a file in |files| is output regardless of the type of target
or chain of dependencies.
additional_compile_targets: Unqualified targets to search for in addition to
test_targets. Targets in the combined list that depend upon a file in |files|
are not necessarily output. For example, if the target is of type none then the
target is not output (but one of the descendants of the target will be).
The following is output:
error: only supplied if there is an error.
compile_targets: minimal set of targets that directly or indirectly (for
targets of type none) depend on the files in |files| and is one of the
supplied targets or a target that one of the supplied targets depends on.
The expectation is this set of targets is passed into a build step. This list
always contains the output of test_targets as well.
test_targets: set of targets from the supplied |test_targets| that either
directly or indirectly depend upon a file in |files|. This list is useful
if additional processing needs to be done for certain targets after the
build, such as running tests.
status: outputs one of three values: none of the supplied files were found,
one of the include files changed so that it should be assumed everything
changed (in this case test_targets and compile_targets are not output) or at
least one file was found.
invalid_targets: list of supplied targets that were not found.
Example:
Consider a graph like the following:
   A       D
  / \
 B   C
A depends upon both B and C, A is of type none and B and C are executables.
D is an executable, has no dependencies and nothing depends on it.
If |additional_compile_targets| = ["A"], |test_targets| = ["B", "C"] and
files = ["b.cc", "d.cc"] (B depends upon b.cc and D depends upon d.cc), then
the following is output:
|compile_targets| = ["B"] B must built as it depends upon the changed file b.cc
and the supplied target A depends upon it. A is not output as a build_target
as it is of type none with no rules and actions.
|test_targets| = ["B"] B directly depends upon the change file b.cc.
Even though the file d.cc, which D depends upon, has changed D is not output
as it was not supplied by way of |additional_compile_targets| or |test_targets|.
If the generator flag analyzer_output_path is specified, output is written
there. Otherwise output is written to stdout.
In Gyp the "all" target is shorthand for the root targets in the files passed
to gyp. For example, if file "a.gyp" contains targets "a1" and
"a2", and file "b.gyp" contains targets "b1" and "b2" and "a2" has a dependency
on "b2" and gyp is supplied "a.gyp" then "all" consists of "a1" and "a2".
Notice that "b1" and "b2" are not in the "all" target as "b.gyp" was not
directly supplied to gyp. OTOH if both "a.gyp" and "b.gyp" are supplied to gyp
then the "all" target includes "b1" and "b2".
"""
import gyp.common
import gyp.ninja_syntax as ninja_syntax
import json
import os
import posixpath
import sys
debug = False
found_dependency_string = 'Found dependency'
no_dependency_string = 'No dependencies'
# Status when it should be assumed that everything has changed.
all_changed_string = 'Found dependency (all)'
# MatchStatus is used to indicate whether and how a target depends upon the supplied
# sources.
# The target's sources contain one of the supplied paths.
MATCH_STATUS_MATCHES = 1
# The target has a dependency on another target that contains one of the
# supplied paths.
MATCH_STATUS_MATCHES_BY_DEPENDENCY = 2
# The target's sources weren't in the supplied paths and none of the target's
# dependencies depend upon a target that matched.
MATCH_STATUS_DOESNT_MATCH = 3
# The target doesn't contain the source, but the dependent targets have not yet
# been visited to determine a more specific status yet.
MATCH_STATUS_TBD = 4
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
generator_default_variables[dirname] = '!!!'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def _ToGypPath(path):
"""Converts a path to the format used by gyp."""
if os.sep == '\\' and os.altsep == '/':
return path.replace('\\', '/')
return path
def _ResolveParent(path, base_path_components):
"""Resolves |path|, which starts with at least one '../'. Returns an empty
string if the path shouldn't be considered. See _AddSources() for a
description of |base_path_components|."""
depth = 0
while path.startswith('../'):
depth += 1
path = path[3:]
# Relative includes may go outside the source tree. For example, an action may
# have inputs in /usr/include, which are not in the source tree.
if depth > len(base_path_components):
return ''
if depth == len(base_path_components):
return path
return '/'.join(base_path_components[0:len(base_path_components) - depth]) + \
'/' + path
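# For example (hypothetical paths): _ResolveParent('../icons/a.png',
# ['chrome', 'browser']) yields 'chrome/icons/a.png', while a path that climbs
# above the source tree resolves to '' and is skipped by _AddSources().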
def _AddSources(sources, base_path, base_path_components, result):
"""Extracts valid sources from |sources| and adds them to |result|. Each
source file is relative to |base_path|, but may contain '..'. To make
resolving '..' easier |base_path_components| contains each of the
directories in |base_path|. Additionally each source may contain variables.
Such sources are ignored as it is assumed dependencies on them are expressed
and tracked in some other means."""
# NOTE: gyp paths are always posix style.
for source in sources:
if not len(source) or source.startswith('!!!') or source.startswith('$'):
continue
# variable expansion may lead to //.
org_source = source
source = source[0] + source[1:].replace('//', '/')
if source.startswith('../'):
source = _ResolveParent(source, base_path_components)
if len(source):
result.append(source)
continue
result.append(base_path + source)
if debug:
print 'AddSource', org_source, result[len(result) - 1]
def _ExtractSourcesFromAction(action, base_path, base_path_components,
results):
if 'inputs' in action:
_AddSources(action['inputs'], base_path, base_path_components, results)
def _ToLocalPath(toplevel_dir, path):
"""Converts |path| to a path relative to |toplevel_dir|."""
if path == toplevel_dir:
return ''
if path.startswith(toplevel_dir + '/'):
return path[len(toplevel_dir) + len('/'):]
return path
def _ExtractSources(target, target_dict, toplevel_dir):
# |target| is either absolute or relative and in the format of the OS. Gyp
# source paths are always posix. Convert |target| to a posix path relative to
# |toplevel_dir_|. This is done to make it easy to build source paths.
base_path = posixpath.dirname(_ToLocalPath(toplevel_dir, _ToGypPath(target)))
base_path_components = base_path.split('/')
# Add a trailing '/' so that _AddSources() can easily build paths.
if len(base_path):
base_path += '/'
if debug:
print 'ExtractSources', target, base_path
results = []
if 'sources' in target_dict:
_AddSources(target_dict['sources'], base_path, base_path_components,
results)
# Include the inputs from any actions. Any changes to these affect the
# resulting output.
if 'actions' in target_dict:
for action in target_dict['actions']:
_ExtractSourcesFromAction(action, base_path, base_path_components,
results)
if 'rules' in target_dict:
for rule in target_dict['rules']:
_ExtractSourcesFromAction(rule, base_path, base_path_components, results)
return results
class Target(object):
"""Holds information about a particular target:
deps: set of Targets this Target depends upon. This is not recursive, only the
direct dependent Targets.
match_status: one of the MatchStatus values.
back_deps: set of Targets that have a dependency on this Target.
visited: used during iteration to indicate whether we've visited this target.
This is used for two iterations, once in building the set of Targets and
again in _GetBuildTargets().
name: fully qualified name of the target.
requires_build: True if the target type is such that it needs to be built.
See _DoesTargetTypeRequireBuild for details.
added_to_compile_targets: used when determining if the target was added to the
set of targets that needs to be built.
in_roots: true if this target is a descendant of one of the root nodes.
is_executable: true if the type of target is executable.
is_static_library: true if the type of target is static_library.
is_or_has_linked_ancestor: true if the target does a link (eg executable), or
if there is a target in back_deps that does a link."""
def __init__(self, name):
self.deps = set()
self.match_status = MATCH_STATUS_TBD
self.back_deps = set()
self.name = name
# TODO(sky): I don't like hanging this off Target. This state is specific
# to certain functions and should be isolated there.
self.visited = False
self.requires_build = False
self.added_to_compile_targets = False
self.in_roots = False
self.is_executable = False
self.is_static_library = False
self.is_or_has_linked_ancestor = False
class Config(object):
"""Details what we're looking for
files: set of files to search for
targets: see file description for details."""
def __init__(self):
self.files = []
self.targets = set()
self.additional_compile_target_names = set()
self.test_target_names = set()
def Init(self, params):
"""Initializes Config. This is a separate method as it raises an exception
if there is a parse error."""
generator_flags = params.get('generator_flags', {})
config_path = generator_flags.get('config_path', None)
if not config_path:
return
try:
f = open(config_path, 'r')
config = json.load(f)
f.close()
except IOError:
raise Exception('Unable to open file ' + config_path)
except ValueError as e:
raise Exception('Unable to parse config file ' + config_path + str(e))
if not isinstance(config, dict):
raise Exception('config_path must be a JSON file containing a dictionary')
self.files = config.get('files', [])
self.additional_compile_target_names = set(
config.get('additional_compile_targets', []))
self.test_target_names = set(config.get('test_targets', []))
def _WasBuildFileModified(build_file, data, files, toplevel_dir):
"""Returns true if the build file |build_file| is either in |files| or
one of the files included by |build_file| is in |files|. |toplevel_dir| is
the root of the source tree."""
if _ToLocalPath(toplevel_dir, _ToGypPath(build_file)) in files:
if debug:
print 'gyp file modified', build_file
return True
# First element of included_files is the file itself.
if len(data[build_file]['included_files']) <= 1:
return False
for include_file in data[build_file]['included_files'][1:]:
# |included_files| are relative to the directory of the |build_file|.
rel_include_file = \
_ToGypPath(gyp.common.UnrelativePath(include_file, build_file))
if _ToLocalPath(toplevel_dir, rel_include_file) in files:
if debug:
print 'included gyp file modified, gyp_file=', build_file, \
'included file=', rel_include_file
return True
return False
def _GetOrCreateTargetByName(targets, target_name):
"""Creates or returns the Target at targets[target_name]. If there is no
Target for |target_name| one is created. Returns a tuple of whether a new
Target was created and the Target."""
if target_name in targets:
return False, targets[target_name]
target = Target(target_name)
targets[target_name] = target
return True, target
def _DoesTargetTypeRequireBuild(target_dict):
"""Returns true if the target type is such that it needs to be built."""
# If a 'none' target has rules or actions we assume it requires a build.
return bool(target_dict['type'] != 'none' or
target_dict.get('actions') or target_dict.get('rules'))
def _GenerateTargets(data, target_list, target_dicts, toplevel_dir, files,
build_files):
"""Returns a tuple of the following:
. A dictionary mapping from fully qualified name to Target.
. A list of the targets that have a source file in |files|.
. Targets that constitute the 'all' target. See description at top of file
for details on the 'all' target.
This sets the |match_status| of the targets that contain any of the source
files in |files| to MATCH_STATUS_MATCHES.
|toplevel_dir| is the root of the source tree."""
# Maps from target name to Target.
name_to_target = {}
# Targets that matched.
matching_targets = []
# Queue of targets to visit.
targets_to_visit = target_list[:]
# Maps from build file to a boolean indicating whether the build file is in
# |files|.
build_file_in_files = {}
# Root targets across all files.
roots = set()
# Set of Targets in |build_files|.
build_file_targets = set()
while len(targets_to_visit) > 0:
target_name = targets_to_visit.pop()
created_target, target = _GetOrCreateTargetByName(name_to_target,
target_name)
if created_target:
roots.add(target)
elif target.visited:
continue
target.visited = True
target.requires_build = _DoesTargetTypeRequireBuild(
target_dicts[target_name])
target_type = target_dicts[target_name]['type']
target.is_executable = target_type == 'executable'
target.is_static_library = target_type == 'static_library'
target.is_or_has_linked_ancestor = (target_type == 'executable' or
target_type == 'shared_library')
build_file = gyp.common.ParseQualifiedTarget(target_name)[0]
if not build_file in build_file_in_files:
build_file_in_files[build_file] = \
_WasBuildFileModified(build_file, data, files, toplevel_dir)
if build_file in build_files:
build_file_targets.add(target)
# If a build file (or any of its included files) is modified we assume all
# targets in the file are modified.
if build_file_in_files[build_file]:
print 'matching target from modified build file', target_name
target.match_status = MATCH_STATUS_MATCHES
matching_targets.append(target)
else:
sources = _ExtractSources(target_name, target_dicts[target_name],
toplevel_dir)
for source in sources:
if _ToGypPath(os.path.normpath(source)) in files:
print 'target', target_name, 'matches', source
target.match_status = MATCH_STATUS_MATCHES
matching_targets.append(target)
break
# Add dependencies to visit as well as updating back pointers for deps.
for dep in target_dicts[target_name].get('dependencies', []):
targets_to_visit.append(dep)
created_dep_target, dep_target = _GetOrCreateTargetByName(name_to_target,
dep)
if not created_dep_target:
roots.discard(dep_target)
target.deps.add(dep_target)
dep_target.back_deps.add(target)
return name_to_target, matching_targets, roots & build_file_targets
def _GetUnqualifiedToTargetMapping(all_targets, to_find):
"""Returns a tuple of the following:
. mapping (dictionary) from unqualified name to Target for all the
Targets in |to_find|.
. any target names not found. If this is empty all targets were found."""
result = {}
if not to_find:
return {}, []
to_find = set(to_find)
for target_name in all_targets.keys():
extracted = gyp.common.ParseQualifiedTarget(target_name)
if len(extracted) > 1 and extracted[1] in to_find:
to_find.remove(extracted[1])
result[extracted[1]] = all_targets[target_name]
if not to_find:
return result, []
return result, [x for x in to_find]
def _DoesTargetDependOnMatchingTargets(target):
"""Returns true if |target| or any of its dependencies is one of the
targets containing the files supplied as input to analyzer. This updates
|matches| of the Targets as it recurses.
target: the Target to look for."""
if target.match_status == MATCH_STATUS_DOESNT_MATCH:
return False
if target.match_status == MATCH_STATUS_MATCHES or \
target.match_status == MATCH_STATUS_MATCHES_BY_DEPENDENCY:
return True
for dep in target.deps:
if _DoesTargetDependOnMatchingTargets(dep):
target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY
print '\t', target.name, 'matches by dep', dep.name
return True
target.match_status = MATCH_STATUS_DOESNT_MATCH
return False
def _GetTargetsDependingOnMatchingTargets(possible_targets):
"""Returns the list of Targets in |possible_targets| that depend (either
  directly or indirectly) on at least one of the targets containing the files
supplied as input to analyzer.
possible_targets: targets to search from."""
found = []
print 'Targets that matched by dependency:'
for target in possible_targets:
if _DoesTargetDependOnMatchingTargets(target):
found.append(target)
return found
def _AddCompileTargets(target, roots, add_if_no_ancestor, result):
"""Recurses through all targets that depend on |target|, adding all targets
that need to be built (and are in |roots|) to |result|.
roots: set of root targets.
add_if_no_ancestor: If true and there are no ancestors of |target| then add
|target| to |result|. |target| must still be in |roots|.
result: targets that need to be built are added here."""
if target.visited:
return
target.visited = True
target.in_roots = target in roots
for back_dep_target in target.back_deps:
_AddCompileTargets(back_dep_target, roots, False, result)
target.added_to_compile_targets |= back_dep_target.added_to_compile_targets
target.in_roots |= back_dep_target.in_roots
target.is_or_has_linked_ancestor |= (
back_dep_target.is_or_has_linked_ancestor)
# Always add 'executable' targets. Even though they may be built by other
# targets that depend upon them it makes detection of what is going to be
# built easier.
# And always add static_libraries that have no dependencies on them from
# linkables. This is necessary as the other dependencies on them may be
# static libraries themselves, which are not compile time dependencies.
if target.in_roots and \
(target.is_executable or
(not target.added_to_compile_targets and
(add_if_no_ancestor or target.requires_build)) or
(target.is_static_library and add_if_no_ancestor and
not target.is_or_has_linked_ancestor)):
print '\t\tadding to compile targets', target.name, 'executable', \
target.is_executable, 'added_to_compile_targets', \
target.added_to_compile_targets, 'add_if_no_ancestor', \
add_if_no_ancestor, 'requires_build', target.requires_build, \
'is_static_library', target.is_static_library, \
'is_or_has_linked_ancestor', target.is_or_has_linked_ancestor
result.add(target)
target.added_to_compile_targets = True
def _GetCompileTargets(matching_targets, supplied_targets):
"""Returns the set of Targets that require a build.
matching_targets: targets that changed and need to be built.
supplied_targets: set of targets supplied to analyzer to search from."""
result = set()
for target in matching_targets:
print 'finding compile targets for match', target.name
_AddCompileTargets(target, supplied_targets, True, result)
return result
def _WriteOutput(params, **values):
"""Writes the output, either to stdout or a file is specified."""
if 'error' in values:
print 'Error:', values['error']
if 'status' in values:
print values['status']
if 'targets' in values:
values['targets'].sort()
print 'Supplied targets that depend on changed files:'
for target in values['targets']:
print '\t', target
if 'invalid_targets' in values:
values['invalid_targets'].sort()
print 'The following targets were not found:'
for target in values['invalid_targets']:
print '\t', target
if 'build_targets' in values:
values['build_targets'].sort()
print 'Targets that require a build:'
for target in values['build_targets']:
print '\t', target
if 'compile_targets' in values:
values['compile_targets'].sort()
print 'Targets that need to be built:'
for target in values['compile_targets']:
print '\t', target
if 'test_targets' in values:
values['test_targets'].sort()
print 'Test targets:'
for target in values['test_targets']:
print '\t', target
output_path = params.get('generator_flags', {}).get(
'analyzer_output_path', None)
if not output_path:
print json.dumps(values)
return
try:
f = open(output_path, 'w')
f.write(json.dumps(values) + '\n')
f.close()
except IOError as e:
print 'Error writing to output file', output_path, str(e)
def _WasGypIncludeFileModified(params, files):
"""Returns true if one of the files in |files| is in the set of included
files."""
if params['options'].includes:
for include in params['options'].includes:
if _ToGypPath(os.path.normpath(include)) in files:
print 'Include file modified, assuming all changed', include
return True
return False
def _NamesNotIn(names, mapping):
"""Returns a list of the values in |names| that are not in |mapping|."""
return [name for name in names if name not in mapping]
def _LookupTargets(names, mapping):
"""Returns a list of the mapping[name] for each value in |names| that is in
|mapping|."""
return [mapping[name] for name in names if name in mapping]
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
elif flavor == 'win':
default_variables.setdefault('OS', 'win')
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
class TargetCalculator(object):
"""Calculates the matching test_targets and matching compile_targets."""
def __init__(self, files, additional_compile_target_names, test_target_names,
data, target_list, target_dicts, toplevel_dir, build_files):
self._additional_compile_target_names = set(additional_compile_target_names)
self._test_target_names = set(test_target_names)
self._name_to_target, self._changed_targets, self._root_targets = (
_GenerateTargets(data, target_list, target_dicts, toplevel_dir,
frozenset(files), build_files))
self._unqualified_mapping, self.invalid_targets = (
_GetUnqualifiedToTargetMapping(self._name_to_target,
self._supplied_target_names_no_all()))
def _supplied_target_names(self):
return self._additional_compile_target_names | self._test_target_names
def _supplied_target_names_no_all(self):
"""Returns the supplied test targets without 'all'."""
result = self._supplied_target_names();
result.discard('all')
return result
def is_build_impacted(self):
"""Returns true if the supplied files impact the build at all."""
return self._changed_targets
def find_matching_test_target_names(self):
"""Returns the set of output test targets."""
assert self.is_build_impacted()
    # Find the test targets first. 'all' is special cased to mean all the
    # root targets. To deal with 'all', the supplied |test_targets| are
    # expanded to include the root targets during lookup. If any of the root
    # targets match, they are removed and replaced with 'all'.
test_target_names_no_all = set(self._test_target_names)
test_target_names_no_all.discard('all')
test_targets_no_all = _LookupTargets(test_target_names_no_all,
self._unqualified_mapping)
test_target_names_contains_all = 'all' in self._test_target_names
if test_target_names_contains_all:
test_targets = [x for x in (set(test_targets_no_all) |
set(self._root_targets))]
else:
test_targets = [x for x in test_targets_no_all]
print 'supplied test_targets'
for target_name in self._test_target_names:
print '\t', target_name
print 'found test_targets'
for target in test_targets:
print '\t', target.name
print 'searching for matching test targets'
matching_test_targets = _GetTargetsDependingOnMatchingTargets(test_targets)
matching_test_targets_contains_all = (test_target_names_contains_all and
set(matching_test_targets) &
set(self._root_targets))
if matching_test_targets_contains_all:
      # Remove any of the targets for 'all' that were not explicitly supplied;
      # 'all' is subsequently added to the matching names below.
matching_test_targets = [x for x in (set(matching_test_targets) &
set(test_targets_no_all))]
print 'matched test_targets'
for target in matching_test_targets:
print '\t', target.name
matching_target_names = [gyp.common.ParseQualifiedTarget(target.name)[1]
for target in matching_test_targets]
if matching_test_targets_contains_all:
matching_target_names.append('all')
print '\tall'
return matching_target_names
def find_matching_compile_target_names(self):
"""Returns the set of output compile targets."""
    assert self.is_build_impacted()
# Compile targets are found by searching up from changed targets.
# Reset the visited status for _GetBuildTargets.
for target in self._name_to_target.itervalues():
target.visited = False
supplied_targets = _LookupTargets(self._supplied_target_names_no_all(),
self._unqualified_mapping)
if 'all' in self._supplied_target_names():
supplied_targets = [x for x in (set(supplied_targets) |
set(self._root_targets))]
print 'Supplied test_targets & compile_targets'
for target in supplied_targets:
print '\t', target.name
print 'Finding compile targets'
compile_targets = _GetCompileTargets(self._changed_targets,
supplied_targets)
return [gyp.common.ParseQualifiedTarget(target.name)[1]
for target in compile_targets]
def GenerateOutput(target_list, target_dicts, data, params):
"""Called by gyp as the final stage. Outputs results."""
config = Config()
try:
config.Init(params)
if not config.files:
raise Exception('Must specify files to analyze via config_path generator '
'flag')
toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir))
if debug:
print 'toplevel_dir', toplevel_dir
if _WasGypIncludeFileModified(params, config.files):
result_dict = { 'status': all_changed_string,
'test_targets': list(config.test_target_names),
'compile_targets': list(
config.additional_compile_target_names |
config.test_target_names) }
_WriteOutput(params, **result_dict)
return
calculator = TargetCalculator(config.files,
config.additional_compile_target_names,
config.test_target_names, data,
target_list, target_dicts, toplevel_dir,
params['build_files'])
if not calculator.is_build_impacted():
result_dict = { 'status': no_dependency_string,
'test_targets': [],
'compile_targets': [] }
if calculator.invalid_targets:
result_dict['invalid_targets'] = calculator.invalid_targets
_WriteOutput(params, **result_dict)
return
test_target_names = calculator.find_matching_test_target_names()
compile_target_names = calculator.find_matching_compile_target_names()
found_at_least_one_target = compile_target_names or test_target_names
result_dict = { 'test_targets': test_target_names,
'status': found_dependency_string if
found_at_least_one_target else no_dependency_string,
'compile_targets': list(
set(compile_target_names) |
set(test_target_names)) }
if calculator.invalid_targets:
result_dict['invalid_targets'] = calculator.invalid_targets
_WriteOutput(params, **result_dict)
except Exception as e:
_WriteOutput(params, error=str(e))
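# Illustrative sketch (not part of the original gyp source): unless an
# analyzer_output_path generator flag is supplied, _WriteOutput() dumps the
# result dictionary as JSON on stdout, e.g. (with hypothetical target names):
#
#   {"status": <found_dependency_string>,
#    "test_targets": ["foo_unittests"],
#    "compile_targets": ["foo", "foo_unittests"]}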
|
mikemcdaid/getonupband
|
sites/all/themes/getonupband/node_modules/node-gyp/gyp/pylib/gyp/generator/analyzer.py
|
Python
|
gpl-2.0
| 31,308
|
[
"VisIt"
] |
ee54264cf37483fe8d47a0fe515e507837097aecbf8eb0e5bf33b92687744f94
|
__author__ = 'Rob'
def atom_pair_energy(atom1, atom2):
energy = 0
import math
sep = math.sqrt((atom1[0] - atom2[0]) ** 2 + (atom1[1] - atom2[1]) ** 2 + (atom1[2] - atom2[2]) ** 2)
if sep != 0: # avoid divide by zero for an atom being paired with itself
energy = (atom1[3] * atom2[3]) / sep
return energy
def unit_cell_pair_energy(atom_list1, atom_list2):
energy = 0
for atom1 in atom_list1:
for atom2 in atom_list2:
energy += atom_pair_energy(atom1, atom2)
return energy
def separation(atom1, atom2):
import math
sep = math.sqrt((atom1[0] - atom2[0]) ** 2 + (atom1[1] - atom2[1]) ** 2 + (atom1[2] - atom2[2]) ** 2)
return sep
def madelung_sum(atoms_in_unit_cell, limit):
# Eventually we want to add (plane, offset) to the arguments.
    # This function takes a unit cell (assumed to be cubic) of atoms in the form
    # of a list of atoms, where each atom is a list consisting of its position
    # and charge: [x, y, z, q]. It creates a copy of the list of atoms which I
    # have termed current_atoms. It is this set that is offset with each
    # iteration of the function. Each atom in the current cell is compared back
    # to the original unit cell, atoms_in_unit_cell, using the pair energy
    # function defined above.
    # Finally I will be adding a test just before the calculation of energy that
    # will allow us to identify atoms on one side of an arbitrary plane, given as
    # a list [h, k, l, d] following the crystallography conventions for h, k, l,
    # with d being the offset from zero along that normal vector. In a cubic
    # crystal the (h, k, l) is also a vector perpendicular to the plane in
    # question. We can apply an offset, another vector, to those atoms on one
    # side and not the other. The energy vs. offset might be relevant to the
    # plastic properties of the material.
    energy = 0  # Initial value for energy
    for z in range(-limit, limit):  # Stepping across z
        for y in range(-limit, limit):  # Stepping across y
            for x in range(-limit, limit):  # Stepping across x
                # Build a fresh copy of the unit cell offset by the current
                # (x, y, z) lattice vector, so offsets do not accumulate
                # between iterations.
                current_atoms = [[atom[0] + x, atom[1] + y, atom[2] + z, atom[3]]
                                 for atom in atoms_in_unit_cell]
                energy += unit_cell_pair_energy(current_atoms, atoms_in_unit_cell)
    return energy
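# Hypothetical usage sketch (not part of the original module): a two-atom
# rock-salt-like cubic cell with charges +1 and -1, summed over a small range
# of neighbouring cells. The coordinates and the limit are illustrative
# assumptions only.
if __name__ == '__main__':
    example_cell = [[0.0, 0.0, 0.0, 1.0],
                    [0.5, 0.5, 0.5, -1.0]]
    print(madelung_sum(example_cell, 2))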
|
rpt26/coulomb_energy
|
coulomb_utils.py
|
Python
|
mit
| 2,557
|
[
"CRYSTAL"
] |
d343322e7a690633129a9023894a1788c77f90502bc2703efaa8b18c29f5a8e2
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkExtractSelectedFrustum(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkExtractSelectedFrustum(), 'Processing.',
('vtkDataSet', 'vtkSelection'), ('vtkDataSet',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
chrisidefix/devide
|
modules/vtk_basic/vtkExtractSelectedFrustum.py
|
Python
|
bsd-3-clause
| 518
|
[
"VTK"
] |
4344d9ae0c8e62de6ab0f3356d10a6df7e97e602e7994556be98709c2e5aad88
|
"""
===============================================
Compute all-to-all connectivity in sensor space
===============================================
Computes the Phase Lag Index (PLI) between all gradiometers and shows the
connectivity in 3D using the helmet geometry. The left visual stimulation data
are used, which produces strong connectivity in the right occipital sensors.
"""
# Author: Martin Luessi <[email protected]>
#
# License: BSD (3-clause)
import socket
import mne
import numpy as np
from mayavi import mlab # noqa
from mne import io
from mne.connectivity import spectral_connectivity
from scipy import linalg
# Setup paths and prepare raw data
hostname = socket.gethostname()
if hostname == "Wintermute":
data_path = "/home/mje/mnt/caa/scratch/"
else:
data_path = "/projects/MINDLAB2015_MEG-CorticalAlphaAttention/scratch/"
save_folder = data_path + "filter_ica_data/"
maxfiltered_folder = data_path + "maxfiltered_data/"
epochs_folder = data_path + "epoched_data/"
tf_folder = data_path + "tf_data/"
###############################################################################
# Set parameters
epochs = mne.read_epochs(epochs_folder + "0004_filtered_ica_mc_tsss-epo.fif")
# Compute connectivity for band containing the evoked response.
# We exclude the baseline period
fmin, fmax = 8., 12.
sfreq = epochs.info['sfreq'] # the sampling frequency
tmin, tmax = 0.0, 0.7 # exclude the baseline period
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
epochs["ent_left"], method='pli', mode='multitaper', sfreq=sfreq,
fmin=fmin, fmax=fmax,
faverage=True, tmin=tmin, tmax=tmax, mt_adaptive=False, n_jobs=1)
# the epochs contain an EOG channel, which we remove now
ch_names = epochs.ch_names
idx = [ch_names.index(name) for name in ch_names if name.startswith('MEG')]
con = con[idx][:, idx]
# con is a 3D array where the last dimension is size one since we averaged
# over frequencies in a single band. Here we make it 2D
con = con[:, :, 0]
# Now, visualize the connectivity in 3D
mlab.figure(size=(600, 600), bgcolor=(0.5, 0.5, 0.5))
# Plot the sensor locations
sens_loc = [epochs.info['chs'][i]['loc'][:3] for i in idx]
sens_loc = np.array(sens_loc)
pts = mlab.points3d(sens_loc[:, 0], sens_loc[:, 1], sens_loc[:, 2],
color=(1, 1, 1), opacity=1, scale_factor=0.005)
# Get the strongest connections
n_con = 20 # show up to 20 connections
min_dist = 0.05 # exclude sensors that are less than 5cm apart
threshold = np.sort(con, axis=None)[-n_con]
ii, jj = np.where(con >= threshold)
# Remove close connections
con_nodes = list()
con_val = list()
for i, j in zip(ii, jj):
if linalg.norm(sens_loc[i] - sens_loc[j]) > min_dist:
con_nodes.append((i, j))
con_val.append(con[i, j])
con_val = np.array(con_val)
# Show the connections as tubes between sensors
vmax = np.max(con_val)
vmin = np.min(con_val)
for val, nodes in zip(con_val, con_nodes):
x1, y1, z1 = sens_loc[nodes[0]]
x2, y2, z2 = sens_loc[nodes[1]]
points = mlab.plot3d([x1, x2], [y1, y2], [z1, z2], [val, val],
vmin=vmin, vmax=vmax, tube_radius=0.001,
colormap='RdBu')
points.module_manager.scalar_lut_manager.reverse_lut = True
mlab.scalarbar(title='Phase Lag Index (PLI)', nb_labels=4)
# Add the sensor names for the connections shown
nodes_shown = list(set([n[0] for n in con_nodes] +
[n[1] for n in con_nodes]))
for node in nodes_shown:
x, y, z = sens_loc[node]
    mlab.text3d(x, y, z, ch_names[idx[node]], scale=0.005,
color=(0, 0, 0))
view = (-88.7, 40.8, 0.76, np.array([-3.9e-4, -8.5e-3, -1e-2]))
mlab.view(*view)
|
MadsJensen/malthe_alpha_project
|
connectivity_analysis.py
|
Python
|
mit
| 3,720
|
[
"Mayavi"
] |
4a5f1fd391bfba736c1da34d5b8143b53165469173636c8ac53652f708e92df1
|
#!/usr/bin/env python
"""
This example shows how to create an unstructured grid.
"""
import vtk
def main():
colors = vtk.vtkNamedColors()
x = [[0, 0, 0], [1, 0, 0], [2, 0, 0], [0, 1, 0], [1, 1, 0], [2, 1, 0], [0, 0, 1], [1, 0, 1], [2, 0, 1], [0, 1, 1],
[1, 1, 1], [2, 1, 1], [0, 1, 2], [1, 1, 2], [2, 1, 2], [0, 1, 3], [1, 1, 3], [2, 1, 3], [0, 1, 4], [1, 1, 4],
[2, 1, 4], [0, 1, 5], [1, 1, 5], [2, 1, 5], [0, 1, 6], [1, 1, 6], [2, 1, 6]]
# Here we have kept consistency with the Cxx example of the same name.
# This means we will use slicing in ugrid.InsertNextCell to ensure that the correct
    # number of points is used.
pts = [[0, 1, 4, 3, 6, 7, 10, 9], [1, 2, 5, 4, 7, 8, 11, 10], [6, 10, 9, 12, 0, 0, 0, 0],
[8, 11, 10, 14, 0, 0, 0, 0], [16, 17, 14, 13, 12, 15, 0, 0], [18, 15, 19, 16, 20, 17, 0, 0],
[22, 23, 20, 19, 0, 0, 0, 0], [21, 22, 18, 0, 0, 0, 0, 0], [22, 19, 18, 0, 0, 0, 0, 0],
[23, 26, 0, 0, 0, 0, 0, 0], [21, 24, 0, 0, 0, 0, 0, 0], [25, 0, 0, 0, 0, 0, 0, 0]]
print(len(x), len(pts))
renderer = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(renderer)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
points = vtk.vtkPoints()
for i in range(0, len(x)):
points.InsertPoint(i, x[i])
ugrid = vtk.vtkUnstructuredGrid()
ugrid.Allocate(100)
ugrid.InsertNextCell(vtk.VTK_HEXAHEDRON, 8, pts[0])
ugrid.InsertNextCell(vtk.VTK_HEXAHEDRON, 8, pts[1])
ugrid.InsertNextCell(vtk.VTK_TETRA, 4, pts[2][:4])
ugrid.InsertNextCell(vtk.VTK_TETRA, 4, pts[3][:4])
ugrid.InsertNextCell(vtk.VTK_POLYGON, 6, pts[4][:6])
ugrid.InsertNextCell(vtk.VTK_TRIANGLE_STRIP, 6, pts[5][:6])
ugrid.InsertNextCell(vtk.VTK_QUAD, 4, pts[6][:4])
ugrid.InsertNextCell(vtk.VTK_TRIANGLE, 3, pts[7][:3])
ugrid.InsertNextCell(vtk.VTK_TRIANGLE, 3, pts[8][:3])
ugrid.InsertNextCell(vtk.VTK_LINE, 2, pts[9][:2])
ugrid.InsertNextCell(vtk.VTK_LINE, 2, pts[10][:2])
ugrid.InsertNextCell(vtk.VTK_VERTEX, 1, pts[11][:1])
ugrid.SetPoints(points)
ugridMapper = vtk.vtkDataSetMapper()
ugridMapper.SetInputData(ugrid)
ugridActor = vtk.vtkActor()
ugridActor.SetMapper(ugridMapper)
ugridActor.GetProperty().SetColor(colors.GetColor3d("Peacock"))
ugridActor.GetProperty().EdgeVisibilityOn()
renderer.AddActor(ugridActor)
renderer.SetBackground(colors.GetColor3d("Beige"))
renderer.ResetCamera()
renderer.GetActiveCamera().Elevation(60.0)
renderer.GetActiveCamera().Azimuth(30.0)
renderer.GetActiveCamera().Dolly(1.2)
renWin.SetSize(640, 480)
# Interact with the data.
renWin.Render()
iren.Start()
if __name__ == "__main__":
main()
|
lorensen/VTKExamples
|
src/Python/UnstructuredGrid/UGrid.py
|
Python
|
apache-2.0
| 2,790
|
[
"VTK"
] |
1fa10bf8111eb6fd7694c1103b12bafc7194c57548045c7cd68a3505a8225c2d
|
# Copyright 2014 Mark Chilenski
# This program is distributed under the terms of the GNU General Purpose License (GPL).
# Refer to http://www.gnu.org/licenses/gpl.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Provides the base :py:class:`Profile` class and other utilities.
"""
from __future__ import division
import scipy
import scipy.stats
import scipy.io
import scipy.linalg
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import gptools
import sys
import os.path
import csv
import warnings
import re
import copy
def average_points(X, y, err_X, err_y, T=None, ddof=1, robust=False,
y_method='sample', X_method='sample', weighted=False):
"""Find the average of the points with the given uncertainties using a variety of techniques.
Parameters
----------
X : array, (`M`, `D`) or (`M`, `N`, `D`)
Abscissa values to average.
y : array, (`M`)
Data values to average.
err_X : array, same shape as `X`
Uncertainty in `X`.
err_y : array, same shape as `y`
Uncertainty in `y`.
T : array, (`M`, `N`), optional
Transform for `y`. Default is None (`y` is not transformed).
ddof : int, optional
The degree of freedom correction used in computing the standard
deviation. The default is 1, the standard Bessel correction to
give an unbiased estimate of the variance.
robust : bool, optional
Set this flag to use robust estimators (median, IQR). Default is False.
y_method : {'sample', 'RMS', 'total', 'of mean', 'of mean sample'}, optional
The method to use in computing the uncertainty in the averaged `y`.
* 'sample' computes the sample standard deviation.
* 'RMS' computes the root-mean-square of the individual error bars.
* 'total' computes the square root of the sum of the sample variance and
the mean variance. This is only statistically reasonable if the points
represent sample means/variances already.
* 'of mean' computes the uncertainty in the mean using error propagation
with the given uncertainties.
* 'of mean sample' computes the uncertainty in the mean using error
propagation with the sample variance. Should not be used with weighted
estimators!
Default is 'sample' (use sample variance).
X_method : {'sample', 'RMS', 'total', 'of mean', 'of mean sample'}, optional
The method to use in computing the uncertainty in the averaged `X`.
Options are the same as `y_method`. Default is 'sample' (use sample
variance).
weighted : bool, optional
Set this flag to use weighted estimators. The weights are 1/err_y^2.
Default is False (use unweighted estimators).
Returns
-------
mean_X : array, (`D`,) or (`N`, `D`)
Mean of abscissa values.
mean_y : float
Mean of data values.
err_X : array, same shape as `mean_X`
Uncertainty in abscissa values.
err_y : float
Uncertainty in data values.
T : array, (`N`,) or None
Mean of transformation.
"""
allowed_methods = ['sample', 'RMS', 'total', 'of mean', 'of mean sample']
if y_method not in allowed_methods:
raise ValueError("Unsupported y_method '%s'!" % (y_method,))
if X_method not in allowed_methods:
raise ValueError("Unsupported X_method '%s'!" % (X_method,))
if weighted:
weights = 1.0 / err_y**2
if scipy.isinf(weights).any() or scipy.isnan(weights).any():
weights = None
warnings.warn("Invalid weight, setting weights equal!")
else:
weights = None
if not robust:
# Process y:
mean_y = meanw(y, weights=weights)
# If there is only one member, just carry its uncertainty forward:
if len(y) == 1:
err_y = err_y[0]
elif y_method == 'sample':
err_y = stdw(y, weights=weights, ddof=ddof)
elif y_method == 'RMS':
err_y = scipy.sqrt(meanw(err_y**2, weights=weights))
elif y_method == 'total':
err_y = scipy.sqrt(
varw(y, weights=weights, ddof=ddof) +
meanw(err_y**2, weights=weights)
)
elif y_method == 'of mean':
if weighted:
err_y = (weights.sum())**(-0.5)
else:
err_y = scipy.sqrt((err_y**2).sum()) / len(y)
elif y_method == 'of mean sample':
if weighted:
err_y = scipy.sqrt((weights**2).sum()) * stdw(y, weights=weights, ddof=ddof) / weights.sum()
else:
err_y = stdw(y, weights=weights, ddof=ddof) / scipy.sqrt(len(y))
# Similar picture for X:
if weights is not None:
weights = scipy.atleast_2d(weights).T
mean_X = meanw(X, weights=weights, axis=0)
if len(y) == 1:
err_X = err_X[0]
elif X_method == 'sample':
err_X = stdw(X, weights=weights, ddof=ddof, axis=0)
elif X_method == 'RMS':
err_X = scipy.sqrt(meanw(err_X**2, weights=weights, axis=0))
elif X_method == 'total':
err_X = scipy.sqrt(
varw(X, weights=weights, ddof=ddof, axis=0) +
meanw(err_X**2, weights=weights, axis=0)
)
elif X_method == 'of mean':
if weighted:
err_X = scipy.sqrt((weights**2 * err_X**2).sum(axis=0)) / weights.sum()
else:
err_X = scipy.sqrt((err_X**2).sum(axis=0)) / len(y)
elif X_method == 'of mean sample':
if weighted:
err_X = scipy.sqrt((weights**2).sum()) * stdw(X, weights=weights, ddof=ddof, axis=0) / weights.sum()
else:
err_X = stdw(X, weights=weights, ddof=ddof, axis=0) / scipy.sqrt(len(y))
# And again for T:
if T is not None:
T = meanw(T, weights=weights, axis=0)
else:
mean_y = medianw(y, weights=weights)
if len(y) == 1:
err_y = err_y[0]
elif y_method == 'sample':
err_y = robust_stdw(y, weights=weights)
elif y_method == 'RMS':
err_y = scipy.sqrt(medianw(err_y**2, weights=weights))
elif y_method == 'total':
err_y = scipy.sqrt((robust_stdw(y, weights=weights))**2 + medianw(err_y**2, weights=weights))
elif y_method == 'of mean':
# TODO: This is a very sketchy approximation!
if weighted:
err_y = (weights.sum())**(-0.5)
else:
err_y = scipy.sqrt((err_y**2).sum()) / len(y)
elif y_method == 'of mean sample':
if weighted:
err_y = scipy.sqrt((weights**2).sum()) * robust_stdw(y, weights=weights) / weights.sum()
else:
err_y = robust_std(y) / scipy.sqrt(len(y))
mean_X = scipy.median(X, axis=0)
if len(y) == 1:
err_X = err_X[0]
elif X_method == 'sample':
err_X = robust_stdw(X, weights=weights, axis=0)
elif X_method == 'RMS':
err_X = scipy.sqrt(medianw(err_X**2, weights=weights, axis=0))
elif X_method == 'total':
err_X = scipy.sqrt(
(robust_stdw(X, weights=weights, axis=0))**2 +
scipy.median(err_X**2, axis=0)
)
elif X_method == 'of mean':
if weighted:
err_X = scipy.sqrt((weights**2 * err_X**2).sum(axis=0)) / weights.sum()
else:
err_X = scipy.sqrt((err_X**2).sum(axis=0)) / len(y)
elif X_method == 'of mean sample':
if weighted:
err_X = scipy.sqrt((weights**2).sum()) * robust_stdw(X, weights=weights, ddof=ddof, axis=0) / weights.sum()
else:
err_X = robust_stdw(X, weights=weights, axis=0) / scipy.sqrt(len(y))
if T is not None:
T = medianw(T, weights=weights, axis=0)
return (mean_X, mean_y, err_X, err_y, T)
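# Hypothetical usage sketch (not part of the original module), illustrating the
# call signature of average_points for three repeated 1D measurements. The
# numbers are arbitrary assumptions for illustration only.
def _example_average_points():
    X = scipy.array([[0.5], [0.5], [0.5]])
    y = scipy.array([1.0, 1.2, 0.9])
    err_X = scipy.zeros_like(X)
    err_y = 0.1 * scipy.ones_like(y)
    # Use the sample standard deviation (the default y_method) for the
    # uncertainty of the averaged y value:
    return average_points(X, y, err_X, err_y, y_method='sample')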
class Channel(object):
"""Class to store data from a single channel.
This is particularly useful for storing linearly transformed data, but
should work for general data just as well.
Parameters
----------
X : array, (`M`, `N`, `D`)
Abscissa values to use.
y : array, (`M`,)
Data values.
err_X : array, same shape as `X`
Uncertainty in `X`.
err_y : array, (`M`,)
Uncertainty in data.
T : array, (`M`, `N`), optional
Linear transform to get from latent variables to data in `y`. Default is
that `y` represents untransformed data.
y_label : str, optional
Label for the `y` data. Default is empty string.
y_units : str, optional
Units of the `y` data. Default is empty string.
"""
def __init__(self, X, y, err_X=0, err_y=0, T=None, y_label='', y_units=''):
self.y_label = y_label
self.y_units = y_units
# Verify y has only one non-trivial dimension:
y = scipy.atleast_1d(scipy.asarray(y, dtype=float))
if y.ndim != 1:
raise ValueError(
"Dependent variables y must have only one dimension! Shape of y "
"given is %s" % (y.shape,)
)
# Handle scalar error or verify shape of array error matches shape of y:
try:
iter(err_y)
except TypeError:
err_y = err_y * scipy.ones_like(y, dtype=float)
else:
err_y = scipy.asarray(err_y, dtype=float)
if err_y.shape != y.shape:
raise ValueError(
"When using array-like err_y, shape must match shape of y! "
"Shape of err_y given is %s, shape of y given is %s."
% (err_y.shape, y.shape)
)
if (err_y < 0).any():
raise ValueError("All elements of err_y must be non-negative!")
# Handle scalar independent variable or convert array input into matrix.
X = scipy.atleast_3d(scipy.asarray(X, dtype=float))
if T is None and X.shape[0] != len(y):
raise ValueError(
"Shape of independent variables must be (len(y), D)! "
"X given has shape %s, shape of y is %s."
% (X.shape, y.shape,)
)
if T is not None:
# Promote T if it is a single observation:
T = scipy.atleast_2d(scipy.asarray(T, dtype=float))
if T.ndim != 2:
raise ValueError("T must have exactly 2 dimensions!")
if T.shape[0] != len(y):
raise ValueError("Length of first dimension of T must match length of y!")
if T.shape[1] != X.shape[1]:
raise ValueError("Second dimension of T must match second dimension of X!")
else:
T = scipy.eye(len(y))
# Process uncertainty in X:
try:
iter(err_X)
except TypeError:
err_X = err_X * scipy.ones_like(X, dtype=float)
else:
err_X = scipy.asarray(err_X, dtype=float)
if err_X.ndim == 1 and X.shape[2] != 1:
err_X = scipy.tile(err_X, (X.shape[0], 1))
err_X = scipy.atleast_2d(scipy.asarray(err_X, dtype=float))
if err_X.shape != X.shape:
raise ValueError(
"Shape of uncertainties on independent variables must be "
"(len(y), self.X_dim)! X given has shape %s, shape of y is %s."
% (X.shape, y.shape,)
)
if (err_X < 0).any():
raise ValueError("All elements of err_X must be non-negative!")
self.X = X
self.y = y
self.err_X = err_X
self.err_y = err_y
self.T = T
def keep_slices(self, axis, vals, tol=None, keep_mixed=False):
"""Only keep the indices closest to given `vals`.
Parameters
----------
axis : int
The column in `X` to check values on.
vals : float or 1-d array
            The value(s) to keep the points that are nearest to.
        tol : float or None, optional
            Tolerance on the nearest values -- if the nearest value is farther
            than this, it is not kept. If None, no tolerance is applied.
keep_mixed : bool, optional
Set this flag to keep transformed quantities that depend on multiple
values of `X[:, :, axis]`. Default is False (drop mixed quantities).
Returns
-------
still_good : bool
Returns True if there are still any points left in the channel,
False otherwise.
"""
unique_vals = []
num_unique = []
for pt in self.X:
unique_vals += [scipy.unique(pt[:, axis])]
num_unique += [len(unique_vals[-1])]
if max(num_unique) > 1:
if keep_mixed:
return True
else:
return False
else:
# TODO: Make sure raveling doesn't have unexpected consequences...
unique_vals = scipy.asarray(unique_vals).ravel()
keep_idxs = get_nearest_idx(vals, unique_vals)
if tol is not None:
keep_idxs = keep_idxs[
scipy.absolute(unique_vals[keep_idxs] - vals) <= tol
]
keep_idxs = scipy.unique(keep_idxs)
self.X = self.X[keep_idxs, :, :]
self.y = self.y[keep_idxs]
self.err_X = self.err_X[keep_idxs, :, :]
self.err_y = self.err_y[keep_idxs]
self.T = self.T[keep_idxs, :]
return True
def average_data(self, axis=0, **kwargs):
"""Average the data along the given `axis`.
Parameters
----------
axis : int, optional
Axis to average along. Default is 0.
**kwargs : optional keyword arguments
All additional kwargs are passed to :py:func:`average_points`.
"""
reduced_X = scipy.delete(self.X, axis, axis=2)
reduced_err_X = scipy.delete(self.err_X, axis, axis=2)
self.X, self.y, self.err_X, self.err_y, self.T = average_points(
reduced_X,
self.y,
reduced_err_X,
self.err_y,
T=self.T,
**kwargs
)
self.X = scipy.expand_dims(self.X, axis=0)
self.y = scipy.expand_dims(self.y, axis=0)
self.err_X = scipy.expand_dims(self.err_X, axis=0)
self.err_y = scipy.expand_dims(self.err_y, axis=0)
self.T = scipy.expand_dims(self.T, axis=0)
def remove_points(self, conditional):
"""Remove points satisfying `conditional`.
Parameters
----------
conditional : array, same shape as `self.y`
Boolean array with True wherever a point should be removed.
Returns
-------
bad_X : array
The removed `X` values.
bad_err_X : array
The uncertainty in the removed `X` values.
bad_y : array
The removed `y` values.
bad_err_y : array
The uncertainty in the removed `y` values.
bad_T : array
The transformation matrix of the removed `y` values.
"""
keep_idxs = ~conditional
bad_X = self.X[conditional, :, :]
bad_y = self.y[conditional]
bad_err_X = self.err_X[conditional, :, :]
bad_err_y = self.err_y[conditional]
bad_T = self.T[conditional, :]
self.X = self.X[keep_idxs, :, :]
self.y = self.y[keep_idxs]
self.err_X = self.err_X[keep_idxs, :, :]
self.err_y = self.err_y[keep_idxs]
self.T = self.T[keep_idxs, :]
return (bad_X, bad_err_X, bad_y, bad_err_y, bad_T)
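# Hypothetical usage sketch (not part of the original module): a Channel holding
# two line-integrated measurements, each a weighted sum (rows of T) of the same
# three abscissa points. All numbers are illustrative assumptions only.
def _example_channel():
    X = scipy.array([[[0.1], [0.2], [0.3]],
                     [[0.1], [0.2], [0.3]]])  # (M=2, N=3, D=1)
    y = scipy.array([1.5, 2.0])               # (M=2,)
    T = scipy.array([[0.5, 0.3, 0.2],
                     [0.2, 0.3, 0.5]])        # (M=2, N=3)
    return Channel(X, y, err_y=0.1, T=T, y_label='demo', y_units='a.u.')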
class Profile(object):
"""Object to abstractly represent a profile.
Parameters
----------
X_dim : positive int, optional
Number of dimensions of the independent variable. Default value is 1.
X_units : str, list of str or None, optional
Units for each of the independent variables. If `X_dim`=1, this should
given as a single string, if `X_dim`>1, this should be given as a list
of strings of length `X_dim`. Default value is `None`, meaning a list
of empty strings will be used.
y_units : str, optional
Units for the dependent variable. Default is an empty string.
X_labels : str, list of str or None, optional
Descriptive label for each of the independent variables. If `X_dim`=1,
this should be given as a single string, if `X_dim`>1, this should be
given as a list of strings of length `X_dim`. Default value is `None`,
meaning a list of empty strings will be used.
y_label : str, optional
Descriptive label for the dependent variable. Default is an empty string.
weightable : bool, optional
Whether or not it is valid to use weighted estimators on the data, or if
the error bars are too suspect for this to be valid. Default is True
(allow use of weighted estimators).
Attributes
----------
y : :py:class:`Array`, (`M`,)
The `M` dependent variables.
X : :py:class:`Matrix`, (`M`, `X_dim`)
The `M` independent variables.
err_y : :py:class:`Array`, (`M`,)
The uncertainty in the `M` dependent variables.
err_X : :py:class:`Matrix`, (`M`, `X_dim`)
The uncertainties in each dimension of the `M` independent variables.
channels : :py:class:`Matrix`, (`M`, `X_dim`)
The logical groups of points into channels along each of the independent variables.
X_dim : positive int
The number of dimensions of the independent variable.
X_units : list of str, (X_dim,)
The units for each of the independent variables.
y_units : str
The units for the dependent variable.
X_labels : list of str, (X_dim,)
Descriptive labels for each of the independent variables.
y_label : str
Descriptive label for the dependent variable.
weightable : bool
Whether or not weighted estimators can be used.
transformed : list of :py:class:`Channel`
The transformed quantities associated with the :py:class:`Profile` instance.
gp : :py:class:`gptools.GaussianProcess` instance
The Gaussian process with the local and transformed data included.
"""
def __init__(self, X_dim=1, X_units=None, y_units='', X_labels=None, y_label='',
weightable=True):
self.X_dim = X_dim
self.weightable = weightable
if X_units is None:
X_units = [''] * X_dim
elif X_dim == 1:
X_units = [X_units]
elif len(X_units) != X_dim:
raise ValueError("The length of X_units must be equal to X_dim!")
if X_labels is None:
X_labels = [''] * X_dim
elif X_dim == 1:
X_labels = [X_labels]
elif len(X_labels) != X_dim:
raise ValueError("The length of X_labels must be equal to X_dim!")
self.X_units = X_units
self.y_units = y_units
self.X_labels = X_labels
self.y_label = y_label
self.y = scipy.array([], dtype=float)
self.X = None
self.err_y = scipy.array([], dtype=float)
self.err_X = None
self.channels = None
self.transformed = scipy.array([], dtype=Channel)
self.gp = None
def add_data(self, X, y, err_X=0, err_y=0, channels=None):
"""Add data to the training data set of the :py:class:`Profile` instance.
Will also update the Profile's Gaussian process instance (if it exists).
Parameters
----------
X : array-like, (`M`, `N`)
`M` independent variables of dimension `N`.
y : array-like, (`M`,)
`M` dependent variables.
err_X : array-like, (`M`, `N`), or scalar float, or single array-like (`N`,), optional
Non-negative values only. Error given as standard deviation for
each of the `N` dimensions in the `M` independent variables. If a
scalar is given, it is used for all of the values. If a single
array of length `N` is given, it is used for each point. The
default is to assign zero error to each point.
err_y : array-like (`M`,) or scalar float, optional
Non-negative values only. Error given as standard deviation in the
`M` dependent variables. If `err_y` is a scalar, the data set is
taken to be homoscedastic (constant error). Otherwise, the length
of `err_y` must equal the length of `y`. Default value is 0
(noiseless observations).
channels : dict or array-like (`M`, `N`)
Keys to logically group points into "channels" along each dimension
            of `X`. If not passed, each point is assigned its own channel along
            every dimension. If only certain dimensions have groupings
            other than this default, then you can
pass a dict with integer keys in the interval [0, `X_dim`-1] whose
values are the arrays of length `M` indicating the channels.
Otherwise, you can pass in a full (`M`, `N`) array.
Raises
------
ValueError
            Bad shapes for any of the inputs, or negative values for `err_y`
            or `err_X`.
"""
# Verify y has only one non-trivial dimension:
y = scipy.atleast_1d(scipy.asarray(y, dtype=float))
if y.ndim != 1:
raise ValueError(
"Dependent variables y must have only one dimension! Shape of y "
"given is %s" % (y.shape,)
)
# Handle scalar error or verify shape of array error matches shape of y:
try:
iter(err_y)
except TypeError:
err_y = err_y * scipy.ones_like(y, dtype=float)
else:
err_y = scipy.asarray(err_y, dtype=float)
if err_y.shape != y.shape:
raise ValueError(
"When using array-like err_y, shape must match shape of y! "
"Shape of err_y given is %s, shape of y given is %s."
% (err_y.shape, y.shape)
)
if (err_y < 0).any():
raise ValueError("All elements of err_y must be non-negative!")
# Handle scalar independent variable or convert array input into matrix.
X = scipy.atleast_2d(scipy.asarray(X, dtype=float))
# Correct single-dimension inputs:
if self.X_dim == 1 and X.shape[0] == 1:
X = X.T
if X.shape != (len(y), self.X_dim):
raise ValueError(
"Shape of independent variables must be (len(y), self.X_dim)! "
"X given has shape %s, shape of y is %s and X_dim=%d."
% (X.shape, y.shape, self.X_dim)
)
# Process uncertainty in X:
try:
iter(err_X)
except TypeError:
err_X = err_X * scipy.ones_like(X, dtype=float)
else:
err_X = scipy.asarray(err_X, dtype=float)
# TODO: Steal this idiom for handling n in gptools!
if err_X.ndim == 1 and self.X_dim != 1:
err_X = scipy.tile(err_X, (X.shape[0], 1))
err_X = scipy.atleast_2d(scipy.asarray(err_X, dtype=float))
if self.X_dim == 1 and err_X.shape[0] == 1:
err_X = err_X.T
if err_X.shape != X.shape:
raise ValueError(
"Shape of uncertainties on independent variables must be "
"(len(y), self.X_dim)! X given has shape %s, shape of y is %s "
"and X_dim=%d." % (X.shape, y.shape, self.X_dim)
)
if (err_X < 0).any():
raise ValueError("All elements of err_X must be non-negative!")
# Process channel flags:
if channels is None:
channels = scipy.tile(scipy.arange(0, len(y)), (X.shape[1], 1)).T
# channels = scipy.copy(X)
else:
if isinstance(channels, dict):
d_channels = channels
channels = scipy.tile(scipy.arange(0, len(y)), (X.shape[1], 1)).T
# channels = scipy.copy(X)
for idx in d_channels:
channels[:, idx] = d_channels[idx]
else:
channels = scipy.asarray(channels)
if channels.shape != (len(y), X.shape[1]):
raise ValueError("Shape of channels and X must be the same!")
if self.X is None:
self.X = X
else:
self.X = scipy.vstack((self.X, X))
if self.channels is None:
self.channels = channels
else:
self.channels = scipy.vstack((self.channels, channels))
if self.err_X is None:
self.err_X = err_X
else:
self.err_X = scipy.vstack((self.err_X, err_X))
self.y = scipy.append(self.y, y)
self.err_y = scipy.append(self.err_y, err_y)
if self.gp is not None:
self.gp.add_data(X, y, err_y=err_y)
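    # Hypothetical usage sketch (not part of the original class): add three
    # points to a 2D profile, grouping the second dimension into explicit
    # channels via the dict form of `channels`. The numbers are illustrative
    # assumptions only.
    #
    #   p = Profile(X_dim=2, X_units=['s', 'm'], y_units='eV')
    #   p.add_data([[0.1, 0.5], [0.1, 0.6], [0.2, 0.5]],
    #              [10.0, 12.0, 11.0],
    #              err_y=1.0,
    #              channels={1: scipy.array([0, 1, 0])})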
def add_profile(self, other):
"""Absorbs the data from one profile object.
Parameters
----------
other : :py:class:`Profile`
:py:class:`Profile` to absorb.
"""
if self.X_dim != other.X_dim:
raise ValueError(
"When merging profiles, X_dim must be equal between the two "
"profiles!"
)
if self.y_units != other.y_units:
raise ValueError("When merging profiles, the y_units must agree!")
if self.X_units != other.X_units:
raise ValueError("When merging profiles, the X_units must agree!")
if len(other.y) > 0:
# Modify the channels of self.channels to avoid clashes:
if other.channels is not None and self.channels is not None:
self.channels = (
self.channels - self.channels.min(axis=0) +
other.channels.max(axis=0) + 1
)
self.add_data(other.X, other.y, err_X=other.err_X, err_y=other.err_y,
channels=other.channels)
if len(other.transformed) > 0:
self.transformed = scipy.append(self.transformed, other.transformed)
def drop_axis(self, axis):
"""Drops a selected axis from `X`.
Parameters
----------
axis : int
The index of the axis to drop.
"""
if self.X_dim == 1:
raise ValueError("Can't drop axis from a univariate profile!")
self.X_dim -= 1
if self.X is not None:
self.channels = scipy.delete(self.channels, axis, axis=1)
self.X = scipy.delete(self.X, axis, axis=1)
self.err_X = scipy.delete(self.err_X, axis, axis=1)
self.X_labels.pop(axis)
self.X_units.pop(axis)
for p in self.transformed:
p.X = scipy.delete(p.X, axis, axis=2)
p.err_X = scipy.delete(p.err_X, axis, axis=2)
def keep_slices(self, axis, vals, tol=None, **kwargs):
"""Keeps only the nearest points to vals along the given axis for each channel.
Parameters
----------
axis : int
The axis to take the slice(s) of.
vals : array of float
The values the axis should be close to.
tol : float or None
Tolerance on nearest values -- if the nearest value is farther than
this, it is not kept. If None, this is not applied.
**kwargs : optional kwargs
All additional kwargs are passed to :py:meth:`~gptools.core.Channel.keep_slices`.
"""
try:
iter(vals)
except TypeError:
vals = [vals]
# Only handle single points if they are present...
if self.X is not None:
new_X = []
new_y = []
new_err_X = []
new_err_y = []
new_channels = []
reduced_channels = scipy.delete(self.channels, axis, axis=1)
channels = unique_rows(reduced_channels)
for ch in channels:
channel_idxs = (reduced_channels == ch.flatten()).all(axis=1)
ch_axis_X = self.X[channel_idxs, axis].flatten()
keep_idxs = get_nearest_idx(vals, ch_axis_X)
if tol is not None:
keep_idxs = keep_idxs[
scipy.absolute(ch_axis_X[keep_idxs] - vals) <= tol
]
keep_idxs = scipy.unique(keep_idxs)
new_X.extend(self.X[channel_idxs, :][keep_idxs, :])
new_y.extend(self.y[channel_idxs][keep_idxs])
new_err_X.extend(self.err_X[channel_idxs, :][keep_idxs, :])
new_err_y.extend(self.err_y[channel_idxs][keep_idxs])
new_channels.extend(self.channels[channel_idxs, :][keep_idxs, :])
# Raise a warning if there aren't any points to keep:
if new_X:
self.X = scipy.vstack(new_X)
self.y = scipy.asarray(new_y)
self.err_X = scipy.vstack(new_err_X)
self.err_y = scipy.asarray(new_err_y)
self.channels = scipy.vstack(new_channels)
else:
self.X = None
self.y = scipy.array([], dtype=float)
self.err_X = None
self.err_y = scipy.array([], dtype=float)
self.channels = None
warnings.warn("No valid points!", RuntimeWarning)
mask = [p.keep_slices(axis, vals, tol=tol, **kwargs) for p in self.transformed]
self.transformed = self.transformed[scipy.asarray(mask, dtype=bool)]
def average_data(self, axis=0, **kwargs):
"""Computes the average of the profile over the desired axis.
If `X_dim` is already 1, this returns the average of the quantity.
Otherwise, the :py:class:`Profile` is mutated to contain the
desired averaged data. `err_X` and `err_y` are populated with the
standard deviations of the respective quantities. The averaging is
carried out within the groupings defined by the `channels` attribute.
Parameters
----------
axis : int, optional
The index of the dimension to average over. Default is 0.
**kwargs : optional kwargs
All additional kwargs are passed to :py:func:`average_points`.
"""
kwargs['weighted'] = self.weightable and kwargs.get('weighted', False)
# TODO: Add support for custom bins!
if self.X is not None:
reduced_channels = scipy.delete(self.channels, axis, axis=1)
reduced_X = scipy.delete(self.X, axis, axis=1)
reduced_err_X = scipy.delete(self.err_X, axis, axis=1)
channels = unique_rows(reduced_channels)
X = scipy.zeros((len(channels), self.X_dim - 1))
y = scipy.zeros(len(channels))
err_X = scipy.zeros_like(X)
err_y = scipy.zeros_like(y)
for i, chan in zip(range(0, len(channels)), channels):
chan_mask = (
reduced_channels == chan.flatten()
).all(axis=1)
X[i, :], y[i], err_X[i, :], err_y[i], dum = average_points(
reduced_X[chan_mask, :],
self.y[chan_mask],
reduced_err_X[chan_mask, :],
self.err_y[chan_mask],
**kwargs
)
self.X = X
self.y = y
self.err_X = err_X
self.err_y = err_y
self.channels = channels
self.X_dim -= 1
self.X_units.pop(axis)
self.X_labels.pop(axis)
for p in self.transformed:
p.average_data(axis=axis, **kwargs)
def plot_data(self, ax=None, label_axes=True, **kwargs):
"""Plot the data stored in this Profile. Only works for X_dim = 1 or 2.
Parameters
----------
ax : axis instance, optional
Axis to plot the result on. If no axis is passed, one is created.
If the string 'gca' is passed, the current axis (from plt.gca())
is used. If X_dim = 2, the axis must be 3d.
label_axes : bool, optional
If True, the axes will be labelled with strings constructed from
the labels and units set when creating the Profile instance.
Default is True (label axes).
**kwargs : extra plotting arguments, optional
Extra arguments that are passed to errorbar/errorbar3d.
Returns
-------
The axis instance used.
"""
if self.X is not None:
if self.X_dim > 2:
raise ValueError("Plotting is not supported for X_dim > 2!")
if ax is None:
f = plt.figure()
if self.X_dim == 1:
ax = f.add_subplot(1, 1, 1)
elif self.X_dim == 2:
ax = f.add_subplot(111, projection='3d')
elif ax == 'gca':
ax = plt.gca()
if 'label' not in kwargs:
kwargs['label'] = self.y_label
if 'fmt' not in kwargs and 'marker' not in kwargs:
kwargs['fmt'] = 'o'
if self.X_dim == 1:
ax.errorbar(self.X.ravel(), self.y,
yerr=self.err_y, xerr=self.err_X.flatten(),
**kwargs)
if label_axes:
ax.set_xlabel(
"%s [%s]" % (self.X_labels[0], self.X_units[0],) if self.X_units[0]
else self.X_labels[0]
)
ax.set_ylabel(
"%s [%s]" % (self.y_label, self.y_units,) if self.y_units
else self.y_label
)
elif self.X_dim == 2:
errorbar3d(ax, self.X[:, 0], self.X[:, 1], self.y,
xerr=self.err_X[:, 0], yerr=self.err_X[:, 1], zerr=self.err_y,
**kwargs)
if label_axes:
ax.set_xlabel(
"%s [%s]" % (self.X_labels[0], self.X_units[0],) if self.X_units[0]
else self.X_labels[0]
)
ax.set_ylabel(
"%s [%s]" % (self.X_labels[1], self.X_units[1],) if self.X_units[1]
else self.X_labels[1]
)
ax.set_zlabel(
"%s [%s]" % (self.y_label, self.y_units,) if self.y_units
else self.y_label
)
return ax
def remove_points(self, conditional):
"""Remove points where conditional is True.
Note that this does NOT remove anything from the GP -- you either need
to call :py:meth:`create_gp` again or act manually on the :py:attr:`gp`
attribute.
Also note that this does not include any provision for removing points
that represent linearly-transformed quantities -- you will need to
operate directly on :py:attr:`transformed` to remove such points.
Parameters
----------
conditional : array-like of bool, (`M`,)
Array of booleans corresponding to each entry in `y`. Where an
entry is True, that value will be removed.
Returns
-------
X_bad : matrix
Input values of the bad points.
y_bad : array
Bad values.
err_X_bad : array
            Uncertainties on the abscissa of the bad values.
err_y_bad : array
Uncertainties on the bad values.
"""
idxs = ~conditional
y_bad = self.y[conditional]
X_bad = self.X[conditional, :]
err_y_bad = self.err_y[conditional]
err_X_bad = self.err_X[conditional, :]
self.y = self.y[idxs]
self.X = self.X[idxs, :]
self.err_y = self.err_y[idxs]
self.err_X = self.err_X[idxs, :]
self.channels = self.channels[idxs, :]
# Cause other methods to fail gracefully if this causes all pointlike
# data to be removed:
if len(self.y) == 0:
self.X = None
self.err_X = None
return (X_bad, y_bad, err_X_bad, err_y_bad)
def remove_outliers(self, thresh=3, check_transformed=False,
force_update=False, mask_only=False, gp_kwargs={}, MAP_kwargs={},
**predict_kwargs):
"""Remove outliers from the Gaussian process.
The Gaussian process is created if it does not already exist. The
chopping of values assumes that any artificial constraints that have
been added to the GP are at the END of the GP's data arrays.
The values removed are returned.
Parameters
----------
thresh : float, optional
The threshold as a multiplier times `err_y`. Default is 3 (i.e.,
throw away all 3-sigma points).
check_transformed : bool, optional
Set this flag to check if transformed quantities are outliers.
Default is False (don't check transformed quantities).
force_update : bool, optional
If True, a new Gaussian process will be created even if one already
exists. Set this if you have added data or constraints since you
created the Gaussian process. Default is False (use current Gaussian
process if it exists).
mask_only : bool, optional
Set this flag to return only a mask of the non-transformed points
that are flagged. Default is False (completely remove bad points).
In either case, the bad transformed points will ALWAYS be removed if
`check_transformed` is True.
gp_kwargs : dict, optional
The entries of this dictionary are passed as kwargs to
:py:meth:`create_gp` if it gets called. Default is {}.
MAP_kwargs : dict, optional
The entries of this dictionary are passed as kwargs to
:py:meth:`find_gp_MAP_estimate` if it gets called. Default is {}.
**predict_kwargs : optional parameters
All other parameters are passed to the Gaussian process'
:py:meth:`predict` method.
Returns
-------
X_bad : matrix
Input values of the bad points.
y_bad : array
Bad values.
err_X_bad : array
            Uncertainties on the abscissa of the bad values.
err_y_bad : array
Uncertainties on the bad values.
transformed_bad : array of :py:class:`Channel`
Transformed points that were removed.
"""
if force_update or self.gp is None:
self.create_gp(**gp_kwargs)
            if not predict_kwargs.get('use_MCMC', False):
self.find_gp_MAP_estimate(**MAP_kwargs)
# Handle single points:
mean = self.gp.predict(
self.X,
return_std=False,
**predict_kwargs
)
deltas = scipy.absolute(mean - self.y) / self.err_y
deltas[self.err_y == 0] = 0
bad_idxs = (deltas >= thresh)
if not mask_only:
# Delete offending single points:
X_bad, y_bad, err_X_bad, err_y_bad = self.remove_points(bad_idxs)
# Handle transformed points:
if check_transformed:
bad_transformed = scipy.zeros_like(self.transformed, dtype=Channel)
for k, pt in zip(range(0, len(self.transformed)), self.transformed):
mean = self.gp.predict(
scipy.vstack(pt.X),
return_std=False,
output_transform=scipy.linalg.block_diag(*pt.T),
**predict_kwargs
)
deltas = scipy.absolute(mean - pt.y) / pt.err_y
deltas[pt.err_y == 0] = 0
bad_idxs = (deltas >= thresh)
bad_X, bad_err_X, bad_y, bad_err_y, bad_T = pt.remove_points(bad_idxs)
bad_transformed[k] = Channel(
bad_X, bad_y, err_X=bad_err_X, err_y=bad_err_y, T=bad_T,
y_label=pt.y_label, y_units=pt.y_units
)
# TODO: Need to do something to return/re-merge the removed points!
# TODO: Need to flag points that no longer have contents!
# TODO: Finish this!
# Re-create the GP now that the points have been removed:
# if 'k' not in gp_kwargs:
# gp_kwargs['k'] = self.gp.k
# if 'noise_k' not in gp_kwargs:
# gp_kwargs['noise_k'] = self.gp.noise_k
# if 'diag_factor' not in gp_kwargs:
# gp_kwargs['diag_factor'] = self.gp.diag_factor
# self.create_gp(**gp_kwargs)
# TODO: This will screw up edge constraints!
if check_transformed:
if mask_only:
return (bad_idxs, bad_transformed)
else:
return (X_bad, y_bad, err_X_bad, err_y_bad, bad_transformed)
else:
if mask_only:
return bad_idxs
else:
return (X_bad, y_bad, err_X_bad, err_y_bad)
# TODO: Re-run MAP estimate and see what to put back in!
def remove_extreme_changes(self, thresh=10, logic='and', mask_only=False):
"""Removes points at which there is an extreme change.
Only for univariate data!
        This operation is performed by looking for points that differ by more
than `thresh` * `err_y` from each of their neighbors. This operation
will typically only be useful with large values of thresh. This is
useful for eliminating bad channels.
Note that this will NOT update the Gaussian process.
Parameters
----------
thresh : float, optional
The threshold as a multiplier times `err_y`. Default is 10 (i.e.,
throw away all 10-sigma changes).
logic : {'and', 'or'}, optional
Whether the logical operation performed should be an and or an or
when looking at left-hand and right-hand differences. 'and' is more
conservative, but 'or' will help if you have multiple bad channels
in a row. Default is 'and' (point must have a drastic change in both
directions to be rejected).
mask_only : bool, optional
            If True, only the boolean mask indicating where the bad points are
            will be returned, and it is up to the user to remove them. Default is
False (actually remove the bad points).
"""
if self.X_dim != 1:
raise NotImplementedError("Extreme change removal is not supported "
"for X_dim = %d" % (self.X_dim,))
sort_idx = self.X.ravel().argsort()
y_sort = self.y[sort_idx]
err_y_sort = self.err_y[sort_idx]
forward_diff = y_sort[:-1] - y_sort[1:]
backward_diff = -forward_diff
forward_diff = scipy.absolute(scipy.append(forward_diff, 0) / err_y_sort)
backward_diff = scipy.absolute(scipy.insert(backward_diff, 0, 0) / err_y_sort)
if logic == 'and':
extreme_changes = (forward_diff >= thresh) & (backward_diff >= thresh)
elif logic == 'or':
extreme_changes = (forward_diff >= thresh) | (backward_diff >= thresh)
else:
raise ValueError("Unsupported logic '%s'." % (logic,))
if mask_only:
return extreme_changes[sort_idx.argsort()]
else:
return self.remove_points(extreme_changes[sort_idx.argsort()])
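# Hedged usage sketch (not part of the original module; `p` and its data arrays
# are hypothetical placeholders): combining the mask-only mode above with
# remove_points to strip suspected bad channels.
#
#     p = Profile(X_dim=1, X_units='m', y_units='eV', X_labels='R', y_label='T')
#     p.add_data(X_data, y_data, err_y=err_y_data)
#     mask = p.remove_extreme_changes(thresh=10, logic='or', mask_only=True)
#     p.remove_points(mask)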
def create_gp(self, k=None, noise_k=None, upper_factor=5, lower_factor=5,
x0_bounds=None, mask=None, k_kwargs={}, **kwargs):
"""Create a Gaussian process to handle the data.
Parameters
----------
k : :py:class:`Kernel` instance, optional
Covariance kernel (from :py:mod:`gptools`) with the appropriate
number of dimensions, or None. If None, a squared exponential kernel
is used. Can also be a string from the following table:
========= ==============================
SE Squared exponential
gibbstanh Gibbs kernel with tanh warping
RQ Rational quadratic
SEsym1d 1d SE with symmetry constraint
========= ==============================
The bounds for each hyperparameter are selected as follows:
============== =============================================
sigma_f [1/lower_factor, upper_factor]*range(y)
l1 [1/lower_factor, upper_factor]*range(X[:, 1])
... And so on for each length scale
============== =============================================
Here, eps is sys.float_info.epsilon. The initial guesses for each
parameter are set to be halfway between the upper and lower bounds.
For the Gibbs kernel, the uniform prior for sigma_f is used, but
gamma priors are used for the remaining hyperparameters. Default is
None (use SE kernel).
noise_k : :py:class:`Kernel` instance, optional
The noise covariance kernel. Default is None (use the default zero
noise kernel, with all noise being specified by `err_y`).
upper_factor : float, optional
Factor by which the range of the data is multiplied for the upper
bounds on both length scales and signal variances. Default is 5,
which seems to work pretty well for C-Mod data.
lower_factor : float, optional
Factor by which the range of the data is divided for the lower
bounds on both length scales and signal variances. Default is 5,
which seems to work pretty well for C-Mod data.
x0_bounds : 2-tuple, optional
Bounds to use on the x0 (transition location) hyperparameter of the
Gibbs covariance function with tanh warping. This is the
hyperparameter that tends to need the most tuning on C-Mod data.
Default is None (use range of X).
mask : array of bool, optional
Boolean mask of values to actually include in the GP. Default is to
include all values.
k_kwargs : dict, optional
All entries are passed as kwargs to the constructor for the kernel
if a kernel instance is not provided.
**kwargs : optional kwargs
All additional kwargs are passed to the constructor of
:py:class:`gptools.GaussianProcess`.
"""
# TODO: Create more powerful way of specifying kernels!
# TODO: Set ranges intelligently when using all transformed data!
# Save some time by only building these arrays once:
# Note that using this form only gets the non-transformed values.
y = self.y
X = self.X
err_y = self.err_y
if mask is not None and X is not None:
y = y[mask]
X = X[mask, :]
err_y = err_y[mask]
if isinstance(k, gptools.Kernel):
# Skip to the end for pure kernel instances, no need to do all the
# testing...
pass
elif k is None or k == 'SE':
y_range = y.max() - y.min()
bounds = [(0.0, upper_factor * y_range)]
for i in xrange(0, self.X_dim):
X_range = X[:, i].max() - X[:, i].min()
bounds.append((0.0, upper_factor * X_range))
initial = [(b[1] - b[0]) / 2.0 for b in bounds]
k = gptools.SquaredExponentialKernel(
num_dim=self.X_dim,
initial_params=initial,
param_bounds=bounds,
**k_kwargs
)
elif k == 'gibbstanhlegacy':
# This is the old version of gibbstanh, which was found to not work
# quite as well, but is needed to keep the legacy version of
# fit_profile working.
# TODO: This is a very hackish way of supporting transformed data. Fix it!
if self.X_dim != 1:
raise ValueError('Gibbs kernel is only supported for univariate data!')
try:
y_range = y.max() - y.min()
except (TypeError, ValueError):
y_range = 10
sigma_f_bounds = (0, upper_factor * y_range)
try:
X_range = X[:, 0].max() - X[:, 0].min()
except TypeError:
X_range = 1.2
l1_bounds = (0.0, upper_factor * X_range)
l2_bounds = (0.0, l1_bounds[1])
lw_bounds = (l2_bounds[0], l1_bounds[1] / 50.0)
if x0_bounds is None:
x0_bounds = (X[:, 0].min(), X[:, 0].max())
bounds = [sigma_f_bounds, l1_bounds, l2_bounds, lw_bounds, x0_bounds]
initial = [(b[1] - b[0]) / 2.0 for b in bounds]
initial[2] = initial[2] / 2
k = gptools.GibbsKernel1dTanh(
initial_params=initial,
hyperprior=gptools.CoreEdgeJointPrior(bounds),
**k_kwargs
)
elif k == 'gibbstanh':
# TODO: This is a very hackish way of supporting transformed data. Fix it!
if self.X_dim != 1:
raise ValueError('Gibbs kernel is only supported for univariate data!')
try:
y_range = y.max() - y.min()
except (TypeError, ValueError):
y_range = 10
sigma_f_bounds = (0, upper_factor * y_range)
hp = (
gptools.UniformJointPrior([sigma_f_bounds]) *
gptools.GammaJointPriorAlt(
[1.0, 0.5, 0.0, 1.0],
[0.3, 0.25, 0.1, 0.1]
)
)
initial = [sigma_f_bounds[1] / 2.0, 1.0, 0.5, 0.05, 1.0]
# try:
# X_range = X[:, 0].max() - X[:, 0].min()
# except TypeError:
# X_range = 1.2
# l1_bounds = (0.0, upper_factor * X_range)
# l2_bounds = (0.0, l1_bounds[1])
# lw_bounds = (l2_bounds[0], l1_bounds[1] / 50.0)
# if x0_bounds is None:
# x0_bounds = (X[:, 0].min(), X[:, 0].max())
# bounds = [sigma_f_bounds, l1_bounds, l2_bounds, lw_bounds, x0_bounds]
# initial = [(b[1] - b[0]) / 2.0 for b in bounds]
# initial[2] = initial[2] / 2
k = gptools.GibbsKernel1dTanh(
initial_params=initial,
hyperprior=hp,
**k_kwargs
)
elif k == 'gibbsdoubletanh':
if self.X_dim != 1:
raise ValueError('Gibbs kernel is only supported for univariate data!')
y_range = y.max() - y.min()
sigma_f_bounds = (0.0, upper_factor * y_range)
X_range = X[:, 0].max() - X[:, 0].min()
lcore_bounds = (0.0, upper_factor * X_range)
la_bounds = (0.0, lcore_bounds[1] / 50.0)
if x0_bounds is None:
x0_bounds = (X[:, 0].min(), X[:, 0].max())
bounds = [
sigma_f_bounds,
lcore_bounds,
lcore_bounds,
lcore_bounds,
la_bounds,
la_bounds,
x0_bounds,
x0_bounds
]
initial = [(b[1] - b[0]) / 2.0 for b in bounds]
k = gptools.GibbsKernel1dDoubleTanh(
initial_params=initial,
hyperprior=gptools.CoreMidEdgeJointPrior(bounds),
**k_kwargs
)
elif k == 'RQ':
y_range = y.max() - y.min()
bounds = [(0.0, upper_factor * y_range), (0.0, 1e2)]
for i in xrange(0, self.X_dim):
X_range = X[:, i].max() - X[:, i].min()
bounds.append((0, upper_factor * X_range))
initial = [(b[1] - b[0]) / 2.0 for b in bounds]
k = gptools.RationalQuadraticKernel(
num_dim=self.X_dim,
initial_params=initial,
param_bounds=bounds,
**k_kwargs
)
# Try to avoid some issues that were coming up during MCMC sampling:
if 'diag_factor' not in kwargs:
kwargs['diag_factor'] = 1e3
elif k == 'SEsym1d':
if self.X_dim != 1:
raise ValueError("Symmetric SE kernel only supported for univariate data!")
y_range = y.max() - y.min()
bounds = [(0.0, upper_factor * y_range)]
for i in xrange(0, self.X_dim):
X_range = X[:, i].max() - X[:, i].min()
bounds.append((0.0, upper_factor * X_range))
initial = [(b[1] - b[0]) / 2.0 for b in bounds]
k_base = gptools.SquaredExponentialKernel(
num_dim=self.X_dim,
initial_params=initial,
param_bounds=bounds,
**k_kwargs
)
kM1 = gptools.MaskedKernel(k_base, mask=[0], total_dim=1, scale=[1, 1])
kM2 = gptools.MaskedKernel(k_base, mask=[0], total_dim=1, scale=[-1, 1])
k = kM1 + kM2
elif k == 'SEbeta':
# TODO: Add support for k_kwargs on warp steps!
y_range = y.max() - y.min()
bounds = [(0.0, upper_factor * y_range)]
for i in xrange(0, self.X_dim):
X_range = X[:, i].max() - X[:, i].min()
bounds.append((0.0, upper_factor * X_range))
initial = [(b[1] - b[0]) / 2.0 for b in bounds]
k_SE = gptools.SquaredExponentialKernel(
num_dim=self.X_dim,
param_bounds=bounds,
initial_params=initial,
**k_kwargs
)
# TODO: Put in hooks to vary the hyperhyperparameters/hyperprior!
lognormal_prior = gptools.LogNormalJointPrior([0, 1], [0.25, 1])
k_SE_beta = gptools.BetaWarpedKernel(k_SE, hyperprior=lognormal_prior)
# TODO: Make this more intelligent!
k = gptools.LinearWarpedKernel(k_SE_beta, -1e-3, 1.5)
elif k == 'matern':
y_range = y.max() - y.min()
bounds = [(0.0, upper_factor * y_range), (1.0, 50)]
for i in xrange(0, self.X_dim):
X_range = X[:, i].max() - X[:, i].min()
bounds.append((0.0, upper_factor * X_range))
initial = [(b[1] - b[0]) / 2.0 for b in bounds]
k = gptools.MaternKernel1d(
# num_dim=self.X_dim,
initial_params=initial,
param_bounds=bounds,
**k_kwargs
)
elif k == 'matern52':
y_range = y.max() - y.min()
bounds = [(0.0, upper_factor * y_range)]
for i in xrange(0, self.X_dim):
X_range = X[:, i].max() - X[:, i].min()
bounds.append((0.0, upper_factor * X_range))
initial = [(b[1] - b[0]) / 2.0 for b in bounds]
k = gptools.Matern52Kernel(
num_dim=self.X_dim,
initial_params=initial,
param_bounds=bounds,
**k_kwargs
)
elif k == 'matern52beta':
y_range = y.max() - y.min()
bounds = [(0.0, upper_factor * y_range)]
for i in xrange(0, self.X_dim):
X_range = X[:, i].max() - X[:, i].min()
bounds.append((0.0, upper_factor * X_range))
initial = [(b[1] - b[0]) / 2.0 for b in bounds]
k_M = gptools.Matern52Kernel(
num_dim=self.X_dim,
initial_params=initial,
param_bounds=bounds,
**k_kwargs
)
# TODO: Put in hooks to vary the hyperhyperparameters!
lognormal_prior = gptools.LogNormalJointPrior([0.0, 1.0], [0.25, 1.0])
k_M_beta = gptools.BetaWarpedKernel(k_M, hyperprior=lognormal_prior)
# TODO: Make this more intelligent!
k = gptools.LinearWarpedKernel(k_M_beta, -1e-3, 1.5)
# TODO: I can probably just handle all of the beta-warps at once...
elif isinstance(k, str):
raise NotImplementedError("That kernel specification is not supported!")
self.gp = gptools.GaussianProcess(k, noise_k=noise_k, **kwargs)
if self.X is not None:
self.gp.add_data(X, y, err_y=err_y)
for p in self.transformed:
if len(p.y) > 0:
self.gp.add_data(
scipy.vstack(p.X),
p.y,
err_y=p.err_y,
T=scipy.linalg.block_diag(*p.T)
)
if len(self.transformed) > 0:
self.gp.condense_duplicates()
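# Minimal sketch (illustrative only; `p` is a hypothetical univariate Profile
# that already holds data) of selecting a kernel through the string
# specifications handled above:
#
#     p.create_gp(k='gibbstanh')            # Gibbs kernel with tanh warping
#     p.create_gp(k='SE', upper_factor=10)  # SE kernel with wider hyperparameter bounds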
def find_gp_MAP_estimate(self, force_update=False, gp_kwargs={}, **kwargs):
"""Find the MAP estimate for the hyperparameters of the Profile's Gaussian process.
If this :py:class:`Profile` instance does not already have a Gaussian
process, it will be created. Note that the user is responsible for
manually updating the Gaussian process if more data are added or the
:py:class:`Profile` is otherwise mutated. This can be accomplished
directly using the `force_update` keyword.
Parameters
----------
force_update : bool, optional
If True, a new Gaussian process will be created even if one already
exists. Set this if you have added data or constraints since you
created the Gaussian process. Default is False (use current Gaussian
process if it exists).
gp_kwargs : dict, optional
The entries of this dictionary are passed as kwargs to
:py:meth:`create_gp` if it gets called. Default is {}.
**kwargs : optional parameters
All other parameters are passed to the Gaussian process'
:py:meth:`optimize_hyperparameters` method.
"""
if force_update or self.gp is None:
self.create_gp(**gp_kwargs)
return self.gp.optimize_hyperparameters(**kwargs)
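# Hedged example (hypothetical call): rebuild the GP with an SE kernel and then
# maximize the log-posterior of its hyperparameters.
#
#     p.find_gp_MAP_estimate(force_update=True, gp_kwargs={'k': 'SE'})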
def plot_gp(self, force_update=False, gp_kwargs={}, MAP_kwargs={}, **kwargs):
"""Plot the current state of the Profile's Gaussian process.
If this :py:class:`Profile` instance does not already have a Gaussian
process, it will be created. Note that the user is responsible for
manually updating the Gaussian process if more data are added or the
:py:class:`Profile` is otherwise mutated. This can be accomplished
directly using the `force_update` keyword.
Parameters
----------
force_update : bool, optional
If True, a new Gaussian process will be created even if one already
exists. Set this if you have added data or constraints since you
created the Gaussian process. Default is False (use current Gaussian
process if it exists).
gp_kwargs : dict, optional
The entries of this dictionary are passed as kwargs to
:py:meth:`create_gp` if it gets called. Default is {}.
MAP_kwargs : dict, optional
The entries of this dictionary are passed as kwargs to
:py:meth:`find_gp_MAP_estimate` if it gets called. Default is {}.
**kwargs : optional parameters
All other parameters are passed to the Gaussian process'
:py:meth:`plot` method.
"""
if force_update or self.gp is None:
self.create_gp(**gp_kwargs)
if not kwargs.get('use_MCMC', False):
self.find_gp_MAP_estimate(**MAP_kwargs)
return self.gp.plot(**kwargs)
def smooth(self, X, n=0, force_update=False, plot=False, gp_kwargs={},
MAP_kwargs={}, **kwargs):
"""Evaluate the underlying smooth curve at a given set of points using Gaussian process regression.
If this :py:class:`Profile` instance does not already have a Gaussian
process, it will be created. Note that the user is responsible for
manually updating the Gaussian process if more data are added or the
:py:class:`Profile` is otherwise mutated. This can be accomplished
directly using the `force_update` keyword.
Parameters
----------
X : array-like (`N`, `X_dim`)
Points to evaluate smooth curve at.
n : non-negative int, optional
The order of derivative to evaluate at. Default is 0 (return value).
See the documentation on :py:meth:`gptools.GaussianProcess.predict`.
force_update : bool, optional
If True, a new Gaussian process will be created even if one already
exists. Set this if you have added data or constraints since you
created the Gaussian process. Default is False (use current Gaussian
process if it exists).
plot : bool, optional
If True, :py:meth:`gptools.GaussianProcess.plot` is called to
produce a plot of the smoothed curve. Otherwise,
:py:meth:`gptools.GaussianProcess.predict` is called directly.
gp_kwargs : dict, optional
The entries of this dictionary are passed as kwargs to
:py:meth:`create_gp` if it gets called. Default is {}.
MAP_kwargs : dict, optional
The entries of this dictionary are passed as kwargs to
:py:meth:`find_gp_MAP_estimate` if it gets called. Default is {}.
**kwargs : optional parameters
All other parameters are passed to the Gaussian process'
:py:meth:`plot` or :py:meth:`predict` method according to the
state of the `plot` keyword.
Returns
-------
ax : axis instance
The axis instance used. This is only returned if the `plot`
keyword is True.
mean : :py:class:`Array`, (`M`,)
Predicted GP mean. Only returned if `full_output` is False.
std : :py:class:`Array`, (`M`,)
Predicted standard deviation, only returned if `return_std` is True and `full_output` is False.
full_output : dict
Dictionary with fields for mean, std, cov and possibly random samples. Only returned if `full_output` is True.
"""
if force_update or self.gp is None:
self.create_gp(**gp_kwargs)
if not kwargs.get('use_MCMC', False):
self.find_gp_MAP_estimate(**MAP_kwargs)
if plot:
kwargs.pop('return_prediction', True)
return self.gp.plot(X=X, n=n, return_prediction=True, **kwargs)
else:
return self.gp.predict(X, n=n, **kwargs)
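# Usage sketch (names are placeholders, not from the original source): evaluate
# the smoothed curve and its first derivative on a uniform grid. Per the Returns
# section above, the default call typically yields (mean, std).
#
#     X_grid = scipy.linspace(0.0, 1.2, 100)
#     y_fit, err_y_fit = p.smooth(X_grid, n=0)
#     dy_fit, err_dy_fit = p.smooth(X_grid, n=1)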
def write_csv(self, filename):
"""Writes this profile to a CSV file.
Parameters
----------
filename : str
Path of the file to write. If the file exists, it will be
overwritten without warning.
"""
# TODO: Add support for transformed quantities!
# TODO: Add metadata (probably in CMod...)!
# Could put metadata as a kwarg...
# Only build these arrays once to save a bit of time:
# Note that this form does not write any of the transformed quantities!
X = self.X
err_X = self.err_X
y = self.y
err_y = self.err_y
filename = os.path.expanduser(filename)
with open(filename, 'wb') as outfile:
writer = csv.writer(outfile)
X_labels = [l + ' [' + u + ']' for l, u in zip(self.X_labels, self.X_units)]
err_X_labels = ['err_' + l for l in X_labels]
writer.writerow(X_labels + err_X_labels +
                [self.y_label + ' [' + self.y_units + ']'] +
                ['err_' + self.y_label + ' [' + self.y_units + ']'])
for k in xrange(0, len(self.y)):
writer.writerow(
[x for x in X[k, :]] + [x for x in err_X[k, :]] + [y[k], err_y[k]]
)
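# Round-trip sketch (hypothetical file name): write the profile out, then read
# it back with the module-level read_csv defined below.
#
#     p.write_csv('~/profile_out.csv')
#     p2 = read_csv('~/profile_out.csv')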
def read_csv(filename, X_names=None, y_name=None, metadata_lines=None):
"""Reads a CSV file into a :py:class:`Profile`.
If names are not provided for the columns holding the `X` and `y` values and
errors, the names are found automatically by looking at the header row, and
are used in the order found, with the last column being `y`. Otherwise, the
columns will be read in the order specified. The column names should be of
the form "name [units]", which will be automatically parsed to populate the
:py:class:`Profile`. In either case, there can be a corresponding column
"err_name [units]" which holds the 1-sigma uncertainty in that quantity.
There can be an arbitrary number of lines of metadata at the beginning of
the file which are read into the :py:attr:`metadata` attribute of the
:py:class:`Profile` created. This is most useful when using
:py:class:`BivariatePlasmaProfile` as you can store the shot and time window.
Parameters
----------
filename : str
Path of the CSV file to read.
X_names : list of str, optional
Ordered list of the column names containing the independent variables.
The default behavior is to infer the names and ordering from the header
of the CSV file. See the discussion above. Note that if you provide
`X_names` you must also provide `y_name`.
y_name : str, optional
Name of the column containing the dependent variable. The default
behavior is to infer this name from the header of the CSV file. See the
discussion above. Note that if you provide `y_name` you must also
provide `X_names`.
metadata_lines : non-negative int, optional
Number of lines of metadata to read from the beginning of the file.
These are read into the :py:attr:`metadata` attribute of the profile
created.
"""
if X_names and not y_name:
raise ValueError("If supplying an ordered list of names for the X "
"columns, you must also specify the name for the y "
"column.")
if y_name and not X_names:
raise ValueError("If supplying a name for the y column you must also "
"supply an ordered list of names for the X columns.")
filename = os.path.expanduser(filename)
X = []
y = []
err_X = []
err_y = []
metadata = []
with open(filename, 'rb') as infile:
# Capture metadata, if present:
if metadata_lines is None:
first_line = infile.readline()
if first_line.startswith("metadata"):
try:
metadata_lines = int(first_line.split(None, 1)[1])
except ValueError:
metadata_lines = 1
else:
metadata_lines = 0
infile.seek(0)
for k in xrange(0, metadata_lines):
metadata.append(infile.readline())
if not (X_names and y_name):
X_names = infile.readline().split(',')
X_names = [name for name in X_names if not name.startswith('err_')]
y_name = X_names.pop(-1)
infile.seek(0)
# Need to skip the metadata again:
for k in xrange(0, metadata_lines):
infile.readline()
rdr = csv.DictReader(infile)
for row in rdr:
X.append([row[l] for l in X_names])
err_X_row = []
for l in X_names:
try:
err_X_row.append(row['err_' + l])
except KeyError:
err_X_row.append(0)
err_X.append(err_X_row)
y.append(row[y_name])
try:
err_y.append(row['err_' + y_name])
except KeyError:
err_y.append(0)
y_label, y_units = parse_column_name(y_name)
X_labels = []
X_units = []
for X_name in X_names:
n, u = parse_column_name(X_name)
X_labels.append(n)
X_units.append(u)
X_dim = len(X_labels)
if X_dim == 1:
X_labels = X_labels[0]
X_units = X_units[0]
p = Profile(X_dim=X_dim, X_units=X_units, y_units=y_units,
X_labels=X_labels, y_label=y_label)
p.add_data(X, y, err_X=err_X, err_y=err_y)
p.metadata = metadata
return p
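# Hedged illustration of the expected CSV layout (column names are illustrative
# only). A header of the form "name [units]", with optional "err_" columns:
#
#     R [m],err_R [m],T [eV],err_T [eV]
#     0.70,0.01,1250.0,75.0
#     0.75,0.01,1100.0,70.0
#
# read_csv('profile.csv') would then infer X_labels='R', X_units='m',
# y_label='T' and y_units='eV' from the header.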
def read_NetCDF(filename, X_names, y_name, metadata=[]):
"""Reads a NetCDF file into a :py:class:`Profile`.
The file must contain arrays of equal length for each of the independent and
the dependent variable. The units of each variable can either be specified
as the units attribute on the variable, or the variable name can be of the
form "name [units]", which will be automatically parsed to populate the
:py:class:`Profile`. For each independent and the dependent variable there
can be a corresponding column "err_name" or "err_name [units]" which holds
the 1-sigma uncertainty in that quantity. There can be an arbitrary number
of metadata attributes in the file which are read into the corresponding
attributes of the :py:class:`Profile` created. This is most useful when using
:py:class:`BivariatePlasmaProfile` as you can store the shot and time window.
Be careful that you do not overwrite attributes needed by the class, however!
Parameters
----------
filename : str
Path of the NetCDF file to read.
X_names : list of str
Ordered list of the column names containing the independent variables.
See the discussion above regarding name conventions.
y_name : str
Name of the column containing the dependent variable. See the discussion
above regarding name conventions.
metadata : list of str, optional
List of attribute names to read into the corresponding attributes of the
:py:class:`Profile` created.
"""
with scipy.io.netcdf.netcdf_file(os.path.expanduser(filename), mode='r') as infile:
X = []
err_X = []
X_labels = []
X_units = []
for l in X_names:
vXl = infile.variables[l]
X.append(vXl[:])
n, u = parse_column_name(l)
X_labels.append(n)
try:
X_units.append(vXl.units)
except AttributeError:
X_units.append(u)
try:
err_X.append(infile.variables['err_' + l])
except KeyError:
err_X.append(scipy.zeros_like(X[0]))
X = scipy.hstack(X)
err_X = scipy.hstack(err_X)
vy = infile.variables[y_name]
# Explicitly convert, since I've been having strange segfaults here:
y = scipy.array(vy[:])
y_label, u = parse_column_name(y_name)
try:
y_units = vy.units
except AttributeError:
y_units = u
try:
err_y = scipy.array(infile.variables['err_' + y_name][:])
except KeyError:
err_y = 0
X_dim = len(X_labels)
if X_dim == 1:
X_labels = X_labels[0]
X_units = X_units[0]
p = Profile(X_dim=X_dim, X_units=X_units, y_units=y_units,
X_labels=X_labels, y_label=y_label)
p.add_data(X, y, err_X=err_X, err_y=err_y)
for m in metadata:
try:
if hasattr(p, m):
warnings.warn("Profile class already has metadata attribute %s. "
"Existing value is being overwritten. This may "
"lead to undesirable behavior." % (m,),
RuntimeWarning)
setattr(p, m, getattr(infile, m))
except AttributeError:
warnings.warn("Could not find metadata attribute %s in NetCDF file %s." %
(m, filename,), RuntimeWarning)
return p
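# Hedged usage sketch (file and variable names are illustrative only):
#
#     p = read_NetCDF('profile.nc', ['R [m]'], 'T [eV]', metadata=['shot'])
#
# where the file is expected to hold equal-length 1d variables 'R [m]' and
# 'T [eV]', optionally with 'err_R [m]' and 'err_T [eV]'.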
def parse_column_name(name):
"""Parse a column header `name` into label and units.
"""
name_split = re.split(r'^([^ \t]*)[ \t]*\[(.*)\]$', name)
if len(name_split) == 1:
name = name_split[0]
units = ''
else:
assert len(name_split) == 4
name = name_split[1]
units = name_split[2]
return (name, units)
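# Behavior sketch (the values shown follow from the regular expression above):
#
#     parse_column_name('n_e [10^20 m^-3]')  # -> ('n_e', '10^20 m^-3')
#     parse_column_name('n_e')               # -> ('n_e', '')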
def errorbar3d(ax, x, y, z, xerr=None, yerr=None, zerr=None, **kwargs):
"""Draws errorbar plot of z(x, y) with errorbars on all variables.
Parameters
----------
ax : 3d axis instance
The axis to draw the plot on.
x : array, (`M`,)
x-values of data.
y : array, (`M`,)
y-values of data.
z : array, (`M`,)
z-values of data.
xerr : array, (`M`,), optional
Errors in x-values. Default value is 0.
yerr : array, (`M`,), optional
Errors in y-values. Default value is 0.
zerr : array, (`M`,), optional
Errors in z-values. Default value is 0.
**kwargs : optional
Extra arguments are passed to the plot command used to draw the
datapoints.
"""
fmt = kwargs.pop('fmt', kwargs.pop('marker', 'o'))
if xerr is None:
no_x = True
xerr = scipy.zeros_like(x)
else:
no_x = False
if yerr is None:
no_y = True
yerr = scipy.zeros_like(y)
else:
no_y = False
if zerr is None:
no_z = True
zerr = scipy.zeros_like(z)
else:
no_z = False
pts = ax.plot(x, y, z, fmt, **kwargs)
color = plt.getp(pts[0], 'color')
# Only draw the lines if the error is nonzero:
for X, Y, Z, Xerr, Yerr, Zerr in zip(x, y, z, xerr, yerr, zerr):
if not no_x:
ax.plot([X - Xerr, X + Xerr], [Y, Y], [Z, Z], color=color, marker='_')
if not no_y:
ax.plot([X, X], [Y - Yerr, Y + Yerr], [Z, Z], color=color, marker='_')
if not no_z:
ax.plot([X, X], [Y, Y], [Z - Zerr, Z + Zerr], color=color, marker='_')
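# Minimal usage sketch (assumes a 3d axis from mpl_toolkits.mplot3d and that the
# data arrays x, y, z, dx, dy, dz already exist):
#
#     from mpl_toolkits.mplot3d import Axes3D
#     fig = plt.figure()
#     ax = fig.add_subplot(111, projection='3d')
#     errorbar3d(ax, x, y, z, xerr=dx, yerr=dy, zerr=dz, fmt='s')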
def unique_rows(arr):
"""Returns a copy of arr with duplicate rows removed.
From Stackoverflow "Find unique rows in numpy.array."
Parameters
----------
arr : :py:class:`Array`, (`m`, `n`)
The array to find the unique rows of.
Returns
-------
unique : :py:class:`Array`, (`p`, `n`) where `p` <= `m`
The array `arr` with duplicate rows removed.
"""
b = scipy.ascontiguousarray(arr).view(
scipy.dtype((scipy.void, arr.dtype.itemsize * arr.shape[1]))
)
try:
dum, idx = scipy.unique(b, return_index=True)
except TypeError:
# Handle bug in numpy 1.6.2:
rows = [_Row(row) for row in b]
srt_idx = sorted(range(len(rows)), key=rows.__getitem__)
rows = scipy.asarray(rows)[srt_idx]
row_cmp = [-1]
for k in xrange(1, len(srt_idx)):
row_cmp.append(rows[k-1].__cmp__(rows[k]))
row_cmp = scipy.asarray(row_cmp)
transition_idxs = scipy.where(row_cmp != 0)[0]
idx = scipy.asarray(srt_idx)[transition_idxs]
return arr[idx]
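# Worked example (a sketch; the output follows from the void-view uniqueness
# above, with row order set by the byte sort):
#
#     a = scipy.array([[1, 2], [3, 4], [1, 2]])
#     unique_rows(a)  # -> array([[1, 2], [3, 4]])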
def get_nearest_idx(v, a):
"""Returns the array of indices of the nearest value in `a` corresponding to each value in `v`.
Parameters
----------
v : Array
Input values to match to nearest neighbors in `a`.
a : Array
Given values to match against.
Returns
-------
Indices in `a` of the nearest values to each value in `v`. Has the same shape as `v`.
"""
# Gracefully handle single-value versus array inputs, returning in the
# corresponding type.
try:
return scipy.array([(scipy.absolute(a - val)).argmin() for val in v])
except TypeError:
return (scipy.absolute(a - v)).argmin()
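# Worked example (values follow directly from the argmin logic above):
#
#     a = scipy.array([0.0, 0.5, 1.0])
#     get_nearest_idx(scipy.array([0.1, 0.8]), a)  # -> array([0, 2])
#     get_nearest_idx(0.6, a)                      # -> 1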
class RejectionFunc(object):
"""Rejection function for use with `full_MC` mode of :py:func:`GaussianProcess.predict`.
Parameters
----------
mask : array of bool
Mask for the values to include in the test.
positivity : bool, optional
Set this to True to impose a positivity constraint on the sample.
Default is True.
monotonicity : bool, optional
Set this to True to impose a monotonicity constraint on the samples.
Default is True.
"""
def __init__(self, mask, positivity=True, monotonicity=True):
self.mask = mask
self.positivity = positivity
self.monotonicity = monotonicity
def __call__(self, samp):
"""Returns True if the sample meets the constraints, False otherwise.
"""
k = len(self.mask)
if ((self.positivity and (samp[:k][self.mask].min() < 0)) or
(self.monotonicity and (samp[k:2*k][self.mask].max() > 0))):
return False
else:
return True
def leading_axis_product(w, x):
"""Perform a product along the leading axis, as is needed when applying weights.
"""
return scipy.einsum('i...,i...->i...', w, x)
def meanw(x, weights=None, axis=None):
r"""Weighted mean of data.
Defined as
.. math::
\mu = \frac{\sum_i w_i x_i}{\sum_i w_i}
Parameters
----------
x : array-like
The vector to find the mean of.
weights : array-like, optional
The weights. Must be broadcastable with `x`. Default is to use the
unweighted mean.
axis : int, optional
The axis to take the mean along. Default is to use the whole data set.
"""
if weights is None:
return scipy.mean(x, axis=axis)
else:
x = scipy.asarray(x)
weights = scipy.asarray(weights)
return leading_axis_product(weights, x).sum(axis=axis) / weights.sum(axis=axis)
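# Worked example (weights playing the role of inverse variances; the number
# follows from the formula above):
#
#     x = scipy.array([1.0, 2.0, 4.0])
#     w = scipy.array([1.0, 1.0, 2.0])
#     meanw(x, weights=w)  # -> (1*1 + 1*2 + 2*4) / (1 + 1 + 2) = 2.75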
def varw(x, weights=None, axis=None, ddof=1, mean=None):
r"""Weighted variance of data.
Defined (for `ddof` = 1) as
.. math::
s^2 = \frac{\sum_i w_i}{(\sum_i w_i)^2 - \sum_i w_i^2}\sum_i w_i (x_i - \mu)^2
Parameters
----------
x : array-like
The vector to find the variance of.
weights : array-like, optional
The weights. Must be broadcastable with `x`. Default is to use the
unweighted mean.
axis : int, optional
The axis to take the mean along. Default is to use the whole data set.
ddof : int, optional
The degree of freedom correction to use. If no weights are given, this
is the standard Bessel correction. If weights are given, this uses an
approximate form based on the assumption that the weights are inverse
variances for each data point. In this case, the value has no effect
other than being True or False. Default is 1 (apply correction assuming
normal noise dictated weights).
mean : array-like, optional
The weighted mean to use. If you have already computed the weighted mean
with :py:func:`meanw`, you can pass the result in here to save time.
"""
if weights is None:
return scipy.var(x, axis=axis, ddof=ddof)
else:
x = scipy.asarray(x)
weights = scipy.asarray(weights)
if mean is None:
mean = meanw(x, weights=weights, axis=axis)
else:
mean = scipy.asarray(mean)
V1 = weights.sum(axis=axis)
M = leading_axis_product(weights, (x - mean)**2).sum(axis=axis)
if ddof:
res = V1 / (V1**2 - (weights**2).sum(axis=axis)) * M
# Put nan where the result blows up to be consistent with scipy:
try:
res[scipy.isinf(res)] = scipy.nan
except TypeError:
if scipy.isinf(res):
res = scipy.nan
return res
else:
return M / V1
def stdw(*args, **kwargs):
r"""Weighted standard deviation of data.
Defined (for `ddof` = 1) as
.. math::
s = \sqrt{\frac{\sum_i w_i}{(\sum_i w_i)^2 - \sum_i w_i^2}\sum_i w_i (x_i - \mu)^2}
Parameters
----------
x : array-like
The vector to find the standard deviation of.
weights : array-like, optional
The weights. Must be broadcastable with `x`. Default is to use the
unweighted mean.
axis : int, optional
The axis to take the mean along. Default is to use the whole data set.
ddof : int, optional
The degree of freedom correction to use. If no weights are given, this
is the standard Bessel correction. If weights are given, this uses an
approximate form based on the assumption that the weights are inverse
variances for each data point. In this case, the value has no effect
other than being True or False. Default is 1 (apply correction assuming
normal noise dictated weights).
mean : array-like, optional
The weighted mean to use. If you have already computed the weighted mean
with :py:func:`meanw`, you can pass the result in here to save time.
"""
return scipy.sqrt(varw(*args, **kwargs))
# Conversion factor to get from interquartile range to standard deviation:
IQR_TO_STD = 2.0 * scipy.stats.norm.isf(0.25)
def robust_std(y, axis=None):
r"""Computes the robust standard deviation of the given data.
This is defined as :math:`IQR/(2\Phi^{-1}(0.75))`, where :math:`IQR` is the
interquartile range and :math:`\Phi` is the inverse CDF of the standard
normal. This is an approximation based on the assumption that the data are
Gaussian, and will have the effect of diminishing the effect of outliers.
Parameters
----------
y : array-like
The data to find the robust standard deviation of.
axis : int, optional
The axis to find the standard deviation along. Default is None (find
from whole data set).
"""
return (scipy.stats.scoreatpercentile(y, 75.0, axis=axis) -
scipy.stats.scoreatpercentile(y, 25.0, axis=axis)) / IQR_TO_STD
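# Sanity-check sketch: for (approximately) standard normal samples the robust
# estimate should come out near 1, since IQR_TO_STD = 2*Phi^{-1}(0.75) ~ 1.349.
#
#     samp = scipy.stats.norm.rvs(size=100000)
#     robust_std(samp)  # ~ 1.0, up to sampling noise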
def scoreatpercentilew(x, p, weights):
"""Computes the weighted score at the given percentile.
Does not work on small data sets!
Parameters
----------
x : array
Array of data to apply to. Only works properly on 1d data!
p : float or array of float
Percentile(s) to find.
weights : array, same shape as `x`
The weights to apply to the values in `x`.
"""
# TODO: Vectorize this!
x = scipy.asarray(x)
weights = scipy.asarray(weights)
srt = x.argsort()
x = x[srt]
w = weights[srt]
Sn = w.cumsum()
pn = 100.0 / Sn[-1] * (Sn - w / 2.0)
k = scipy.digitize(scipy.atleast_1d(p), pn) - 1
return x[k] + (p - pn[k]) / (pn[k + 1] - pn[k]) * (x[k + 1] - x[k])
# TODO: This returns an array for a scalar input!
def medianw(x, weights=None, axis=None):
"""Computes the weighted median of the given data.
Does not work on small data sets!
Parameters
----------
x : array
Array of data to apply to. Only works properly on 1d, 2d and 3d data.
weights : array, optional
Weights to apply to the values in `x`. Default is to use an unweighted
estimator.
axis : int, optional
The axis to take the median along. Default is None (apply to flattened
array).
"""
# TODO: This could be done a whole lot better!
if weights is None:
return scipy.median(x, axis=axis)
else:
if axis is None and x.ndim == 1:
return scoreatpercentilew(x, 50, weights)[0]
elif axis == 0 and x.ndim == 3:
out = scipy.zeros_like(x[0])
for i in xrange(0, out.shape[0]):
for j in xrange(0, out.shape[1]):
out[i, j] = scoreatpercentilew(x[:, i, j], 50, weights)
return out
elif axis == 0 and x.ndim == 2:
out = scipy.zeros(x.shape[1])
for i in xrange(0, len(out)):
out[i] = scoreatpercentilew(x[:, i], 50, weights)
return out
else:
raise NotImplementedError("That shape/axis is not supported!")
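# Worked sketch (uniform weights reduce to the ordinary median; needs a
# reasonably large sample, per the warning in the docstring):
#
#     x = scipy.linspace(0.0, 10.0, 101)
#     medianw(x, weights=scipy.ones_like(x))  # ~ 5.0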
def robust_stdw(x, weights=None, axis=None):
"""Computes the weighted robust standard deviation from the weighted IQR.
Does not work on small data sets!
Parameters
----------
x : array
Array of data to apply to. Only works properly on 1d, 2d and 3d data.
weights : array, optional
Weights to apply to the values in `x`. Default is to use an unweighted
estimator.
axis : int, optional
The axis to take the robust standard deviation along. Default is None
(apply to flattened array).
"""
# TODO: This could be done a whole lot better!
if weights is None:
return robust_std(x, axis=axis)
else:
if axis is None and x.ndim == 1:
lq, uq = scoreatpercentilew(x, [25, 75], weights)
return (uq - lq) / IQR_TO_STD
elif axis == 0 and x.ndim == 3:
lq = scipy.zeros_like(x[0])
uq = scipy.zeros_like(x[0])
for i in xrange(0, lq.shape[0]):
for j in xrange(0, lq.shape[1]):
lqij, uqij = scoreatpercentilew(x[:, i, j], [25, 75], weights)
lq[i, j] = lqij
uq[i, j] = uqij
return (uq - lq) / IQR_TO_STD
elif axis == 0 and x.ndim == 2:
lq = scipy.atleast_1d(scipy.zeros(x.shape[1]))
uq = scipy.atleast_1d(scipy.zeros(x.shape[1]))
for i in xrange(0, len(lq)):
lqi, uqi = scoreatpercentilew(x[:, i], [25, 75], weights)
lq[i] = lqi
uq[i] = uqi
return (uq - lq) / IQR_TO_STD
else:
raise NotImplementedError("That shape/axis is not supported!")
|
markchil/profiletools
|
profiletools/core.py
|
Python
|
gpl-3.0
| 88,014
|
[
"Gaussian",
"NetCDF"
] |
d4793fb80bfcc4dc7a1220604003012e28705581964d6f579a6ca3539bb050b8
|
#!/usr/bin/env python
#
# Copyright (C) 2009-2011 University of Edinburgh
#
# This file is part of IMUSim.
#
# IMUSim is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IMUSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IMUSim. If not, see <http://www.gnu.org/licenses/>.
depsOK = True
try:
import numpy
except ImportError:
depsOK = False
print "NumPy should be installed first from suitable binaries."
print "See http://numpy.scipy.org/"
try:
import scipy
except ImportError:
depsOK = False
print "SciPy should be installed first from suitable binaries."
print "See http://www.scipy.org/"
try:
import matplotlib
except ImportError:
depsOK = False
print "Matplotlib should be installed first from suitable binaries."
print "See http://matplotlib.sf.net/"
try:
import mayavi
except ImportError:
depsOK = False
print "Mayavi should be installed first from suitable binaries."
print "See http://code.enthought.com/projects/mayavi/"
try:
from setuptools import setup, find_packages
from setuptools.extension import Extension
if depsOK:
setup(
name = "imusim",
version = "0.2",
author = "Alex Young and Martin Ling",
license = "GPLv3",
url = "http://www.imusim.org/",
install_requires = ["simpy==2.2", "pyparsing"],
packages = find_packages(),
include_dirs = [numpy.get_include()],
ext_modules = [
Extension("imusim.maths.quaternions",
['imusim/maths/quaternions.c']),
Extension("imusim.maths.quat_splines",
['imusim/maths/quat_splines.c']),
Extension("imusim.maths.vectors",['imusim/maths/vectors.c']),
Extension("imusim.maths.natural_neighbour",[
'imusim/maths/natural_neighbour/utils.c',
'imusim/maths/natural_neighbour/delaunay.c',
'imusim/maths/natural_neighbour/natural.c',
'imusim/maths/natural_neighbour.c'])]
)
except ImportError:
print "Setuptools must be installed - see http://pypi.python.org/pypi/setuptools"
|
spaghetti-/imusim
|
setup.py
|
Python
|
gpl-3.0
| 2,664
|
[
"Mayavi"
] |
e24250be2b6c3feabeff13374c82a7960ea71a244d4b7a3a30044e39e4381e35
|
"""
Sigma_MonteCarlo
"""
import numpy as np
from scipy import interpolate
import halomodel as hm
import matplotlib.pyplot as plt
from matplotlib import rc, rcParams
rc('text', usetex=True)
rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
CosPar={'Omega_M':0.3, 'Omega_L':0.7, 'Omega_b':0.045, 'Omega_nu':1e-5, 'n_degen_nu':3., 'h':0.7, 'sigma_8':0.8, 'ns':0.96}
z = 0.52
lrg = np.genfromtxt('LRG-MgII.txt', dtype=[('R','f'), ('npairs', 'f'), ('W','f'), ('Werr', 'f')])
vdisp = np.genfromtxt('LRG-MgII_vdisp.txt', dtype=[('R','f'), ('npairs', 'f'), ('vdisp','f'), ('vdisp_err', 'f')])
Sigma_2h0 = np.genfromtxt('linear_SigmaR_2h_no_bias_z0.52.dat', dtype=[('R', 'f'), ('Sigma_R', 'f')])
# turn REW to surface density (minimum) in MSun/pc^2
pccm = 3.08567758E18 #parsec to cm
factor = 1.13E20/0.3030/(2803.53**2)
Mmg24 = 24.305*1.67E-27
Msolar = 2.E30
mass_factor = factor*Mmg24/Msolar*pccm*pccm/2/1E3*1E8 #3 is for Mg II 2803; 1E3 is for mA; 1E9 is for display purpose
#fbaryon = 0.167*0.0018
Mhalo_min = 1.E11
Mhalo_max1 = 5.E12
dlogM = 2E-1
Mhalo1 = np.exp(np.arange(np.log(Mhalo_min), np.log(Mhalo_max1), dlogM))
Mhalo_min2 = 1.E14
Mhalo_max2 = 3.E15
dlogM = 2E-1
Mhalo2 = np.exp(np.arange(np.log(Mhalo_min2), np.log(Mhalo_max2), dlogM))
Mhalo_min3 = 5.E12
Mhalo_max = 1.E14
dlogM = 2E-2
Mhalo3 = np.exp(np.arange(np.log(Mhalo_min3), np.log(Mhalo_max), dlogM))
Mhalo = np.concatenate([Mhalo1, Mhalo3, Mhalo2])
#Mhalo = np.exp(np.arange(np.log(Mhalo_min), np.log(Mhalo_max), dlogM))
nMhalo = Mhalo.shape[0]
y = lrg['R']
Sigma_lrg = lrg['W']*mass_factor
Sigma_lrg_err = lrg['Werr']*mass_factor
vdisp_lrg = vdisp['vdisp']
vdisp_lrg_err = vdisp['vdisp_err']
ny = y.shape[0]
for i in np.arange(4):
Sigma_lrg[i] = Sigma_lrg[i]*2/2.
Sigma_lrg_err[i] = Sigma_lrg_err[i]*2./2.
#for i in np.arange(2)-3:
# Sigma_lrg[i] = Sigma_lrg[i]*1.5
# Sigma_lrg_err[i] = Sigma_lrg_err[i]*1.5
#Sigma_lrg_err[14:15] = Sigma_lrg_err[14:15]*30.
#Sigma_lrg_err[13] = Sigma_lrg_err[13]*10.
#Sigma_lrg_err[14] = Sigma_lrg_err[14]*10.
#Sigma_lrg_err[15] = Sigma_lrg_err[15]*10.
# 1-halo Sigma_1h(ny, nMhalo)
Sigma_1h = hm.NFW_project_profile(y, Mhalo, z, CosPar)
# 2-halo Sigma_1h(ny, nMhalo)
bM = hm.bias(Mhalo, z, CosPar)
f = interpolate.interp1d(Sigma_2h0['R'], Sigma_2h0['Sigma_R'])
Sigma_2h = bM*f(y).reshape(ny,1)
A_min = 2E-2
A_max = 2E+2
dlogA = 1E-1
A = np.exp(np.arange(np.log(A_min), np.log(A_max), dlogA))
nA = A.shape[0]
# Sigma_all[ny, nA_1h, nA_2h, nMhalo)
Sigma_all = np.ones((1, 1, nA, 1))*(A.reshape(1, nA, 1, 1)*Sigma_1h.reshape(ny, 1, 1, nMhalo)) + np.ones((1, nA, 1, 1))*(A.reshape(1, 1, nA, 1)*Sigma_2h.reshape(ny, 1, 1, nMhalo))
# Calculate Chi-square Chi2(nA_1h, nA_2h, nMhalo)
Chi2 = np.sum((Sigma_all-Sigma_lrg.reshape(ny, 1, 1, 1))**2/Sigma_lrg_err.reshape(ny, 1, 1, 1)**2, axis=0)
Likelihood = np.exp(-Chi2/2.)
Likelihood = Likelihood/Likelihood.sum()
joint_1h_Mhalo = np.sum(Likelihood, axis=1) # (nA_1h, nMhalo)
joint_2h_Mhalo = np.sum(Likelihood, axis=0) # (nA_2h, nMhalo)
joint_1h_2h = np.sum(Likelihood, axis=2) # (nA_1h, nA_2h)
# Numerical Recipes Page 697, Chapter 15.6
levels_3d = Likelihood.max()*np.array([np.exp(-3.53/2.), np.exp(-8.02/2.), np.exp(-14.2/2.)])
levels_1h_Mhalo = joint_1h_Mhalo.max()*np.array([np.exp(-2.30/2.), np.exp(-6.17/2.), np.exp(-11.8/2.)])
levels_2h_Mhalo = joint_2h_Mhalo.max()*np.array([np.exp(-2.30/2.), np.exp(-6.17/2.), np.exp(-11.8/2.)])
levels_1h_Mhalo_f = joint_1h_Mhalo.max()*np.array([1., np.exp(-2.30/2.), np.exp(-6.17/2.), np.exp(-11.8/2.)])
levels_2h_Mhalo_f = joint_2h_Mhalo.max()*np.array([1., np.exp(-2.30/2.), np.exp(-6.17/2.), np.exp(-11.8/2.)])
levels_1h_2h = joint_1h_2h.max()*np.array([np.exp(-2.30/2.), np.exp(-6.17/2.), np.exp(-11.8/2.)])
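# Hedged note on the contour levels above: for a likelihood proportional to
# exp(-chi^2/2), the 68.3/95.4/99.73 per cent joint-confidence regions correspond
# to delta-chi^2 = 2.30/6.17/11.8 for two parameters and 3.53/8.02/14.2 for three
# (Numerical Recipes, ch. 15.6), hence the exp(-delta_chi2/2) factors applied to
# the peak values.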
# joint_1h_Mhalo
xmax_1h = np.argmax(joint_1h_Mhalo, axis=1)
ymax_1h = np.zeros(nA)
for i in np.arange(nA): ymax_1h[i] = joint_1h_Mhalo[i,xmax_1h[i]]
iAmax_1h = np.argmax(ymax_1h)
iMmax_1h = xmax_1h[iAmax_1h]
Max_likehood_1h = joint_1h_Mhalo[iAmax_1h, iMmax_1h]
# joint_2h_Mhalo
xmax_2h = np.argmax(joint_2h_Mhalo, axis=1)
ymax_2h = np.zeros(nA)
for i in np.arange(nA): ymax_2h[i] = joint_2h_Mhalo[i,xmax_2h[i]]
iAmax_2h = np.argmax(ymax_2h)
iMmax_2h = xmax_2h[iAmax_2h]
Max_likehood_2h = joint_2h_Mhalo[iAmax_2h, iMmax_2h]
print Mhalo[iMmax_1h], Mhalo[iMmax_2h]
print np.argmin(Chi2)/(nA*nMhalo), np.argmin(Chi2)%(nA*nMhalo)/nMhalo, np.argmin(Chi2)%(nA*nMhalo)%nMhalo
# Mpc
R_min = 2E-5
R_max = 9E1
dlogR = 1E-2
RR = np.exp(np.arange(np.log(R_min), np.log(R_max)+dlogR, dlogR))
iAmax_1h_all = np.argmin(Chi2)/(nA*nMhalo)
iAmax_2h_all = np.argmin(Chi2)%(nA*nMhalo)/nMhalo
iMmax_all = np.argmin(Chi2)%(nA*nMhalo)%nMhalo
Amax_1h = A[iAmax_1h_all]
Amax_2h = A[iAmax_2h_all]
Mmax = Mhalo[iMmax_all]
# Get 1sigma errors
minChi2_Mhalo = np.zeros(nMhalo)
minChi2_1h = np.zeros(nA)
minChi2_2h = np.zeros(nA)
for i in np.arange(nMhalo):
minChi2_Mhalo[i] = np.min(Chi2[:,:,i])
for i in np.arange(nA):
minChi2_1h[i] = np.min(Chi2[i,:,:])
minChi2_2h[i] = np.min(Chi2[:,i,:])
#levels_3d = Likelihood.max()*np.array([np.exp(-3.53/2.), np.exp(-8.02/2.), np.exp(-14.2/2.)])
iMmax_all_tmp = np.argmin(minChi2_Mhalo)
iAmax_1h_all_tmp = np.argmin(minChi2_1h)
iAmax_2h_all_tmp = np.argmin(minChi2_2h)
print iMmax_all_tmp, iMmax_all
print iAmax_1h_all_tmp, iAmax_1h_all
print iAmax_2h_all_tmp, iAmax_2h_all
print minChi2_Mhalo[iMmax_all_tmp], minChi2_1h[iAmax_1h_all_tmp], minChi2_2h[iAmax_2h_all_tmp]
minChi2_all = minChi2_Mhalo[iMmax_all_tmp]
iMmax_all_left = np.argmin(np.abs(minChi2_Mhalo[:iMmax_all_tmp]-minChi2_all-1.))
iMmax_all_right = np.argmin(np.abs(minChi2_Mhalo[iMmax_all_tmp:]-minChi2_all-1.))+iMmax_all_tmp
iAmax_1h_all_left = np.argmin(np.abs(minChi2_1h[:iAmax_1h_all_tmp]-minChi2_all-1.))
iAmax_1h_all_right = np.argmin(np.abs(minChi2_1h[iAmax_1h_all_tmp:]-minChi2_all-1.))+iAmax_1h_all_tmp
iAmax_2h_all_left = np.argmin(np.abs(minChi2_2h[:iAmax_2h_all_tmp]-minChi2_all-1.))
iAmax_2h_all_right = np.argmin(np.abs(minChi2_2h[iAmax_2h_all_tmp:]-minChi2_all-1.))+iAmax_2h_all_tmp
print np.log10(Mhalo[iMmax_all_left]), np.log10(Mhalo[iMmax_all_tmp]), np.log10(Mhalo[iMmax_all_right])
print np.log10(A[iAmax_1h_all_left]), np.log10(A[iAmax_1h_all_tmp]), np.log10(A[iAmax_1h_all_right])
print np.log10(A[iAmax_2h_all_left]), np.log10(A[iAmax_2h_all_tmp]), np.log10(A[iAmax_2h_all_right])
print np.log10(Mhalo[iMmax_all_tmp])-np.log10(Mhalo[iMmax_all_left]), np.log10(Mhalo[iMmax_all_right])-np.log10(Mhalo[iMmax_all_tmp])
print np.log10(A[iAmax_1h_all_tmp])-np.log10(A[iAmax_1h_all_left]), np.log10(A[iAmax_1h_all_right])-np.log10(A[iAmax_1h_all_tmp])
print np.log10(A[iAmax_2h_all_tmp])-np.log10(A[iAmax_2h_all_left]), np.log10(A[iAmax_2h_all_right])-np.log10(A[iAmax_2h_all_tmp])
iAmax_1h_12 = np.argmin(Chi2[:,:,0])/(nA)
iAmax_2h_12 = np.argmin(Chi2[:,:,0])%(nA)
Amax_1h_12 = A[iAmax_1h_12]
Amax_2h_12 = A[iAmax_2h_12]
M12 = Mhalo[0]
print iAmax_1h_12, iAmax_2h_12
iAmax_1h_15 = np.argmin(Chi2[:,:,nMhalo-1])/(nA)
iAmax_2h_15 = np.argmin(Chi2[:,:,nMhalo-1])%(nA)
Amax_1h_15 = A[iAmax_1h_15]
Amax_2h_15 = A[iAmax_2h_15]
M15 = Mhalo[nMhalo-1]
print iAmax_1h_15, iAmax_2h_15
bM_12 = bM[0]
Sigma_1h_12 = hm.NFW_project_profile(RR, M12, z, CosPar)
Sigma_2h_12 = bM_12*f(RR).reshape(RR.size,1)
# This is Sigma(R), we also need Sigma(<R)
Sigma_all_12 = (Amax_1h_12*Sigma_1h_12+Amax_2h_12*Sigma_2h_12).reshape(RR.size)
Mtmp=np.log10(2.26E13)
iMtmp = np.argmin(np.abs(Mtmp-np.log10(Mhalo)))
bM_sm6 = bM[iMtmp]
Chi2_joint_1h_2h_sm6 = Chi2[:,:,iMtmp] # (nA_1h, nA_2h)
# joint_1h_2h_sm6
xmax_1h_2h_sm6 = np.argmin(Chi2_joint_1h_2h_sm6, axis=1)
ymax_1h_2h_sm6 = np.zeros(nA)
for i in np.arange(nA): ymax_1h_2h_sm6[i] = Chi2_joint_1h_2h_sm6[i,xmax_1h_2h_sm6[i]]
iAmax_1h_sm6 = np.argmin(ymax_1h_2h_sm6)
iAmax_2h_sm6 = xmax_1h_2h_sm6[iAmax_1h_sm6]
minChi2_1h_sm6 = np.zeros(nA)
minChi2_2h_sm6 = np.zeros(nA)
for i in np.arange(nA):
minChi2_1h_sm6[i] = np.min(Chi2_joint_1h_2h_sm6[i,:])
minChi2_2h_sm6[i] = np.min(Chi2_joint_1h_2h_sm6[:,i])
iAmax_1h_sm6_left = np.argmin(np.abs(minChi2_1h_sm6[:iAmax_1h_sm6]-Chi2_joint_1h_2h_sm6[iAmax_1h_sm6, iAmax_2h_sm6]-6.63))
iAmax_2h_sm6_left = np.argmin(np.abs(minChi2_2h_sm6[:iAmax_2h_sm6]-Chi2_joint_1h_2h_sm6[iAmax_1h_sm6, iAmax_2h_sm6]-6.63))
iAmax_1h_sm6_right = np.argmin(np.abs(minChi2_1h_sm6[iAmax_1h_sm6:]-Chi2_joint_1h_2h_sm6[iAmax_1h_sm6, iAmax_2h_sm6]-6.63))+iAmax_1h_sm6
iAmax_2h_sm6_right = np.argmin(np.abs(minChi2_2h_sm6[iAmax_2h_sm6:]-Chi2_joint_1h_2h_sm6[iAmax_1h_sm6, iAmax_2h_sm6]-6.63))+iAmax_2h_sm6
#iAmax_1h_sm6_left = np.argmin(np.abs(minChi2_1h_sm6[:iAmax_1h_sm6]-Chi2_joint_1h_2h_sm6[iAmax_1h_sm6, iAmax_2h_sm6]-1.))
#iAmax_2h_sm6_left = np.argmin(np.abs(minChi2_2h_sm6[:iAmax_2h_sm6]-Chi2_joint_1h_2h_sm6[iAmax_1h_sm6, iAmax_2h_sm6]-1.))
#iAmax_1h_sm6_right = np.argmin(np.abs(minChi2_1h_sm6[iAmax_1h_sm6:]-Chi2_joint_1h_2h_sm6[iAmax_1h_sm6, iAmax_2h_sm6]-1.))+iAmax_1h_sm6
#iAmax_2h_sm6_right = np.argmin(np.abs(minChi2_2h_sm6[iAmax_2h_sm6:]-Chi2_joint_1h_2h_sm6[iAmax_1h_sm6, iAmax_2h_sm6]-1.))+iAmax_2h_sm6
Sigma_1h_sm6 = (hm.NFW_project_profile(RR, 2.26E13, 0.16, CosPar)).reshape(RR.size)
Sigma_1h_sm6_highz = (hm.NFW_project_profile(RR, 2.26E13, z, CosPar)).reshape(RR.size)
Sigma_2h_sm6 = (bM_sm6*f(RR).reshape(RR.size,1)).reshape(RR.size)
# This is Sigma(R), we also need Sigma(<R)
Sigma_all_sm6 = (Sigma_1h_sm6+Sigma_2h_sm6).reshape(RR.size)
Sigma_all_sm6_highz = (Sigma_1h_sm6_highz+Sigma_2h_sm6).reshape(RR.size)
bM_15 = bM[nMhalo-1]
Sigma_1h_15 = hm.NFW_project_profile(RR, M15, z, CosPar)
Sigma_2h_15 = bM_15*f(RR).reshape(RR.size,1)
# This is Sigma(R), we also need Sigma(<R)
Sigma_all_15 = (Amax_1h_15*Sigma_1h_15+Amax_2h_15*Sigma_2h_15).reshape(RR.size)
bMmax = bM[iMmax_all]
Sigma_1h_max = hm.NFW_project_profile(RR, Mmax, z, CosPar)
Sigma_2h_max = bMmax*f(RR).reshape(RR.size,1)
# This is Sigma(R), we also need Sigma(<R)
Sigma_all_max = (Amax_1h*Sigma_1h_max+Amax_2h*Sigma_2h_max).reshape(RR.size)
Sigma_all_max_1h = (Amax_1h*Sigma_1h_max).reshape(RR.size)
Sigma_all_max_2h = (Amax_2h*Sigma_2h_max).reshape(RR.size)
# velocity dispersion
#vdisp_mu = 3.5
#vdisp_1h_max_dm = np.sqrt(Sigma_1h_max/(Sigma_1h_max+Sigma_2h_max)).reshape(RR.size)*(hm.NFW_project_sigma(RR, Mmax, z, CosPar)).reshape(RR.size)
#vdisp_2h_max = np.sqrt(Sigma_2h_max/(Sigma_1h_max+Sigma_2h_max)).reshape(RR.size)*(hm.NFW_project_2h_sigma(RR, Mmax, z, CosPar)).reshape(RR.size)
#vdisp_all_max = np.sqrt(vdisp_1h_max**2+vdisp_2h_max**2)
#vdisp_1h_max = np.sqrt(Sigma_all_max_1h/Sigma_all_max)*(hm.NFW_project_sigma(RR, Mmax, z, CosPar)).reshape(RR.size)/vdisp_mu
#vdisp_2h_max = np.sqrt(Sigma_all_max_2h/Sigma_all_max)*(hm.NFW_project_2h_sigma(RR, Mmax, z, CosPar)).reshape(RR.size)
#vdisp_all_max = np.sqrt(vdisp_1h_max**2+vdisp_2h_max**2)
# Calculate average Sigma (for sm6):
total_mass_sm6 = np.zeros(RR.size)
total_mass_1h_sm6 = np.zeros(RR.size)
total_mass_2h_sm6 = np.zeros(RR.size)
total_mass_sm6[0] = Sigma_all_sm6[0]*np.pi*R_min*R_min*1.0
total_mass_1h_sm6[0] = Sigma_1h_sm6[0]*np.pi*R_min*R_min*1.0
total_mass_2h_sm6[0] = Sigma_2h_sm6[0]*np.pi*R_min*R_min*1.0
# Trapezoidal integration requires small stepsize
# Simpson's integration is twice as accurate with the same stepsize
for i in np.arange(RR.size-1)+1:
total_mass_sm6[i] = total_mass_sm6[i-1]+(Sigma_all_sm6[i]*RR[i]+Sigma_all_sm6[i-1]*RR[i-1])*(RR[i]-RR[i-1])*np.pi
total_mass_1h_sm6[i] = total_mass_1h_sm6[i-1]+(Sigma_1h_sm6[i]*RR[i]+Sigma_1h_sm6[i-1]*RR[i-1])*(RR[i]-RR[i-1])*np.pi
total_mass_2h_sm6[i] = total_mass_2h_sm6[i-1]+(Sigma_2h_sm6[i]*RR[i]+Sigma_2h_sm6[i-1]*RR[i-1])*(RR[i]-RR[i-1])*np.pi
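# Alternative sketch (assumes scipy.integrate.cumtrapz is available) of the same
# enclosed-mass integral M(<R) = int_0^R 2*pi*R' Sigma(R') dR', which could stand
# in for the explicit loop above:
#
#     from scipy import integrate
#     total_mass_sm6_alt = total_mass_sm6[0] + integrate.cumtrapz(
#         2.*np.pi*RR*Sigma_all_sm6, RR, initial=0.)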
# Needs testing
Sigma_average_density_sm6 = total_mass_sm6/np.pi/RR**2
Sigma_average_density_1h_sm6= total_mass_1h_sm6/np.pi/RR**2
Sigma_average_density_2h_sm6 = total_mass_2h_sm6/np.pi/RR**2
# interpolation
ff_sm6 = interpolate.interp1d(RR, Sigma_average_density_sm6)
ff_1h_sm6 = interpolate.interp1d(RR, Sigma_average_density_1h_sm6)
ff_2h_sm6 = interpolate.interp1d(RR, Sigma_average_density_2h_sm6)
Sigma_average_density_sm6_lrg = ff_sm6(y)
Sigma_average_density_sm6_lrg_1h = ff_1h_sm6(y)
Sigma_average_density_sm6_lrg_2h = ff_2h_sm6(y)
gg_sm6 = interpolate.interp1d(RR, Sigma_all_sm6)
gg_1h_sm6 = interpolate.interp1d(RR, Sigma_1h_sm6)
gg_2h_sm6 = interpolate.interp1d(RR, Sigma_2h_sm6)
Sigma_all_sm6_lrg = gg_sm6(y)
Sigma_all_sm6_lrg_1h = gg_1h_sm6(y)
Sigma_all_sm6_lrg_2h = gg_2h_sm6(y)
DSigma_all_sm6 = (Sigma_average_density_sm6 - Sigma_all_sm6)
DSigma_all_sm6_1h = (Sigma_average_density_1h_sm6 - Sigma_1h_sm6)
DSigma_all_sm6_2h = (Sigma_average_density_2h_sm6 - Sigma_2h_sm6)
DSigma_all_sm6_lrg = (Sigma_average_density_sm6_lrg - Sigma_lrg)
DSigma_all_sm6_lrg_err = Sigma_lrg_err
# Calculate average Sigma:
total_mass = np.zeros(RR.size)
total_mass_1h = np.zeros(RR.size)
total_mass_2h = np.zeros(RR.size)
total_mass[0] = Sigma_all_max[0]*np.pi*R_min*R_min*1.0
total_mass_1h[0] = Sigma_all_max_1h[0]*np.pi*R_min*R_min*1.0
total_mass_2h[0] = Sigma_all_max_2h[0]*np.pi*R_min*R_min*1.0
# Trapezoidal integration requires small stepsize
# Simpson's integration is twice as accurate with the same stepsize
for i in np.arange(RR.size-1)+1:
total_mass[i] = total_mass[i-1]+(Sigma_all_max[i]*RR[i]+Sigma_all_max[i-1]*RR[i-1])*(RR[i]-RR[i-1])*np.pi
total_mass_1h[i] = total_mass_1h[i-1]+(Sigma_all_max_1h[i]*RR[i]+Sigma_all_max_1h[i-1]*RR[i-1])*(RR[i]-RR[i-1])*np.pi
total_mass_2h[i] = total_mass_2h[i-1]+(Sigma_all_max_2h[i]*RR[i]+Sigma_all_max_2h[i-1]*RR[i-1])*(RR[i]-RR[i-1])*np.pi
# Needs testing
Sigma_average_density_max = total_mass/np.pi/RR**2
Sigma_average_density_max_1h = total_mass_1h/np.pi/RR**2
Sigma_average_density_max_2h = total_mass_2h/np.pi/RR**2
# interpolation
ff = interpolate.interp1d(RR, Sigma_average_density_max)
ff_1h = interpolate.interp1d(RR, Sigma_average_density_max_1h)
ff_2h = interpolate.interp1d(RR, Sigma_average_density_max_2h)
Sigma_average_density_max_lrg = ff(y)
Sigma_average_density_max_lrg_1h = ff_1h(y)
Sigma_average_density_max_lrg_2h = ff_2h(y)
gg = interpolate.interp1d(RR, Sigma_all_max)
gg_1h = interpolate.interp1d(RR, Sigma_all_max_1h)
gg_2h = interpolate.interp1d(RR, Sigma_all_max_2h)
Sigma_all_max_lrg = gg(y)
Sigma_all_max_lrg_1h = gg_1h(y)
Sigma_all_max_lrg_2h = gg_2h(y)
DSigma_all_max = (Sigma_average_density_max - Sigma_all_max)/Amax_2h
DSigma_all_max_1h = (Sigma_average_density_max_1h - Sigma_all_max_1h)/Amax_2h
DSigma_all_max_2h = (Sigma_average_density_max_2h - Sigma_all_max_2h)/Amax_2h
DSigma_all_max_lrg = (Sigma_average_density_max_lrg - Sigma_lrg)/Amax_2h
DSigma_all_max_lrg_err = Sigma_lrg_err/Amax_2h
plt.clf()
#plt.figure(figsize=(10,6))
plt.subplots_adjust(left=0.20, bottom=0.2)
dashes1 = (18,7)
dashes2 = (8,3.1)
plt.loglog(RR, Amax_1h_12*Sigma_1h_12+Amax_2h_12*Sigma_2h_12, 'm--', dashes=dashes1, lw=4)
plt.loglog(RR, Amax_1h_15*Sigma_1h_15+Amax_2h_15*Sigma_2h_15, '--', dashes=dashes2, color='#FF8C00', lw=4)
legend_max = r'$\log_{10}\boldsymbol{\mathrm{M}}_\mathrm{halo}/\boldsymbol{\mathrm{M}}_\odot=14\ (\boldsymbol{\chi}^2/{dof}='+'%4.2f' % (Chi2[iAmax_1h_all, iAmax_2h_all, iMmax_all]/14.)+')$'
legend_12 = r'$\log_{10}\boldsymbol{\mathrm{M}}_\mathrm{halo}/\boldsymbol{\mathrm{M}}_\odot=12.0\ (\boldsymbol{\chi}^2/{dof}='+'%4.2f' % (Chi2[iAmax_1h_12, iAmax_2h_12, 0]/14.)+')$'
legend_15 = r'$\log_{10}\boldsymbol{\mathrm{M}}_\mathrm{halo}/\boldsymbol{\mathrm{M}}_\odot=15.5\ (\boldsymbol{\chi}^2/{dof}='+'%4.2f' % (Chi2[iAmax_1h_15, iAmax_2h_15, nMhalo-1]/14.)+')$'
plt.legend([legend_12, legend_15], handlelength=3.4, frameon=False, loc=3)
plt.loglog(RR, Amax_1h*Sigma_1h_max+Amax_2h*Sigma_2h_max, 'b', lw=3)
plt.errorbar(y, Sigma_lrg, yerr=Sigma_lrg_err, capsize=5, fmt='bo', mec='b', ms=10)
#plt.loglog(RR, Amax_1h_12*Sigma_1h_12+Amax_2h_12*Sigma_2h_12, 'm--', dashes=dashes1, lw=3)
#plt.loglog(RR, Amax_1h_15*Sigma_1h_15+Amax_2h_15*Sigma_2h_15, '--', dashes=dashes2, color='#FF8C00', lw=3)
#plt.errorbar(y[0:4], Sigma_lrg[0:4]/1.2, yerr=Sigma_lrg_err[0:4]/1.2, capsize=5, fmt='bo', mec='b', ms=10, mfc=None)
plt.xlabel(r'$\mathbf{r}_\mathrm{p}$', fontsize=24)
plt.ylabel(r'$\boldsymbol{\Sigma}_\mathrm{Mg\,II}\ (10^{-9} \mathrm{M}_\odot\ \mathrm{pc^{-2}})$', rotation='vertical', fontsize=26)
plt.xlim(1E-2, 5E1)
plt.ylim(5E-1, 4E3)
plt.loglog([0.015,0.025], [1E0, 1E0],'b', lw=3)
plt.text(1E0, 18E2, r'$\log_{10} \boldsymbol{\mathrm{M}}_\mathrm{halo}/\boldsymbol{\mathrm{M}}_\odot='+'%4.1f' % np.log10(Mhalo[iMmax_all])+'$', fontsize=16)
plt.text(1E0, 9E2, r'$\boldsymbol{\chi}^2/{dof}='+'%4.2f' % (Chi2[iAmax_1h_all, iAmax_2h_all, iMmax_all]/14.)+'$', fontsize=16)
plt.show()
plt.savefig('Halomodel_comparison_sat1.eps')
# Read in Mandelbaum
sm6 = np.genfromtxt('mandelbaum/2006/fig1/rebin.sm.all.sm6.highfdev.dat', dtype=[('R','f'), ('DSigma', 'f'), ('DSigma_err','f')])
sm7 = np.genfromtxt('mandelbaum/2006/fig1/rebin.sm.all.sm7.highfdev.dat', dtype=[('R','f'), ('DSigma', 'f'), ('DSigma_err','f')])
sm6_z = 0.16
sm7_z = 0.19
lf6 = np.genfromtxt('mandelbaum/2006/fig2/rebin.lum.all.L6faint.highfdev.dat', dtype=[('R','f'), ('DSigma', 'f'), ('DSigma_err','f')])
lf6_z = 0.2
plt.figure(figsize=(10,7))
plt.clf()
plt.subplots_adjust(left=0.20, bottom=0.2)
show_lrg_err = np.zeros(Sigma_lrg.size)
for i in np.arange(Sigma_lrg.size): show_lrg_err[i] = min(DSigma_all_max_lrg[i]*0.9999, DSigma_all_max_lrg_err[i])
plt.loglog(RR, DSigma_all_max, 'b', lw=3)
plt.loglog(RR, DSigma_all_max_1h, 'g', ls='--', dashes=dashes1, lw=3)
plt.loglog(RR, DSigma_all_max_2h, color='#FF8C00', ls='--', dashes=dashes2, lw=3)
plt.legend([r"\textbf{Halo model}",r'\textbf{1-halo term}', r'\textbf{2-halo term}'], handlelength=3.5, frameon=False, loc=3)#, fontsize=14)
plt.errorbar(y, DSigma_all_max_lrg, yerr=show_lrg_err, capsize=5, fmt='o', color='b', mec='b', ms=10)
#plt.errorbar(sm6['R']/1E3/0.7/(1.+0.1), sm6['DSigma']*0.7*(1.+0.1)**2, sm6['DSigma_err']*0.7*(1.+0.1)**2, capsize=5, fmt='bv', ms=10)
plt.errorbar(sm6['R']/1E3/0.7/(1.+sm6_z), sm6['DSigma']*0.7*(1.+sm6_z)**2., sm6['DSigma_err']*0.7*(1.+sm6_z)**2, capsize=5, mfc='None', ecolor='0.3', mec='0.3', fmt='^', ms=10)
plt.plot([0.32, 0.32], [6.0E2, 6.0E2], 'o', color='b', mec='b', ms=9)
plt.plot([0.32, 0.32], [4.0E2, 4.0E2], '^', mfc='None', mec='0.3', ms=9)
plt.text(0.40, 5.5E2, r"\textbf{Mg II (z$\sim$0.5, Zhu et al. 2013)}", color='b', fontsize=14)
plt.text(0.40, 3.7E2, r"\textbf{Dark Matter (z$\sim$0.1, Mandelbaum et al. 2006)}", color='0.3', fontsize=14)
#plt.text(0.42, 4.8E2, r"\textbf{Mg II at $z\sim0.6$ (Zhu et al. 2013)}", color='b', fontsize=14)
#plt.text(0.42, 2.65E2, r"\textbf{Dark Matter at $z\sim 0.1$ (Mandelbaum et al. 2006)}", color='0.3', fontsize=14)
#plt.text([r"\textbf{Zhu, M\'enard \& SDSS (Mg II)}", r"\textbf{Mandelbaum \& SDSS (Dark Matter)}"], color='b',
#plt.legend([r"\textbf{Zhu et al. (2013, Mg II)}", r"\textbf{Mandelbaum et al. (2006, Dark Matter)}"], frameon=False, loc=1)
plt.loglog(RR, DSigma_all_max, 'b', lw=3)
plt.loglog(RR, DSigma_all_max_1h, 'g', ls='--', dashes=dashes1, lw=3)
plt.loglog(RR, DSigma_all_max_2h, color='#FF8C00', ls='--', dashes=dashes2, lw=3)
plt.legend([r"\textbf{Halo model}",r'\textbf{1-halo term}', r'\textbf{2-halo term}'], handlelength=3.5, frameon=False, loc=3)#, fontsize=14)
#plt.legend([r"\textbf{Zhu et al. (Mg II)}", r"\textbf{Mandelbaum et al. (Dark Matter)}", r"\textbf{Halo model}",r'\textbf{1-halo term}', r'\textbf{2-halo term}'], handlelength=3.5, frameon=False)#, fontsize=14)
#plt.gca().add_artist(l1)
#plt.errorbar(lf6['R']/1E3/0.7/(1.+lf6_z), lf6['DSigma']*0.7*(1.+lf6_z)**2., lf6['DSigma_err']*0.7*(1.+lf6_z)**2, capsize=5, fmt='c^', ms=10)
plt.xlabel(r'$\mathbf{r}_\mathrm{p}$', fontsize=24)
plt.ylabel(r'$\boldsymbol{\Delta \Sigma}\ (\mathrm{M}_\odot\ \mathrm{pc^{-2}})$', rotation='vertical', fontsize=26)
plt.xticks([0.1, 1, 10],('100 kpc', '1 Mpc', '10 Mpc'), fontsize=22)
plt.xlim(1.5E-2, 5E1)
plt.ylim(5E-1, 1E3)
#plt.text(1E-0, 5E2, r"\textbf{Zhu, M\'enard \& SDSS (Mg II)}", color='b', fontsize=12)
#plt.text(1E-0, 5E2, r"\textbf{Zhu \& SDSS (Mg II)}", color='b', fontsize=12)
#plt.text(1E-0, 3.5E2, r"\textbf{Mandelbaum \& SDSS (Dark Matter)}", color='0.5', fontsize=12)
plt.show()
plt.savefig('Darkeverything_sat1.eps')
show_lrg_err = np.zeros(Sigma_lrg.size)
for i in np.arange(Sigma_lrg.size): show_lrg_err[i] = min(DSigma_all_max_lrg[i]*0.9999, DSigma_all_max_lrg_err[i])
plt.figure(figsize=(9,13))
plt.clf()
ax1 = plt.subplot2grid((3,1),(0,0), rowspan=1)
plt.subplots_adjust(left=0.20, bottom=0.2, hspace=0)
plt.errorbar(y, Sigma_lrg, yerr=Sigma_lrg_err, capsize=5, fmt='bo', mec='b', ms=10)
plt.text(1.20, 1.5E3, r"\textbf{Mg II (z$\sim$0.5, Zhu et al. 2013)}", color='b', fontsize=14)
plt.xlim(2E-2, 3E1)
plt.ylim(5E-1, 4E3)
plt.xscale('log')
plt.yscale('log')
plt.ylabel(r'$\boldsymbol{\Sigma}_\mathrm{Mg\,II}\ (10^{-9} \mathrm{M}_\odot\ \mathrm{pc^{-2}})$', rotation='vertical', fontsize=24)
plt.title(r"Combining Galaxy-Gas/Mass Correlations (Demo)", fontsize=18)
plt.setp(ax1.get_xticklabels(), visible=False)
ax2 = plt.subplot2grid((3,1),(1,0), rowspan=1)
plt.loglog(RR, DSigma_all_sm6, 'b', lw=3)
plt.loglog(RR, DSigma_all_sm6_1h, 'g', ls='--', dashes=dashes1, lw=3)
plt.loglog(RR, DSigma_all_sm6_2h, color='#FF8C00', ls='--', dashes=dashes2, lw=3)
plt.legend([r"\textbf{Halo model}",r'\textbf{1-halo term}', r'\textbf{2-halo term}'], handlelength=3.5, frameon=False, loc=3)
plt.errorbar(sm6['R']/1E3/0.7/(1.+sm6_z), sm6['DSigma']*0.7*(1.+sm6_z)**2., sm6['DSigma_err']*0.7*(1.+sm6_z)**2, capsize=5, mfc='None', ecolor='magenta', mec='magenta', fmt='^', ms=10)
plt.text(0.20, 7.0E2, r"\textbf{Dark Matter (z$\sim$0.2, Mandelbaum et al. 2006)}", color='magenta', fontsize=14)
plt.loglog(RR, DSigma_all_sm6, 'b', lw=3)
plt.loglog(RR, DSigma_all_sm6_1h, 'g', ls='--', dashes=dashes1, lw=3)
plt.loglog(RR, DSigma_all_sm6_2h, color='#FF8C00', ls='--', dashes=dashes2, lw=3)
plt.ylabel(r'$\boldsymbol{\Delta \Sigma}_\mathrm{m}\ (\mathrm{M}_\odot\ \mathrm{pc^{-2}})$', rotation='vertical', fontsize=24)
plt.xlim(2E-2, 3E1)
plt.ylim(1E-1, 2E3)
plt.xscale('log')
plt.yscale('log')
plt.setp(ax2.get_xticklabels(), visible=False)
fSigma = interpolate.interp1d(RR, Sigma_all_sm6_highz)
Sigma_all_mgii = fSigma(y)
ax3 = plt.subplot2grid((3,1),(2,0), rowspan=1)
#plt.axhspan(A[iAmax_1h_sm6_left], A[iAmax_1h_sm6_right], ec='#90EE90', fc='#90EE90', lw=2)
#plt.axhspan(A[iAmax_2h_sm6_left], A[iAmax_2h_sm6_right], ec='#FFDAB9', fc='#FFDAB9', lw=2)
#plt.axhspan(A[iAmax_1h_sm6_left], A[iAmax_2h_sm6_right], ec='#E6E6FA', fc='#E6E6FA', lw=2)
#plt.axhspan(A[iAmax_1h_sm6_left], A[iAmax_1h_sm6_right], ec='green', facecolor='None', alpha=1.0, hatch='/', lw=2)
#plt.axhspan(A[iAmax_2h_sm6_left], A[iAmax_2h_sm6_right], ec='#FF8C00', facecolor='None', alpha=1.0, hatch='\\', lw=2)
#plt.axhspan((A[iAmax_1h_sm6_left]), (A[iAmax_1h_sm6_right]), facecolor='green', alpha=0.5)
#plt.axhspan((A[iAmax_2h_sm6_left]), (A[iAmax_2h_sm6_right]), facecolor='#FF8C00', alpha=0.5)
#plt.axhspan(np.log10(A[iAmax_1h_sm6_left]), np.log10(A[iAmax_1h_sm6_right]), facecolor='green', alpha=0.3)
#plt.axhspan(np.log10(A[iAmax_2h_sm6_left]), np.log10(A[iAmax_2h_sm6_right]), facecolor='#FF8C00', alpha=0.3)
plt.axhspan(2.8, 9.7, facecolor='blue', alpha=0.2)
#plt.legend([r'\textbf{1-halo term}', r'\textbf{2-halo term}'], handlelength=3.5, frameon=False, loc=2)
#ratio = np.log10(Sigma_lrg/Sigma_all_mgii)
#ratio_err = Sigma_lrg_err/Sigma_lrg/np.log(10.)
ratio = (Sigma_lrg/Sigma_all_mgii)
ratio_err = Sigma_lrg_err/Sigma_all_mgii
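# Editor's note: the quantity plotted below is the Mg II surface density
# divided by the interpolated total-mass surface density at the same r_p,
# i.e. the gas-to-mass ratio f_MgII in units of 1e-9. Only the Mg II
# uncertainty is propagated into ratio_err; the mass model is treated as exact.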
plt.errorbar(y, ratio, yerr=ratio_err, capsize=5, fmt='o', color='b', mec='b', ms=10)
plt.text(1.30, 16, r"\textbf{Mg II Gas-to-Mass Ratio}", color='b', fontsize=14)
plt.xlabel(r'$\mathbf{r}_\mathrm{p}$', fontsize=24)
#plt.ylabel(r'$\boldsymbol{\Delta \Sigma}\ (\mathrm{M}_\odot\ \mathrm{pc^{-2}})$', rotation='vertical', fontsize=26)
#plt.ylabel(r'$\log_{10}\ \boldsymbol{f}_\mathrm{Mg\,II}/10^{-9}$', rotation='vertical', fontsize=24)
plt.ylabel(r'$\boldsymbol{f}_\mathrm{Mg\,II}/10^{-9}$', rotation='vertical', fontsize=24)
plt.xlim(2E-2, 3E1)
#plt.ylim(0.00, 1.79)
#plt.ylim(10**0.20, 10**2.09)
plt.ylim(-10**0.5, 19.)
plt.xscale('log')
plt.xticks([0.1, 1, 10],('100 kpc', '1 Mpc', '10 Mpc'), fontsize=22)
plt.show()
plt.savefig('Darkeverything_Demo_sat1.pdf')
plt.savefig('Darkeverything_Demo_sat1.eps')
plt.figure(figsize=(10,7))
plt.subplots_adjust(left=0.20, bottom=0.2)
show_lrg_err = np.zeros(Sigma_lrg.size)
for i in np.arange(Sigma_lrg.size): show_lrg_err[i] = min(DSigma_all_max_lrg[i]*0.9999, DSigma_all_max_lrg_err[i])
plt.loglog(RR, DSigma_all_max, 'b', lw=3)
plt.loglog(RR, DSigma_all_max_1h, 'g', ls='--', dashes=dashes1, lw=3)
plt.loglog(RR, DSigma_all_max_2h, color='#FF8C00', ls='--', dashes=dashes2, lw=3)
plt.legend([r"\textbf{Halo model}",r'\textbf{1-halo term}', r'\textbf{2-halo term}'], handlelength=3.5, frameon=False, loc=3)#, fontsize=14)
plt.errorbar(y, DSigma_all_max_lrg, yerr=show_lrg_err, capsize=5, fmt='o', color='b', mec='b', ms=10)
#plt.errorbar(sm6['R']/1E3/0.7/(1.+0.1), sm6['DSigma']*0.7*(1.+0.1)**2, sm6['DSigma_err']*0.7*(1.+0.1)**2, capsize=5, fmt='bv', ms=10)
plt.errorbar(sm6['R']/1E3/0.7/(1.+sm6_z), sm6['DSigma']*0.7*(1.+sm6_z)**2., sm6['DSigma_err']*0.7*(1.+sm6_z)**2, capsize=5, mfc='None', ecolor='0.3', mec='0.3', fmt='^', ms=10)
plt.plot([0.32, 0.32], [6.0E2, 6.0E2], 'o', color='b', mec='b', ms=9)
plt.plot([0.32, 0.32], [4.0E2, 4.0E2], '^', mfc='None', mec='0.3', ms=9)
plt.text(0.40, 5.5E2, r"\textbf{Mg II (z$\sim$0.6, Zhu et al. 2013)}", color='b', fontsize=14)
plt.text(0.40, 3.7E2, r"\textbf{Dark Matter (z$\sim$0.1, Mandelbaum et al. 2006)}", color='0.3', fontsize=14)
#plt.text(0.42, 4.8E2, r"\textbf{Mg II at $z\sim0.6$ (Zhu et al. 2013)}", color='b', fontsize=14)
#plt.text(0.42, 2.65E2, r"\textbf{Dark Matter at $z\sim 0.1$ (Mandelbaum et al. 2006)}", color='0.3', fontsize=14)
#plt.text([r"\textbf{Zhu, M\'enard \& SDSS (Mg II)}", r"\textbf{Mandelbaum \& SDSS (Dark Matter)}"], color='b',
#plt.legend([r"\textbf{Zhu et al. (2013, Mg II)}", r"\textbf{Mandelbaum et al. (2006, Dark Matter)}"], frameon=False, loc=1)
plt.loglog(RR, DSigma_all_max, 'b', lw=3)
plt.loglog(RR, DSigma_all_max_1h, 'g', ls='--', dashes=dashes1, lw=3)
plt.loglog(RR, DSigma_all_max_2h, color='#FF8C00', ls='--', dashes=dashes2, lw=3)
plt.legend([r"\textbf{Halo model}",r'\textbf{1-halo term}', r'\textbf{2-halo term}'], handlelength=3.5, frameon=False, loc=3)#, fontsize=14)
#plt.legend([r"\textbf{Zhu et al. (Mg II)}", r"\textbf{Mandelbaum et al. (Dark Matter)}", r"\textbf{Halo model}",r'\textbf{1-halo term}', r'\textbf{2-halo term}'], handlelength=3.5, frameon=False)#, fontsize=14)
#plt.gca().add_artist(l1)
#plt.errorbar(lf6['R']/1E3/0.7/(1.+lf6_z), lf6['DSigma']*0.7*(1.+lf6_z)**2., lf6['DSigma_err']*0.7*(1.+lf6_z)**2, capsize=5, fmt='c^', ms=10)
plt.xlabel(r'$\mathbf{r}_\mathrm{p}$', fontsize=24)
plt.ylabel(r'$\boldsymbol{\Delta \Sigma}\ (\mathrm{M}_\odot\ \mathrm{pc^{-2}})$', rotation='vertical', fontsize=26)
plt.xticks([0.1, 1, 10],('100 kpc', '1 Mpc', '10 Mpc'), fontsize=22)
plt.xlim(1.5E-2, 5E1)
plt.ylim(5E-1, 1E3)
#plt.text(1E-0, 5E2, r"\textbf{Zhu, M\'enard \& SDSS (Mg II)}", color='b', fontsize=12)
#plt.text(1E-0, 5E2, r"\textbf{Zhu \& SDSS (Mg II)}", color='b', fontsize=12)
#plt.text(1E-0, 3.5E2, r"\textbf{Mandelbaum \& SDSS (Dark Matter)}", color='0.5', fontsize=12)
plt.show()
plt.savefig('Darkeverything_sat1.eps')
plt.figure(figsize=(10,10))
plt.clf()
ax1 = plt.subplot2grid((4,1),(0,0), rowspan=3)
plt.subplots_adjust(left=0.15, bottom=0.15, hspace=0)
plt.loglog(RR, Amax_1h*Sigma_1h_max+Amax_2h*Sigma_2h_max, 'b', lw=3)
plt.loglog(RR, Amax_1h*Sigma_1h_max, 'g', ls='--', dashes=dashes1, lw=3, label=r'\textbf{1-halo term}')
plt.loglog(RR, Amax_2h*Sigma_2h_max, color='#FF8C00', ls='--', dashes=dashes2, lw=3, label=r'\textbf{2-halo term}')
plt.legend([r'\textbf{Halo model}', r'\textbf{1-halo term}', r'\textbf{2-halo term}'], handlelength=3.4, frameon=False, loc=3)#, fontsize=14)
plt.errorbar(y, Sigma_lrg, yerr=Sigma_lrg_err, capsize=5, fmt='bo', mec='b', ms=10)
#plt.errorbar(y[0:4], Sigma_lrg[0:4]/1.2, yerr=Sigma_lrg_err[0:4]/1.2, capsize=5, fmt='bo', mec='b', ms=10, mfc=None)
plt.ylabel(r'$\boldsymbol{\Sigma}_\mathrm{Mg\,II}\ (10^{-8} \mathrm{M}_\odot\ \mathrm{pc^{-2}})$', rotation='vertical', fontsize=26)
#plt.xlim(1E-2, 4E1)
plt.xlim(2E-2, 3E1)
#plt.ylim(5E-1, 4E3)
plt.ylim(2E-1, 5E2)
#plt.text(3E-2, 3E-1, r'$\frac{\boldsymbol{\chi}^2}{\textbf{dof}}='+'%4.2f' % (Chi2[iAmax_1h_all, iAmax_2h_all, iMmax_all]/14.)+'$', fontsize=22)
plt.text(1.5E0, 19E1, r'$\log_{10} \boldsymbol{\mathrm{M}}_\mathrm{halo}/\boldsymbol{\mathrm{M}}_\odot='+'%4.1f' % (np.log10(Mhalo[iMmax_all])+0.000)+'$', fontsize=20)
plt.text(1.5E0, 10E1, r'$\boldsymbol{\chi}^2/{dof}='+'%4.2f' % (Chi2[iAmax_1h_all, iAmax_2h_all, iMmax_all]/14.)+'$', fontsize=20)
#plt.text(1.5E0, 9E2, r'$\boldsymbol{\chi}^2/{dof}='+'%4.2f' % (Chi2[iAmax_1h_all, iAmax_2h_all, iMmax_all]/14.)+'$', fontsize=20)
plt.title(r"Saturation Effects (line ratio=1)", fontsize=18)
ax2 = plt.subplot2grid((4,1),(3,0), rowspan=1)
#plt.errorbar(y, Sigma_lrg/Sigma_all[:,iAmax_1h_all,iAmax_2h_all,iMmax_all], yerr=Sigma_lrg_err/Sigma_all[:,iAmax_1h_all,iAmax_2h_all,iMmax_all], capsize=5, fmt='bo', mec='b', ms=10)
plt.errorbar(y, (Sigma_lrg-Sigma_all[:,iAmax_1h_all,iAmax_2h_all,iMmax_all])/Sigma_all[:,iAmax_1h_all,iAmax_2h_all,iMmax_all], yerr=Sigma_lrg_err/Sigma_all[:,iAmax_1h_all,iAmax_2h_all,iMmax_all], capsize=5, fmt='bo', mec='b', ms=10)
plt.xscale('log')
plt.plot([1E-2,5E1],[0,0], 'k--', lw=1)
plt.xticks([0.1, 1, 10],('100 kpc', '1 Mpc', '10 Mpc'), fontsize=22)
plt.yticks([-1., 0., 1])
plt.xlim(2E-2, 3E1)
plt.ylim(-1.99E0, 1.99E0)
plt.xlabel(r'$\mathbf{r}_\mathrm{p}$', fontsize=24)
plt.ylabel(r'$\boldsymbol{\delta \Sigma}_\mathrm{Mg\,II}/\boldsymbol{\Sigma}_\mathrm{Mg\,II}^\mathrm{model}$', rotation='vertical', fontsize=26)
plt.setp(ax1.get_xticklabels(), visible=False)
plt.show()
plt.savefig('Halomodel_residuals_sat1.eps')
plt.figure(figsize=(20,10))
plt.clf()
ax11 = plt.subplot2grid((4,2),(0,0), rowspan=3)
plt.loglog(RR, Amax_1h_12*Sigma_1h_12+Amax_2h_12*Sigma_2h_12, color='r', lw=4)
plt.loglog(RR, Amax_1h_12*Sigma_1h_12, color='m', ls='--', dashes=dashes1, lw=4, label=r'\textbf{1-halo term}')
plt.loglog(RR, Amax_2h_12*Sigma_2h_12, color='m', ls='--', dashes=dashes2, lw=4, label=r'\textbf{2-halo term}')
plt.legend([r'\textbf{Halo model}', r'\textbf{1-halo term}', r'\textbf{2-halo term}'], handlelength=3.4, frameon=False, loc=3)#, fontsize=14)
plt.errorbar(y, Sigma_lrg, yerr=Sigma_lrg_err, capsize=5, fmt='bo', mec='b', ms=10)
plt.ylabel(r'$\boldsymbol{\Sigma}_\mathrm{Mg\,II}\ (10^{-9} \mathrm{M}_\odot\ \mathrm{pc^{-2}})$', rotation='vertical', fontsize=26)
plt.xlim(2E-2, 3E1)
plt.ylim(5E-1, 4E3)
plt.text(0.8E0, 17E2, r'$\log_{10} \boldsymbol{\mathrm{M}}_\mathrm{halo}/\boldsymbol{\mathrm{M}}_\odot='+'%4.1f' % np.log10(Mhalo[0])+'$', fontsize=20)
plt.text(0.8E0, 9E2, r'$\boldsymbol{\chi}^2/{dof}='+'%4.2f' % (Chi2[iAmax_1h_12, iAmax_2h_12, 0]/14.)+'$', fontsize=20)
plt.setp(ax11.get_xticklabels(), visible=False)
ax22 = plt.subplot2grid((4,2),(3,0), rowspan=1)
plt.errorbar(y, (Sigma_lrg-Sigma_all[:,iAmax_1h_12,iAmax_2h_12,0])/Sigma_all[:,iAmax_1h_12,iAmax_2h_12,0], yerr=Sigma_lrg_err/Sigma_all[:,iAmax_1h_12,iAmax_2h_12,0], capsize=5, fmt='bo', mec='b', ms=10)
plt.xscale('log')
plt.plot([1E-2,5E1],[0,0], 'k--', lw=1)
plt.xticks([0.1, 1, 10],('100 kpc', '1 Mpc', '10 Mpc'), fontsize=22)
plt.yticks([-1., 0., 1])
plt.xlim(2E-2, 3E1)
plt.ylim(-1.99E0, 1.99E0)
plt.xlabel(r'$\mathbf{r}_\mathrm{p}$', fontsize=24)
plt.ylabel(r'$\boldsymbol{\delta \Sigma}_\mathrm{Mg\,II}/\boldsymbol{\Sigma}_\mathrm{Mg\,II}^\mathrm{model}$', rotation='vertical', fontsize=26)
ax33 = plt.subplot2grid((4,2),(0,1), rowspan=3)
plt.loglog(RR, Amax_1h_15*Sigma_1h_15+Amax_2h_15*Sigma_2h_15, color='#D2691E', lw=4)
plt.loglog(RR, Amax_1h_15*Sigma_1h_15, color='#FF8C00', ls='--', dashes=dashes1, lw=4, label=r'\textbf{1-halo term}')
plt.loglog(RR, Amax_2h_15*Sigma_2h_15, color='#FF8C00', ls='--', dashes=dashes2, lw=4, label=r'\textbf{2-halo term}')
plt.legend([r'\textbf{Halo model}', r'\textbf{1-halo term}', r'\textbf{2-halo term}'], handlelength=3.4, frameon=False, loc=3)#, fontsize=14)
plt.errorbar(y, Sigma_lrg, yerr=Sigma_lrg_err, capsize=5, fmt='bo', mec='b', ms=10)
plt.xlim(2E-2, 3E1)
plt.ylim(5E-1, 4E3)
plt.text(0.8E0, 17E2, r'$\log_{10} \boldsymbol{\mathrm{M}}_\mathrm{halo}/\boldsymbol{\mathrm{M}}_\odot='+'%4.1f' % np.log10(Mhalo[nMhalo-1])+'$', fontsize=20)
plt.text(0.8E0, 9E2, r'$\boldsymbol{\chi}^2/{dof}='+'%4.2f' % (Chi2[iAmax_1h_15, iAmax_2h_15, nMhalo-1]/14.)+'$', fontsize=20)
plt.ylabel(r'$\boldsymbol{\Sigma}_\mathrm{Mg\,II}\ (10^{-9} \mathrm{M}_\odot\ \mathrm{pc^{-2}})$', rotation='vertical', fontsize=26)
ax33.yaxis.tick_right()
ax33.yaxis.set_label_position("right")
plt.setp(ax33.get_xticklabels(), visible=False)
#plt.setp(ax33.get_yticklabels(), visible=False)
ax44 = plt.subplot2grid((4,2),(3,1), rowspan=1)
plt.errorbar(y, (Sigma_lrg-Sigma_all[:,iAmax_1h_15,iAmax_2h_15,nMhalo-1])/Sigma_all[:,iAmax_1h_15,iAmax_2h_15,nMhalo-1], yerr=Sigma_lrg_err/Sigma_all[:,iAmax_1h_15,iAmax_2h_15,nMhalo-1], capsize=5, fmt='bo', mec='b', ms=10)
plt.xscale('log')
plt.plot([1E-2,5E1],[0,0], 'k--', lw=1)
plt.xticks([0.1, 1, 10],('100 kpc', '1 Mpc', '10 Mpc'), fontsize=22)
plt.yticks([-1., 0., 1])
plt.xlim(2E-2, 3E1)
plt.ylim(-1.99E0, 1.99E0)
ax44.yaxis.tick_right()
plt.xlabel(r'$\mathbf{r}_\mathrm{p}$', fontsize=24)
plt.ylabel(r'$\boldsymbol{\delta \Sigma}_\mathrm{Mg\,II}/\boldsymbol{\Sigma}_\mathrm{Mg\,II}^\mathrm{model}$', rotation='vertical', fontsize=26)
ax44.yaxis.set_label_position("right")
plt.subplots_adjust(left=0.15, bottom=0.15, right=0.85, hspace=0, wspace=0)
plt.show()
plt.savefig('Halomodel_comparison_residuals_extremes_sat1.eps')
plt.figure(figsize=(9,6))
plt.clf()
plt.subplots_adjust(left=0.20, bottom=0.15, hspace=0)
cs=plt.contour(np.log10(Mhalo[0:nMhalo]), np.log10(A[0:nA]), joint_2h_Mhalo[0:nA, 0:nMhalo], levels_2h_Mhalo, colors=('#FF8C00','#FF8C00','#FF8C00'))
cs=plt.contour(np.log10(Mhalo[0:nMhalo]), np.log10(A[0:nA]), joint_1h_Mhalo[0:nA, 0:nMhalo], levels_1h_Mhalo, colors=('g', 'g', 'g'))
#plt.text(12, -0.5, r'$\boldsymbol{f}_\mathrm{Mg\,II}^\mathrm{1h}=\Sigma_\mathrm{Mg\,II}^\mathrm{1h}/\Sigma_\mathrm{m}^\mathrm{1h}$ (1-halo gas-to-mass ratio)', color='b', fontsize=20)
#plt.text(12, -0.8, r'$\boldsymbol{f}_\mathrm{Mg\,II}^\mathrm{2h}=\Sigma_\mathrm{Mg\,II}^\mathrm{2h}/\Sigma_\mathrm{m}^\mathrm{2h}$ (2-halo gas-to-mass ratio)', color='b', fontsize=20)
plt.text(13.8, 1.70, r'green: 1-halo gas-to-mass ratio', color='g', fontsize=16)
plt.text(13.8, 1.53, r'orange: 2-halo gas-to-mass ratio', color='#FF8C00', fontsize=16)
plt.xlim(np.log10(Mhalo_min), 16.5)
plt.ylim(-1.2, 1.99)
plt.xlabel(r'$\mathbf{\log}_{10}\ \mathbf{M}_\mathrm{halo}/\mathbf{M}_\mathrm{\odot}$', fontsize=24)
plt.ylabel(r'$\log_{10}\ \boldsymbol{f}_\mathrm{Mg\,II}^\mathrm{1h, 2h}/10^{-9}$', rotation='vertical', fontsize=30)
plt.plot(np.log10(Mhalo[iMmax_1h]), np.log10(A[iAmax_1h]), 'o', color='green', markersize=15)
plt.plot(np.log10(Mhalo[iMmax_2h]), np.log10(A[iAmax_2h]), 'o', color='#FF8C00', markersize=15)
plt.savefig('Chi2_1h_2h_onepanel_sat1.eps')
plt.figure(figsize=(9,6))
plt.clf()
plt.subplots_adjust(left=0.20, bottom=0.15, hspace=0)
cs=plt.contour(np.log10(Mhalo[0:nMhalo]), np.log10(A[0:nA]), joint_2h_Mhalo[0:nA, 0:nMhalo], levels_2h_Mhalo, colors=('#FF8C00','#FF8C00','#FF8C00'))
cs=plt.contour(np.log10(Mhalo[0:nMhalo]), np.log10(A[0:nA]), joint_1h_Mhalo[0:nA, 0:nMhalo], levels_1h_Mhalo, colors=('g', 'g', 'g'))
#plt.text(12, -0.5, r'$\boldsymbol{f}_\mathrm{Mg\,II}^\mathrm{1h}=\Sigma_mathrm{Mg\,II}^\mathrm{1h}/\Sigma_mathrm{m}^\mathrm{1h} (1-halo gas-to-mass ratio)$', color='b', fontsize=20)
#plt.text(12, -0.8, r'$\boldsymbol{f}_\mathrm{Mg\,II}^\mathrm{2h}=\Sigma_mathrm{Mg\,II}^\mathrm{2h}/\Sigma_mathrm{m}^\mathrm{2h} (2-halo gas-to-mass ratio)$', color='b', fontsize=20)
plt.xlim(10.9, 16.0)
plt.ylim(-1.69, 1.5)
plt.xlabel(r'$\mathbf{\log}_{10}\ \mathbf{M}_\mathrm{halo}/\mathbf{M}_\mathrm{\odot}$', fontsize=24)
#plt.ylabel(r'$\log_{10}\ \boldsymbol{f}_\mathrm{Mg\,II}^\mathrm{1h, 2h}/10^{-8}$', rotation='vertical', fontsize=30)
plt.ylabel(r'$\log_{10}\ \boldsymbol{f}_\mathrm{Mg\,II}/10^{-8}$', rotation='vertical', fontsize=30)
plt.plot(np.log10(Mhalo[iMmax_1h]), np.log10(A[iAmax_1h]), 'o', color='green', markersize=8)
plt.plot(np.log10(Mhalo[iMmax_2h]), np.log10(A[iAmax_2h]), 'o', color='#FF8C00', markersize=8)
plt.axhspan(np.log10(0.6), np.log10(1.26), facecolor='blue', alpha=0.2)
plt.axvspan(13.25, 13.45, facecolor='0.8', alpha=0.3)
#plt.axhspan(np.log10(A[iAmax_1h_sm6_left]), np.log10(A[iAmax_1h_sm6_right]), facecolor='green', alpha=0.3)
#plt.title(r"Combining Galaxy-Gas/Mass Correlations", fontsize=18)
plt.text(13.0, 1.33, r'green: 1-halo gas-to-mass ratio', color='g', fontsize=16)
plt.text(13.0, 1.20, r'orange: 2-halo gas-to-mass ratio', color='#FF8C00', fontsize=16)
plt.text(11.14, -1.35, r'gray band: mass constraint from galaxy-mass correlation', color='0.3', fontsize=13)
plt.text(11.14, -1.5, r'blue band: gas-to-mass ratio constraint from galaxy-gas/mass correlation', color='blue', fontsize=13)
plt.title(r"Saturation Effects (line ratio=1)", fontsize=18)
plt.savefig('Chi2_1h_2h_onepanel_dark_sat1.pdf')
plt.savefig('Chi2_1h_2h_onepanel_dark_sat1.eps')
plt.figure(figsize=(9,12))
plt.clf()
ax3 = plt.subplot2grid((2,1),(0,0), rowspan=1)
plt.axhspan(0.70, 1.20, facecolor='0.9', alpha=0.3)
plt.axvspan(13.25, 13.45, facecolor='0.9', alpha=0.3)
plt.subplots_adjust(left=0.20, bottom=0.15, hspace=0)
csf=plt.contourf(np.log10(Mhalo[0:nMhalo]), np.log10(A[0:nA]), joint_1h_Mhalo[0:nA, 0:nMhalo], levels_1h_Mhalo_f[::-1], colors=('white','white','blue'))
cs=plt.contour(np.log10(Mhalo[0:nMhalo]), np.log10(A[0:nA]), joint_1h_Mhalo[0:nA, 0:nMhalo], levels_1h_Mhalo, colors=('k', 'k', 'k'))
#plt.xlim(12, 16)
plt.xlim(np.log10(Mhalo_min), 16.5)
#plt.xlim(np.log10(Mhalo_min), np.log10(Mhalo_max))
#plt.ylim(np.log10(A_min), np.log10(A_max))
plt.ylim(-1.2, 1.99)
# plt.xlabel(r'$\mathbf{\log}_{10}\ \mathbf{M}_\mathrm{halo}/\mathbf{M}_\mathrm{\odot}$', fontsize=24)
# plt.ylabel(r'$\bigg(\frac{\mathbf{\Sigma}_\mathrm{Mg\,II}}{\mathbf{\Sigma}_\mathrm{m}}\bigg)^\mathrm{1h}$', rotation='horizontal', fontsize=30)
plt.ylabel(r'$\log_{10}\ \boldsymbol{f}_\mathrm{Mg\,II}^\mathrm{1h}/10^{-9}$', rotation='vertical', fontsize=30)
strs =[r'$68.3\%$', r'$95.4\%$', r'$99.7\%$']
fmt = {}
for l,s in zip(cs.levels, strs ): fmt[l] = s
#plt.clabel(cs, cs.levels, fmt=fmt, inline=1, fontsize=10)
#plt.plot(np.log10(Mhalo[iMmax_1h]), np.log10(A[iAmax_1h]), 'o', color='white', markersize=12)
plt.plot(np.log10(Mhalo[iMmax_1h]), np.log10(A[iAmax_1h]), 'o', color='yellow', markersize=15)
plt.title(r"Combining Galaxy-Gas/Mass Correlations (Demo)", fontsize=18)
ax4 = plt.subplot2grid((2,1),(1,0), rowspan=1)
plt.axhspan(0.60, 1.10, facecolor='0.9', alpha=0.3)
plt.axvspan(13.25, 13.45, facecolor='0.9', alpha=0.3)
csf=plt.contourf(np.log10(Mhalo[0:nMhalo]), np.log10(A[0:nA]), joint_2h_Mhalo[0:nA, 0:nMhalo], levels_2h_Mhalo_f[::-1], colors=('white', 'white', 'blue'))
cs=plt.contour(np.log10(Mhalo[0:nMhalo]), np.log10(A[0:nA]), joint_2h_Mhalo[0:nA, 0:nMhalo], levels_2h_Mhalo, colors=('k','k','k'))
#plt.xlim(12, 16.)
plt.xlim(np.log10(Mhalo_min), 16.5)
#plt.xlim(np.log10(Mhalo_min), np.log10(Mhalo_max))
#plt.ylim(np.log10(A_min), np.log10(A_max))
plt.ylim(-1.2, 1.99)
plt.xlabel(r'$\mathbf{\log}_{10}\ \mathbf{M}_\mathrm{halo}/\mathbf{M}_\mathrm{\odot}$', fontsize=24)
# plt.ylabel(r'$\bigg(\frac{\mathbf{\Sigma}_\mathrm{Mg\,II}}{\mathbf{\Sigma}_\mathrm{m}}\bigg)^\mathrm{2h}$', rotation='horizontal', fontsize=30)
plt.ylabel(r'$\log_{10}\ \boldsymbol{f}_\mathrm{Mg\,II}^\mathrm{2h}/10^{-9}$', rotation='vertical', fontsize=30)
strs =[r'$68.3\%$', r'$95.4\%$', r'$99.7\%$']
fmt = {}
for l,s in zip(cs.levels, strs ): fmt[l] = s
#plt.clabel(cs, cs.levels, fmt=fmt, inline=1, fontsize=10)
plt.plot(np.log10(Mhalo[iMmax_2h]), np.log10(A[iAmax_2h]), 'o', color='yellow', markersize=15)
#plt.plot(np.log10(Mhalo[iMmax_2h]), np.log10(A[iAmax_2h]), 'bx', markersize=15)
plt.setp(ax3.get_xticklabels(), visible=False)
plt.show()
plt.savefig('Chi2_1h_2h_sat1.eps')
#print "velocity dispersion ..."
#plt.figure(figsize=(10,7))
#plt.clf()
#ax = plt.subplot2grid((1,1),(0,0), rowspan=1)
#plt.subplots_adjust(left=0.20, bottom=0.2)
#plt.loglog(RR, vdisp_all_max, 'b', lw=2)
#plt.loglog(RR, vdisp_1h_max, 'g', ls='--', dashes=dashes1, lw=2)
#plt.loglog(RR, vdisp_2h_max, color='#FF8C00', ls='--', dashes=dashes2, lw=2)
#plt.loglog(RR, vdisp_1h_max_dm, 'gray', ls=':', lw=2)
#
#plt.legend([r'\textbf{Halo model}', r'\textbf{1-halo term (gas)}', r'\textbf{2-halo term (gas\&dark matter)}', r'\textbf{1-halo term (dark matter)}'], handlelength=3.4, frameon=False, loc=3)
#plt.errorbar(y, vdisp_lrg, yerr=vdisp_lrg_err, capsize=5, fmt='bo', mec='b', ms=10)
#plt.setp(ax.get_yticklabels(), visible=True)
#plt.yticks([20, 50, 100, 200, 400],('20', '50', '100', '200', '400'), fontsize=22)
#plt.ylabel(r'$\boldsymbol{\sigma}_\mathrm{los}\ (\mathrm{km\ s^{-1}})$', rotation='vertical', fontsize=26)
#plt.setp(ax.get_xticklabels(), visible=True)
#plt.xticks([0.1, 1, 10],('100 kpc', '1 Mpc', '10 Mpc'), fontsize=22)
#plt.xlabel(r'$\mathbf{r}_\mathrm{p}$', fontsize=26)
#plt.xlim(1E-2, 3E1)
#plt.ylim(1.0E1, 6.5E2)
#plt.show()
#plt.savefig('Halomodel_vdisp_sat1.eps')
print "Demo running ..."
ndemo = 3
#M_demo = np.arange(8)*0.5+12.0
#M_demo = np.array([11.5, 12.5, 13.5, 14.5, 15.5])
#outcolor=[plt.cm.hsv(0.01), plt.cm.hsv(0.1), plt.cm.hsv(0.7), plt.cm.hsv(0.85), plt.cm.hsv(0.95)]
#dashes = [(8,3), (11,4), (14,5), (17, 6), (20,7)]
#M_demo = np.array([11.5, 13.4, 15.5])
M_demo = np.array([12.0, np.log10(Mmax), 16.0])
outcolor=[plt.cm.hsv(0.01), plt.cm.hsv(0.7), plt.cm.hsv(0.1)]
dashes = [(10,3), (17, 6), (25,9)]
Sigma_1h_demo = np.zeros((RR.size, ndemo))
Sigma_2h_demo = np.zeros((RR.size, ndemo))
Amax_1h_demo = np.zeros(ndemo)
Amax_2h_demo = np.zeros(ndemo)
iAmax_1h_demo = np.zeros(ndemo, dtype=int)
iAmax_2h_demo = np.zeros(ndemo, dtype=int)
iMmax_demo = np.zeros(ndemo, dtype=int)
for i in np.arange(ndemo):
Mtmp = M_demo[i]
iMtmp = np.argmin(np.abs(Mtmp-np.log10(Mhalo)))
iMmax_demo[i] = iMtmp
bMtmp = bM[iMtmp]
Sigma_1h_demo[:,i] = (hm.NFW_project_profile(RR, 10.**Mtmp, z, CosPar)).reshape(RR.size)
Sigma_2h_demo[:,i] = (bMtmp*f(RR).reshape(RR.size,1)).reshape(RR.size)
    iAmax_1h_tmp = np.argmin(Chi2[:,:,iMtmp])//(nA)
    iAmax_2h_tmp = np.argmin(Chi2[:,:,iMtmp])%(nA)
iAmax_1h_demo[i] = iAmax_1h_tmp
iAmax_2h_demo[i] = iAmax_2h_tmp
Amax_1h_demo[i] = A[iAmax_1h_tmp]
Amax_2h_demo[i] = A[iAmax_2h_tmp]
print iMtmp, iAmax_1h_tmp, iAmax_2h_tmp, nA, nMhalo
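# Editor's note: the loop above profiles the fit at fixed halo mass. For each
# demo mass it picks the nearest grid point in Mhalo, then decodes the
# flattened argmin over the (nA, nA) amplitude grid into the best-fit 1-halo
# (row = flat_index // nA) and 2-halo (column = flat_index % nA) amplitude
# indices used by the residual panels further down.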
plt.figure(figsize=(10,7))
plt.clf()
plt.subplots_adjust(left=0.20, bottom=0.2)
#outcolor=[plt.cm.hsv(0.01), plt.cm.hsv(0.05), plt.cm.hsv(0.1), plt.cm.hsv(0.6), plt.cm.hsv(0.65), plt.cm.hsv(0.75), plt.cm.hsv(0.85), plt.cm.hsv(0.90)]
plt.xlim(1.5E-2, 4E1)
plt.ylim(4E-1, 6E3)
plt.errorbar(y, Sigma_lrg, yerr=Sigma_lrg_err, capsize=5, fmt='bo', mec='b', ms=10)
plt.text(1.3E0, 17E2, r'$\log_{10} \boldsymbol{\mathrm{M}}_\mathrm{halo}/\boldsymbol{\mathrm{M}}_\odot=$', color='k', fontsize=20)
for i in np.arange(ndemo):
plt.loglog(RR, Amax_2h*(Sigma_1h_demo[:,i]+Sigma_2h_demo[:,i]), color=outcolor[i], lw=3)
ytmp = 18.5E2/(1.8**(ndemo-1-i))
plt.text(1.3E1, ytmp, r'$'+'%4.1f' % (M_demo[i]+0.000) +'$', color=outcolor[i], weight='bold', fontsize=20)
plt.xticks([0.1, 1, 10],('100 kpc', '1 Mpc', '10 Mpc'), fontsize=22)
plt.xlabel(r'$\mathbf{r}_\mathrm{p}$', fontsize=24)
plt.ylabel(r'$\boldsymbol{\Sigma}_\mathrm{Mg\,II}^\mathrm{model}\ (10^{-9} \mathrm{M}_\odot\ \mathrm{pc^{-2}})$', rotation='vertical', fontsize=26)
plt.show()
plt.savefig('Halomodel_demo_sat1.eps')
plt.figure(figsize=(9,13))
plt.clf()
ax = plt.subplot2grid((ndemo+3,1),(0,0), rowspan=3)
plt.subplots_adjust(left=0.15, bottom=0.15, hspace=0)
plt.text(1.3E0, 17E2, r'$\log_{10} \boldsymbol{\mathrm{M}}_\mathrm{halo}/\boldsymbol{\mathrm{M}}_\odot=$', color='k', fontsize=20)
for i in np.arange(ndemo):
plt.loglog(RR, Amax_1h_demo[i]*Sigma_1h_demo[:,i]+Amax_2h_demo[i]*Sigma_2h_demo[:,i], color=outcolor[i], lw=4)
#plt.loglog(RR, Amax_1h_demo[i]*Sigma_1h_demo[:,i]+Amax_2h_demo[i]*Sigma_2h_demo[:,i], '--', color=outcolor[i], dashes=dashes[i], lw=4)
plt.loglog(RR, Amax_1h_demo[i]*Sigma_1h_demo[:,i], '--', color=outcolor[i], dashes=dashes1, lw=1)
plt.loglog(RR, Amax_2h_demo[i]*Sigma_2h_demo[:,i], '--', color=outcolor[i], dashes=dashes2, lw=1)
ytmp = 18.5E2/(1.8**i)
plt.text(1.8E1, ytmp, r'$'+'%4.1f' % (M_demo[i]+0.000) +'$', color=outcolor[i], weight='heavy', fontsize=20)
#plt.xlabel(r'$\mathbf{r}_\mathrm{p}$', fontsize=24)
plt.ylabel(r'$\boldsymbol{\Sigma}_\mathrm{Mg\,II}\ (10^{-9} \mathrm{M}_\odot\ \mathrm{pc^{-2}})$', rotation='vertical', fontsize=26)
#plt.xlim(2E-2, 3E1)
#plt.ylim(5E-1, 4E3)
plt.xlim(1.5E-2, 5E1)
plt.ylim(5E-1, 4E3)
plt.setp(ax.get_xticklabels(), visible=False)
#plt.xticks([0.1, 1, 10],('100 kpc', '1 Mpc', '10 Mpc'), fontsize=22)
plt.errorbar(y, Sigma_lrg, yerr=Sigma_lrg_err, capsize=5, fmt='bo', mec='b', ms=14)
#plt.savefig('Halomodel_demo_bestfit.eps')
#plt.clf()
#plt.figure(figsize=(10,10))
#plt.subplots_adjust(left=0.15, bottom=0.15, hspace=0)
for i in np.arange(ndemo):
ax = plt.subplot2grid((ndemo+3,1),(i+3,0), rowspan=1)
plt.errorbar(y, (Sigma_lrg-Sigma_all[:,iAmax_1h_demo[i],iAmax_2h_demo[i],iMmax_demo[i]])/Sigma_all[:,iAmax_1h_demo[i],iAmax_2h_demo[i],iMmax_demo[i]], yerr=Sigma_lrg_err/Sigma_all[:,iAmax_1h_demo[i],iAmax_2h_demo[i],iMmax_demo[i]], capsize=5, fmt='o', mec=outcolor[i], mfc=outcolor[i], ecolor=outcolor[i], ms=10)
#plt.plot(RR, ((Amax_1h*Sigma_1h_max+Amax_2h*Sigma_2h_max)-(Amax_1h_demo[i]*Sigma_1h_demo[:,i]+Amax_2h_demo[i]*Sigma_2h_demo[:,i])), '--', color=outcolor[i], dashes=dashes[i], lw=4)
plt.xscale('log')
plt.plot([1E-2,5E1],[0,0], 'k--', lw=1)
plt.xlim(1.5E-2, 5E1)
#if i != ndemo-1: plt.ylim(-1.49E0, 1.49E0)
#if i == ndemo-1: plt.ylim(-1.E0, 2.E0)
plt.ylim(-1.01E0, 1.99E0)
plt.setp(ax.get_xticklabels(), visible=False)
#if i == 0: plt.text(2E0, 1.2E0, r'$\log_{10} \boldsymbol{\mathrm{M}}_\mathrm{halo}/\boldsymbol{\mathrm{M}}_\odot='+'%4.1f' % M_demo[i] +'$', color=outcolor[i], weight='bold', fontsize=20)
#if i != 0: plt.text(1.7E1, 1.2E0, r'$'+'%4.1f' % M_demo[i] +'$', color=outcolor[i], weight='bold', fontsize=20)
plt.text(5E0, 1.25E0, r'$\boldsymbol{\chi}^2/{dof}='+'%4.2f' % (Chi2[iAmax_1h_demo[i], iAmax_2h_demo[i], iMmax_demo[i]]/14.)+'$', color=outcolor[i], fontsize=20)
#if i == 0: plt.text(5E0, 1.0E0, r'$\boldsymbol{\chi}^2/{dof}='+'%4.2f' % (Chi2[iAmax_1h_demo[i], iAmax_2h_demo[i], iMmax_demo[i]]/14.)+'$', fontsize=20)
#if i != 0: plt.text(1.7E1, -2.0E0, r'$'+'%4.2f' % (Chi2[iAmax_1h_demo[i], iAmax_2h_demo[i], iMmax_demo[i]]/14.)+'$', fontsize=20)
# plt.text(1E1, 1.5E0, r'$\boldsymbol{\chi}^2/{dof}='+'%4.2f' % (Chi2[iAmax_1h_15, iAmax_2h_15, nMhalo-1]/14.)+'$', fontsize=20)
if i==ndemo/2: plt.ylabel(r'$\boldsymbol{\delta \Sigma}_\mathrm{Mg\,II}/\boldsymbol{\Sigma}_\mathrm{Mg\,II}^\mathrm{model}$', rotation='vertical', fontsize=26)
plt.setp(ax.get_xticklabels(), visible=True)
plt.xticks([0.1, 1, 10],('100 kpc', '1 Mpc', '10 Mpc'), fontsize=22)
plt.xlabel(r'$\mathbf{r}_\mathrm{p}$', fontsize=26)
plt.show()
plt.savefig('Halomodel_demo_bestfit_residuals_sat1.eps')
|
guangtunbenzhu/BGT-Cosmology
|
Examples/LRG-MgII/Sigma_MonteCarlo_saturated.py
|
Python
|
mit
| 47,529
|
[
"Galaxy"
] |
f0f4008b90bdff4913fcc3afdf2e81b589411cd727e5ee07c245c7544d55a4ec
|
import copy
import datetime
import functools
import logging
import random
from typing import (
Callable,
Dict,
List,
Mapping,
MutableMapping,
MutableSequence,
Optional,
cast,
)
from uuid import UUID
from ruamel.yaml.comments import CommentedMap
from schema_salad.exceptions import ValidationException
from schema_salad.sourceline import SourceLine, indent
from . import command_line_tool, context, procgenerator
from .checker import circular_dependency_checker, static_checker
from .context import LoadingContext, RuntimeContext, getdefault
from .errors import WorkflowException
from .load_tool import load_tool
from .loghandler import _logger
from .process import Process, get_overrides, shortname
from .provenance_profile import ProvenanceProfile
from .utils import (
CWLObjectType,
JobsGeneratorType,
OutputCallbackType,
StepType,
aslist,
)
from .workflow_job import WorkflowJob
def default_make_tool(
toolpath_object: CommentedMap, loadingContext: LoadingContext
) -> Process:
if not isinstance(toolpath_object, MutableMapping):
raise WorkflowException("Not a dict: '%s'" % toolpath_object)
if "class" in toolpath_object:
if toolpath_object["class"] == "CommandLineTool":
return command_line_tool.CommandLineTool(toolpath_object, loadingContext)
if toolpath_object["class"] == "ExpressionTool":
return command_line_tool.ExpressionTool(toolpath_object, loadingContext)
if toolpath_object["class"] == "Workflow":
return Workflow(toolpath_object, loadingContext)
if toolpath_object["class"] == "ProcessGenerator":
return procgenerator.ProcessGenerator(toolpath_object, loadingContext)
if toolpath_object["class"] == "Operation":
return command_line_tool.AbstractOperation(toolpath_object, loadingContext)
    raise WorkflowException(
        "Missing or invalid 'class' field in "
        "%s, expecting one of: CommandLineTool, ExpressionTool, Workflow, "
        "ProcessGenerator, Operation" % toolpath_object["id"]
    )
context.default_make_tool = default_make_tool
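# Editor's note: default_make_tool() is the factory that LoadingContext.
# construct_tool_object defaults to (see the assignment above); given a
# resolved CWL document (a schema-salad CommentedMap with a "class" field) it
# returns the matching Process subclass: "CommandLineTool" -> CommandLineTool,
# "ExpressionTool" -> ExpressionTool, "Workflow" -> Workflow,
# "ProcessGenerator" -> ProcessGenerator, "Operation" -> AbstractOperation;
# anything else raises WorkflowException.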
class Workflow(Process):
def __init__(
self,
toolpath_object: CommentedMap,
loadingContext: LoadingContext,
) -> None:
"""Initialize this Workflow."""
super().__init__(toolpath_object, loadingContext)
self.provenance_object = None # type: Optional[ProvenanceProfile]
if loadingContext.research_obj is not None:
run_uuid = None # type: Optional[UUID]
is_main = not loadingContext.prov_obj # Not yet set
if is_main:
run_uuid = loadingContext.research_obj.ro_uuid
self.provenance_object = ProvenanceProfile(
loadingContext.research_obj,
full_name=loadingContext.cwl_full_name,
host_provenance=loadingContext.host_provenance,
user_provenance=loadingContext.user_provenance,
orcid=loadingContext.orcid,
run_uuid=run_uuid,
fsaccess=loadingContext.research_obj.fsaccess,
) # inherit RO UUID for main wf run
# TODO: Is Workflow(..) only called when we are the main workflow?
self.parent_wf = self.provenance_object
# FIXME: Won't this overwrite prov_obj for nested workflows?
loadingContext.prov_obj = self.provenance_object
loadingContext = loadingContext.copy()
loadingContext.requirements = self.requirements
loadingContext.hints = self.hints
self.steps = [] # type: List[WorkflowStep]
validation_errors = []
for index, step in enumerate(self.tool.get("steps", [])):
try:
self.steps.append(
self.make_workflow_step(
step, index, loadingContext, loadingContext.prov_obj
)
)
except ValidationException as vexc:
if _logger.isEnabledFor(logging.DEBUG):
_logger.exception("Validation failed at")
validation_errors.append(vexc)
if validation_errors:
raise ValidationException("\n".join(str(v) for v in validation_errors))
random.shuffle(self.steps)
# statically validate data links instead of doing it at runtime.
workflow_inputs = self.tool["inputs"]
workflow_outputs = self.tool["outputs"]
step_inputs = [] # type: List[CWLObjectType]
step_outputs = [] # type: List[CWLObjectType]
param_to_step = {} # type: Dict[str, CWLObjectType]
for step in self.steps:
step_inputs.extend(step.tool["inputs"])
step_outputs.extend(step.tool["outputs"])
for s in step.tool["inputs"]:
param_to_step[s["id"]] = step.tool
for s in step.tool["outputs"]:
param_to_step[s["id"]] = step.tool
if getdefault(loadingContext.do_validate, True):
static_checker(
workflow_inputs,
workflow_outputs,
step_inputs,
step_outputs,
param_to_step,
)
circular_dependency_checker(step_inputs)
def make_workflow_step(
self,
toolpath_object: CommentedMap,
pos: int,
loadingContext: LoadingContext,
parentworkflowProv: Optional[ProvenanceProfile] = None,
) -> "WorkflowStep":
return WorkflowStep(toolpath_object, pos, loadingContext, parentworkflowProv)
def job(
self,
job_order: CWLObjectType,
output_callbacks: Optional[OutputCallbackType],
runtimeContext: RuntimeContext,
) -> JobsGeneratorType:
builder = self._init_job(job_order, runtimeContext)
if runtimeContext.research_obj is not None:
if runtimeContext.toplevel:
# Record primary-job.json
runtimeContext.research_obj.fsaccess = runtimeContext.make_fs_access("")
runtimeContext.research_obj.create_job(builder.job)
job = WorkflowJob(self, runtimeContext)
yield job
runtimeContext = runtimeContext.copy()
runtimeContext.part_of = "workflow %s" % job.name
runtimeContext.toplevel = False
yield from job.job(builder.job, output_callbacks, runtimeContext)
def visit(self, op: Callable[[CommentedMap], None]) -> None:
op(self.tool)
for step in self.steps:
step.visit(op)
def used_by_step(step: StepType, shortinputid: str) -> bool:
for st in cast(MutableSequence[CWLObjectType], step["in"]):
if st.get("valueFrom"):
if ("inputs.%s" % shortinputid) in cast(str, st.get("valueFrom")):
return True
if step.get("when"):
if ("inputs.%s" % shortinputid) in cast(str, step.get("when")):
return True
return False
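# Editor's note: used_by_step() is consulted for step "in" entries that do not
# match an input of the embedded tool; it reports True when the short input id
# is still referenced from an expression, e.g. a step entry like
#   {"in": [{"id": "threshold", "valueFrom": "$(inputs.threshold * 2)"}],
#    "when": "$(inputs.enabled)"}
# keeps both "threshold" and "enabled" flagged as used.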
class WorkflowStep(Process):
def __init__(
self,
toolpath_object: CommentedMap,
pos: int,
loadingContext: LoadingContext,
parentworkflowProv: Optional[ProvenanceProfile] = None,
) -> None:
"""Initialize this WorkflowStep."""
debug = loadingContext.debug
if "id" in toolpath_object:
self.id = toolpath_object["id"]
else:
self.id = "#step" + str(pos)
loadingContext = loadingContext.copy()
parent_requirements = copy.deepcopy(getdefault(loadingContext.requirements, []))
loadingContext.requirements = copy.deepcopy(
toolpath_object.get("requirements", [])
)
assert loadingContext.requirements is not None # nosec
for parent_req in parent_requirements:
found_in_step = False
for step_req in loadingContext.requirements:
if parent_req["class"] == step_req["class"]:
found_in_step = True
break
if not found_in_step:
loadingContext.requirements.append(parent_req)
loadingContext.requirements.extend(
cast(
List[CWLObjectType],
get_overrides(
getdefault(loadingContext.overrides_list, []), self.id
).get("requirements", []),
)
)
hints = copy.deepcopy(getdefault(loadingContext.hints, []))
hints.extend(toolpath_object.get("hints", []))
loadingContext.hints = hints
try:
if isinstance(toolpath_object["run"], CommentedMap):
self.embedded_tool = loadingContext.construct_tool_object(
toolpath_object["run"], loadingContext
) # type: Process
else:
loadingContext.metadata = {}
self.embedded_tool = load_tool(toolpath_object["run"], loadingContext)
except ValidationException as vexc:
if loadingContext.debug:
_logger.exception("Validation exception")
raise WorkflowException(
"Tool definition %s failed validation:\n%s"
% (toolpath_object["run"], indent(str(vexc)))
) from vexc
validation_errors = []
self.tool = toolpath_object = copy.deepcopy(toolpath_object)
bound = set()
if self.embedded_tool.get_requirement("SchemaDefRequirement")[0]:
if "requirements" not in toolpath_object:
toolpath_object["requirements"] = []
toolpath_object["requirements"].append(
self.embedded_tool.get_requirement("SchemaDefRequirement")[0]
)
for stepfield, toolfield in (("in", "inputs"), ("out", "outputs")):
toolpath_object[toolfield] = []
for index, step_entry in enumerate(toolpath_object[stepfield]):
if isinstance(step_entry, str):
param = CommentedMap() # type: CommentedMap
inputid = step_entry
else:
param = CommentedMap(step_entry.items())
inputid = step_entry["id"]
shortinputid = shortname(inputid)
found = False
for tool_entry in self.embedded_tool.tool[toolfield]:
frag = shortname(tool_entry["id"])
if frag == shortinputid:
# if the case that the step has a default for a parameter,
# we do not want the default of the tool to override it
step_default = None
if "default" in param and "default" in tool_entry:
step_default = param["default"]
param.update(tool_entry)
param["_tool_entry"] = tool_entry
if step_default is not None:
param["default"] = step_default
found = True
bound.add(frag)
break
if not found:
if stepfield == "in":
param["type"] = "Any"
param["used_by_step"] = used_by_step(self.tool, shortinputid)
param["not_connected"] = True
else:
if isinstance(step_entry, Mapping):
step_entry_name = step_entry["id"]
else:
step_entry_name = step_entry
validation_errors.append(
SourceLine(
self.tool["out"], index, include_traceback=debug
).makeError(
"Workflow step output '%s' does not correspond to"
% shortname(step_entry_name)
)
+ "\n"
+ SourceLine(
self.embedded_tool.tool,
"outputs",
include_traceback=debug,
).makeError(
" tool output (expected '%s')"
% (
"', '".join(
[
shortname(tool_entry["id"])
for tool_entry in self.embedded_tool.tool[
"outputs"
]
]
)
)
)
)
param["id"] = inputid
param.lc.line = toolpath_object[stepfield].lc.data[index][0]
param.lc.col = toolpath_object[stepfield].lc.data[index][1]
param.lc.filename = toolpath_object[stepfield].lc.filename
toolpath_object[toolfield].append(param)
missing_values = []
for _, tool_entry in enumerate(self.embedded_tool.tool["inputs"]):
if shortname(tool_entry["id"]) not in bound:
if "null" not in tool_entry["type"] and "default" not in tool_entry:
missing_values.append(shortname(tool_entry["id"]))
if missing_values:
validation_errors.append(
SourceLine(self.tool, "in", include_traceback=debug).makeError(
"Step is missing required parameter%s '%s'"
% (
"s" if len(missing_values) > 1 else "",
"', '".join(missing_values),
)
)
)
if validation_errors:
raise ValidationException("\n".join(validation_errors))
super().__init__(toolpath_object, loadingContext)
if self.embedded_tool.tool["class"] == "Workflow":
(feature, _) = self.get_requirement("SubworkflowFeatureRequirement")
if not feature:
raise WorkflowException(
"Workflow contains embedded workflow but "
"SubworkflowFeatureRequirement not in requirements"
)
if "scatter" in self.tool:
(feature, _) = self.get_requirement("ScatterFeatureRequirement")
if not feature:
raise WorkflowException(
"Workflow contains scatter but ScatterFeatureRequirement "
"not in requirements"
)
inputparms = copy.deepcopy(self.tool["inputs"])
outputparms = copy.deepcopy(self.tool["outputs"])
scatter = aslist(self.tool["scatter"])
method = self.tool.get("scatterMethod")
if method is None and len(scatter) != 1:
raise ValidationException(
"Must specify scatterMethod when scattering over multiple inputs"
)
inp_map = {i["id"]: i for i in inputparms}
for inp in scatter:
if inp not in inp_map:
                    raise SourceLine(
self.tool, "scatter", ValidationException, debug
).makeError(
"Scatter parameter '%s' does not correspond to "
"an input parameter of this step, expecting '%s'"
% (
shortname(inp),
"', '".join(shortname(k) for k in inp_map.keys()),
)
)
inp_map[inp]["type"] = {"type": "array", "items": inp_map[inp]["type"]}
if self.tool.get("scatterMethod") == "nested_crossproduct":
nesting = len(scatter)
else:
nesting = 1
for _ in range(0, nesting):
for oparam in outputparms:
oparam["type"] = {"type": "array", "items": oparam["type"]}
self.tool["inputs"] = inputparms
self.tool["outputs"] = outputparms
self.prov_obj = None # type: Optional[ProvenanceProfile]
if loadingContext.research_obj is not None:
self.prov_obj = parentworkflowProv
if self.embedded_tool.tool["class"] == "Workflow":
self.parent_wf = self.embedded_tool.parent_wf
else:
self.parent_wf = self.prov_obj
def receive_output(
self,
output_callback: OutputCallbackType,
jobout: CWLObjectType,
processStatus: str,
) -> None:
output = {}
for i in self.tool["outputs"]:
field = shortname(i["id"])
if field in jobout:
output[i["id"]] = jobout[field]
else:
processStatus = "permanentFail"
output_callback(output, processStatus)
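    # Editor's note: receive_output() maps the embedded tool's short output
    # names back to the step's fully qualified output ids (e.g. "report" back
    # to "...#main/step1/report" in a hypothetical workflow) and downgrades
    # the status to "permanentFail" whenever a declared output is missing
    # from jobout.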
def job(
self,
job_order: CWLObjectType,
output_callbacks: Optional[OutputCallbackType],
runtimeContext: RuntimeContext,
) -> JobsGeneratorType:
"""Initialize sub-workflow as a step in the parent profile."""
if (
self.embedded_tool.tool["class"] == "Workflow"
and runtimeContext.research_obj
and self.prov_obj
and self.embedded_tool.provenance_object
):
self.embedded_tool.parent_wf = self.prov_obj
process_name = self.tool["id"].split("#")[1]
self.prov_obj.start_process(
process_name,
datetime.datetime.now(),
self.embedded_tool.provenance_object.workflow_run_uri,
)
step_input = {}
for inp in self.tool["inputs"]:
field = shortname(inp["id"])
if not inp.get("not_connected"):
step_input[field] = job_order[inp["id"]]
try:
yield from self.embedded_tool.job(
step_input,
functools.partial(self.receive_output, output_callbacks),
runtimeContext,
)
except WorkflowException:
_logger.error("Exception on step '%s'", runtimeContext.name)
raise
except Exception as exc:
_logger.exception("Unexpected exception")
raise WorkflowException(str(exc)) from exc
def visit(self, op: Callable[[CommentedMap], None]) -> None:
self.embedded_tool.visit(op)
|
common-workflow-language/cwltool
|
cwltool/workflow.py
|
Python
|
apache-2.0
| 18,689
|
[
"VisIt"
] |
e2eaa6f8ab3d84cf502ac7b0a6bfbd257ec0ba126341612c8e4730b39e2de149
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2012 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
import time
from datetime import datetime
import pytz
from tools.translate import _
from tools import ustr
import logging
_logger = logging.getLogger(__name__)
class sale_order(osv.osv):
_inherit = "sale.order"
_columns = {
'so_payment_method': fields.char('Payment Method', size=32),
}
sale_order()
class delivery_driver(osv.osv):
_name='delivery.driver'
_columns = {
'partner_id': fields.many2one('res.partner','Partner',help='Fill this field if the driver is a outsourcing of the company'),
'employee_id': fields.many2one('hr.employee','Employee',help='Fill this if the driver is a employee of the company'),
'name': fields.char('Name', size=64, required=True),
'carrier_id': fields.many2one('delivery.carrier','Carrier'),
'outsourcing': fields.boolean('Outsourcing ?'),
'route_ids': fields.one2many('delivery.route','driver_id','Delivery Routes'),
'is_driver': fields.boolean('Is Driver ?'),
'is_picker': fields.boolean('Is Picker ?'),
'active': fields.boolean('Active ?'),
'color': fields.integer('Color Index'),
'tmp_route_id': fields.many2one('delivery.route_tmp','Temporary Delivery Route'),
}
_defaults = {
'outsourcing': False,
'is_driver': True,
'active': True,
}
def write(self, cr, uid, ids, vals, context=None):
context = context or {}
if type(ids)!=type([]):
ids = [ids]
if 'tmp_route_id' in vals and 'force_dts_id' in context:
driver = self.browse(cr, uid, ids[0])
if vals['tmp_route_id']:
tmp_route = self.pool.get('delivery.route_tmp').browse(cr, uid, vals['tmp_route_id'])
if tmp_route.route_id:
route_vals = {}
if driver.is_driver and not tmp_route.route_id.driver_id:
route_vals = {'driver_id':driver.id}
elif not tmp_route.route_id.picker_id:
route_vals = {'picker_id':driver.id}
if route_vals:
cr.execute("UPDATE delivery_route SET driver_id = Null WHERE driver_id = " + str(driver.id) + " AND dts_id=" + str(tmp_route.dts_id.id) + " ")
cr.execute("UPDATE delivery_route SET picker_id = Null WHERE picker_id = " + str(driver.id) + " AND dts_id=" + str(tmp_route.dts_id.id) + " ")
cr.commit()
self.pool.get('delivery.route').write(cr, uid, [tmp_route.route_id.id], route_vals)
else:
raise osv.except_osv(_('Error'), _("You can not assign more than one Deliver and one Picker."))
else:
cr.execute("UPDATE delivery_route SET driver_id = Null WHERE driver_id = " + str(driver.id) + " AND dts_id=" + str(context['force_dts_id']) + " ")
cr.execute("UPDATE delivery_route SET picker_id = Null WHERE picker_id = " + str(driver.id) + " AND dts_id=" + str(context['force_dts_id']) + " ")
cr.commit()
if 'is_driver' in vals or 'is_picker' in vals:
driver = self.browse(cr, uid, ids[0])
is_driver = vals.get('is_driver', driver.is_driver)
is_picker = vals.get('is_picker', driver.is_picker)
if is_picker and is_driver:
vals.update({'color': 8})
elif is_driver and not is_picker:
vals.update({'color': 6})
elif is_picker and not is_driver:
vals.update({'color': 2})
else:
vals.update({'color': 0})
return super(delivery_driver, self).write(cr, uid, ids, vals, context=context)
def create(self, cr, uid, vals, context=None):
context = context or {}
if 'is_driver' in vals or 'is_picker' in vals:
is_driver = vals.get('is_driver', False)
is_picker = vals.get('is_picker', False)
if is_picker and is_driver:
vals.update({'color': 8})
elif is_driver and not is_picker:
vals.update({'color': 6})
elif is_picker and not is_driver:
vals.update({'color': 2})
else:
vals.update({'color': 0})
return super(delivery_driver, self).create(cr, uid, vals, context=context)
def search(self, cr, uid, args, offset=0, limit=None, order='name', context=None, count=False):
onlyactive = True
for arg in args:
if len(arg)==3 and arg[0]=='active':
onlyactive = False
if onlyactive:
args.append(('active','=',True))
return super(delivery_driver, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)
def _read_group_tmp_route_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
context = context or {}
route_tmp_obj = self.pool.get('delivery.route_tmp')
args = []
if 'force_dts_id' in context:
args.append(('dts_id', '=', context['force_dts_id']))
route_tmp_ids = route_tmp_obj.search(cr, uid, args, context=context)
result = route_tmp_obj.name_get(cr, uid, route_tmp_ids, context=context)
fold = {}
return result, fold
_group_by_full = {
'tmp_route_id': _read_group_tmp_route_ids,
}
delivery_driver()
class delivery_time_slot(osv.osv):
_name='delivery.time.slot'
_columns = {
'sequence': fields.integer('Sequence'),
'name': fields.char('Name', size=64, required=True),
'max_time': fields.char('If before', size=5, required=True, help='This time will be used to assign the Time Slot. Format: 20:30'),
'start_time': fields.char('From', size=5),
'end_time': fields.char('To', size=5),
'type': fields.selection([('dts', 'Delivery'), ('pts', 'Preparation')], 'Type', required=True, select=True),
'parent_id': fields.many2one('delivery.time.slot','Parent'),
'dts_id': fields.many2one('delivery.time.slot','Linked Delivery Time', domain=[('type','=','dts')]),
'shop_id': fields.many2one('sale.shop','Shop'),
}
_defaults = {
'type': 'dts',
'max_time': '20:30',
}
_order = 'sequence, name DESC'
delivery_time_slot()
class delivery_time(osv.osv):
_name='delivery.time'
_columns = {
'sequence': fields.integer('Sequence'),
'name': fields.char('Name', size=64, required=True),
#'start_hour': fields.selection(([(str(x),str(x)) for x in range(0,24)] + [('-','--')]),'Start Hour'),
#'start_minute': fields.selection(([(str(x*5),str(x*5)) for x in range(0,12)] + [('-','--')]),'Start Minute'),
#'end_hour': fields.selection(([(str(x),str(x)) for x in range(0,24)] + [('-','--')]),'End Hour'),
#'end_minute': fields.selection(([(str(x*5),str(x*5)) for x in range(0,12)] + [('-','--')]),'End Minute'),
'start_date': fields.datetime('Delivery Time From'),
'end_date': fields.datetime('Delivery Time To'),
'active': fields.boolean('Active'),
'type': fields.selection([('dts', 'Delivery'), ('pts', 'Preparation')], 'Type', required=True, select=True),
'dts_id': fields.many2one('delivery.time','Linked Delivery Time', domain=[('type','=','dts')]),
'slot_id': fields.many2one('delivery.time.slot','Time Slot'),
}
_defaults = {
'active': True,
'type': 'dts',
}
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=80):
if context is None:
context = {}
if not args:
args = []
args.extend(context.get('domain',[]))
#ids = self.search(cr, user, args, limit=limit, context=context)
#return self.name_get(cr, user, ids, context)
return super(delivery_time, self).name_search(cr, user, name, args=args, operator=operator, context=context, limit=limit)
def create_from_time(self, cr, uid, data, context=None):
""" create a delivery.time by given time
start_date:
end_date:
"""
#start_date = datetime.strptime(data['start_date'], '%Y-%m-%d %H:%M:%S')
#end_date = datetime.strptime(data['end_date'], '%Y-%m-%d %H:%M:%S')
        context = context or {}
        context_tz = context.get('tz', 'Asia/Shanghai')
tz = pytz.timezone(context_tz)
start = pytz.utc.localize(data['start_date']).astimezone(tz)
end = pytz.utc.localize(data['end_date']).astimezone(tz)
start = start.strftime('%y/%m/%d %H:%M')
end = end.strftime('%H:%M')
# convert start in user's timezone
name = "%s~%s" %(start,end,)
data.update({'name':name})
return self.create(cr, uid, data, context)
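    # Editor's note: a hedged example of create_from_time(); the incoming
    # datetimes are assumed to be naive UTC values, which are localized to the
    # user's timezone (default 'Asia/Shanghai', UTC+8) before the name is
    # built:
    #   data = {'start_date': datetime(2013, 5, 1, 2, 0),
    #           'end_date':   datetime(2013, 5, 1, 4, 0), 'type': 'dts'}
    #   -> name is set to "13/05/01 10:00~12:00" and passed on to create().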
_order = 'sequence, name DESC'
delivery_time()
class delivery_carrier(osv.osv):
_name = "delivery.carrier"
_inherit = "delivery.carrier"
_columns = {
'driver_ids' : fields.one2many('delivery.driver','carrier_id','Delivery Drivers'),
}
delivery_carrier()
class delivery_route_tmp(osv.osv):
_name = 'delivery.route_tmp'
_columns = {
'name': fields.char('Reference', size=64, required=False),
'dts_id': fields.many2one('delivery.time','Delivery Time', select=True, domain=[('type','=','dts')]),
'route_id': fields.many2one('delivery.route','Route', required=False),
}
delivery_route_tmp()
class delivery_route(osv.osv):
_name='delivery.route'
def _init_name(self, cr, uid, context=None):
context = context or {}
dts_id = context.get('force_dts_id_kanban', False) or False
if dts_id:
dts_pool = self.pool.get('delivery.time')
base_name = dts_pool.read(cr, uid, [dts_id], ['name'])[0]['name'].split()[0]
for idx in range(1,99):
name = base_name + str(idx).rjust(2, '0')
ids = self.search(cr, uid, [('name','ilike',name)]) or False
if not ids:
return name
return '/'
def name_get(self, cr, uid, ids, context=None):
context = context or {}
result = []
if isinstance(ids, int):
ids = [ids]
if context.get('force_dts_id_kanban', False):
for record in self.browse(cr, uid, ids, context=context):
name = ustr(record.name)
if record.driver_id:
name = ustr(record.driver_id.name)+' '+name[2:]
result.append((record.id, name.strip()))
else:
for record in self.browse(cr, uid, ids, context=context):
result.append((record.id, record.name.strip()))
return result
def create(self, cr, user, vals, context=None):
if ('name' not in vals) or (vals.get('name')=='/'):
seq_obj_name = 'delivery.route'
# SHOULD USE ir_sequence.next_by_code() or ir_sequence.next_by_id()
vals['name'] = self.pool.get('ir.sequence').get(cr, user, seq_obj_name)
new_id = super(delivery_route, self).create(cr, user, vals, context)
return new_id
_columns = {
'name': fields.char('Reference', size=64, required=True, select=True, readonly=True, states={'draft': [('readonly', False)]}),
'date': fields.date('Date', required=False, select=True, readonly=True, states={'draft': [('readonly', False)]}),
'dts_id': fields.many2one('delivery.time','Delivery Time', select=True, domain=[('type','=','dts')], readonly=True, states={'draft': [('readonly', False)]}),
'driver_id': fields.many2one('delivery.driver','Delivery Driver', required=False, domain=[('is_driver','=',True)], readonly=True, states={'draft': [('readonly', False)]}),
'picker_id': fields.many2one('delivery.driver','Delivery Deliver', required=False, domain=[('is_picker','=',True)], readonly=True, states={'draft': [('readonly', False)]}),
'state': fields.selection([
('draft','Draft'),
('confirm','Confirm'),
('departure','Departure'),
('done', 'Done'),
('cancel','Cancel')],'State',readonly=True),
'line_ids': fields.one2many('delivery.route.line','route_id','Lines', required=True, readonly=False, states={'done': [('readonly', True)]}),
'departure_date': fields.datetime('Departure Date', readonly=False, states={'done': [('readonly', True)]}),
'arrive_date': fields.datetime('Arrive Date', readonly=False, states={'done': [('readonly', True)]}),
'confirm_cs': fields.boolean('Confirmed by CS'),
}
_defaults = {
'state': 'draft',
'name': lambda self, cr, uid, context: self._init_name(cr, uid, context=context),
'dts_id': lambda self, cr, uid, context: context.get('force_dts_id_kanban', False) or False,
}
def action_draft(self, cr, uid, ids, context=None):
line_obj = self.pool.get('delivery.route.line')
for route in self.browse(cr,uid,ids,context=context):
for line in route.line_ids:
line_obj.action_draft(cr,uid,[line.id],context=context)
self.write(cr, uid, ids, {'state': 'draft'}, context=context)
return True
def action_confirm(self, cr, uid, ids, context=None):
line_obj = self.pool.get('delivery.route.line')
for route in self.browse(cr,uid,ids,context=context):
for line in route.line_ids:
line_obj.action_confirm(cr,uid,[line.id],context=context)
self.write(cr, uid, ids, {'state': 'confirm'}, context=context)
return True
def action_departure(self, cr, uid, ids, context=None):
line_obj = self.pool.get('delivery.route.line')
for route in self.browse(cr,uid,ids,context=context):
if not route.confirm_cs:
raise osv.except_osv(_('Error'), _("Before departure, routes need to be confirmed by the Customer Service."))
for line in route.line_ids:
line_obj.action_delivered(cr,uid,[line.id],context=context)
self.write(cr, uid, ids, {'state': 'departure','departure_date':time.strftime('%Y-%m-%d %H:%M:%S')}, context=context)
return True
def action_arrive(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'arrive'}, context=context)
return True
def action_done_cs(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'confirm_cs': True}, context=context)
return True
def action_done(self, cr, uid, ids, context=None):
line_obj = self.pool.get('delivery.route.line')
for route in self.browse(cr,uid,ids,context=context):
for line in route.line_ids:
                if line.state in ('draft','confirm','delivered'):
raise osv.except_osv(_('Error'), _("All the lines of delivery route must be delivered or returned."))
self.write(cr, uid, ids, {'state': 'done'}, context=context)
return True
def action_cancel(self, cr, uid, ids, context=None):
line_obj = self.pool.get('delivery.route.line')
for route in self.browse(cr,uid,ids,context=context):
for line in route.line_ids:
line_obj.action_cancel(cr,uid,[line.id],context=context)
self.write(cr, uid, ids, {'state': 'cancel','confirm_cs':False}, context=context)
return True
def _read_group_driver_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
context = context or {}
driver_obj = self.pool.get('delivery.driver')
args = [('is_driver', '=', True)]
driver_ids = driver_obj.search(cr, uid, args, context=context)
result = driver_obj.name_get(cr, uid, driver_ids, context=context)
fold = {}
return result, fold
def _read_group_picker_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
context = context or {}
driver_obj = self.pool.get('delivery.driver')
args = [('is_picker', '=', True)]
driver_ids = driver_obj.search(cr, uid, args, context=context)
result = driver_obj.name_get(cr, uid, driver_ids, context=context)
fold = {}
return result, fold
_group_by_full = {
'driver_id': _read_group_driver_ids,
'picker_id': _read_group_picker_ids,
}
_order = 'date DESC, name'
delivery_route()
class delivery_route_line(osv.osv):
_name='delivery.route.line'
def _get_drivers(self, cr, uid, ids, fields, args, context=None):
result = {}
for route in self.browse(cr, uid, ids):
res = {}
if route.route_id:
res['picker'] = route.route_id.picker_id and route.route_id.picker_id.name or " "
res['driver'] = route.route_id.driver_id and route.route_id.driver_id.name or " "
else:
res['picker'] = " "
res['driver'] = " "
result[route.id] = res
return result
def _get_origin(self, cr, uid, ids, fields, args, context=None):
result = {}
for route in self.browse(cr, uid, ids):
res = {}
res['origin'] = route.picking_id.origin or route.picking_id.name or ""
res['sale_order_id'] = route.picking_id.sale_id and route.picking_id.sale_id.id or False
res['purchase_id'] = route.picking_id.purchase_id and route.picking_id.purchase_id.id or False
res['address_id'] = route.picking_id.partner_id and route.picking_id.partner_id.id or False
res['so_payment_method'] = route.picking_id.sale_id and route.picking_id.sale_id.so_payment_method or False
res['picking_note'] = route.picking_id.note or " "
result[route.id] = res
return result
def _get_box_type(self, cr, uid, ids, fields, args, context=None):
res = {}
for route in self.browse(cr, uid, ids):
box_type = ''
iced = False
warm = False
other = False
pack_set = set([move.product_id.deliver_in for move in route.picking_id.move_lines ])
for pack in pack_set:
                if pack in ['warm', 'iced', 'iced_n_warm']:
                    if pack in ['iced', 'iced_n_warm'] and not iced:
box_type += '冷, '
iced = True
if pack in ['warm', 'iced_n_warm'] and not warm:
box_type += '热, '
warm = True
else:
if not other:
box_type += '正常, '
other = True
if box_type:
box_type = box_type[:-2]
res[route.id] = box_type
return res
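    # Editor's note: the box-type labels above are Chinese: '冷' = cold chain,
    # '热' = hot, '正常' = normal/ambient temperature; the method summarizes
    # which kinds of delivery boxes a picking needs based on each product's
    # deliver_in flag.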
def _route_to_update_after_picking_change(self, cr, uid, ids, fields=None, arg=None, context=None):
if type(ids) != type([]):
ids = [ids]
return self.pool.get('delivery.route.line').search(cr, uid, [('picking_id','in',ids)]) or []
def _route_to_update_after_parent_change(self, cr, uid, ids, fields=None, arg=None, context=None):
if type(ids) != type([]):
ids = [ids]
return self.pool.get('delivery.route.line').search(cr, uid, [('route_id','in',ids)]) or []
_store_origin = {
'delivery.route.line': (lambda self,cr,uid,ids,context: ids,['picking_id'],10),
'stock.picking': (_route_to_update_after_picking_change, ['sale_id','purchase_id','origin','note','so_payment_method','partner_id'], 10),
}
_store_drivers = {
'delivery.route.line': (lambda self,cr,uid,ids,context: ids,['route_id'],10),
'delivery.route': (_route_to_update_after_parent_change, ['picker_id','driver_id'], 10),
}
_columns = {
'sequence': fields.integer('Sequence'),
'route_id': fields.many2one('delivery.route','Delivery Route', required=False, readonly=True, states={'draft': [('readonly', False)]}, ondelete="cascade"),
'picking_id': fields.many2one('stock.picking','Picking', required=True, select=True, readonly=True, states={'draft': [('readonly', False)]}),
'purchase_id': fields.function(_get_origin, type='many2one', obj='purchase.order', store=_store_origin, multi="origin", string='Purchase Order'),
'sale_order_id': fields.function(_get_origin, type='many2one', obj='sale.order', store=_store_origin, multi="origin", string='Sale Order'),
'origin': fields.function(_get_origin, type='char', size=256, store=_store_origin, multi="origin", string='Origin'),
'confirm_cs': fields.related('route_id','confirm_cs',type='boolean',string='Confirmed by CS'),
'address_id': fields.function(_get_origin,type='many2one',relation='res.partner', multi="origin", string='Delivery Address'),
'street': fields.related('address_id', 'street', type='char', size=256, string='Street'),
'partner_phone': fields.related('address_id', 'phone', type='char', size=128, string='Partner Phone', readonly=True),
'picker': fields.function(_get_drivers, type='char', size=128, store=_store_drivers, multi="drivers", string='Clerk'),
'driver': fields.function(_get_drivers, type='char', size=128, store=_store_drivers, multi="drivers", string='Driver'),
'driver_phone': fields.related('route_id', 'driver_id', 'employee_id', 'mobile_phone', type='char', size=128, string='Driver Phone'),
'so_payment_method': fields.function(_get_origin, type='char', size=128, multi="origin", string='Payment Method'),
'picking_note': fields.function(_get_origin, type='html', multi="origin", string='DO Notes'),
'box_type': fields.function(_get_box_type, type='char', size=32, store=False, string='Box Type'),
'state': fields.selection([('draft','Draft'), ('confirm','Confirm'), ('delivered','In delivery'), ('received','Delivered'), ('returned','Returned'), ('cancel','Cancel')],'State',readonly=True),
'visit_date': fields.datetime('Visit Date',states={'delivered': [('required', True)], 'received':[('readonly',True)], 'returned':[('readonly',True)],}),
'note': fields.text('Notes'),
'color': fields.integer('Color Index'),
'exceptions': fields.boolean('Received with exceptions'),
'complete_state': fields.selection([("not_planned", _("Not planned")), ("planned", _("Planned")), ("in_del", _("In delivery")), ("del_ok", _("Delivered")), ("del_ex", _("Exception")), ("del_rt", _("Returned")), ("del_rt_exp", _("No redelivery")), ("cancel", _("Cancel"))], 'Delivery State'),
}
_defaults = {
'state': 'draft',
'complete_state': 'not_planned',
}
_order = 'sequence'
def _read_group_route_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
context = context or {}
route_obj = self.pool.get('delivery.route')
args = [('state', '=', 'draft')]
if 'force_dts_id_kanban' in context:
args.append(('dts_id', '=', context['force_dts_id_kanban']))
route_ids = route_obj.search(cr, uid, args, order='name', context=context)
result = route_obj.name_get(cr, uid, route_ids, context=context)
fold = {}
return result, fold
def unlink(self, cr, uid, ids, context=None):
for o in self.browse(cr, uid, ids, context=context):
if o.state not in ('draft', 'cancel'):
raise osv.except_osv(_('Invalid action !'), _('Cannot delete Delivery Route Line(s) which are already received, returned or delivered !'))
return super(delivery_route_line, self).unlink(cr, uid, ids, context=context)
def action_draft(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'draft','delivery_state':'not_planned'}, context=context)
return True
def action_received_do_line(self, cr, uid, line, context=None):
self.pool.get('stock.picking').write(cr,uid,[line.picking_id.id],{'delivered':True,'delivery_state':'del_ok'}, context=context)
self.notify_related_order(cr, uid, line, 'The Order has been <b>Delivered</b>', context)
return True
def action_received_exp_do_line(self, cr, uid, line, context=None):
self.pool.get('stock.picking').write(cr,uid,[line.picking_id.id],{'delivered':True,'delivery_state':'del_ex'}, context=context)
self.notify_related_order(cr, uid, line, 'The Order has been <b>Delivered with exceptions</b>', context)
return True
def action_delivered_do_line(self, cr, uid, line, context=None):
delivered_cpt = line.picking_id.delivered_cpt + 1
self.pool.get('stock.picking').write(cr,uid,[line.picking_id.id],{'delivered_cpt':delivered_cpt,'delivery_state':'in_del'}, context=context)
self.notify_related_order(cr, uid, line, 'The Order is <b>in Delivery</b>', context)
return True
def action_returned_do_line(self, cr, uid, line, context=None):
        context = context or {}
context.update({'set_dts': False})
self.pool.get('stock.picking').write(cr,uid,[line.picking_id.id],{'delivery_state':'del_rt'}, context=context)
#self.copy(cr, uid, line.id, {'dts_id':False,'note': 'Re-delivery for ' + str(line.origin),'route_id':False,'return_reasons':[],'exceptions':False,'state':'draft','complete_state':'not_planned','visit_date':False,'color':0}, context=context)
self.create(cr, uid, {'dts_id':False,
'note': 'Re-delivery for ' + str(line.origin),
'route_id':False,
'return_reasons':[],
'exceptions': False,
'color':0,
'picking_id':line.picking_id and line.picking_id.id,}, context=context)
self.notify_related_order(cr, uid, line, 'The Order has been <b>Returned (Redelivery)</b>', context)
return True
def action_returned_exp_do_line(self, cr, uid, line, context=None):
self.pool.get('stock.picking').write(cr,uid,[line.picking_id.id],{'delivered':True,'delivery_state':'del_rt_exp'}, context=context)
self.notify_related_order(cr, uid, line, 'The Order has been <b>Returned (No Redelivery)</b>', context)
return True
def action_delivered(self, cr, uid, ids, context=None):
picking_obj = self.pool.get('stock.picking')
for line in self.browse(cr,uid,ids,context=context):
self.action_delivered_do_line(cr, uid, line, context=context)
self.write(cr, uid, ids, {'complete_state':'in_del', 'state': 'delivered'}, context=context)
return True
def action_received(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
self.action_received_do_line(cr, uid, line, context=context)
self.write(cr, uid, ids, {'complete_state':'del_ok', 'state': 'received','visit_date': datetime.now().strftime('%Y-%m-%d %H:%M:%S')}, context=context)
return True
def action_received_exp(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
self.action_received_exp_do_line(cr, uid, line, context=context)
self.write(cr, uid, ids, {'complete_state':'del_ex', 'state': 'received', 'exceptions': True,'visit_date': datetime.now().strftime('%Y-%m-%d %H:%M:%S')}, context=context)
return True
def action_returned(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
self.action_returned_do_line(cr, uid, line, context=context)
self.write(cr, uid, ids, {'complete_state':'del_rt', 'state': 'returned','visit_date': datetime.now().strftime('%Y-%m-%d %H:%M:%S')}, context=context)
return True
def action_returned_exp(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
self.action_returned_exp_do_line(cr, uid, line, context=context)
self.write(cr, uid, ids, {'complete_state':'del_rt_exp', 'state': 'returned', 'exceptions': True,'visit_date': datetime.now().strftime('%Y-%m-%d %H:%M:%S')}, context=context)
return True
def action_cancel_do_line(self, cr, uid, line, context=None):
delivered_cpt = line.picking_id.delivered_cpt - 1
if delivered_cpt < 0:
delivered_cpt = 0
self.pool.get('stock.picking').write(cr, uid, line.picking_id.id,{'delivered':False, 'delivered_cpt':delivered_cpt, 'delivery_state':'not_planned'},context=context)
self.notify_related_order(cr, uid, line, 'The Delivery has been <b>Canceled</b>', context)
return True
def action_cancel(self, cr, uid, ids, context=None):
for line in self.browse(cr,uid,ids,context=context):
self.action_cancel_do_line(cr, uid, line, context=context)
self.write(cr, uid, ids, {'state': 'cancel', 'complete_state':'cancel', 'exceptions': False}, context=context)
return True
def action_confirm_do_line(self, cr, uid, line, context=None):
self.pool.get('stock.picking').write(cr, uid, line.picking_id.id,{'delivery_state':'planned'},context=context)
self.notify_related_order(cr, uid, line, 'The Delivery has been <b>Planned</b>', context)
return True
def action_confirm(self, cr, uid, ids, context=None):
for line in self.browse(cr,uid,ids,context=context):
if line.picking_id.delivered:
                raise osv.except_osv(_('Error'), _('The picking %s (origin:%s) was delivered in another delivery route'%(line.picking_id.name,line.picking_id.origin)))
# if line.picking_id.type == 'out' and line.picking_id.state not in ('done'):
# raise osv.except_osv(_('Error'), _('The picking %s (origin:%s) must be in done state'%(line.picking_id.name,line.picking_id.origin)))
self.action_confirm_do_line(cr, uid, line, context=context)
self.write(cr, uid, ids, {'complete_state':'planned', 'state': 'confirm'}, context=context)
return True
def notify_related_order(self, cr, uid, line, delivery_state, context=None):
res_id = False
model = False
if line.sale_order_id:
res_id = line.sale_order_id.id
model = 'sale.order'
elif line.purchase_id:
res_id = line.purchase_id.id
model = 'purchase.order'
if res_id and model:
drivers = ''
body = str(delivery_state)
if line.visit_date:
body += " at " + str(line.visit_date)
body += "<br />"
if line.route_id.name:
body += "<b>Route</b>: " + str(line.route_id.name) + "<br />"
if line.route_id.driver_id:
drivers += str(line.route_id.driver_id.name.encode('utf-8'))
if line.route_id.driver_id.employee_id and line.route_id.driver_id.employee_id.mobile_phone:
drivers += " (" + str(line.route_id.driver_id.employee_id.mobile_phone) + ")"
if line.route_id.picker_id:
if drivers:
drivers += ' & '
drivers += str(line.route_id.picker_id.name.encode('utf-8'))
if drivers:
                body += "(by: " + drivers + ")"
self.pool.get('mail.message').create(cr, uid, {
'type': 'notification',
'record_name': 'Delivery Route Line',
'body': body,
'res_id': res_id,
'model': model,
})
return True
_group_by_full = {
'route_id': _read_group_route_ids,
}
delivery_route_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jmesteve/openerp
|
openerp/addons/delivery_routes/delivery.py
|
Python
|
agpl-3.0
| 33,481
|
[
"VisIt"
] |
9b6ae73f4f557488d7c543e8aaf569654502c7fbeb43d98bd7282c7368889e30
|
"""
Fits binary source model using EMCEE sampler.
The code simulates binary source light curve and fits the model twice:
with source flux ratio found via linear regression and
with source flux ratio as a chain parameter.
"""
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
try:
import emcee
except ImportError as err:
print(err)
print("\nEMCEE could not be imported.")
print("Get it from: http://dfm.io/emcee/current/user/install/")
print("and re-run the script")
sys.exit(1)
import MulensModel as mm
# Fix the seed for the random number generator so the behavior is reproducible.
np.random.seed(12343)
# Define likelihood functions
def ln_like(theta, event, parameters_to_fit):
""" likelihood function """
for (param, theta_) in zip(parameters_to_fit, theta):
# Here we handle fixing source flux ratio:
if param == 'flux_ratio':
# implemented for a single dataset
event.fix_source_flux_ratio = {my_dataset: theta_}
else:
setattr(event.model.parameters, param, theta_)
return -0.5 * event.get_chi2()
def ln_prior(theta, parameters_to_fit):
"""priors - we only reject obviously wrong models"""
for param in ['t_E', 'u_0_1', 'u_0_2']:
if param in parameters_to_fit:
if theta[parameters_to_fit.index(param)] < 0.:
return -np.inf
return 0.0
def ln_prob(theta, event, parameters_to_fit):
""" combines likelihood and priors"""
ln_prior_ = ln_prior(theta, parameters_to_fit)
if not np.isfinite(ln_prior_):
return -np.inf
ln_like_ = ln_like(theta, event, parameters_to_fit)
    # In the cases that source fluxes are negative we want to return
    # -inf, just as if the point had been rejected by the priors.
if np.isnan(ln_like_):
return -np.inf
return ln_prior_ + ln_like_
def fit_EMCEE(parameters_to_fit, starting_params, sigmas, ln_prob, event,
n_walkers=20, n_steps=3000, n_burn=1500):
"""
Fit model using EMCEE and print results.
Arguments:
parameters_to_fit - list of parameters
starting_params - dict that specifies values of these parameters
sigmas - list of sigma values used to find starting values
ln_prob - function returning logarithm of probability
event - MulensModel.Event instance
n_walkers - number of walkers in EMCEE
n_steps - number of steps per walker
n_burn - number of steps considered as burn-in ( < n_steps)
"""
n_dim = len(parameters_to_fit)
mean = [starting_params[p] for p in parameters_to_fit]
start = [mean + np.random.randn(n_dim) * sigmas for i in range(n_walkers)]
# Run emcee (this can take some time):
sampler = emcee.EnsembleSampler(
n_walkers, n_dim, ln_prob, args=(event, parameters_to_fit))
sampler.run_mcmc(start, n_steps)
# Remove burn-in samples and reshape:
samples = sampler.chain[:, n_burn:, :].reshape((-1, n_dim))
# Results:
results = np.percentile(samples, [16, 50, 84], axis=0)
print("Fitted parameters:")
for i in range(n_dim):
r = results[1, i]
msg = parameters_to_fit[i] + ": {:.5f} +{:.5f} -{:.5f}"
print(msg.format(r, results[2, i]-r, r-results[0, i]))
# We extract best model parameters and chi2 from event:
prob = sampler.lnprobability[:, n_burn:].reshape((-1))
best_index = np.argmax(prob)
best = samples[best_index, :]
for key, val in enumerate(parameters_to_fit):
if val == 'flux_ratio':
event.fix_source_flux_ratio = {my_dataset: best[key]}
else:
setattr(event.model.parameters, val, best[key])
print("\nSmallest chi2 model:")
print(*[repr(b) if isinstance(b, float) else b.value for b in best])
print("chi2 = ", event.get_chi2())
# First, prepare the data. There is nothing very exciting in this part,
# so you may skip it.
t_0_1 = 6100.
u_0_1 = 0.2
t_0_2 = 6140.
u_0_2 = 0.01
t_E = 25.
assumed_flux_1 = 100.
assumed_flux_2 = 5.
assumed_flux_blend = 10.
n_a = 1000
n_b = 600
time_a = np.linspace(6000., 6300., n_a)
time_b = np.linspace(6139., 6141., n_b)
time = np.sort(np.concatenate((time_a, time_b)))
model_1 = mm.Model({'t_0': t_0_1, 'u_0': u_0_1, 't_E': t_E})
A_1 = model_1.get_magnification(time)
model_2 = mm.Model({'t_0': t_0_2, 'u_0': u_0_2, 't_E': t_E})
A_2 = model_2.get_magnification(time)
flux = A_1 * assumed_flux_1 + A_2 * assumed_flux_2 + assumed_flux_blend
flux_err = 6. + 0. * time
flux += flux_err * np.random.normal(size=n_a+n_b)
my_dataset = mm.MulensData([time, flux, flux_err], phot_fmt='flux')
# If you want to plot, then just uncomment:
# plt.plot(time, flux, 'ro')
# plt.show()
# Starting parameters:
params = {'t_0_1': 6101., 'u_0_1': 0.19, 't_0_2': 6140.123, 'u_0_2': 0.04,
't_E': 20.}
my_model = mm.Model(params)
my_event = mm.Event(datasets=my_dataset, model=my_model)
# First fit - source flux ratio not set, hence found by regression:
parameters_to_fit = ["t_0_1", "u_0_1", "t_0_2", "u_0_2", "t_E"]
sigmas = [0.1, 0.05, 1., 0.01, 1.]
print("\nFirst fit. This can take some time...")
fit_EMCEE(parameters_to_fit, params, sigmas, ln_prob, my_event)
# Starting parameters for second fit:
params = {'t_0_1': 6101., 'u_0_1': 0.19, 't_0_2': 6140.123, 'u_0_2': 0.04,
't_E': 25.987}
my_model = mm.Model(params)
my_event = mm.Event(datasets=my_dataset, model=my_model)
params['flux_ratio'] = 0.02
# Second fit - source flux ratio as one of the chain parameters:
parameters_to_fit = ["t_0_1", "u_0_1", "t_0_2", "u_0_2", "t_E", "flux_ratio"]
sigmas = [0.1, 0.05, 1., 0.01, 1., 0.001]
print("\nSecond fit. This can take some time...")
fit_EMCEE(parameters_to_fit, params, sigmas, ln_prob, my_event)
|
rpoleski/MulensModel
|
examples/example_11_binary_source.py
|
Python
|
mit
| 5,751
|
[
"exciting"
] |
2bbdbeb8c3b08d14216898a8e150d3007ee14586a22e900d680d77c27ce079dc
|
#!/usr/bin/env python
from ase.visualize import view
from ase.lattice.surface import fcc111, add_adsorbate
from gpaw import GPAW
from gpaw.mixer import MixerSum
from gpaw import dscf
filename='homo'
#-------------------------------------------
c_mol = GPAW(nbands=9, h=0.2, xc='RPBE', kpts=(8,6,1),
spinpol=True,
convergence={'energy': 100,
'density': 100,
'eigenstates': 1.0e-9,
'bands': 'occupied'}, txt='CO_homo.txt')
calc = GPAW(nbands=45, h=0.2, xc='RPBE', kpts=(8,6,1),
eigensolver='cg',
spinpol=True,
mixer=MixerSum(nmaxold=5, beta=0.1, weight=100),
convergence={'energy': 100,
'density': 100,
'eigenstates': 1.0e-7,
'bands': -10}, txt=filename+'.txt')
#----------------------------------------
# Import Slab with relaxed CO
#slab = ('gs.gpw').get_atoms()
slab = fcc111('Pt', size=(1, 2, 3), orthogonal=True)
add_adsorbate(slab, 'C', 2.0, 'ontop')
add_adsorbate(slab, 'O', 3.15, 'ontop')
slab.center(axis=2, vacuum=4.0)
view(slab)
molecule = slab.copy()
del molecule [:-2]
# Molecule
#----------------
molecule.set_calculator(c_mol)
molecule.get_potential_energy()
#Homo wavefunction
wf_u = [kpt.psit_nG[4] for kpt in c_mol.wfs.kpt_u]
#Homo projector overlaps
mol = range(len(slab))[-2:]
p_uai = [dict([(mol[a], P_ni[4]) for a, P_ni in kpt.P_ani.items()])
for kpt in c_mol.wfs.kpt_u]
# Slab with adsorbed molecule
#-----------------------------------
slab.set_calculator(calc)
orbital = dscf.AEOrbital(calc, wf_u, p_uai, Estart=-100.0, Eend=0.0)
dscf.dscf_calculation(calc, [[-1.0, orbital, 1]], slab)
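# A note on the call above (my reading of the dscf convention, not stated in
# the original script): each entry is [occupation_change, orbital, spin], so
# [[-1.0, orbital, 1]] presumably removes one electron from the CO-HOMO-derived
# AEOrbital in spin channel 1, which is what makes this a constrained (dSCF) run.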
slab.get_potential_energy()
|
qsnake/gpaw
|
doc/documentation/dscf/homo.py
|
Python
|
gpl-3.0
| 1,793
|
[
"ASE",
"GPAW"
] |
3ededd1ef7903059f795d85a272dd68fb291587bbbbdc7ea88791503959ee9f3
|
#!/usr/bin/env python
# Written by Daniel Barshis, June 2011
import sys
#sys.argv[1] Input file is a list of contig names in a single column with ContigName as the column header
#sys.argv[2] Output file name
#sys.argv[3:] Any number of files to add columns from
#makes a dictionary of a file where the first column is a list of contignames and subsequent columns with any associated information (i.e. counts, blast hits, etc.)
def make_dict1(file):
fin = open(file, 'r')
dict={}
headers=[]
count=0
for line in fin:
count+=1
line=line.rstrip()
cols=line.split('\t') #for tab-delimited text files
if count==1:
headers=cols[0:]
#count+=1
if count > 1:
dict[cols[0]]=cols[1:]
return dict, headers
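# Illustrative sketch of what make_dict1() returns (hypothetical file, not
# part of the original pipeline): for a tab-delimited table
#   ContigName   SampleA   SampleB
#   contig1      10        3
#   contig2      0         7
# it returns
#   ({'contig1': ['10', '3'], 'contig2': ['0', '7']},
#    ['ContigName', 'SampleA', 'SampleB'])
# i.e. values stay as strings and the header row is returned separately.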
dictbase, dictheaders=make_dict1(sys.argv[1]) #Input data table
xpressionfiles=sys.argv[3:] #Any number of expression files
#Loops through a list of secondary files that you would like to add information from
#the purpose would be to combine information from multiple files into one big "meta" table
xpressfiles=[]
xtrahits=[]
for file in xpressionfiles:
Xvalues, Xheaders=make_dict1(file)
xpressfiles.append(file+'_'+str(Xheaders[1])) # used to denote the column that you're extracting
xtracount=0
# For adding info when Xvalues is missing values that are in your dictbase
# for item in dictbase.keys():
# if Xvalues.has_key(item):
# # dictbase[item]+=Xvalues[item] #appends a list of strings
# dictbase[item].append(Xvalues[item][2])
# else:
# xtracount+=1
# dictbase[item].append('No Sig Blast Hit')
# dictbase[item].append('\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % ('NA','NA','NA','NA','NA','NA','NA',))
# For adding info when Xvalues has all the values in your dictbase or more
for item in Xvalues.keys():
if dictbase.has_key(item):
# dictbase[item]+=Xvalues[item] #appends a list of strings
dictbase[item].append(Xvalues[item][0]) #appends a single column of data as a string appends quality, single counts
else:
xtracount+=1
xtrahits.append(file+'='+str(xtracount))
# print dictbase
o=open(str(sys.argv[2]), 'w') # New data table file name
#o.write('\t'.join(dictheaders)+'\t'+'\t'.join(Xheaders)+'\n') #used if you want all new headers from one single file
o.write('\t'.join(dictheaders)+'\t'+'\t'.join(xpressfiles)+'\n') #used if you want a specific header and filename for each file
print 'Hits not matched' + '\t' + '\t'.join(xtrahits)
l=[]
for key,value in dictbase.items():
l.append((key,value)) #translates dictbase into a tuple and adds just the contig # (minus"contig") as the first item
l.sort() #sorts tuple into numeric order
for item in l:
# print item
o.write(str(item[0])+'\t'+'\t'.join(item[1])+'\n') #writes each line of the tuple as separate tab delimited text
o.close()
|
cuttlefishh/papers
|
vibrio-fischeri-transcriptomics/code/python/ParseExpression2BigTable_counts.py
|
Python
|
mit
| 2,760
|
[
"BLAST"
] |
b307d3daa3522d0fe8d3a9c87dc9a886296f1478bf70ad348b65f0b8a72dbcd7
|
import numpy as np
import os
try:
import netCDF4 as netCDF
except:
import netCDF3 as netCDF
import matplotlib.pyplot as plt
import time
from datetime import datetime
from matplotlib.dates import date2num, num2date
import pyroms
import pyroms_toolbox
import _remapping
class nctime(object):
pass
def remap_bio_glodap(argdict, src_grd, dst_grd, dmax=0, cdepth=0, kk=0, dst_dir='./'):
# NWGOA3 grid sub-sample
xrange=src_grd.xrange; yrange=src_grd.yrange
src_varname = argdict['tracer']
tracer = src_varname
src_file = argdict['file']
units = argdict['units']
longname = argdict['longname']
nframe = argdict['nframe']
# get time
nctime.long_name = 'time'
nctime.units = 'days since 1900-01-01 00:00:00'
# create clim file
dst_file = tracer + '.nc'
dst_file = dst_dir + dst_grd.name + '_ic_bio_' + dst_file
print 'Creating clim file', dst_file
if os.path.exists(dst_file) is True:
os.remove(dst_file)
pyroms_toolbox.nc_create_roms_file(dst_file, dst_grd, nctime)
# open clim file
nc = netCDF.Dataset(dst_file, 'a', format='NETCDF3_64BIT')
#load var
cdf = netCDF.Dataset(src_file)
src_var = cdf.variables[src_varname]
tmp = cdf.variables['time'][nframe]
#if len(tmp) > 1:
# print 'error : multiple frames in input file' ; exit()
#else:
# time = tmp[0]
# to be in sync with physics, add +0.5 day
#time = time + 0.5
# time will be given by physics anyway
time = 0.
#get missing value
spval = src_var._FillValue
spval2 = -1.0e+10
# determine variable dimension
ndim = len(src_var.dimensions) - 1
# NWGOA3 grid sub-sample
if ndim == 3:
src_var = src_var[0,:, yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
elif ndim == 2:
src_var = src_var[0,yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
if tracer == 'alk':
unit_conversion = 1. / 1e6
elif tracer == 'dic':
unit_conversion = 1. / 1e6
src_var = src_var * unit_conversion
Bpos = 't'
Cpos = 'rho'
z = src_grd.z_t
Mp, Lp = dst_grd.hgrid.mask_rho.shape
wts_file = 'remap_weights_ESM2M_to_NWGOA3_bilinear_t_to_rho.nc'
dst_varname = tracer
dimensions = ('ocean_time', 's_rho', 'eta_rho', 'xi_rho')
long_name = longname
field = tracer + ', scalar, series'
units = units
if ndim == 3:
# build intermediate zgrid
zlevel = -z[::-1]
nzlevel = len(zlevel)
dst_zcoord = pyroms.vgrid.z_coordinate(dst_grd.vgrid.h, zlevel, nzlevel)
dst_grdz = pyroms.grid.ROMS_Grid(dst_grd.name+'_Z', dst_grd.hgrid, dst_zcoord)
# create variable in file
print 'Creating variable', dst_varname
nc.createVariable(dst_varname, 'f8', dimensions, fill_value=spval2)
nc.variables[dst_varname].long_name = long_name
nc.variables[dst_varname].units = units
nc.variables[dst_varname].field = field
#nc.variables[dst_varname_north]._FillValue = spval
# remapping
print 'remapping', dst_varname, 'from', src_grd.name, \
'to', dst_grd.name
if ndim == 3:
# flood the grid
print 'flood the grid'
src_varz = pyroms_toolbox.BGrid_GFDL.flood(src_var, src_grd, Bpos=Bpos, spval=spval, \
dmax=dmax, cdepth=cdepth, kk=kk)
else:
src_varz = src_var
# horizontal interpolation using scrip weights
print 'horizontal interpolation using scrip weights'
dst_varz = pyroms.remapping.remap(src_varz, wts_file, spval=spval)
if ndim == 3:
# vertical interpolation from standard z level to sigma
print 'vertical interpolation from standard z level to sigma'
dst_var = pyroms.remapping.z2roms(dst_varz[::-1,:,:], dst_grdz, \
dst_grd, Cpos=Cpos, spval=spval, flood=False)
else:
dst_var = dst_varz
if ndim == 3:
for kz in np.arange(dst_grd.vgrid.N):
tmp = dst_var[kz,:,:].copy()
tmp[np.where(dst_grd.hgrid.mask_rho == 0)] = spval2
dst_var[kz,:,:] = tmp.copy()
# write data in destination file
print 'write data in destination file\n'
nc.variables['ocean_time'][0] = time
nc.variables[dst_varname][0] = dst_var
# close file
nc.close()
cdf.close()
if src_varname == 'eta':
return dst_varz
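# Rough usage sketch (keys inferred from how argdict is unpacked above; the
# file name and values are placeholders, and src_grd/dst_grd are assumed to be
# the usual pyroms source/destination grid objects built by the driver script):
#   argdict = {'tracer': 'alk', 'file': 'GLODAP_annual.nc', 'units': 'mol/kg',
#              'longname': 'Alkalinity', 'nframe': 0}
#   remap_bio_glodap(argdict, src_grd, dst_grd, dst_dir='./')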
|
kshedstrom/pyroms
|
examples/cobalt-preproc/Initial_bio/remap_bio_glodap.py
|
Python
|
bsd-3-clause
| 4,385
|
[
"NetCDF"
] |
0f4daa6e85c51a2006e255ae7436c6c8b10e3bc695686da9a07829de44eecae7
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_log import log as logging
# For more information please visit: https://wiki.openstack.org/wiki/TaskFlow
from taskflow.listeners import base
from taskflow.listeners import logging as logging_listener
from taskflow import task
from cinder import exception
LOG = logging.getLogger(__name__)
def _make_task_name(cls, addons=None):
"""Makes a pretty name for a task class."""
base_name = ".".join([cls.__module__, cls.__name__])
extra = ''
if addons:
extra = ';%s' % (", ".join([str(a) for a in addons]))
return base_name + extra
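# For example, a task class ``MyTask`` defined in module ``cinder.flows.demo``
# with addons=['volume:create'] would be named
# "cinder.flows.demo.MyTask;volume:create" (module path + class name, then the
# comma-joined addons after a semicolon). The module and class names here are
# made up purely for illustration.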
class CinderTask(task.Task):
"""The root task class for all cinder tasks.
It automatically names the given task using the module and class that
implement the given task as the task name.
"""
def __init__(self, addons=None, **kwargs):
super(CinderTask, self).__init__(_make_task_name(self.__class__,
addons),
**kwargs)
class DynamicLogListener(logging_listener.DynamicLoggingListener):
"""This is used to attach to taskflow engines while they are running.
It provides a bunch of useful features that expose the actions happening
inside a taskflow engine, which can be useful for developers for debugging,
for operations folks for monitoring and tracking of the resource actions
and more...
"""
#: Exception is an excepted case, don't include traceback in log if fails.
_NO_TRACE_EXCEPTIONS = (exception.InvalidInput, exception.QuotaError)
def __init__(self, engine,
task_listen_for=base.DEFAULT_LISTEN_FOR,
flow_listen_for=base.DEFAULT_LISTEN_FOR,
retry_listen_for=base.DEFAULT_LISTEN_FOR,
logger=LOG):
super(DynamicLogListener, self).__init__(
engine,
task_listen_for=task_listen_for,
flow_listen_for=flow_listen_for,
retry_listen_for=retry_listen_for,
log=logger)
def _format_failure(self, fail):
if fail.check(*self._NO_TRACE_EXCEPTIONS) is not None:
exc_info = None
exc_details = '%s%s' % (os.linesep, fail.pformat(traceback=False))
return (exc_info, exc_details)
else:
return super(DynamicLogListener, self)._format_failure(fail)
|
julianwang/cinder
|
cinder/flow_utils.py
|
Python
|
apache-2.0
| 2,961
|
[
"VisIt"
] |
f11d3000ad593398460a0d707afcb7dd94bbc34227892d5ea2fa6f6415ceb899
|
import os
import sys
import time
import config
import numpy as np
from scipy import integrate
from scipy import special
from scipy.interpolate import interp1d, InterpolatedUnivariateSpline
import pylab as pl
from numba import jit
import timeit
#import fastcorr
from CosmologyFunctions import CosmologyFunctions
from mass_function import halo_bias_st, bias_mass_func_tinker, bias_mass_func_bocquet
from convert_NFW_RadMass import MfracToMvir, MvirToMRfrac, MfracToMfrac, MvirTomMRfrac, MfracTomMFrac, dlnMdensitydlnMcritOR200, HuKravtsov
from pressure_profiles import battaglia_profile_2d
from lensing_efficiency import Wkcom
__author__ = ("Vinu Vikraman <[email protected]>")
@jit(nopython=True)
def Wk_just_calling_from_lensing_efficiencyc(zl, chil, zsarr, chisarr, Ns, constk):
#zl = lens redshift
#chil = comoving distant to lens
#zsarr = redshift distribution of source
#angsarr = angular diameter distance
#Ns = Normalized redshift distribution of sources
al = 1. / (1. + zl)
Wk = constk * chil / al
gw = 0.0
for i, N in enumerate(Ns):
if chisarr[i] < chil:
continue
gw += ((chisarr[i] - chil) * N / chisarr[i])
gw *= (zsarr[1] - zsarr[0])
if gw <= 0:
gw = 0.
Wk = Wk * gw
return Wk
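# In effect this evaluates the standard lensing efficiency
#   W_kappa(z_l) = (3/2) Omega_m (H0/c)^2 (chi_l / a_l)
#                  * Integral dz_s N(z_s) (chi_s - chi_l) / chi_s,
# with the source integral done as a simple Riemann sum over the tabulated
# N(z_s) and clipped to zero when no sources lie behind the lens.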
@jit(nopython=True)
def integrate_kyhalo(ell, zarr, chiarr, dVdzdOm, marr, mf, BDarr, rhobarr, rho_crit_arr, bias, Darr, pk, zsarr, chisarr, Ns, dz, dlnm, omega_b0, omega_m0, cosmo_h, constk, consty, input_mvir):
'''
Eq. 3.1 Ma et al.
'''
cl1h = 0.0
cl2h = 0.0
jj = 0
for i, zi in enumerate(zarr):
zp = 1. + zi
#print zi, Wkcom(zi, chiarr[i], zsarr, angsarr, Ns, constk)
kl_yl_multi = Wkcom(zi, chiarr[i], zsarr, chisarr, Ns, constk) * consty / chiarr[i] / chiarr[i] / rhobarr[i]
mint = 0.0
mk2 = 0.0
my2 = 0.0
#for mi in marr:
for j in range(config.mspace):
mi = marr[jj]
kint = 0.0
yint = 0.0
if input_mvir:
Mvir, Rvir, M200, R200, rho_s, Rs = MvirToMRfrac(mi, zi, BDarr[i], rho_crit_arr[i], cosmo_h, frac=200.0)
else:
Mvir, Rvir, M200, R200, rho_s, Rs = MfracToMvir(mi, zi, BDarr[i], rho_crit_arr[i], cosmo_h, frac=200.0)
#Eq. 3.2 Ma et al
rp = np.linspace(0, config.kRmax*Rvir, config.kRspace)
for tr in rp:
if tr == 0:
continue
kint += (tr * tr * np.sin(ell * tr / chiarr[i]) / (ell * tr / chiarr[i]) * rho_s / (tr/Rs) / (1. + tr/Rs)**2.)
kint *= (4. * np.pi * (rp[1] - rp[0]))
#Eq. 3.3 Ma et al
xmax = config.yRmax * Rvir / Rs #Ma et al paper says that Eq. 3.3 convergence by r=5 rvir.
xp = np.linspace(0, xmax, config.yRspace)
ells = chiarr[i] / zp / Rs
for x in xp:
if x == 0:
continue
yint += (x * x * np.sin(ell * x / ells) / (ell * x / ells) * battaglia_profile_2d(x, 0., Rs, M200, R200, zi, rho_crit_arr[i], omega_b0, omega_m0, cosmo_h))
yint *= (4 * np.pi * Rs * (xp[1] - xp[0]) / ells / ells)
mint += (dlnm * mf[jj] * kint * yint)
mk2 += (dlnm * bias[jj] * mf[jj] * kint)
my2 += (dlnm * bias[jj] * mf[jj] * yint)
jj += 1
cl1h += (dVdzdOm[i] * kl_yl_multi * mint)
cl2h += (dVdzdOm[i] * pk[i] * Darr[i] * Darr[i] * kl_yl_multi * mk2 * my2)
cl1h *= dz
cl2h *= dz
cl = cl1h + cl2h
return cl1h, cl2h, cl
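# Schematically, the loops above assemble the halo-model kappa x y spectrum as
#   Cl_1h = Int dz dV/dz/dOmega Int dlnM dn/dlnM K_l(M,z) Y_l(M,z)
#   Cl_2h = Int dz dV/dz/dOmega P_lin(l/chi) D(z)^2
#           * [Int dlnM dn/dlnM b(M) K_l] * [Int dlnM dn/dlnM b(M) Y_l]
# where K_l and Y_l are the Fourier-transformed NFW and Battaglia pressure
# profiles built in the inner rp/xp loops (cf. Eqs. 3.1-3.3 of Ma et al.).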
@jit(nopython=True)
def integrate_kkhalo(ell, zarr, chiarr, dVdzdOm, marr, mf, BDarr, rhobarr, rho_crit_arr, bias, Darr, pk, zsarr, chisarr, Ns, dz, dlnm, omega_b0, omega_m0, cosmo_h, constk, consty, input_mvir):
'''
Eq. 3.1 Ma et al.
'''
cl1h = 0.0
cl2h = 0.0
jj = 0
for i, zi in enumerate(zarr):
zp = 1. + zi
#print zi, Wkcom(zi, chiarr[i], zsarr, angsarr, Ns, constk)
kl_multi = Wkcom(zi, chiarr[i], zsarr, chisarr, Ns, constk) / chiarr[i] / chiarr[i] / rhobarr[i]
mint = 0.0
mk2 = 0.0
#for mi in marr:
for j in range(config.mspace):
mi = marr[jj]
kint = 0.0
if input_mvir:
Mvir, Rvir, M200, R200, rho_s, Rs = MvirToMRfrac(mi, zi, BDarr[i], rho_crit_arr[i], cosmo_h, frac=200.0)
else:
Mvir, Rvir, M200, R200, rho_s, Rs = MfracToMvir(mi, zi, BDarr[i], rho_crit_arr[i], cosmo_h, frac=200.0)
#Eq. 3.2 Ma et al
#limit_kk_Rvir.py tests the limit of Rvir.
rp = np.linspace(0, config.kRmax * Rvir, config.kRspace)
for tr in rp:
if tr == 0:
continue
kint += (tr * tr * np.sin(ell * tr / chiarr[i]) / (ell * tr / chiarr[i]) * rho_s / (tr/Rs) / (1. + tr/Rs)**2.)
kint *= (4. * np.pi * (rp[1] - rp[0]))
mint += (dlnm * mf[jj] * kint * kint)
mk2 += (dlnm * bias[jj] * mf[jj] * kint)
jj += 1
cl1h += (dVdzdOm[i] * kl_multi * kl_multi * mint)
cl2h += (dVdzdOm[i] * pk[i] * Darr[i] * Darr[i] * kl_multi * kl_multi * mk2 * mk2)
cl1h *= dz
cl2h *= dz
cl = cl1h + cl2h
return cl1h, cl2h, cl
@jit(nopython=True)
def integrate_yyhalo(ell, zarr, chiarr, dVdzdOm, marr, mf, BDarr, rhobarr, rho_crit_arr, bias, Darr, pk, dz, dlnm, omega_b0, omega_m0, cosmo_h, constk, consty, input_mvir):
'''
Eq. 3.1 Ma et al.
'''
cl1h = 0.0
cl2h = 0.0
jj = 0
for i, zi in enumerate(zarr[:]):
zp = 1. + zi
mint = 0.0
my2 = 0.0
#for j, mi in enumerate(marr[:]):
for j in range(config.mspace):
mi = marr[jj]
if input_mvir:
Mvir, Rvir, M200, R200, rho_s, Rs = MvirToMRfrac(mi, zi, BDarr[i], rho_crit_arr[i], cosmo_h, frac=200.0)
else:
Mvir, Rvir, M200, R200, rho_s, Rs = MfracToMvir(mi, zi, BDarr[i], rho_crit_arr[i], cosmo_h, frac=200.0)
xmax = config.yRmax * Rvir / Rs
ells = chiarr[i] / zp / Rs
xarr = np.linspace(1e-5, xmax, config.yRspace)
yint = 0.
for x in xarr:
if x == 0:
continue
yint += (x * x * np.sin(ell * x / ells) / (ell * x / ells) * battaglia_profile_2d(x, 0., Rs, M200, R200, zi, rho_crit_arr[i], omega_b0, omega_m0, cosmo_h))
yint *= (4 * np.pi * Rs * (xarr[1] - xarr[0]) / ells / ells)
mint += (dlnm * mf[jj] * yint * yint)
my2 += (dlnm * bias[jj] * mf[jj] * yint)
jj += 1
cl1h += (dVdzdOm[i] * consty * consty * mint)
cl2h += (dVdzdOm[i] * pk[i] * Darr[i] * Darr[i] * consty * consty * my2 * my2)
cl1h *= dz
cl2h *= dz
cl = cl1h + cl2h
return cl1h, cl2h, cl
def cl_WL_tSZ(fwhm_k, fwhm_y, kk, yy, ky, zsfile, odir='../data'):
'''
Compute WL X tSZ halomodel for a given source redshift distribution
'''
if ky:
sigma_k = fwhm_k * np.pi / 2.355 / 60. /180. #angle in radian
sigma_y = fwhm_y * np.pi / 2.355 / 60. /180. #angle in radian
sigmasq = sigma_k * sigma_y
elif kk:
sigma_k = fwhm_k * np.pi / 2.355 / 60. /180. #angle in radian
sigmasq = sigma_k * sigma_k
elif yy:
sigma_y = fwhm_y * np.pi / 2.355 / 60. /180. #angle in radian
sigmasq = sigma_y * sigma_y
else:
raise ValueError('Either kk, yy or ky should be True')
cosmo0 = CosmologyFunctions(0)
omega_b0 = cosmo0._omega_b0
omega_m0 = cosmo0._omega_m0
cosmo_h = cosmo0._h
light_speed = config.light_speed #km/s
mpctocm = config.mpctocm
kB_kev_K = config.kB_kev_K
sigma_t_cm = config.sigma_t_cm #cm^2
rest_electron_kev = config.rest_electron_kev #keV
constk = 3. * omega_m0 * (cosmo_h * 100. / light_speed)**2. / 2. #Mpc^-2
consty = mpctocm * sigma_t_cm / rest_electron_kev
zsarr, Ns = np.genfromtxt(zsfile, unpack=True)
if np.isscalar(zsarr):
zsarr = np.array([zsarr])
Ns = np.array([Ns])
else:
zint = np.sum(Ns) * (zsarr[1] - zsarr[0])
Ns /= zint
kmin = config.kmin #1/Mpc
kmax = config.kmax
kspace = config.kspace
mmin = config.mmin
mmax = config.mmax
mspace = config.mspace
zmin = config.zmin
zmax = config.zmax
zspace = config.zspace
dlnk = np.log(kmax/kmin) / kspace
lnkarr = np.linspace(np.log(kmin), np.log(kmax), kspace)
karr = np.exp(lnkarr).astype(np.float64)
#No little h
#Input Mpc/h to power spectra and get Mpc^3/h^3
pk_arr = np.array([cosmo0.linear_power(k/cosmo0._h) for k in karr]).astype(np.float64) / cosmo0._h / cosmo0._h / cosmo0._h
pkspl = InterpolatedUnivariateSpline(karr/cosmo0._h, pk_arr, k=2)
#pl.loglog(karr, pk_arr)
#pl.show()
dlnm = np.log(mmax/mmin) / mspace
lnmarr = np.linspace(np.log(mmin), np.log(mmax), mspace)
marr = np.exp(lnmarr).astype(np.float64)
zarr = np.linspace(zmin, zmax, zspace)
dz = (zmax-zmin) / zspace
    print 'dlnk, dlnm, dz', dlnk, dlnm, dz
#No little h
#Need to give mass * h and get the sigma without little h
#The following lines are used only used for ST MF and ST bias
sigma_m0 = np.array([cosmo0.sigma_m(m * cosmo0._h) for m in marr])
rho_norm0 = cosmo0.rho_bar()
lnMassSigmaSpl = InterpolatedUnivariateSpline(lnmarr, sigma_m0, k=3)
hzarr, BDarr, rhobarr, chiarr, dVdzdOm, rho_crit_arr = [], [], [], [], [], []
bias, Darr = [], []
marr2, mf, dlnmdlnm = [], [], []
if config.MF =='Tinker' and config.MassToIntegrate == 'm200m':
#Mass using critical density (ie. m200c)
tm200c = np.logspace(np.log10(1e8), np.log10(1e17), 50)
#m200m mass using mean mass density
tmarr = np.exp(lnmarr).astype(np.float64)
#tf = np.genfromtxt('../data/z_m_relation.dat')
#tz = tf[:,0]
#tmv = tf[:,1]
#tm200c = tf[:,2]
#tm200m = tf[:,3]
for i, zi in enumerate(zarr):
cosmo = CosmologyFunctions(zi)
rcrit = cosmo.rho_crit() * cosmo._h * cosmo._h
rbar = cosmo.rho_bar() * cosmo._h * cosmo._h
bn = cosmo.BryanDelta()
BDarr.append(bn) #OK
rho_crit_arr.append(rcrit) #OK
rhobarr.append(rbar)
chiarr.append(cosmo.comoving_distance() / cosmo._h)
hzarr.append(cosmo.E0(zi))
#Number of Msun objects/Mpc^3 (i.e. unit is 1/Mpc^3)
if config.MF =='Tinker':
if config.MassToIntegrate == 'virial':
if bn/cosmo.omega_m() > 200:
mFrac = marr * cosmo_h
#print bn, cosmo.omega_m(), bn/cosmo.omega_m()
mf.append(bias_mass_func_tinker(zi, mFrac.min(), mFrac.max(), mspace, bias=False, Delta=bn/cosmo.omega_m(), marr=mFrac, reduced=False)[1])
marr2.append(marr)
dlnmdlnm.append(np.ones_like(marr))
else:
mFrac = np.array([HuKravtsov(zi, mv, rcrit, rbar, bn, config.MassDef*cosmo.omega_m(), cosmo_h, 1)[2] for mv in marr]) * cosmo_h
mf.append(bias_mass_func_tinker(zi, mFrac.min(), mFrac.max(), mspace, bias=False, Delta=config.MassDef, marr=mFrac, reduced=False)[1])
marr2.append(marr)
dlnmdlnm.append([dlnMdensitydlnMcritOR200(config.MassDef * cosmo.omega_m(), bn, mFm/cosmo_h, mv, zi, cosmo_h, 1) for mv,mFm in zip(marr, mFrac)]) #dlnmFrac/dlnMv. In the bias_mass_func_tinker() I have computed dn/dlnM where M is in the unit of Msol. I have therefore include h in that mass function. Therefore, I just need to multiply dlnmFrac/dlnMv only
#print dlnmdlnm
#print a
input_mvir = 1
elif config.MassToIntegrate == 'm200c':
#XXX
#m200m = np.array([HuKravtsov(zi, mv, rcrit, rbar, 200, 200*cosmo.omega_m(), cosmo_h, 0)[2] for mv in marr]) #* cosmo_h
#print m200m
#XXX
if 200./cosmo.omega_m() > 200:
mFrac = marr * cosmo_h
#print 200, cosmo.omega_m(), 200/cosmo.omega_m()
mf.append(bias_mass_func_tinker(zi, mFrac.min(), mFrac.max(), mspace, bias=False, Delta=200./cosmo.omega_m(), marr=mFrac, reduced=False)[1])
marr2.append(marr)
dlnmdlnm.append(np.ones_like(marr))
else:
mFrac = np.array([HuKravtsov(zi, m2c, rcrit, rbar, 200, config.MassDef*cosmo.omega_m(), cosmo_h, 0)[2] for m2c in marr]) * cosmo_h
mf.append(bias_mass_func_tinker(zi, mFrac.min(), mFrac.max(), mspace, bias=False, Delta=config.MassDef, marr=mFrac)[1])
marr2.append(marr)
for m2,mFm in zip(marr, mFrac):
dlnmdlnm.append(dlnMdensitydlnMcritOR200(config.MassDef * cosmo.omega_m(), 200., mFm/cosmo_h, m2, zi, cosmo_h, 0)) #dlnM200m/dlnMv. In the bias_mass_func_tinker() I have computed dn/dlnM where M is in the unit of Msol. I have therefore include h in that mass function. Therefore, I just need to multiply dlnM200m/dlnMv only
input_mvir = 0
elif config.MassToIntegrate == 'm200m':
#raise ValueError('Use MassToIntegrate=virial/m200c. m200m is not working')
#Temporary mass array of m200m from m200c
tm200m = np.array([HuKravtsov(zi, tt, rcrit, rbar, 200, 200.*cosmo.omega_m(), cosmo._h, 0)[2] for tt in tm200c])
#m200m vs m200c spline
tmspl = InterpolatedUnivariateSpline(tm200m, tm200c)
#Now m200c from m200m, i.e. tmarr which is the integrating
#variable
m200c = tmspl(tmarr)
#m200m Msol/h
m200m = tmarr * cosmo_h
marr2.append(m200c)
mf.append(bias_mass_func_tinker(zi, m200m.min(), m200m.max(), mspace, bias=False, Delta=200, marr=m200m)[1])
input_mvir = 0
elif config.MF == 'Bocquet':
if config.MassToIntegrate == 'virial':
m200 = np.array([HuKravtsov(zi, mv, rcrit, rcrit, bn, 200, cosmo_h, 1)[2] for mv in marr])
mf.append(bias_mass_func_bocquet(zi, m200.min(), m200.max(), mspace, bias=False, marr=m200)[1])
marr2.append(marr)
for mv,m2 in zip(marr, m200):
dlnmdlnm.append(dlnMdensitydlnMcritOR200(200., bn, m2, mv, zi, cosmo_h, 1))
input_mvir = 1
elif config.MassToIntegrate == 'm200c':
tmf = bias_mass_func_bocquet(zi, marr.min(), marr.max(), mspace, bias=False, marr=marr)[1]
mf.append(tmf)
dlnmdlnm.append(np.ones(len(tmf)))
input_mvir = 0
elif config.MF == 'ST':
raise ValueError('MF should be Tinker or Bocquet')
#Bias is calculated by assuming that the mass is virial. I need to change that
bias.append(np.array([halo_bias_st(cosmo.delta_c() * cosmo.delta_c() / cosmo._growth / cosmo._growth / lnMassSigmaSpl(np.log(m)) / lnMassSigmaSpl(np.log(m))) for m in marr]))
dVdzdOm.append(cosmo.E(zi) / cosmo._h) #Mpc/h, It should have (km/s/Mpc)^-1 but in the cosmology code the speed of light is removed
Darr.append(cosmo._growth)
if config.MF =='Tinker' and config.MassToIntegrate == 'm200m':
mf = np.array(mf).flatten()
else:
mf = np.array(mf).flatten() * np.array(dlnmdlnm).flatten()
hzarr = np.array(hzarr)
BDarr = np.array(BDarr)
rhobarr = np.array(rhobarr)
chiarr = np.array(chiarr)
dVdzdOm = np.array(dVdzdOm) * chiarr * chiarr
rho_crit_arr = np.array(rho_crit_arr)
marr2 = np.array(marr2).flatten()
zchispl = InterpolatedUnivariateSpline(zarr, chiarr, k=2)
chisarr = zchispl(zsarr)
bias = np.array(bias).flatten()
Darr = np.array(Darr)
#ellarr = np.linspace(1, 10001, 10)
ellarr = np.logspace(np.log10(config.ellmin), np.log10(config.ellmax), config.ellspace)
cl_arr, cl1h_arr, cl2h_arr = [], [], []
for ell in ellarr:
pk = pkspl(ell/chiarr)
if ky:
cl1h, cl2h, cl = integrate_kyhalo(ell, zarr, chiarr, dVdzdOm, marr2, mf, BDarr, rhobarr, rho_crit_arr, bias, Darr, pk, zsarr, chisarr, Ns, dz, dlnm, omega_b0, omega_m0, cosmo_h, constk, consty, input_mvir)
if kk:
cl1h, cl2h, cl = integrate_kkhalo(ell, zarr, chiarr, dVdzdOm, marr2, mf, BDarr, rhobarr, rho_crit_arr, bias, Darr, pk, zsarr, chisarr, Ns, dz, dlnm, omega_b0, omega_m0, cosmo_h, constk, consty, input_mvir)
if yy:
cl1h, cl2h, cl = integrate_yyhalo(ell, zarr, chiarr, dVdzdOm, marr2, mf, BDarr, rhobarr, rho_crit_arr, bias, Darr, pk, dz, dlnm, omega_b0, omega_m0, cosmo_h, constk, consty, input_mvir)
cl_arr.append(cl)
cl1h_arr.append(cl1h)
cl2h_arr.append(cl2h)
print ell, cl1h, cl2h, cl
convolve = np.exp(-1 * sigmasq * ellarr * ellarr)# i.e. the output is Cl by convolving by exp(-sigma^2 l^2)
cl = np.array(cl_arr) * convolve
cl1h = np.array(cl1h_arr) * convolve
cl2h = np.array(cl2h_arr) * convolve
if config.savefile:
if ky:
np.savetxt(os.path.join(odir, 'cl_ky_z.dat'), np.transpose((ellarr, cl1h, cl2h, cl)), fmt='%.2f %.3e %.3e %.3e', header='l Cl1h Cl2h Cl')
if kk:
np.savetxt(os.path.join(odir, 'cl_kk_z.dat'), np.transpose((ellarr, cl1h, cl2h, cl)), fmt='%.2f %.3e %.3e %.3e', header='l Cl1h Cl2h Cl')
if yy:
np.savetxt(os.path.join(odir, 'cl_yy_z.dat'), np.transpose((ellarr, cl1h, cl2h, cl)), fmt='%.2f %.3e %.3e %.3e', header='l Cl1h Cl2h Cl')
return ellarr, cl1h, cl2h, cl
if __name__=='__main__':
fwhm_k = 0.0
fwhm_y = 0.0
kk = 1
yy = 0
ky = 0
zsfile = 'source_distribution.txt'
zsfile = 'source_distribution_zs_1.txt'
ellarr, cl1h, cl2h, cl = cl_WL_tSZ(fwhm_k, fwhm_y, kk, yy, ky, zsfile, odir='../data')
if yy:
bl, bcl = np.genfromtxt('../data/battaglia_analytical.csv', delimiter=',', unpack=True)
pl.plot(bl, bcl, label='Battaglia')
#Convert y to \delta_T using 150 GHz. (g(x) TCMB)^2 = 6.7354
cl *= 6.7354
cl1h *= 6.7354
cl2h *= 6.7354
pl.plot(ellarr, 1e12 * ellarr * (ellarr+1) * cl / 2. / np.pi, label='Cl')
pl.plot(ellarr, 1e12 * ellarr * (ellarr+1) * cl1h / 2. / np.pi, label='Cl1h')
pl.plot(ellarr, 1e12 * ellarr * (ellarr+1) * cl2h / 2. / np.pi, label='Cl2h')
pl.xlabel(r'$\ell$')
pl.ylabel(r'$C_\ell \ell (\ell + 1)/2/\pi \mu K^2$')
pl.legend(loc=0)
else:
pl.plot(ellarr, ellarr * (ellarr+1) * cl / 2. / np.pi, label='Cl')
pl.plot(ellarr, ellarr * (ellarr+1) * cl1h / 2. / np.pi, label='Cl1h')
pl.plot(ellarr, ellarr * (ellarr+1) * cl2h / 2. / np.pi, label='Cl2h')
pl.xlabel(r'$\ell$')
pl.ylabel(r'$C_\ell \ell (\ell + 1)/2/\pi$')
pl.legend(loc=0)
pl.show()
|
vvinuv/HaloModel
|
halowlsz/halomodel_cl_WL_tSZ_z.py
|
Python
|
gpl-3.0
| 19,200
|
[
"TINKER"
] |
ac227e509bd03084e25ea17699dd8ee0d89c62f522e2bdde3067897ea7c90caf
|
#!/usr/bin/env python2
#
# Copyright 2001 - 2016 Ludek Smid [http://www.ospace.net/]
#
# This file is part of Outer Space.
#
# Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import argparse
import multiprocessing
import os, os.path
import sys
import time
relative_path = os.path.dirname(os.path.realpath(__file__))
# tweak PYTHONPATH
sys.path.insert(0, os.path.join(relative_path, "client"))
sys.path.insert(0, os.path.join(relative_path, "server"))
sys.path.insert(0, os.path.join(relative_path, "server", "data"))
sys.path.insert(0, os.path.join(relative_path, "client-ai"))
sys.path.insert(0, os.path.join(relative_path, "server","lib"))
parser = argparse.ArgumentParser()
common_parser = argparse.ArgumentParser(add_help=False)
common_parser.add_argument("--configdir", dest = "configDir",
metavar = "DIRECTORY",
default = os.path.join(os.path.expanduser("~"), ".outerspace"),
help = "Override default configuration directory",
)
common_parser.add_argument("--server", dest = "server",
metavar = "HOSTNAME:PORT",
default = "pichu.dahaic.net:9080",
help = "Outer Space server location"
)
common_parser.add_argument("--local", dest = "local",
action = "store_true",
default = False,
help = "Setting on local mode (connections are made to localhost)"
)
common_parser.add_argument("--profile", dest = "profile",
default=False,
help = "Run with profiling enabled"
)
subparsers = parser.add_subparsers(help='Subcommands: client (default), server, ai, ai-pool')
parser_client = subparsers.add_parser('client', help='Game client of Outer Space', parents=[common_parser])
parser_server = subparsers.add_parser('server', help='Dedicated server', parents=[common_parser])
parser_ai = subparsers.add_parser('ai', help='Run one AI worker', parents=[common_parser])
parser_ai_pool = subparsers.add_parser('ai-pool', help='Batch run of AI players defined in configuration', parents=[common_parser])
# unfortunately, argparse does not support a default subcommand (maybe it is
# a messy approach? :( ) so we push 'client' when the default should apply
if len(sys.argv) == 1 or sys.argv[1] not in ['client', 'server', 'ai', 'ai-pool', '--help', '-h']:
sys.argv = [sys.argv[0]] + ['client'] + sys.argv[1:]
subcommand = sys.argv[1]
# common stuff
# client
parser_client.add_argument("--configfilename", dest = "configFilename",
metavar = "FILENAME",
default = "osci.ini",
help = "Override default configuration file name",
)
parser_client.add_argument("--login", dest = "login",
metavar = "HOSTNAME:PORT",
default = None,
help = "Account login"
)
parser_client.add_argument("--password", dest = "password",
metavar = "HOSTNAME:PORT",
default = None,
help = "Account password"
)
parser_client.add_argument("--heartbeat", dest = "heartbeat",
type = int,
metavar = "SECONDS",
default = 60,
help = "Heartbeat for server connection"
)
# server
parser_server.add_argument("--configfilename", dest = "configFilename",
metavar = "FILENAME",
default = "osci.ini",
help = "Override default configuration file name",
)
parser_server.add_argument("--restore", dest = "restore",
metavar = "STRING",
default = None,
help = "Restore from backup files beginning with STRING",
)
parser_server.add_argument("--reset", dest = "reset",
action = "store_true", default=False,
help = "Server resets itself before starting up"
)
parser_server.add_argument("--upgrade", dest = "upgrade",
action = "store_true", default=False,
help = "Server will undergo upgrade routine"
)
parser_server.add_argument("--mode", dest = "mode",
type=int,
metavar = "MODE",
default=1,
help = "Server mode: 0 - debug, 1 - normal",
)
# ai
parser_ai.add_argument("--login", dest = "login",
metavar = "LOGIN",
default = None,
help = "Login name of the AI player.",
)
parser_ai.add_argument("--password", dest = "password",
metavar = "PASSWORD",
default = None,
help = "Corresponding password of the AI player.",
)
parser_ai.add_argument("--ai", dest = "ai",
metavar = "AI",
default = None,
help = "Type of the AI applied."
)
parser_ai.add_argument("--game", dest = "game",
metavar = "NAME",
default = 'Alpha',
help = "Name of game to which the AI belongs",
)
parser_ai.add_argument("--test-connection", dest = "test",
action = "store_true", default=False,
help = argparse.SUPPRESS
)
parser_ai.add_argument("--galaxy", dest = "galaxies",
metavar = "NAME",
action = "append",
default = [],
help = "Name of galaxy to enable AI for, no argument means all galaxies"
)
# ai-pool
parser_ai_pool.add_argument("--procs", dest = "procs",
metavar = "PROCS",
default = multiprocessing.cpu_count() * 4,
type=int,
help = "Maximum number of concurrent processes, default is 4 times cpu count."
)
parser_ai_pool.add_argument("--galaxy", dest = "galaxies",
metavar = "NAME",
action = "append",
default = [],
help = "Name of galaxy to enable AI for, no argument means all galaxies"
)
parser_ai_pool.add_argument("--game", dest = "game",
metavar = "NAME",
default = "Alpha",
help = "Name of the game for which the AIs should be run.",
)
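# A few illustrative invocations (account names and paths below are examples
# only, not taken from the project documentation):
#   ./outerspace.py                                   # same as "./outerspace.py client"
#   ./outerspace.py server --configdir ~/osc-server --mode 0
#   ./outerspace.py ai --login ai_bot --password secret --game Alpha
#   ./outerspace.py ai-pool --procs 8 --game Alpha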
options = parser.parse_args()
options.configDir = os.path.expanduser(options.configDir)
if options.local:
# we will set localhost as a connections
options.server = 'localhost:9080'
if subcommand == 'server':
from main_server import runServer
task = runServer
elif subcommand == 'ai':
from main_ai import runAIClient
task = runAIClient
elif subcommand == 'ai-pool':
from main_ai_pool import runAIPool
task = runAIPool
# basically default (as we force it in case of nonexistent subcommand
elif subcommand == 'client':
from main_client import runClient
task = runClient
if __name__ == '__main__':
if not os.path.exists(options.configDir):
os.makedirs(options.configDir)
# first, we have to initialize Rules (to provide it with configDir)
import ige.ospace.Rules as Rules
Rules.init(options.configDir)
if options.profile:
import cProfile
profiling_output = '{0}.raw'.format(options.profile)
cProfile.run('task(options)', profiling_output )
import pstats
with open(options.profile, 'w') as pro_file:
stats = pstats.Stats(profiling_output, stream=pro_file)
stats.strip_dirs()
stats.sort_stats('time')
stats.print_stats()
else:
task(options)
exit()
|
dahaic/outerspace
|
outerspace.py
|
Python
|
gpl-2.0
| 7,215
|
[
"Galaxy"
] |
15bd8ae88630af0c43e002e048e4e8014742851553b49ada403aee1831e726c7
|
from __future__ import print_function
#
# Copied from VTK/Common/Testing/Python/PythonSmoke.py
#
import qt
try:
import vtk
except:
print("Cannot import vtk")
qt.QApplication.exit(1)
try:
print(dir(vtk))
except:
print("Cannot print dir(vtk)")
qt.QApplication.exit(1)
try:
try:
try:
o = vtk.vtkLineWidget()
print("Using Hybrid")
except:
o = vtk.vtkActor()
print("Using Rendering")
except:
o = vtk.vtkObject()
print("Using Common")
except:
print("Cannot create vtkObject")
qt.QApplication.exit(1)
try:
print(o)
print("Reference count: %d" % o.GetReferenceCount())
print("Class name: %s" % o.GetClassName())
except:
print("Cannot print object")
qt.QApplication.exit(1)
try:
b = vtk.vtkObject()
d = b.SafeDownCast(o)
print((b, d))
except:
print("Cannot downcast")
qt.QApplication.exit(1)
qt.QApplication.exit(0)
|
SINTEFMedtek/CTK
|
Applications/ctkSimplePythonShell/Testing/Python/vtkPythonSmoke.py
|
Python
|
apache-2.0
| 896
|
[
"VTK"
] |
1f50091a0c3b27b1cadfac3d5ed7a0f1466abd0e85aa01342e6cd05cfefe997f
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from json import dumps, loads
from copy import deepcopy
import inspect
import warnings
import networkx as nx
from qiita_core.qiita_settings import qiita_config
import qiita_db as qdb
from configparser import ConfigParser
class Command(qdb.base.QiitaObject):
r"""An executable command available in the system
Attributes
----------
active
post_processing_cmd
analysis_only
default_parameter_sets
description
merging_scheme
name
naming_order
optional_parameters
outputs
parameters
required_parameters
software
name
description
cli
parameters_table
Methods
-------
_check_id
activate
Class Methods
-------------
create
exists
get_commands_by_input_type(cls, artifact_types, active_only=True,
get_html_generator(cls, artifact_type):
get_validator(cls, artifact_type):
See Also
--------
qiita_db.software.Software
"""
_table = "software_command"
@classmethod
def get_commands_by_input_type(cls, artifact_types, active_only=True,
exclude_analysis=True):
"""Returns the commands that can process the given artifact types
Parameters
----------
artifact_type : list of str
The artifact types
active_only : bool, optional
If True, return only active commands, otherwise return all commands
Default: True
exclude_analysis : bool, optional
If True, return commands that are not part of the analysis pipeline
Returns
-------
generator of qiita_db.software.Command
            The commands that can process the given artifact types
"""
with qdb.sql_connection.TRN:
sql = """SELECT DISTINCT command_id
FROM qiita.command_parameter
JOIN qiita.parameter_artifact_type
USING (command_parameter_id)
JOIN qiita.artifact_type USING (artifact_type_id)
JOIN qiita.software_command USING (command_id)
WHERE artifact_type IN %s"""
if active_only:
sql += " AND active = True"
if exclude_analysis:
sql += " AND is_analysis = False"
qdb.sql_connection.TRN.add(sql, [tuple(artifact_types)])
for c_id in qdb.sql_connection.TRN.execute_fetchflatten():
yield cls(c_id)
@classmethod
    def get_html_generator(cls, artifact_type):
        """Returns the command that generates the HTML for the given artifact
Parameters
----------
artifact_type : str
The artifact type to search the HTML generator for
Returns
-------
qiita_db.software.Command
The newly created command
Raises
------
        qdb.exceptions.QiitaDBError when the 'Generate HTML summary' command can't
be found
"""
with qdb.sql_connection.TRN:
sql = """SELECT command_id
FROM qiita.software_command
JOIN qiita.software_artifact_type USING (software_id)
JOIN qiita.artifact_type USING (artifact_type_id)
WHERE artifact_type = %s
AND name = 'Generate HTML summary'
AND active = true"""
qdb.sql_connection.TRN.add(sql, [artifact_type])
try:
res = qdb.sql_connection.TRN.execute_fetchlast()
except IndexError:
raise qdb.exceptions.QiitaDBError(
"There is no command to generate the HTML summary for "
"artifact type '%s'" % artifact_type)
return cls(res)
@classmethod
def get_validator(cls, artifact_type):
"""Returns the command that validates the given artifact
Parameters
----------
artifact_type : str
The artifact type to search the Validate for
Returns
-------
qiita_db.software.Command
The newly created command
Raises
------
qdb.exceptions.QiitaDBError when the Validate command can't be found
"""
with qdb.sql_connection.TRN:
sql = """SELECT command_id
FROM qiita.software_command
JOIN qiita.software_artifact_type USING (software_id)
JOIN qiita.artifact_type USING (artifact_type_id)
WHERE artifact_type = %s
AND name = 'Validate'
AND active = true"""
qdb.sql_connection.TRN.add(sql, [artifact_type])
try:
res = qdb.sql_connection.TRN.execute_fetchlast()
except IndexError:
                raise qdb.exceptions.QiitaDBError(
                    "There is no Validate command for "
"artifact type '%s'" % artifact_type)
return cls(res)
def _check_id(self, id_):
"""Check that the provided ID actually exists in the database
Parameters
----------
id_ : int
The ID to test
Notes
-----
        This function overrides the base function, as the sql layout doesn't
follow the same conventions done in the other classes.
"""
with qdb.sql_connection.TRN:
sql = """SELECT EXISTS(
SELECT *
FROM qiita.software_command
WHERE command_id = %s)"""
qdb.sql_connection.TRN.add(sql, [id_])
return qdb.sql_connection.TRN.execute_fetchlast()
@classmethod
def exists(cls, software, name):
"""Checks if the command already exists in the system
Parameters
----------
qiita_db.software.Software
The software to which this command belongs to.
name : str
The name of the command
Returns
-------
bool
Whether the command exists in the system or not
"""
with qdb.sql_connection.TRN:
sql = """SELECT EXISTS(SELECT *
FROM qiita.software_command
WHERE software_id = %s
AND name = %s)"""
qdb.sql_connection.TRN.add(sql, [software.id, name])
return qdb.sql_connection.TRN.execute_fetchlast()
@classmethod
def create(cls, software, name, description, parameters, outputs=None,
analysis_only=False):
r"""Creates a new command in the system
The supported types for the parameters are:
- string: the parameter is a free text input
- integer: the parameter is an integer
- float: the parameter is a float
- artifact: the parameter is an artifact instance, the artifact id
will be stored
- reference: the parameter is a reference instance, the reference
id will be stored
- choice: the format of this should be `choice:<json-dump-of-list>`
in which json-dump-of-list is the JSON dump of a list containing
the acceptable values
Parameters
----------
software : qiita_db.software.Software
The software to which this command belongs to.
name : str
The name of the command
description : str
The description of the command
parameters : dict
            The description of the parameters that this command receives. The
format is: {parameter_name: (parameter_type, default, name_order,
check_biom_merge, qiita_optional_parameter (optional))},
where parameter_name, parameter_type and default are strings,
name_order is an optional integer value and check_biom_merge is
an optional boolean value. name_order is used to specify the order
of the parameter when automatically naming the artifacts.
check_biom_merge is used when merging artifacts in the analysis
pipeline. qiita_optional_parameter is an optional bool to "force"
the parameter to be optional
outputs : dict, optional
            The description of the outputs that this command generates. The
format is either {output_name: artifact_type} or
{output_name: (artifact_type, check_biom_merge)}
analysis_only : bool, optional
If true, then the command will only be available on the analysis
pipeline. Default: False.
Returns
-------
qiita_db.software.Command
The newly created command
Raises
------
QiitaDBError
- If parameters is empty
- If the parameters dictionary is malformed
- If one of the parameter types is not supported
- If the default value of a choice parameter is not listed in
the available choices
QiitaDBDuplicateError
- If the command already exists
Notes
-----
If the default value for a parameter is NULL, then the parameter will
be required. On the other hand, if it is provided, the parameter will
be optional and the default value will be used when the user doesn't
overwrite it.
"""
# Perform some sanity checks in the parameters dictionary
if not parameters:
raise qdb.exceptions.QiitaDBError(
"Error creating command %s. At least one parameter should "
"be provided." % name)
sql_param_values = []
sql_artifact_params = []
for pname, vals in parameters.items():
qiita_optional_parameter = False
if 'qiita_optional_parameter' in vals:
qiita_optional_parameter = True
vals.remove('qiita_optional_parameter')
lenvals = len(vals)
if lenvals == 2:
ptype, dflt = vals
name_order = None
check_biom_merge = False
elif lenvals == 4:
ptype, dflt, name_order, check_biom_merge = vals
else:
raise qdb.exceptions.QiitaDBError(
"Malformed parameters dictionary, the format should be "
"either {param_name: [parameter_type, default]} or "
"{parameter_name: (parameter_type, default, name_order, "
"check_biom_merge)}. Found: %s for parameter name %s"
% (vals, pname))
# Check that the type is one of the supported types
supported_types = ['string', 'integer', 'float', 'reference',
'boolean', 'prep_template', 'analysis']
if ptype not in supported_types and not ptype.startswith(
('choice', 'mchoice', 'artifact')):
supported_types.extend(['choice', 'mchoice', 'artifact'])
raise qdb.exceptions.QiitaDBError(
"Unsupported parameters type '%s' for parameter %s. "
"Supported types are: %s"
% (ptype, pname, ', '.join(supported_types)))
if ptype.startswith(('choice', 'mchoice')) and dflt is not None:
choices = set(loads(ptype.split(':')[1]))
dflt_val = dflt
if ptype.startswith('choice'):
                    # In the choice case, the dflt value is a single string;
                    # wrap it in a list so we can use the issuperset call
                    # below
dflt_val = [dflt_val]
else:
# jsonize the list to store it in the DB
dflt = dumps(dflt)
if not choices.issuperset(dflt_val):
raise qdb.exceptions.QiitaDBError(
"The default value '%s' for the parameter %s is not "
"listed in the available choices: %s"
% (dflt, pname, ', '.join(choices)))
if ptype.startswith('artifact'):
atypes = loads(ptype.split(':')[1])
sql_artifact_params.append(
[pname, 'artifact', atypes])
else:
                # a parameter will be required (not optional) if
                # qiita_optional_parameter is False and the default
                # value (dflt) is None
required = not qiita_optional_parameter and dflt is None
sql_param_values.append([pname, ptype, required, dflt,
name_order, check_biom_merge])
with qdb.sql_connection.TRN:
if cls.exists(software, name):
raise qdb.exceptions.QiitaDBDuplicateError(
"command", "software: %d, name: %s"
% (software.id, name))
# Add the command to the DB
sql = """INSERT INTO qiita.software_command
(name, software_id, description, is_analysis)
VALUES (%s, %s, %s, %s)
RETURNING command_id"""
sql_params = [name, software.id, description, analysis_only]
qdb.sql_connection.TRN.add(sql, sql_params)
c_id = qdb.sql_connection.TRN.execute_fetchlast()
# Add the parameters to the DB
sql = """INSERT INTO qiita.command_parameter
(command_id, parameter_name, parameter_type,
required, default_value, name_order, check_biom_merge)
VALUES (%s, %s, %s, %s, %s, %s, %s)
RETURNING command_parameter_id"""
sql_params = [
[c_id, pname, p_type, reqd, default, no, chm]
for pname, p_type, reqd, default, no, chm in sql_param_values]
qdb.sql_connection.TRN.add(sql, sql_params, many=True)
qdb.sql_connection.TRN.execute()
# Add the artifact parameters
sql_type = """INSERT INTO qiita.parameter_artifact_type
(command_parameter_id, artifact_type_id)
VALUES (%s, %s)"""
supported_types = []
for pname, p_type, atypes in sql_artifact_params:
sql_params = [c_id, pname, p_type, True, None, None, False]
qdb.sql_connection.TRN.add(sql, sql_params)
pid = qdb.sql_connection.TRN.execute_fetchlast()
sql_params = [
[pid, qdb.util.convert_to_id(at, 'artifact_type')]
for at in atypes]
qdb.sql_connection.TRN.add(sql_type, sql_params, many=True)
supported_types.extend([atid for _, atid in sql_params])
# If the software type is 'artifact definition', there are a couple
# of extra steps
if software.type == 'artifact definition':
# If supported types is not empty, link the software with these
# types
if supported_types:
sql = """INSERT INTO qiita.software_artifact_type
(software_id, artifact_type_id)
VALUES (%s, %s)"""
sql_params = [[software.id, atid]
for atid in supported_types]
qdb.sql_connection.TRN.add(sql, sql_params, many=True)
# If this is the validate command, we need to add the
# provenance and name parameters. These are used internally,
# that's why we are adding them here
if name == 'Validate':
sql = """INSERT INTO qiita.command_parameter
(command_id, parameter_name, parameter_type,
required, default_value)
VALUES (%s, 'name', 'string', 'False',
'dflt_name'),
(%s, 'provenance', 'string', 'False', NULL)
"""
qdb.sql_connection.TRN.add(sql, [c_id, c_id])
# Add the outputs to the command
if outputs:
sql_args = []
for pname, at in outputs.items():
if isinstance(at, tuple):
sql_args.append(
[pname, c_id,
qdb.util.convert_to_id(at[0], 'artifact_type'),
at[1]])
else:
sql_args.append(
[pname, c_id,
qdb.util.convert_to_id(at, 'artifact_type'),
False])
sql = """INSERT INTO qiita.command_output
(name, command_id, artifact_type_id,
check_biom_merge)
VALUES (%s, %s, %s, %s)"""
qdb.sql_connection.TRN.add(sql, sql_args, many=True)
qdb.sql_connection.TRN.execute()
return cls(c_id)
@property
def software(self):
"""The software to which this command belongs to
Returns
-------
qiita_db.software.Software
the software to which this command belongs to
"""
with qdb.sql_connection.TRN:
sql = """SELECT software_id
FROM qiita.software_command
WHERE command_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return Software(qdb.sql_connection.TRN.execute_fetchlast())
@property
def name(self):
"""The name of the command
Returns
-------
str
The name of the command
"""
with qdb.sql_connection.TRN:
sql = """SELECT name
FROM qiita.software_command
WHERE command_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
@property
def post_processing_cmd(self):
"""Additional processing commands required for merging
Returns
-------
str
Returns the additional processing command for merging
"""
with qdb.sql_connection.TRN:
sql = """SELECT post_processing_cmd
FROM qiita.software_command
WHERE command_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
cmd = qdb.sql_connection.TRN.execute_fetchlast()
            if cmd:
                # assume correctly formatted json data
                # load data into a dictionary; don't return the raw JSON
                # string (cmd already holds the fetched value)
                return loads(cmd)
return None
@property
def description(self):
"""The description of the command
Returns
-------
str
The description of the command
"""
with qdb.sql_connection.TRN:
sql = """SELECT description
FROM qiita.software_command
WHERE command_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
@property
def parameters(self):
"""Returns the parameters that the command accepts
Returns
-------
dict
Dictionary of {parameter_name: [ptype, dflt]}
"""
with qdb.sql_connection.TRN:
sql = """SELECT parameter_name, parameter_type, default_value
FROM qiita.command_parameter
WHERE command_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
res = qdb.sql_connection.TRN.execute_fetchindex()
return {pname: [ptype, dflt] for pname, ptype, dflt in res}
@property
def required_parameters(self):
"""Returns the required parameters that the command accepts
Returns
-------
dict
Dictionary of {parameter_name: ptype}
"""
with qdb.sql_connection.TRN:
sql = """SELECT command_parameter_id, parameter_name,
parameter_type, array_agg(
artifact_type ORDER BY artifact_type) AS
artifact_type
FROM qiita.command_parameter
LEFT JOIN qiita.parameter_artifact_type
USING (command_parameter_id)
LEFT JOIN qiita.artifact_type USING (artifact_type_id)
WHERE command_id = %s AND required = True
GROUP BY command_parameter_id"""
qdb.sql_connection.TRN.add(sql, [self.id])
res = qdb.sql_connection.TRN.execute_fetchindex()
return {pname: (ptype, atype) for _, pname, ptype, atype in res}
@property
def optional_parameters(self):
"""Returns the optional parameters that the command accepts
Returns
-------
dict
Dictionary of {parameter_name: [ptype, default]}
"""
with qdb.sql_connection.TRN:
sql = """SELECT parameter_name, parameter_type, default_value
FROM qiita.command_parameter
WHERE command_id = %s AND required = false"""
qdb.sql_connection.TRN.add(sql, [self.id])
res = qdb.sql_connection.TRN.execute_fetchindex()
# Define a function to load the json storing the default parameters
            # if ptype is multiple choice. When I added it to the for loop as
            # a one-liner if, it made the code a bit hard to read
def dflt_fmt(dflt, ptype):
if ptype.startswith('mchoice'):
return loads(dflt)
return dflt
return {pname: [ptype, dflt_fmt(dflt, ptype)]
for pname, ptype, dflt in res}
@property
def default_parameter_sets(self):
"""Returns the list of default parameter sets
Returns
-------
generator
generator of qiita_db.software.DefaultParameters
"""
with qdb.sql_connection.TRN:
sql = """SELECT default_parameter_set_id
FROM qiita.default_parameter_set
WHERE command_id = %s
ORDER BY default_parameter_set_id"""
qdb.sql_connection.TRN.add(sql, [self.id])
res = qdb.sql_connection.TRN.execute_fetchflatten()
for pid in res:
yield DefaultParameters(pid)
@property
def outputs(self):
"""Returns the list of output artifact types
Returns
-------
list of str
The output artifact types
"""
with qdb.sql_connection.TRN:
sql = """SELECT name, artifact_type
FROM qiita.command_output
JOIN qiita.artifact_type USING (artifact_type_id)
WHERE command_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchindex()
@property
def active(self):
"""Returns if the command is active or not
Returns
-------
bool
Whether the command is active or not
Notes
-----
        This method differentiates between commands based on analysis_only or
        the software type. The commands that are not for analysis (processing)
        and are not from an artifact definition software will return as active
        if they have the same name as a command that is active; this helps
        in situations where the processing plugins are updated but some
        commands didn't change their version.
"""
with qdb.sql_connection.TRN:
cmd_type = self.software.type
if self.analysis_only or cmd_type == 'artifact definition':
sql = """SELECT active
FROM qiita.software_command
WHERE command_id = %s"""
else:
sql = """SELECT EXISTS (
SELECT active FROM qiita.software_command
WHERE name IN (
SELECT name FROM qiita.software_command
WHERE command_id = %s) AND active = true)"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
def activate(self):
"""Activates the command"""
sql = """UPDATE qiita.software_command
SET active = %s
WHERE command_id = %s"""
qdb.sql_connection.perform_as_transaction(sql, [True, self.id])
@property
def analysis_only(self):
"""Returns if the command is an analysis-only command
Returns
-------
bool
Whether the command is analysis only or not
"""
with qdb.sql_connection.TRN:
sql = """SELECT is_analysis
FROM qiita.software_command
WHERE command_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
@property
def naming_order(self):
"""The ordered list of parameters to use to name the output artifacts
Returns
-------
list of str
"""
with qdb.sql_connection.TRN:
sql = """SELECT parameter_name
FROM qiita.command_parameter
WHERE command_id = %s AND name_order IS NOT NULL
ORDER BY name_order"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchflatten()
@property
def merging_scheme(self):
"""The values to check when merging the output result
Returns
-------
dict of {'parameters': [list of str],
'outputs': [list of str]
'ignore_parent_command': bool}
"""
with qdb.sql_connection.TRN:
sql = """SELECT parameter_name
FROM qiita.command_parameter
WHERE command_id = %s AND check_biom_merge = TRUE
ORDER BY parameter_name"""
qdb.sql_connection.TRN.add(sql, [self.id])
params = qdb.sql_connection.TRN.execute_fetchflatten()
sql = """SELECT name
FROM qiita.command_output
WHERE command_id = %s AND check_biom_merge = TRUE
ORDER BY name"""
qdb.sql_connection.TRN.add(sql, [self.id])
outputs = qdb.sql_connection.TRN.execute_fetchflatten()
sql = """SELECT ignore_parent_command
FROM qiita.software_command
WHERE command_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
ipc = qdb.sql_connection.TRN.execute_fetchlast()
return {'parameters': params,
'outputs': outputs,
'ignore_parent_command': ipc}
@property
def resource_allocation(self):
"""The resource allocation defined in the database for this command
Returns
-------
str
"""
with qdb.sql_connection.TRN:
sql = """SELECT allocation FROM
qiita.processing_job_resource_allocation
WHERE name = %s and
job_type = 'RESOURCE_PARAMS_COMMAND'"""
qdb.sql_connection.TRN.add(sql, [self.name])
result = qdb.sql_connection.TRN.execute_fetchflatten()
# if no matches for both type and name were found, query the
# 'default' value for the type
if not result:
sql = """SELECT allocation FROM
qiita.processing_job_resource_allocation WHERE
name = %s and job_type = 'RESOURCE_PARAMS_COMMAND'"""
qdb.sql_connection.TRN.add(sql, ['default'])
result = qdb.sql_connection.TRN.execute_fetchflatten()
if not result:
raise ValueError("Could not match '%s' to a resource "
"allocation!" % self.name)
return result[0]
class Software(qdb.base.QiitaObject):
r"""A software package available in the system
Attributes
----------
name
version
description
commands
publications
environment_name
start_script
Methods
-------
add_publications
create
See Also
--------
qiita_db.software.Command
"""
_table = "software"
@classmethod
def iter(cls, active=True):
"""Iterates over all active software
Parameters
----------
active : bool, optional
If True will only return active software
Returns
-------
        generator of qiita_db.software.Software
The software objects
"""
sql = """SELECT software_id
FROM qiita.software {0}
ORDER BY software_id""".format(
'WHERE active = True' if active else '')
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add(sql)
for s_id in qdb.sql_connection.TRN.execute_fetchflatten():
yield cls(s_id)
@classmethod
def deactivate_all(cls):
"""Deactivates all the plugins in the system"""
with qdb.sql_connection.TRN:
sql = "UPDATE qiita.software SET active = False"
qdb.sql_connection.TRN.add(sql)
sql = "UPDATE qiita.software_command SET active = False"
qdb.sql_connection.TRN.add(sql)
qdb.sql_connection.TRN.execute()
@classmethod
def from_file(cls, fp, update=False):
"""Installs/updates a plugin from a plugin configuration file
Parameters
----------
fp : str
Path to the plugin configuration file
update : bool, optional
If true, update the values in the database with the current values
in the config file. Otherwise, use stored values and warn if config
file contents and database contents do not match
Returns
-------
qiita_db.software.Software
The software object for the contents of `fp`
Raises
------
qiita_db.exceptions.QiitaDBOperationNotPermittedError
If the plugin type in the DB and in the config file doesn't match
If the (client_id, client_secret) pair in the DB and in the config
file doesn't match
"""
config = ConfigParser()
with open(fp, newline=None) as conf_file:
config.read_file(conf_file)
name = config.get('main', 'NAME')
version = config.get('main', 'VERSION')
description = config.get('main', 'DESCRIPTION')
env_script = config.get('main', 'ENVIRONMENT_SCRIPT')
start_script = config.get('main', 'START_SCRIPT')
software_type = config.get('main', 'PLUGIN_TYPE')
publications = config.get('main', 'PUBLICATIONS')
publications = loads(publications) if publications else []
client_id = config.get('oauth2', 'CLIENT_ID')
client_secret = config.get('oauth2', 'CLIENT_SECRET')
if cls.exists(name, version):
# This plugin already exists, check that all the values are the
# same and return the existing plugin
with qdb.sql_connection.TRN:
sql = """SELECT software_id
FROM qiita.software
WHERE name = %s AND version = %s"""
qdb.sql_connection.TRN.add(sql, [name, version])
instance = cls(qdb.sql_connection.TRN.execute_fetchlast())
warning_values = []
sql_update = """UPDATE qiita.software
SET {0} = %s
WHERE software_id = %s"""
values = [description, env_script, start_script]
attrs = ['description', 'environment_script', 'start_script']
for value, attr in zip(values, attrs):
if value != instance.__getattribute__(attr):
if update:
qdb.sql_connection.TRN.add(
sql_update.format(attr), [value, instance.id])
else:
warning_values.append(attr)
                # Having a different plugin type should be an error,
                # regardless of whether the user is trying to update plugins
                # or not
if software_type != instance.type:
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
'The plugin type of the plugin "%s" version %s does '
'not match the one in the system' % (name, version))
if publications != instance.publications:
if update:
instance.add_publications(publications)
else:
warning_values.append('publications')
if (client_id != instance.client_id or
client_secret != instance.client_secret):
if update:
sql = """INSERT INTO qiita.oauth_identifiers
(client_id, client_secret)
SELECT %s, %s
WHERE NOT EXISTS(SELECT *
FROM qiita.oauth_identifiers
WHERE client_id = %s
AND client_secret = %s)"""
qdb.sql_connection.TRN.add(
sql, [client_id, client_secret,
client_id, client_secret])
sql = """UPDATE qiita.oauth_software
SET client_id = %s
WHERE software_id = %s"""
qdb.sql_connection.TRN.add(
sql, [client_id, instance.id])
else:
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
'The (client_id, client_secret) pair of the '
'plugin "%s" version "%s" does not match the one '
'in the system' % (name, version))
if warning_values:
warnings.warn(
'Plugin "%s" version "%s" config file does not match '
'with stored information. Check the config file or '
'run "qiita plugin update" to update the plugin '
'information. Offending values: %s'
% (name, version, ", ".join(sorted(warning_values))),
qdb.exceptions.QiitaDBWarning)
qdb.sql_connection.TRN.execute()
else:
# This is a new plugin, create it
instance = cls.create(
name, version, description, env_script, start_script,
software_type, publications=publications, client_id=client_id,
client_secret=client_secret)
return instance
@classmethod
def exists(cls, name, version):
"""Returns whether the plugin (name, version) already exists
Parameters
----------
name : str
The name of the plugin
version : str
The version of the plugin
"""
with qdb.sql_connection.TRN:
sql = """SELECT EXISTS(
SELECT * FROM qiita.software
WHERE name = %s AND version = %s)"""
qdb.sql_connection.TRN.add(sql, [name, version])
return qdb.sql_connection.TRN.execute_fetchlast()
@classmethod
def create(cls, name, version, description, environment_script,
start_script, software_type, publications=None,
client_id=None, client_secret=None):
r"""Creates a new software in the system
Parameters
----------
name : str
The name of the software
version : str
The version of the software
description : str
The description of the software
environment_script : str
The script used to start the environment in which the plugin runs
start_script : str
The script used to start the plugin
software_type : str
The type of the software
publications : list of (str, str), optional
A list with the (DOI, pubmed_id) of the publications attached to
the software
client_id : str, optional
The client_id of the software. Default: randomly generated
client_secret : str, optional
The client_secret of the software. Default: randomly generated
Raises
------
qiita_db.exceptions.QiitaDBError
If one of client_id or client_secret is provided but not both
"""
with qdb.sql_connection.TRN:
sql = """INSERT INTO qiita.software
(name, version, description, environment_script,
start_script, software_type_id)
VALUES (%s, %s, %s, %s, %s, %s)
RETURNING software_id"""
type_id = qdb.util.convert_to_id(software_type, "software_type")
sql_params = [name, version, description, environment_script,
start_script, type_id]
qdb.sql_connection.TRN.add(sql, sql_params)
s_id = qdb.sql_connection.TRN.execute_fetchlast()
instance = cls(s_id)
if publications:
instance.add_publications(publications)
id_is_none = client_id is None
secret_is_none = client_secret is None
if id_is_none and secret_is_none:
# Both are none, generate new ones
client_id = qdb.util.create_rand_string(50, punct=False)
client_secret = qdb.util.create_rand_string(255, punct=False)
elif id_is_none ^ secret_is_none:
# One has been provided but not the other, raise an error
raise qdb.exceptions.QiitaDBError(
'Plugin "%s" version "%s" cannot be created, please '
'provide both client_id and client_secret or none of them'
% (name, version))
# At this point both client_id and client_secret are defined
sql = """INSERT INTO qiita.oauth_identifiers
(client_id, client_secret)
SELECT %s, %s
WHERE NOT EXISTS(SELECT *
FROM qiita.oauth_identifiers
WHERE client_id = %s
AND client_secret = %s)"""
qdb.sql_connection.TRN.add(
sql, [client_id, client_secret, client_id, client_secret])
sql = """INSERT INTO qiita.oauth_software (software_id, client_id)
VALUES (%s, %s)"""
qdb.sql_connection.TRN.add(sql, [s_id, client_id])
return instance
@classmethod
def from_name_and_version(cls, name, version):
"""Returns the software object with the given name and version
Parameters
----------
name: str
The software name
version : str
The software version
Returns
-------
qiita_db.software.Software
The software with the given name and version
Raises
------
qiita_db.exceptions.QiitaDBUnknownIDError
If no software with the given name and version exists
"""
with qdb.sql_connection.TRN:
sql = """SELECT software_id
FROM qiita.software
WHERE name = %s AND version = %s"""
qdb.sql_connection.TRN.add(sql, [name, version])
res = qdb.sql_connection.TRN.execute_fetchindex()
if not res:
raise qdb.exceptions.QiitaDBUnknownIDError(
"%s %s" % (name, version), cls._table)
return cls(res[0][0])
@property
def name(self):
"""The name of the software
Returns
-------
str
The name of the software
"""
with qdb.sql_connection.TRN:
sql = "SELECT name FROM qiita.software WHERE software_id = %s"
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
@property
def version(self):
"""The version of the software
Returns
-------
str
The version of the software
"""
with qdb.sql_connection.TRN:
sql = "SELECT version FROM qiita.software WHERE software_id = %s"
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
@property
def description(self):
"""The description of the software
Returns
-------
str
The software description
"""
with qdb.sql_connection.TRN:
sql = """SELECT description
FROM qiita.software
WHERE software_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
@property
def commands(self):
"""The list of commands attached to this software
Returns
-------
list of qiita_db.software.Command
The commands attached to this software package
"""
with qdb.sql_connection.TRN:
sql = """SELECT command_id
FROM qiita.software_command
WHERE software_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return [Command(cid)
for cid in qdb.sql_connection.TRN.execute_fetchflatten()]
def get_command(self, cmd_name):
"""Returns the command with the given name in the software
Parameters
----------
        cmd_name: str
            The name of the command to retrieve
Returns
-------
qiita_db.software.Command
The command with the given name in this software
"""
with qdb.sql_connection.TRN:
sql = """SELECT command_id
FROM qiita.software_command
WHERE software_id =%s AND name=%s"""
qdb.sql_connection.TRN.add(sql, [self.id, cmd_name])
res = qdb.sql_connection.TRN.execute_fetchindex()
if not res:
raise qdb.exceptions.QiitaDBUnknownIDError(
cmd_name, "software_command")
return Command(res[0][0])
@property
def publications(self):
"""The publications attached to the software
Returns
-------
list of (str, str)
            The list of (DOI, pubmed_id) attached to the software
"""
with qdb.sql_connection.TRN:
sql = """SELECT p.doi, p.pubmed_id
FROM qiita.publication p
JOIN qiita.software_publication sp
ON p.doi = sp.publication_doi
WHERE sp.software_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchindex()
def add_publications(self, publications):
"""Add publications to the software
Parameters
----------
publications : list of 2-tuples of str
A list with the (DOI, pubmed_id) of the publications to be attached
to the software
Notes
-----
For more information about pubmed id, visit
https://www.nlm.nih.gov/bsd/disted/pubmedtutorial/020_830.html
"""
with qdb.sql_connection.TRN:
sql = """INSERT INTO qiita.publication (doi, pubmed_id)
SELECT %s, %s
WHERE NOT EXISTS(SELECT *
FROM qiita.publication
WHERE doi = %s)"""
args = [[doi, pid, doi] for doi, pid in publications]
qdb.sql_connection.TRN.add(sql, args, many=True)
sql = """INSERT INTO qiita.software_publication
(software_id, publication_doi)
SELECT %s, %s
WHERE NOT EXISTS(SELECT *
FROM qiita.software_publication
WHERE software_id = %s AND
publication_doi = %s)"""
sql_params = [[self.id, doi, self.id, doi]
for doi, _ in publications]
qdb.sql_connection.TRN.add(sql, sql_params, many=True)
qdb.sql_connection.TRN.execute()
@property
def environment_script(self):
"""The script used to start the plugin environment
Returns
-------
str
The script used to start the environment
"""
with qdb.sql_connection.TRN:
sql = """SELECT environment_script
FROM qiita.software
WHERE software_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
@property
def start_script(self):
"""The script used to start the plugin
Returns
-------
str
The plugin's start script
"""
with qdb.sql_connection.TRN:
sql = """SELECT start_script
FROM qiita.software
WHERE software_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
@property
def default_workflows(self):
"""Returns the default workflows attached to the current software
Returns
-------
generator of qiita_db.software.DefaultWorkflow
            The default workflows attached to the software
"""
with qdb.sql_connection.TRN:
sql = """SELECT default_workflow_id
FROM qiita.default_workflow
WHERE software_id = %s
ORDER BY default_workflow_id"""
qdb.sql_connection.TRN.add(sql, [self.id])
for wf_id in qdb.sql_connection.TRN.execute_fetchflatten():
yield DefaultWorkflow(wf_id)
@property
def type(self):
"""Returns the type of the software
Returns
-------
str
The type of the software
"""
with qdb.sql_connection.TRN:
sql = """SELECT software_type
FROM qiita.software_type
JOIN qiita.software USING (software_type_id)
WHERE software_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
@property
def deprecated(self):
"""Returns if the software is deprecated or not
Returns
-------
bool
Whether the software is deprecated or not
"""
with qdb.sql_connection.TRN:
sql = """SELECT deprecated
FROM qiita.software
WHERE software_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
@deprecated.setter
def deprecated(self, deprecate):
"""Changes deprecated of the software
Parameters
----------
deprecate : bool
New software deprecate value
"""
sql = """UPDATE qiita.software SET deprecated = %s
WHERE software_id = %s"""
qdb.sql_connection.perform_as_transaction(sql, [deprecate, self._id])
@property
def active(self):
"""Returns if the software is active or not
Returns
-------
bool
Whether the software is active or not
"""
with qdb.sql_connection.TRN:
sql = "SELECT active FROM qiita.software WHERE software_id = %s"
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
def activate(self):
"""Activates the plugin"""
sql = """UPDATE qiita.software
SET active = %s
WHERE software_id = %s"""
qdb.sql_connection.perform_as_transaction(sql, [True, self.id])
@property
def client_id(self):
"""Returns the client id of the plugin
Returns
-------
str
The client id of the software
"""
with qdb.sql_connection.TRN:
sql = """SELECT client_id
FROM qiita.oauth_software
WHERE software_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
@property
def client_secret(self):
"""Returns the client secret of the plugin
Returns
-------
str
            The client secret of the plugin
"""
with qdb.sql_connection.TRN:
sql = """SELECT client_secret
FROM qiita.oauth_software
JOIN qiita.oauth_identifiers USING (client_id)
WHERE software_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
def register_commands(self):
"""Registers the software commands"""
url = "%s%s" % (qiita_config.base_url, qiita_config.portal_dir)
cmd = '%s; %s "%s" "register" "ignored"' % (
self.environment_script, self.start_script, url)
# it can be assumed that any command beginning with 'source'
# is calling 'source', an internal command of 'bash' and hence
# should be executed from bash, instead of sh.
# TODO: confirm that exit_code propagates from bash to sh to
# rv.
if cmd.startswith('source'):
cmd = "bash -c '%s'" % cmd
p_out, p_err, rv = qdb.processing_job._system_call(cmd)
if rv != 0:
s = "cmd: %s\nexit status: %d\n" % (cmd, rv)
s += "stdout: %s\nstderr: %s\n" % (p_out, p_err)
raise ValueError(s)
class DefaultParameters(qdb.base.QiitaObject):
"""Models a default set of parameters of a command
Attributes
----------
name
values
Methods
-------
exists
create
iter
to_str
to_file
See Also
--------
qiita_db.software.Command
"""
_table = 'default_parameter_set'
@classmethod
def exists(cls, command, **kwargs):
r"""Check if a parameter set already exists
Parameters
----------
command : qiita_db.software.Command
The command to which the parameter set belongs to
kwargs : dict of {str: str}
The parameters and their values
Returns
-------
bool
            Whether the parameter set exists for the given command
Raises
------
qiita_db.exceptions.QiitaDBError
- If there are missing parameters for the given command
- If `kwargs` contains parameters not originally defined in the
command
"""
with qdb.sql_connection.TRN:
command_params = set(command.optional_parameters)
user_params = set(kwargs)
missing_in_user = command_params - user_params
extra_in_user = user_params - command_params
if missing_in_user or extra_in_user:
raise qdb.exceptions.QiitaDBError(
"The given set of parameters do not match the ones for "
"the command.\nMissing parameters: %s\n"
"Extra parameters: %s\n"
% (', '.join(missing_in_user), ', '.join(extra_in_user)))
sql = """SELECT parameter_set
FROM qiita.default_parameter_set
WHERE command_id = %s"""
qdb.sql_connection.TRN.add(sql, [command.id])
for p_set in qdb.sql_connection.TRN.execute_fetchflatten():
if p_set == kwargs:
return True
return False
@classmethod
def create(cls, param_set_name, command, **kwargs):
r"""Create a new parameter set for the given command
Parameters
----------
param_set_name: str
The name of the new parameter set
command : qiita_db.software.Command
The command to add the new parameter set
kwargs : dict
The parameters and their values
Returns
-------
        qiita_db.software.DefaultParameters
The new parameter set instance
Raises
------
qiita_db.exceptions.QiitaDBError
- If there are missing parameters for the given command
- If there are extra parameters in `kwargs` than for the given
command
qdb.exceptions.QiitaDBDuplicateError
- If the parameter set already exists
"""
with qdb.sql_connection.TRN:
# setting to default values all parameters not in the user_params
cmd_params = command.optional_parameters
missing_in_user = {k: cmd_params[k][1]
for k in (set(cmd_params) - set(kwargs))}
if missing_in_user:
kwargs.update(missing_in_user)
# If the columns in kwargs and command do not match, cls.exists
# will raise the error for us
if cls.exists(command, **kwargs):
raise qdb.exceptions.QiitaDBDuplicateError(
cls._table, "Values: %s" % kwargs)
sql = """INSERT INTO qiita.default_parameter_set
(command_id, parameter_set_name, parameter_set)
VALUES (%s, %s, %s)
RETURNING default_parameter_set_id"""
sql_args = [command.id, param_set_name, dumps(kwargs)]
qdb.sql_connection.TRN.add(sql, sql_args)
return cls(qdb.sql_connection.TRN.execute_fetchlast())
@property
def name(self):
"""The name of the parameter set
Returns
-------
str
The name of the parameter set
"""
with qdb.sql_connection.TRN:
sql = """SELECT parameter_set_name
FROM qiita.default_parameter_set
WHERE default_parameter_set_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
@property
def values(self):
"""The values of the parameter set
Returns
-------
dict of {str: object}
Dictionary with the parameters values keyed by parameter name
"""
with qdb.sql_connection.TRN:
sql = """SELECT parameter_set
FROM qiita.default_parameter_set
WHERE default_parameter_set_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
@property
def command(self):
"""The command that this parameter set belongs to
Returns
-------
qiita_db.software.Command
The command that this parameter set belongs to
"""
with qdb.sql_connection.TRN:
sql = """SELECT command_id
FROM qiita.default_parameter_set
WHERE default_parameter_set_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return Command(qdb.sql_connection.TRN.execute_fetchlast())
class Parameters(object):
"""Represents an instance of parameters used to process an artifact
Raises
------
qiita_db.exceptions.QiitaDBOperationNotPermittedError
If trying to instantiate this class directly. In order to instantiate
this class, the classmethods `load` or `from_default_params` should
be used.
"""
def __eq__(self, other):
"""Equality based on the parameter values and the command"""
if type(self) != type(other):
return False
if self.command != other.command:
return False
if self.values != other.values:
return False
return True
@classmethod
def load(cls, command, json_str=None, values_dict=None):
"""Load the parameters set form a json str or from a dict of values
Parameters
----------
command : qiita_db.software.Command
The command to which the parameter set belongs to
json_str : str, optional
The json string encoding the parameters
values_dict : dict of {str: object}, optional
The dictionary with the parameter values
Returns
-------
qiita_db.software.Parameters
The loaded parameter set
Raises
------
        qiita_db.exceptions.QiitaDBError
            - If `json_str` and `values_dict` are both provided
            - If neither `json_str` nor `values_dict` is provided
            - If `json_str` or `values_dict` does not encode a parameter set
              of the provided command.
Notes
-----
The parameters `json_str` and `values_dict` are mutually exclusive,
only one of them should be provided at a time. However, one of them
should always be provided.
"""
if json_str is None and values_dict is None:
raise qdb.exceptions.QiitaDBError(
"Either `json_str` or `values_dict` should be provided.")
elif json_str is not None and values_dict is not None:
raise qdb.exceptions.QiitaDBError(
"Either `json_str` or `values_dict` should be provided, "
"but not both")
elif json_str is not None:
parameters = loads(json_str)
error_msg = ("The provided JSON string doesn't encode a "
"parameter set for command %s" % command.id)
else:
parameters = deepcopy(values_dict)
error_msg = ("The provided values dictionary doesn't encode a "
"parameter set for command %s" % command.id)
# setting to default values all parameters not in the user_params
cmd_params = command.optional_parameters
missing_in_user = {k: cmd_params[k][1]
for k in (set(cmd_params) - set(parameters))}
if missing_in_user:
parameters.update(missing_in_user)
with qdb.sql_connection.TRN:
cmd_reqd_params = command.required_parameters
cmd_opt_params = command.optional_parameters
values = {}
for key in cmd_reqd_params:
try:
values[key] = parameters.pop(key)
except KeyError:
raise qdb.exceptions.QiitaDBError(
"%s. Missing required parameter: %s"
% (error_msg, key))
for key in cmd_opt_params:
try:
values[key] = parameters.pop(key)
except KeyError:
raise qdb.exceptions.QiitaDBError(
"%s. Missing optional parameter: %s"
% (error_msg, key))
if parameters:
raise qdb.exceptions.QiitaDBError(
"%s. Extra parameters: %s"
% (error_msg, ', '.join(parameters.keys())))
return cls(values, command)
@classmethod
def from_default_params(cls, dflt_params, req_params, opt_params=None):
"""Creates the parameter set from a `dflt_params` set
Parameters
----------
dflt_params : qiita_db.software.DefaultParameters
            The DefaultParameters object on which this instance is based
req_params : dict of {str: object}
The required parameters values, keyed by parameter name
opt_params : dict of {str: object}, optional
The optional parameters to change from the default set, keyed by
parameter name. Default: None, use the values in `dflt_params`
Raises
------
QiitaDBError
            - If there are missing required parameters
            - If there is an unknown required or optional parameter
"""
with qdb.sql_connection.TRN:
command = dflt_params.command
cmd_req_params = command.required_parameters
cmd_opt_params = command.optional_parameters
missing_reqd = set(cmd_req_params) - set(req_params)
extra_reqd = set(req_params) - set(cmd_req_params)
if missing_reqd or extra_reqd:
raise qdb.exceptions.QiitaDBError(
"Provided required parameters not expected.\n"
"Missing required parameters: %s\n"
"Extra required parameters: %s\n"
% (', '.join(missing_reqd), ', '.join(extra_reqd)))
if opt_params:
extra_opts = set(opt_params) - set(cmd_opt_params)
if extra_opts:
raise qdb.exceptions.QiitaDBError(
"Extra optional parameters provded: %s"
% ', '.join(extra_opts))
values = dflt_params.values
values.update(req_params)
if opt_params:
values.update(opt_params)
return cls(values, command)
def __init__(self, values, command):
        # Time for some python magic! The __init__ function should not be used
        # outside of this module; users should always use one of the above
        # classmethods to instantiate the object. Let's check that this is the
        # case.
# First, we are going to get the current frame (i.e. this __init__)
# function and the caller to the __init__
current_frame = inspect.currentframe()
caller_frame = current_frame.f_back
        # The file name where a function is defined is stored in the
        # f_code.co_filename attribute, and in this case it has to be the same
        # for both of them. Also, we are restricting the name of the caller
        # to be either `load` or `from_default_params`, which are the two
# classmethods defined above
current_file = current_frame.f_code.co_filename
caller_file = caller_frame.f_code.co_filename
caller_name = caller_frame.f_code.co_name
if current_file != caller_file or \
caller_name not in ['load', 'from_default_params']:
raise qdb.exceptions.QiitaDBOperationNotPermittedError(
"qiita_db.software.Parameters can't be instantiated directly. "
"Please use one of the classmethods: `load` or "
"`from_default_params`")
self._values = values
self._command = command
@property
def command(self):
"""The command to which this parameter set belongs to
Returns
-------
qiita_db.software.Command
The command to which this parameter set belongs to
"""
return self._command
@property
def values(self):
"""The values of the parameters
Returns
-------
dict of {str: object}
The parameter values keyed by parameter name
"""
return self._values
def dump(self):
"""Return the values in the parameter as JSON
Returns
-------
str
The parameter values as a JSON string
"""
return dumps(self._values, sort_keys=True)
class DefaultWorkflowNode(qdb.base.QiitaObject):
r"""Represents a node in a default software workflow
Attributes
----------
command
parameters
"""
_table = "default_workflow_node"
@property
def command(self):
"""The command to execute in this node
Returns
-------
qiita_db.software.Command
"""
with qdb.sql_connection.TRN:
sql = """SELECT command_id
FROM qiita.default_workflow_node
WHERE default_workflow_node_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
cmd_id = qdb.sql_connection.TRN.execute_fetchlast()
return qdb.software.Command(cmd_id)
@property
def parameters(self):
"""The default parameter set to use in this node
Returns
-------
qiita_db.software.DefaultParameters
"""
with qdb.sql_connection.TRN:
sql = """SELECT default_parameter_set_id
FROM qiita.default_workflow_node
WHERE default_workflow_node_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
params_id = qdb.sql_connection.TRN.execute_fetchlast()
return qdb.software.DefaultParameters(params_id)
class DefaultWorkflowEdge(qdb.base.QiitaObject):
r"""Represents an edge in a default software workflow
Attributes
----------
connections
"""
_table = "default_workflow_edge"
@property
def connections(self):
"""Retrieve how the commands are connected using this edge
Returns
-------
list of [str, str]
The list of pairs of output parameter name and input parameter name
used to connect the output of the source command to the input of
the destination command.
"""
with qdb.sql_connection.TRN:
sql = """SELECT name, parameter_name
FROM qiita.default_workflow_edge_connections c
JOIN qiita.command_output o
ON c.parent_output_id = o.command_output_id
JOIN qiita.command_parameter p
ON c.child_input_id = p.command_parameter_id
WHERE default_workflow_edge_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchindex()
class DefaultWorkflow(qdb.base.QiitaObject):
r"""Represents a software's default workflow
A default workflow is defined by a Directed Acyclic Graph (DAG) in which
the nodes represent the commands to be executed with the default parameter
set to use and the edges represent the command precedence, including
which outputs of the source command are provided as input to the
destination command.
"""
_table = "default_workflow"
@property
def name(self):
with qdb.sql_connection.TRN:
sql = """SELECT name
FROM qiita.default_workflow
WHERE default_workflow_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchlast()
@property
def graph(self):
"""Returns the graph that represents the workflow
Returns
-------
networkx.DiGraph
The graph representing the default workflow.
"""
g = nx.DiGraph()
with qdb.sql_connection.TRN:
# Retrieve all graph workflow nodes
sql = """SELECT default_workflow_node_id
FROM qiita.default_workflow_node
WHERE default_workflow_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
db_nodes = qdb.sql_connection.TRN.execute_fetchflatten()
nodes = {n_id: DefaultWorkflowNode(n_id) for n_id in db_nodes}
# Retrieve all graph edges
sql = """SELECT DISTINCT default_workflow_edge_id, parent_id,
child_id
FROM qiita.default_workflow_edge e
JOIN qiita.default_workflow_node n
ON e.parent_id = n.default_workflow_node_id
OR e.child_id = n.default_workflow_node_id
WHERE default_workflow_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
db_edges = qdb.sql_connection.TRN.execute_fetchindex()
for edge_id, p_id, c_id in db_edges:
e = DefaultWorkflowEdge(edge_id)
g.add_edge(nodes[p_id], nodes[c_id], connections=e)
return g
|
ElDeveloper/qiita
|
qiita_db/software.py
|
Python
|
bsd-3-clause
| 70,632
|
[
"VisIt"
] |
4f0b6fad9e4b654aba76e03ff754244dddac44141577730a65b03895fe36821d
|
"""
-----------
Setup stage
-----------
*** Identify residues in the molecule - playground/rotamer/identify_residue.py
*** Find dihedrals and store atom identities. - playground/rotamer/find_dihedrals.py
* Choose backbone-dependent, local environment etc. to determine what sort of library data to generate.
* Read in relevant part of rotamer library data: store dictionary for each residue's rotamer states.
----------
Move stage
----------
*** Read in coordinates from GMIN - playground/amber/coords_io.py
* Select a set of residues to move.
* Measure rotamer state of residues and their neighbours (if appropriate).
* Select a new conformation from the rotamer library, or a random configuration, with probability given by
Good-Turing frequency estimation.
*** Write coordinates for GMIN - playground/amber/coords_io.py
------
Future
------
- Decomposed energies for dihedrals wrt other sidechain dihedrals.
"""
|
khs26/rotamer_library
|
rotamer/rotamer_move.py
|
Python
|
mit
| 924
|
[
"Amber"
] |
2303992b5c5ba41f084e8290b2b92c0f75a70b6abd64988bff417ef3b10c8e67
|
# Hop --- a framework to analyze solvation dynamics from MD simulations
# Copyright (c) 2007-2010 Oliver Beckstein <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Generating densities from trajectories --- :mod:`hop.density`
=============================================================
As an input a trajectory is required that
1. Has been centered on the protein of interest.
2. Has all molecules made whole that have been broken across periodic
boundaries.
3. Has the solvent molecules remapped so that they are closest to the
solute (this is important when using funky unit cells such as
dodecahedra or truncated octahedra).
Classes and functions
---------------------
"""
from __future__ import absolute_import
import sys
import os, os.path
import errno
import cPickle
import warnings
import numpy
import MDAnalysis
import MDAnalysis.analysis.density
from MDAnalysis.analysis.density import notwithin_coordinates_factory
from . import constants
from .exceptions import MissingDataError, InconsistentDataWarning
from .utilities import msg,set_verbosity,get_verbosity, flatten, sorted, \
DefaultDict, fixedwidth_bins, iterable, asiterable, CustomProgressMeter
from .sitemap import Density
import logging
logger = logging.getLogger("MDAnalysis.app.hop.density")
class DensityCollector(object):
"""Collect subsequent coordinate frames to build up a :class:`Density`."""
use_kdtree = True
def __init__(self, name, universe, **kwargs):
self.name = name
try:
universe.select_atoms('all')
universe.trajectory.ts
except AttributeError:
errmsg = "DensityCollector: The universe must be a proper MDAnalysis.Universe instance."
logger.fatal(errmsg)
raise TypeError(errmsg)
self.universe = u = universe
self.delta = kwargs.pop('delta', 1.0)
self.atomselection = kwargs.pop('atomselection', 'name OH2')
self.cutoff = kwargs.pop('cutoff', 3.5)
self.soluteselection = kwargs.pop('soluteselection', None) #'protein and not name H*')
self.padding = kwargs.pop('padding', 2.0)
self.metadata = kwargs.pop('metadata', {})
self.parameters = kwargs.pop('parameters',{}) # for advanced fiddling...
# define the self.current_coordinates() function ... monkey patching!
if self.cutoff > 0 and self.soluteselection is not None:
# special fast selection for '<atomsel> not within <cutoff> of <solutesel>'
notwithin_coordinates = notwithin_coordinates_factory(u,self.atomselection,self.soluteselection,
self.cutoff,use_kdtree=self.use_kdtree)
self.current_coordinates = notwithin_coordinates
self.mode = "BULK"
else:
group = u.select_atoms(self.atomselection)
self.current_coordinates = lambda : group.positions
self.mode = "SOLVENT"
coord = self.current_coordinates()
logger.info("%-10s: Selected %d atoms out of %d atoms (%s) from %d total.",
self.name, coord.shape[0],len(u.select_atoms(self.atomselection)),
self.atomselection,len(u.atoms))
self.__complete = False
def init_histogram(self, **kwargs):
# needs to be done separately because we might need additional information
# after init (at least I cannot think of a better way...)
smin = kwargs.pop("smin", self.min_coordinates(padding=self.padding))
smax = kwargs.pop("smax", self.max_coordinates(padding=self.padding))
BINS = fixedwidth_bins(self.delta, smin, smax)
self.arange = zip(BINS['min'],BINS['max'])
self.bins = BINS['Nbins']
# create empty grid with the right dimensions (and get the edges)
grid,edges = numpy.histogramdd(numpy.zeros((1,3)), bins=self.bins,
range=self.arange, normed=False)
grid *= 0.0
h = grid.copy()
self.grid = grid
self.edges = edges
self._h = h # temporary for accumulation
def min_coordinates(self, **kwargs):
return numpy.min(self.current_coordinates(), axis=0) - kwargs.pop('padding', self.padding)
def max_coordinates(self, **kwargs):
return numpy.max(self.current_coordinates(), axis=0) + kwargs.pop('padding', self.padding)
def collect(self):
assert hasattr(self, 'grid'), "init_histogram() must be called first"
coord = self.current_coordinates()
if len(coord) > 0:
self._h[:],self.edges[:] = numpy.histogramdd(coord, bins=self.bins, range=self.arange, normed=False)
self.grid += self._h # accumulate average histogram
return len(coord)
def finish(self, n_frames):
if self.isComplete():
return
self.grid /= float(n_frames)
self.__complete = True
def Density(self):
if not hasattr(self, 'grid'):
errmsg = "DensityCollector.Density(): No data for density available. Run collect() first."
logger.error(errmsg)
raise MissingDataError(errmsg)
u = self.universe
metadata = self.metadata
metadata['collector'] = self.name
metadata['collector_mode'] = self.mode
metadata['psf'] = u.filename # named psf for historical reasons: any topol
metadata['dcd'] = u.trajectory.filename # named dcd for historical reasons: any traj
metadata['atomselection'] = self.atomselection
metadata['n_frames'] = u.trajectory.n_frames
metadata['dt'] = u.trajectory.dt # in ps for default MDAnalysis
metadata['totaltime'] = u.trajectory.totaltime
if self.mode == 'BULK':
metadata['soluteselection'] = self.soluteselection
metadata['cutoff'] = self.cutoff # in Angstrom
parameters = self.parameters
parameters['isDensity'] = False # must override
# Density automatically converts histogram to density for isDensity=False
g = Density(grid=self.grid, edges=self.edges,
unit=dict(length=MDAnalysis.core.flags['length_unit']),
parameters=parameters, metadata=metadata)
logger.info("%-10s: Histogram completed (initial density in %s**-3)",
self.name, MDAnalysis.core.flags['length_unit'])
return g
def isComplete(self):
return self.__complete
def __repr__(self):
if self.mode == "BULK":
return "<DensityCollector %(name)r, delta=%(delta).1f A: "\
"'%(atomselection)s and not around %(cutoff).1f (%(soluteselection)s)'>" % vars(self)
else:
return "<DensityCollector %(name)r, delta=%(delta).1f A: %(atomselection)r>" % vars(self)
class DensityCreator(object):
modes = ("all", "bulk", "solvent")
defaults = {'cutoff': 3.5,
'soluteselection': "protein and not name H*",
'delta':1.0, 'atomselection': "name OH2",
'padding': 2.0,
}
def __init__(self, *args, **kwargs):
"""Create a density grid from a trajectory.
density_from_trajectory(PSF, DCD, delta=1.0, atomselection='name OH2', ...) --> density
or
density_from_trajectory(PDB, XTC, delta=1.0, atomselection='name OH2', ...) --> density
:Arguments:
psf/pdb/gro
topology file
dcd/xtc/trr/pdb
trajectory; if reading a single PDB file it is sufficient to just provide it
once as a single argument
:Keywords:
mode
            'solvent', 'bulk' or 'all' ('all' does both 'solvent' and 'bulk' at the
            same time and thus :meth:`DensityCreator.Density` returns a list of
densities; this saves time!) ['all']
atomselection
selection string (MDAnalysis syntax) for the species to be analyzed
["name OH2"]
delta
approximate bin size for the density grid in Angstroem (same in x,y,z)
(It is slightly adjusted when the box length is not an integer multiple
of delta.) [1.0]
metadata
dictionary of additional data to be saved with the object
padding
increase histogram dimensions by padding (on top of initial box size)
in Angstroem [2.0]
soluteselection
MDAnalysis selection for the solute, e.g. "protein" [``None``]
cutoff
With *cutoff*, select '<atomsel> NOT WITHIN <cutoff> OF <soluteselection>'
(Special routines that are faster than the standard AROUND selection) [0]
verbosity: int
level of chattiness; 0 is silent, 3 is verbose [3]
:Returns: :class:`hop.sitemap.Density`
:TODO:
* Should be able to also set skip and start/stop for data collection.
.. Note::
* In order to calculate the bulk density, use
atomselection='name OH2',soluteselection='protein and not name H*',cutoff=3.5
This will select water oxygens not within 3.5 A of the protein heavy atoms.
Alternatively, use the VMD-based :func:`density_from_volmap` function.
          * The histogramming grid is determined by the initial frame's min and max.
* metadata will be populated with psf, dcd, and a few other items.
This allows more compact downstream processing.
"""
_kwargs = self.defaults.copy()
_kwargs.update(kwargs)
kwargs = _kwargs
# workaround for python 2.5 *args,**kwargs only allowed:
universe_kwargs = {'permissive':kwargs.pop('permissive',False)}
self.universe = MDAnalysis.as_Universe(*args, **universe_kwargs)
self.mode = kwargs.pop("mode", "all") # 'all' runs modes[1:]
if not self.mode in self.modes:
errmsg = "DensityCreator: mode must be one of %r, not %r" % (self.modes, self.mode)
logger.fatal(errmsg)
raise ValueError(errmsg)
if self.mode == "all":
modes = self.modes[1:]
else:
modes = [self.mode]
self.collectors = []
min_coords = []
max_coords = []
for mode in modes:
modeargs = kwargs.copy()
if mode == "solvent":
modeargs['soluteselection'] = None
modeargs['cutoff'] = 0
c = DensityCollector(mode, self.universe, **modeargs)
self.collectors.append(c)
min_coords.append(c.min_coordinates()) # with default padding from modeargs
max_coords.append(c.max_coordinates())
# determine maximum bounding box from initial positions of solvent
# (add generous padding... probably more than my default 2 A)
smin = numpy.sort(min_coords, axis=0)[0] # the three smallest values
smax = numpy.sort(max_coords, axis=0)[-1] # the three largest values
for c in self.collectors:
c.init_histogram(smin=smin, smax=smax) # also guarantees compatible grid
self.densities = {} # densities will be stored with mode as key
def create(self):
"""Loop through trajectory and build all densities.
.. SeeAlso::
:class:`DensityCollector`
"""
u = self.universe
pm = CustomProgressMeter(u.trajectory.n_frames, interval=10,
format="Histogramming %(other)s atoms in frame %(step)5d/%(numsteps)d [%(percentage)5.1f%%]\r")
for ts in u.trajectory:
status = ""
for c in self.collectors:
natoms = c.collect()
status += ("%s=%d " % (c.name, natoms))
pm.echo(ts.frame, status)
self.densities = {}
for c in self.collectors:
c.finish(u.trajectory.n_frames) # adjust if we implement trajectory slicing
self.densities[c.name] = c.Density()
# should save precious files!!!
return self.densities
def DensityWithBulk(self, density_unit='water', solvent_threshold=numpy.e, bulk_threshold=0.6):
"""Return a solvent density with bulk site inserted.
DensityWithBulk(self, solvent_threshold=2.72, bulk_threshold=0.6) --> Density
Only works if two densities were generated that are named 'solvent' and
'bulk' (this is the default for the *mode* = "all" keyword for
:class:`DensityCreator`.)
:Arguments:
*density_unit*
Measure density in multiples of this unit; possible values are
'Molar', 'nm^{-3}', 'Angstrom^{-3}', or the density at standard conditions
of 'water' (experimental value), 'TIP3P', 'TIP4P', 'SPC' ['water']
*solvent_threshold*
Hydration sites are considered as regions of density > this
threshold; it is assumed to be given in the *density_unit*.
*bulk_threshold*
The bulk site is the largest region with a density >
*bulk_threshold*; in order to avoid overlap with the hydration
sites, it is necessary to use a special selection for the solvent
that excludes it from the vicinity of the solute.
        .. SeeAlso:: This method uses :meth:`hop.sitemap.Density.map_sites` and
           :meth:`hop.sitemap.Density.site_insert_bulk`.
"""
if len(self.densities) != 2:
errmsg = "DensityCreator.DensityWithBulk(): Need exactly two densities ('solvent' and 'bulk')."
logger.fatal(errmsg)
raise MissingDataError(errmsg)
try:
solvent = self.densities['solvent']
bulk = self.densities['bulk']
except KeyError:
errmsg = "Need a 'solvent' and a 'bulk' density in %s.densities" % self.__class__.__name__
logger.fatal(errmsg)
raise MissingDataError(errmsg)
logger.debug("DensityWithBulk: solvent_threshold = %r", solvent_threshold)
logger.debug("DensityWithBulk: bulk_threshold = %r", bulk_threshold)
solvent.convert_density(density_unit)
solvent.map_sites(solvent_threshold)
bulk.convert_density(density_unit)
bulk.map_sites(bulk_threshold)
# ye olde bulk-hack....
solvent.site_insert_bulk(bulk)
# should really save
# solvent.save()
return solvent
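# Editor's illustrative sketch (not part of the original module): the canonical
# 'all'-mode workflow for DensityCreator/DensityWithBulk described in the
# docstrings above. The topology/trajectory filenames are hypothetical.
def _density_creator_example_sketch():
    creator = DensityCreator("system.psf", "system.dcd", mode="all",
                             atomselection="name OH2",
                             soluteselection="protein and not name H*",
                             cutoff=3.5)
    creator.create()  # histograms 'solvent' and 'bulk' in a single pass
    # insert the bulk site into the solvent density (the "bulk-hack")
    return creator.DensityWithBulk(density_unit="water",
                                   solvent_threshold=numpy.e,
                                   bulk_threshold=0.6)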
def density_from_Universe(*args, **kwargs):
"""Create a :class:`hop.sitemap.Density from a :class:`Universe`.
.. SeeAlso::
:func:`MDAnalysis.analysis.density.density_from_Universe` for
all parameters and :func:`density_from_trajectory` for a
convenience wrapper.
"""
D = MDAnalysis.analysis.density.density_from_Universe(*args, **kwargs)
return Density(grid=D.grid, edges=D.edges, units=D.units,
parameters=D.parameters, metadata=D.metadata)
def density_from_trajectory(*args, **kwargs):
"""Create a density grid from a trajectory.
density_from_trajectory(PSF, DCD, delta=1.0, atomselection='name OH2', ...) --> density
or
density_from_trajectory(PDB, XTC, delta=1.0, atomselection='name OH2', ...) --> density
:Arguments:
psf/pdb/gro
topology file
dcd/xtc/trr/pdb
trajectory; if reading a single PDB file it is sufficient to just provide it
once as a single argument
:Keywords:
atomselection
selection string (MDAnalysis syntax) for the species to be analyzed
["name OH2"]
delta
approximate bin size for the density grid in Angstroem (same in x,y,z)
(It is slightly adjusted when the box length is not an integer multiple
of delta.) [1.0]
metadata
dictionary of additional data to be saved with the object
padding
increase histogram dimensions by padding (on top of initial box size)
in Angstroem [2.0]
soluteselection
MDAnalysis selection for the solute, e.g. "protein" [``None``]
cutoff
With *cutoff*, select '<atomsel> NOT WITHIN <cutoff> OF <soluteselection>'
(Special routines that are faster than the standard AROUND selection) [0]
verbosity: int
level of chattiness; 0 is silent, 3 is verbose [3]
:Returns: :class:`hop.sitemap.Density`
:TODO:
* Should be able to also set skip and start/stop for data collection.
.. Note::
* In order to calculate the bulk density, use
atomselection='name OH2',soluteselection='protein and not name H*',cutoff=3.5
This will select water oxygens not within 3.5 A of the protein heavy atoms.
Alternatively, use the VMD-based :func:`density_from_volmap` function.
* The histogramming grid is determined by the initial frames min and max.
* metadata will be populated with psf, dcd, and a few other items.
This allows more compact downstream processing.
.. SeeAlso:: docs for
:func:`MDAnalysis.analysis.density.density_from_Universe`
(defaults for kwargs are defined there).
"""
return density_from_Universe(MDAnalysis.Universe(*args),**kwargs)
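# Editor's illustrative sketch (not part of the original module): build a bulk
# water density by excluding water near the protein, as suggested in the Note
# above. Filenames and the 3.5 A cutoff are hypothetical.
def _density_from_trajectory_example_sketch():
    return density_from_trajectory(
        "system.psf", "system.dcd", delta=1.0, atomselection="name OH2",
        soluteselection="protein and not name H*", cutoff=3.5)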
class PDBDensity(Density):
__doc__ = """Density with additional information about original crystal structure.
This is simply the Density class (see below) enhanced by the add_xray2psf(),
W(), and Wequiv() methods.
Note that later analysis often ignores the site with the bulknumber by default
so one should (after computing a site map) also insert an empty bulk site:
# canonical way to build a PDBDensity
    # (builds the site map at threshold and inserts a pseudo bulk site)
xray = BfactorDensityCreator(...).PDBDensity(threshold)
# rebuild site map
xray.map_sites(threshold) # map sites at density cutoff threshold
xray.site_insert_nobulk() # insert 'fake' bulk site at position SITELABEL['bulk']
# find X-ray waters that correspond to a site in another density Y:
# (1) build the list of equivalence sites, using the x-ray density as reference
Y.find_equivalence_sites(xray) # also updates equiv-sites in xray!
# (2) look at the matches in xray
xray.Wequiv() TODO: not working yet
""" + 60*"-" + "\nDensity Class\n\n" + Density.__doc__
# will probably break under multiple inheritance but I haven't figured out how to use super here
_saved_attributes = Density._saved_attributes + ['_xray2psf', '_psf2xray']
def add_xray2psf(self,pdbfile,regex=r'\s*W\s*|HOH|WAT|.*TIP.*|.*SPC.*'):
"""Add translation table between sequential psf numbering and original pdb numbering for water.
D.add_xray2psf(pdbfilename)
The original pdb is read and all water molecules are sequentially mapped
to the water molecules in the psf (without any checks). The pdb is read
and analyzed using Bio.PDB.
pdbfilename Original crystallographic pdb file
regex extended regular expression to detect water residues
"""
import re
import Bio.PDB
water = re.compile(regex)
parser = Bio.PDB.PDBParser()
m = parser.get_structure('0UNK',pdbfile)
s = m[0]
# number waters sequentially and store the pdb resid
self._xray2psf = dict([(resid_xray,resid_psf+1) for resid_psf,resid_xray in
enumerate([r.id[1] for r in s.get_residues() if water.match(r.resname)])
])
self._psf2xray = dict([(resid_psf,resid_xray) for resid_xray,resid_psf in self._xray2psf.items()])
def _check_site_resid_match(self):
return len(self._xray2psf) == len(self.site_labels('sites',exclude='equivalencesites'))
def W(self,N,returntype="auto",format=False):
"""Returns the resid of water N.
If returntype == 'psf' then N is interpreted as the resid in the
x-ray crystal structure (or original pdb file) and a resid N' in the
psf is returned.
If returntype == 'xray' then N is a resid in the psf and the
corresponding crystal structure water is returned. This is
useful to label water molecules by their published identifier,
eg 'W128'.
If the returntype is set to 'auto' and N starts with a W (eg
'W128') then it is assumed to be a crystal water and the
returntype is automatically set to psf, otherwise it acts like
'xray'.
:Arguments:
N resid of molecule (can be an iterable)
returntype 'auto' | 'psf' | 'xray'
          format False: return an integer number
True: default string (either "WN'" for x-ray or "#N'" for psf)
python format string: if the string contains %(resid)d then the string
will be used as a format, otherwise the bare number
is returned without raising an error
"""
if returntype not in ("auto","psf","xray"):
raise ValueError("returntype must be one of 'psf' or 'xray'")
result = numpy.array([self._getN(_N,returntype=returntype,format=format) for _N in asiterable(N)])
if not iterable(N):
return result[0]
else:
return result
def Wequiv(self,format=True):
"""Return a list of the PDB resids of the equivalent sites.
array = Wequiv(format=True)
format True: array of identifiers 'Wnn'
False: array of integers
string: python format string; %(resid)d is replaced
"""
return self.W(self.site_labels('subsites'),format=format)
def _getN(self,N,returntype='auto',format=False):
        if returntype == 'auto':
_Nstring = str(N).upper()
returntype = "xray"
if _Nstring.startswith('W'):
# automagically do the right thing
returntype = "psf"
N = int(_Nstring[1:])
elif _Nstring.startswith('#'):
N = int(_Nstring[1:])
if returntype == "psf":
return self._Wpsf(N,format=format)
elif returntype == "xray":
return self._Wxray(N,format=format)
def _Wpsf(self,resid_xray,format=False):
"""Returns resid in psf of crystallographic water W(resid_xray)."""
try:
resid = self._xray2psf[resid_xray]
except KeyError:
raise ValueError("No residue number %(resid_xray)d in x-ray structure." % vars())
except AttributeError:
raise MissingDataError("Add the xray -> psf translation table with add_xray2psf() first.")
return self._Wformatter(resid,format=format,typechar='#')
def _Wxray(self,resid_psf,format=False):
"""Returns the crystal structure resid of water resid_psf in the psf."""
try:
resid = self._psf2xray[resid_psf]
except KeyError:
raise ValueError("No residue number %(resid_psf)d in psf." % vars())
except AttributeError:
raise MissingDataError("Add the psf -> x-ray translation table with add_xray2psf() first.")
return self._Wformatter(resid,format=format,typechar='W')
def _Wformatter(self,resid,format=False,typechar=None):
# no error checks, only call from wrappers
if format is True and typechar is not None:
default_format = str(typechar)+'%(resid)d'
return default_format % vars()
elif str(format).find('%(resid)') >= 0:
return str(format) % vars()
else:
return resid
def site_insert_nobulk(self):
"""Insert an empty bulk site for cases when this is convenient."""
class Nobulk():
def __init__(self,dens):
# copy the attributes that are checked in Density.site_insert_bulk()
self.map = numpy.empty_like(dens.map)
self.unit = dens.unit
self.P = {'threshold': None}
# minimum empty sites 'list'
self.sites = {SITELABEL['bulk']: ()} # normally a list but use a dict :-)
self.site_insert_bulk(Nobulk(self))
def equivalence_sites(self,format=True):
"""All equivalence sites (if defined) together with crystallographic water labels.
recarray <-- equivalence_sites(self,format=True)
The numpy.recarray has columns
equivalence_label the integer label of the equivalence site
equivalence_name the name, a string
xray the identifier of the X-ray water
equivalence_label and equivalence_name are identical between the densities from
which the equivalence sites were computed. The xray identifier is specific for the
structure; by default it is a string such as 'W135'.
format True: print 'W<N>' identifier
False: integer <N>
(see W() for more possibilities)
BUG: THIS IS NOT WORKING AS THOUGHT BECAUSE THERE IS NO 1-1
MAPPING BETWEEN WATER MOLECULES AND SITES AND BECAUSE SITES
ARE NOT NUMBERED IN THE SAME ORDER AS THE WATER MOLECULES
TODO: The proper way to do this is to find all water molecules
within a cutoff of each grid cell that belongs to a site and
then store all the waters as the string name of the site.
"""
records = []
equiv = self.subsites_of(self.site_labels('equivalencesites'))
for equivlabel,subsites in equiv.items():
records.append((equivlabel, self._equivlabel2equivname(equivlabel)[0],
self.W(self.site2resid(subsites.label[0]), returntype='xray', format=format)))
return numpy.rec.fromrecords(records,names="equivalence_label,equivalence_name,xray")
def site2resid(self,sitelabel):
"""Returns the resid of the particle that provided the density for the site.
"""
raise NotImplementedError('site2resid mapping is not working yet')
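# Editor's illustrative sketch (not part of the original module): translate
# between psf water resids and published crystal-water labels with a
# PDBDensity instance. The pdb filename and the 'W128' label are hypothetical.
def _pdbdensity_water_label_example_sketch(xray):
    xray.add_xray2psf("crystal.pdb")           # build the xray <-> psf tables
    resid_psf = xray.W("W128")                 # crystal label -> psf resid
    label = xray.W(resid_psf, returntype="xray", format=True)  # back to 'W128'
    return resid_psf, label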
def print_combined_equivalence_sites(target,reference):
"""Tabulate equivalence sites of target against the reference.
BUG: THIS IS NOT WORKING (because the assignment sites <--> waters
is broken)
"""
raise NotImplementedError('THIS IS NOT WORKING (because the assignment sites <--> waters is broken')
eqs_r = reference.equivalence_sites()
eqs_t = target.equivalence_sites()
eqs_r.equivalence_name.sort()
sorted_t = eqs_t[eqs_t.equivalence_label == eqs_r.equivalence_label]
_header = "%3s %4s %-5s %-6s" % ('i','name', 'ref', 'target')
    def ___(): print('-' * len(_header))
___()
    print(_header)
___()
for (l,n,x1),(l,n,x2) in zip(eqs_r,sorted_t):
print "%3d %4s %-5s %-5s" % (l,n,x1,x2)
___()
class BfactorDensityCreator(MDAnalysis.analysis.density.BfactorDensityCreator):
"""Create a density grid from a pdb file using MDAnalysis.
dens = BfactorDensityCreator(psf,pdb,...).PDBDensity()
The main purpose of this function is to convert crystal waters in
an X-ray structure into a density so that one can compare the
experimental density with the one from molecular dynamics
trajectories. Because a pdb is a single snapshot, the density is
estimated by placing Gaussians of width sigma at the position of
all selected atoms.
Sigma can be fixed or taken from the B-factor field, in which case
sigma is taken as sqrt(3.*B/8.)/pi.
TODO:
* Make Gaussian convolution more efficient (at least for same
sigma) because right now it is VERY slow (which may be
acceptable if one only runs this once)
* Using a temporary Creator class with the PDBDensity() helper
method is clumsy (but was chosen as to keep the PDBDensity class
clean and __init__ compatible with Density).
.. SeeAlso::
* :mod:`MDAnalysis.analysis.density`
* :class:`PDBDensity`
"""
def PDBDensity(self, threshold=None):
"""Returns a PDBDensity object.
        The PDBDensity is a Density with an xray2psf translation table;
        it also has an empty bulk site inserted (so that any further
        analysis which assumes that site number 1 is the bulk does not
        discard a valid site).
threshold Use the given threshold to generate the graph; the threshold
is assumed to be in the same units as the density.
None: choose defaults (1.0 if bfactors were used, 1.3 otherwise)
"""
d = PDBDensity(grid=self.g,edges=self.edges,unit=dict(length='Angstrom'),
parameters=dict(isDensity=False),metadata=self.metadata)
d.add_xray2psf(d.metadata['pdb']) # pdb filename is recorded in metadata
d.convert_density('water')
if threshold is None:
if self.metadata['sigma'] is None:
threshold = 1.0
else:
threshold = 1.3
d.map_sites(threshold)
d.site_insert_nobulk() # fake bulk site
if not d._check_site_resid_match():
wmsg = "BfactorDensityCreator.PDBDensity(): "\
"There are different numbers of water molecules (%d) and sites (%d). " \
"Site <-> water matching will not work." % \
(len(d._xray2psf), len(d.site_labels('sites', exclude='equivalencesites')))
logger.warn(wmsg)
warnings.warn(wmsg, category=InconsistentDataWarning)
return d
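# Editor's illustrative sketch (not part of the original module): convert the
# crystal waters of an X-ray structure into a PDBDensity for comparison with an
# MD-derived density, following the call pattern in the class docstring above.
# The filenames and the threshold are hypothetical.
def _bfactor_density_example_sketch():
    creator = BfactorDensityCreator("system.psf", "crystal.pdb")
    xray = creator.PDBDensity(threshold=1.0)
    return xray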
|
Becksteinlab/hop
|
hop/density.py
|
Python
|
gpl-3.0
| 30,548
|
[
"CRYSTAL",
"Gaussian",
"MDAnalysis",
"VMD"
] |
34cd57610e0c1a14acd5f7d901cf94a7aba1de39c468c2439917948696fc03b5
|
##
# Copyright 2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing Qt, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import os
from distutils.version import LooseVersion
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import apply_regex_substitutions
from easybuild.tools.run import run_cmd_qa
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_Qt(ConfigureMake):
"""
Support for building and installing Qt.
"""
@staticmethod
def extra_options():
extra_vars = {
'platform': [None, "Target platform to build for (e.g. linux-g++-64, linux-icc-64)", CUSTOM],
}
extra_vars = ConfigureMake.extra_options(extra_vars)
        # allowing prefix_opt to be specified doesn't make sense for Qt, since -prefix is hardcoded in configure_step
del extra_vars['prefix_opt']
return extra_vars
def configure_step(self):
"""Configure Qt using interactive `configure` script."""
self.cfg.update('configopts', '-release')
platform = None
comp_fam = self.toolchain.comp_family()
if self.cfg['platform']:
platform = self.cfg['platform']
# if no platform is specified, try to derive it based on compiler in toolchain
elif comp_fam in [toolchain.GCC]: #@UndefinedVariable
platform = 'linux-g++-64'
elif comp_fam in [toolchain.INTELCOMP]: #@UndefinedVariable
if LooseVersion(self.version) >= LooseVersion('4'):
platform = 'linux-icc-64'
else:
platform = 'linux-icc'
# fix -fPIC flag (-KPIC is not correct for recent Intel compilers)
qmake_conf = os.path.join('mkspecs', platform, 'qmake.conf')
apply_regex_substitutions(qmake_conf, [('-KPIC', '-fPIC')])
if platform:
self.cfg.update('configopts', "-platform %s" % platform)
else:
raise EasyBuildError("Don't know which platform to set based on compiler family.")
cmd = "%s ./configure -prefix %s %s" % (self.cfg['preconfigopts'], self.installdir, self.cfg['configopts'])
qa = {
"Type 'o' if you want to use the Open Source Edition.": 'o',
"Do you accept the terms of either license?": 'yes',
"Which edition of Qt do you want to use?": 'o',
}
no_qa = [
"for .*pro",
r"%s.*" % os.getenv('CXX', '').replace('+', '\\+'), # need to escape + in 'g++'
"Reading .*",
"WARNING .*",
"Project MESSAGE:.*",
"rm -f .*",
'Creating qmake...',
'Checking for .*...',
]
run_cmd_qa(cmd, qa, no_qa=no_qa, log_all=True, simple=True, maxhits=120)
def build_step(self):
"""Set $LD_LIBRARY_PATH before calling make, to ensure that all required libraries are found during linking."""
# cfr. https://elist.ornl.gov/pipermail/visit-developers/2011-September/010063.html
if LooseVersion(self.version) >= LooseVersion('5.6'):
libdirs = ['qtbase', 'qtdeclarative']
else:
libdirs = ['']
libdirs = [os.path.join(self.cfg['start_dir'], d, 'lib') for d in libdirs]
self.cfg.update('prebuildopts', 'LD_LIBRARY_PATH=%s' % os.pathsep.join(libdirs + ['$LD_LIBRARY_PATH']))
super(EB_Qt, self).build_step()
def sanity_check_step(self):
"""Custom sanity check for Qt."""
shlib_ext = get_shared_lib_ext()
if LooseVersion(self.version) >= LooseVersion('4'):
libversion = ''
if LooseVersion(self.version) >= LooseVersion('5'):
libversion = self.version.split('.')[0]
libfile = os.path.join('lib', 'libQt%sCore.%s' % (libversion, shlib_ext))
else:
libfile = os.path.join('lib', 'libqt.%s' % shlib_ext)
custom_paths = {
'files': [libfile],
'dirs': ['bin', 'include', 'plugins'],
}
super(EB_Qt, self).sanity_check_step(custom_paths=custom_paths)
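# Editor's illustrative sketch (not part of the original easyblock): a minimal
# easyconfig fragment exercising the custom 'platform' parameter defined in
# extra_options() above; all values are hypothetical. Omitting 'platform' lets
# configure_step() derive it from the toolchain compiler instead.
#
# name = 'Qt'
# version = '4.8.7'
# toolchain = {'name': 'GCC', 'version': '4.9.2'}
# sources = ['qt-everywhere-opensource-src-%(version)s.tar.gz']
# platform = 'linux-g++-64'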
|
bartoldeman/easybuild-easyblocks
|
easybuild/easyblocks/q/qt.py
|
Python
|
gpl-2.0
| 5,353
|
[
"VisIt"
] |
31e324b1203b05927f2a614fde0e48327eae1bca4f4b49637a9956a0fc33bee9
|
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utilities."""
# ==============================================================================
# Note: Avoid adding dependencies to py_utils beyond standard python packages
# and tensorflow.
# ==============================================================================
import collections as py_collections
import contextlib
import functools
import hashlib
import inspect
import math
import numbers
import os
import pkgutil
import re
import threading
import traceback
import typing
from typing import Optional, Union
import lingvo.compat as tf
from lingvo.core import cluster_factory
from lingvo.core import gshard_utils
from lingvo.core import hyperparams
from lingvo.core import nested_map
from lingvo.core import ops
from lingvo.core import py_utils_flags
from lingvo.core import retry
from lingvo.core import symbolic
from lingvo.core import thread_local_utils
from lingvo.core import tshape
import numpy as np
import six
# pylint: disable=g-direct-tensorflow-import
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import function
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.tf2 import enabled as tf2_enabled
from tensorflow.python.tpu import topology as tf_topology
from tensorflow.python.tpu import tpu_function
from tensorflow.python.util import deprecation
# pylint: enable=g-direct-tensorflow-import
FLAGS = tf.flags.FLAGS
# pylint: disable=protected-access
_FromGlobal = py_utils_flags._FromGlobal
# pylint: enable=protected-access
use_xla = py_utils_flags.use_xla
use_tpu = py_utils_flags.use_tpu
testonly_skip_norm_layers = py_utils_flags.testonly_skip_norm_layers
tpu_compat = py_utils_flags.tpu_compat
use_stateless_vars_init = py_utils_flags.use_stateless_vars_init
ENQUEUE_OPS = '__lingvo_enqueue_ops'
# pylint: disable=protected-access
deprecation._PRINT_DEPRECATION_WARNINGS = False
# pylint: enable=protected-access
ThreadLocalStack = thread_local_utils.ThreadLocalStack
ThreadLocalDict = thread_local_utils.ThreadLocalDict
NestedMap = nested_map.NestedMap
def Assert(condition, data, *args, **kwargs):
if py_utils_flags.enable_asserts():
return tf.Assert(condition, data, *args, **kwargs)
else:
return tf.no_op()
def assert_equal(*args, **kwargs): # pylint: disable=invalid-name
if py_utils_flags.enable_asserts():
return tf.assert_equal(*args, **kwargs)
else:
return tf.no_op()
def assert_greater_equal(*args, **kwargs): # pylint: disable=invalid-name
if py_utils_flags.enable_asserts():
return tf.debugging.assert_greater_equal(*args, **kwargs)
else:
return tf.no_op()
def assert_greater(*args, **kwargs): # pylint: disable=invalid-name
if py_utils_flags.enable_asserts():
return tf.assert_greater(*args, **kwargs)
else:
return tf.no_op()
def assert_less_equal(*args, **kwargs): # pylint: disable=invalid-name
if py_utils_flags.enable_asserts():
return tf.debugging.assert_less_equal(*args, **kwargs)
else:
return tf.no_op()
def assert_less(*args, **kwargs): # pylint: disable=invalid-name
if py_utils_flags.enable_asserts():
return tf.assert_less(*args, **kwargs)
else:
return tf.no_op()
def assert_between(x, l, r, *args, **kwargs): # pylint: disable=invalid-name
x = tf.convert_to_tensor(x)
l = tf.cast(tf.convert_to_tensor(l), x.dtype)
r = tf.cast(tf.convert_to_tensor(r), x.dtype)
return tf.group([
assert_greater_equal(x, l, *args, **kwargs),
assert_less(x, r, *args, **kwargs)
])
def assert_shape_match(*args, **kwargs): # pylint: disable=invalid-name
if py_utils_flags.enable_asserts():
filepath, line, func, _ = traceback.extract_stack(limit=3)[-2]
kwargs['msg'] = 'LINGVO ASSERT %s:%s(%s)' % (re.sub(
r'.*/', '', filepath), line, func)
return ops.assert_shape_match(*args, **kwargs)
else:
return tf.no_op()
def assert_same_dim0(xs, *args, **kwargs): # pylint: disable=invalid-name
if py_utils_flags.enable_asserts():
return ops.assert_same_dim0(xs, *args, **kwargs)
else:
return tf.no_op()
def assert_even_divide(denorm, num): # pylint: disable=invalid-name
"""Asserts that denorm is evenly divided by num."""
denorm = tf.convert_to_tensor(denorm)
num = tf.convert_to_tensor(num)
if denorm.dtype not in (tf.int32, tf.int64):
    raise ValueError('denominator.dtype is not tf.int32 or tf.int64.')
if num.dtype not in (tf.int32, tf.int64):
raise ValueError('numerator.dtype is not tf.int32 or tf.int64.')
num = HasShape(num, GetShape(denorm))
quo = denorm // num
return assert_equal(quo * num, denorm)
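# Editor's illustrative sketch (not part of the original module): the assert_*
# wrappers above turn into tf.no_op() when asserts are disabled, so they are
# attached as control dependencies rather than used for Python-level control
# flow. The input tensor and split count are hypothetical.
def _AssertEvenDivideExampleSketch(x, num_splits):
  check = assert_even_divide(tf.shape(x)[0], num_splits)
  with tf.control_dependencies([check]):
    return tf.identity(x)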
def AssertIdShape(expected_ids_shape_pattern, ids_shape, *args):
"""Asserts shape expected_ids_shape_pattern matches all other input shapes."""
def AssertFn(inputs):
dependencies = [
assert_shape_match(inputs.ids_shape, inputs.expected_ids_shape_pattern)
] + [
assert_shape_match(inputs.ids_shape, x_shape) for x_shape in inputs.args
]
return with_dependencies(dependencies, inputs.ids_shape)
inputs = NestedMap(
expected_ids_shape_pattern=expected_ids_shape_pattern,
ids_shape=ids_shape,
args=args)
return CallDefun(AssertFn, Transform(tf.convert_to_tensor, inputs))
def _CheckNumerics(x, message=None, *args, **kwargs):
if x.dtype.is_floating:
x_name = x.name if not tf.executing_eagerly() else '[eager]'
if 'name' not in kwargs:
kwargs['name'] = re.sub(r':\d+', '', x_name) + '_CheckNumerics'
return tf.debugging.check_numerics(x, message if message else x_name, *args,
**kwargs)
else:
return x
def CheckNumerics(inp, message=None, *args, **kwargs):
"""Check numerics for tensors in inp."""
if not py_utils_flags.enable_check_numerics():
return inp
if isinstance(inp, list):
return [_CheckNumerics(x, message, *args, **kwargs) for x in inp]
if isinstance(inp, tuple):
return tuple(_CheckNumerics(x, message, *args, **kwargs) for x in inp)
return _CheckNumerics(inp, message, *args, **kwargs)
def with_dependencies(dependencies, output_tensor): # pylint: disable=invalid-name
with tf.control_dependencies(dependencies):
return tf.identity(output_tensor)
def _VarInCollection(var, collection):
"""Return whether a variable `var` is in the given variable collection."""
# We use variable reference for comparison, since variable is not hashable in
# eager mode.
return var.ref() in [v.ref() for v in collection]
@contextlib.contextmanager
def _PrintOptions(*args, **kwargs):
original = np.get_printoptions()
np.set_printoptions(*args, **kwargs)
try:
yield
finally:
np.set_printoptions(**original)
def _Print(name, x):
with _PrintOptions(linewidth=1000):
tf.logging.info('%s = %s', name, np.array_repr(x))
def Log(value, prefix, **kwargs):
"""Prints out values of tensors.
Useful for debugging. E.g.,
x = ... a tf.Tensor ...
y = ... a tf.Tensor ...
z = compute(x, y)
z = Log(z, 'debug compute()', x=x, y=y)
Args:
value: A Tensor. Log happens after this tensor's computed.
prefix: Every tensor is logged with this prefix.
**kwargs: keywords and tensors. Tensors are logged in the sort order of
      these keywords.
Returns:
value is returned.
"""
# Ensures tensors are printed in order.
last = value
for k in sorted(kwargs):
with tf.control_dependencies([last]):
last = tf.py_func(_Print, [prefix + ' : ' + k, kwargs[k]], [])
with tf.control_dependencies([last]):
return tf.identity(value)
def Debug(tensor, message='', enabled=True, summarize=100, more=None):
"""Wrapper around tf.Print() and tf.logging.info() to simplify debug printing.
x = py_utils.Debug(x)
When the graph is built a regular log info line will be printed:
-DBG- py_utils_test.py:429 x=Tensor(...
Then when the tensor node is evaluated it will print lines like:
-DBG- py_utils_test.py:429 x Const:0[x.shape=][2 2][x=][[1 2][3 4]]
WARNING: The code that parses local variable names can fail. E.g. don't write
two Debug() calls on one line or a Debug() call that spans more than one line.
Args:
tensor: A tensor to print.
message: A message to print.
enabled: To enable the debugging.
summarize: Integer with number of tensor values to print.
more: An optional list of additional tensors.
Returns:
The tensor.
"""
if not enabled or _FromGlobal('disable_py_utils_debug'):
return tensor
if more is None:
more = []
stack = inspect.stack()[1][0]
caller = inspect.getframeinfo(stack)
caller_var = ''
caller_more_vars = []
if caller.code_context:
# Rough and likely to fail. But better than nothing.
match = re.compile(r'Debug\((.*?)(\)|,).*$').search(caller.code_context[0])
if match:
caller_var = match.groups()[0]
if more:
more_vars = re.compile(r'more=\[(.*?)\].*$').search(
caller.code_context[0]).groups()[0]
if more_vars:
caller_more_vars = more_vars.split(',')
the_class = ''
if 'self' in stack.f_locals:
the_class = stack.f_locals['self'].__class__.__name__
header = '-DBG- {}:{}:{}:{} {} '.format(
os.path.basename(caller.filename), the_class, caller.function,
caller.lineno, message)
info = '{}{}={}'.format(header, caller_var, tensor)
for name, val in zip(caller_more_vars, more):
info += ' {}={}'.format(name.strip(), val)
tf.logging.info(info)
if isinstance(tensor, tf.Tensor):
tensors = []
tensors += [tf.constant('{}.shape='.format(caller_var)), tf.shape(tensor)]
for name, val in zip(caller_more_vars, more):
tensors += [tf.constant('{}.shape='.format(name.strip())), tf.shape(val)]
tensors += [tf.constant('{}='.format(caller_var)), tensor]
for name, val in zip(caller_more_vars, more):
tensors += [tf.constant('{}='.format(name.strip())), val]
name = tensor.name if not tf.executing_eagerly() else '[eager]'
info = '{}{} {}'.format(header, caller_var, name)
return tf.identity(
tf.Print(tensor, tensors, info, summarize=summarize),
re.sub(':.*$', '', name))
return tensor
def _Save(steps, prefix, key, val):
filename = '%s.%08d.%s.npy' % (six.ensure_text(prefix), steps,
six.ensure_text(key))
with tf.io.gfile.GFile(filename, 'w') as outfile:
np.save(outfile, val)
def Save(value, filename_prefix, **kwargs):
"""Saves values of tensors into files.
Useful for debugging. E.g.,
x = ... a tf.Tensor ...
y = ... a tf.Tensor ...
z = compute(x, y)
z = Save(z, '/path/tmp', x=x, y=y, z=z)
Args:
value: A Tensor. Saving happens after this tensor is computed.
filename_prefix: Every tensor is saved with this filename prefix.
**kwargs: keywords and tensors. Tensors are logged in the sort order of
      these keywords.
Returns:
value is returned.
"""
last = value
steps = GetGlobalStep()
for k in sorted(kwargs):
with tf.control_dependencies([last]):
last = tf.py_func(_Save, [steps, filename_prefix, k, kwargs[k]], [])
with tf.control_dependencies([last]):
return tf.identity(value)
def HasRank(tensor, expected_rank):
"""Syntactic sugar for asserting that tensor has the expected rank."""
if tensor.shape.ndims is not None and isinstance(expected_rank, int):
assert tensor.shape.ndims == expected_rank, (
'Ranks did not match, got %d, '
'expected %d') % (tensor.shape.ndims, expected_rank)
return tensor
if py_utils_flags.enable_asserts():
return with_dependencies([tf.assert_equal(tf.rank(tensor), expected_rank)],
tensor)
else:
return tensor
def HasAtLeastRank(tensor, expected_rank):
"""Syntactic sugar for asserting that tensor has rank >= expected_rank."""
if tensor.shape.ndims is not None and isinstance(expected_rank, int):
assert tensor.shape.ndims >= expected_rank, (
        'Rank of tensor %d is less than the expected minimum rank %d.') % (
tensor.shape.ndims, expected_rank)
return tensor
if py_utils_flags.enable_asserts():
return with_dependencies(
[tf.debugging.assert_greater_equal(tf.rank(tensor), expected_rank)],
tensor)
else:
return tensor
def GetRank(tensor):
"""Returns tensor's rank as an int if it's available, otherwise a Tensor.
Args:
tensor: The input tensor.
Returns:
Either an int or a Tensor for the rank of the input tensor.
"""
if tensor.shape.ndims is not None:
return tensor.shape.ndims # int
else:
return tf.rank(tensor) # Tensor
def GetShape(tensor, ndims=None):
"""Returns tensor's shape as a list which can be unpacked, unlike tf.shape.
Tries to return static shape if it's available. Note that this means
some of the outputs will be ints while the rest will be Tensors.
Args:
tensor: The input tensor.
ndims: If not None, returns the shapes for the first `ndims` dimensions.
"""
tensor = tf.convert_to_tensor(tensor)
dynamic_shape = tf.shape(tensor)
# Early exit for unranked tensor.
if tensor.shape.ndims is None:
if ndims is None:
return dynamic_shape
else:
return [dynamic_shape[x] for x in range(ndims)]
# Ranked tensor.
if ndims is None:
ndims = tensor.shape.ndims
else:
ndims = min(ndims, tensor.shape.ndims)
# Return mixture of static and dynamic dims.
static_shape = tensor.shape.as_list()
shapes = [
static_shape[x] if static_shape[x] is not None else dynamic_shape[x]
for x in range(ndims)
]
return shapes
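# Editor's illustrative sketch (not part of the original module): GetShape()
# returns a Python list that can be unpacked directly; statically known
# dimensions come back as ints, unknown ones as scalar Tensors.
def _GetShapeExampleSketch():
  x = tf.zeros([8, 16])
  batch, depth = GetShape(x)  # both plain ints here, since the shape is static
  return batch, depth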
def HasShape(tensor, expected_shape, ndims=None):
"""Syntactic sugar for asserting that tensor has the expected shape.
Args:
tensor: A Tensor.
expected_shape: A Python list or a 1D tensor. Elements of expected_shape can
be -1 which indicate that any size is valid for that dimension.
ndims: If not None, check only the first `ndims` dimensions of `tensor`.
Must be equal to the length of `expected_shape` if not None.
Returns:
The input `tensor` with control dependencies that will raise a runtime
error if dynamic shape checks fail.
Raises:
ValueError: A value error if the assertion fails at static shape checks.
"""
if not py_utils_flags.enable_asserts():
return tensor
filepath, line, func, _ = traceback.extract_stack(limit=3)[-2]
msg = 'LINGVO ASSERT %s:%s(%s)' % (re.sub(r'.*/', '',
filepath), line, func)
tensor_shape = GetShape(tensor)
if ndims is not None:
tensor_shape = tensor_shape[:ndims]
# TODO(jngiam): Attempt to switch back to tf.Assert after it has better
# support on GPUs.
assert_op = ops.assert_shape_match(tensor_shape, expected_shape, msg=msg)
# If expected_shape is a Tensor, then we are unable to perform static checks.
# In this case, we can do a dynamic check and return.
if isinstance(expected_shape, tf.Tensor):
return with_dependencies([assert_op], tensor)
# Infer ranks from the inputs.
expected_rank = len(expected_shape)
if isinstance(tensor_shape, tf.Tensor):
tensor_rank = tensor.shape.ndims
else:
tensor_rank = len(tensor_shape)
# If ndims is None, then either one of the ranks should not be None, or they
# should both match. If both ranks are None, then they are both tensors and
# should be caught by the earlier short-circuit.
if ndims is None:
if (tensor_rank is not None) and (expected_rank != tensor_rank):
raise ValueError('Tensor does not match rank of expected shape.\n'
'Tensor shape: {} Expected shape: {}'.format(
tensor_shape, expected_shape))
# Both tensors can be assumed to be of same rank.
ndims = expected_rank
else:
if (tensor_rank is not None) and (tensor_rank < ndims):
raise ValueError('Tensor has fewer dimensions than ndims.\n'
'Tensor shape: {} ndims: {}'.format(tensor_shape, ndims))
if expected_rank != ndims:
raise ValueError(
'Expected shape must have number of dimensions equal to ndims.\n'
'Expected shape: {} ndims: {}'.format(expected_shape, ndims))
# Ensure that both tensor_shape and expected_shape are both lists.
tensor_shape = tensor_shape[:ndims]
if isinstance(tensor_shape, tf.Tensor):
tensor_shape = tf.unstack(tensor_shape, num=ndims)
# Map tf.Dimension values to their held values.
tensor_shape = [
v.value if isinstance(v, tf.Dimension) else v for v in tensor_shape
]
expected_shape = [
v.value if isinstance(v, tf.Dimension) else v for v in expected_shape
]
all_static_checks = True
for idx, (dim, expected_dim) in enumerate(zip(tensor_shape, expected_shape)):
if isinstance(expected_dim, tf.Tensor):
all_static_checks = False
elif expected_dim == -1:
continue
elif isinstance(dim, tf.Tensor):
all_static_checks = False
elif dim != expected_dim:
raise ValueError('Tensor does not match expected shape on dimension {}.\n'
'Tensor shape: {} Expected shape: {}'.format(
idx, tensor_shape, expected_shape))
if all_static_checks:
return tf.convert_to_tensor(tensor)
else:
return with_dependencies([assert_op], tensor)
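# Editor's illustrative sketch (not part of the original module): -1 entries in
# expected_shape accept any size for that dimension, and ndims restricts the
# check to leading dimensions; static mismatches raise ValueError immediately.
def _HasShapeExampleSketch():
  x = tf.zeros([4, 7, 3])
  x = HasShape(x, [-1, 7, 3])        # batch dimension left unconstrained
  x = HasShape(x, [4, -1], ndims=2)  # only the first two dims are checked
  return x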
def HasSameShape(x, ref):
return HasShape(x, GetShape(ref))
def GetSize(tensor):
shape = GetShape(tensor)
if (isinstance(shape, tf.Tensor) or
any([isinstance(x, tf.Tensor) for x in shape])):
return tf.size(tensor)
return np.prod(shape)
def CausalSelfAttenPadding(seqlen, dtype):
"""Wraps tf.linalg.band_part() for tflite compatibility."""
if FLAGS.tflite_compatible:
# [N, 1]
rows = tf.expand_dims(tf.range(seqlen), -1)
# [1, N]
cols = tf.expand_dims(tf.range(seqlen), 0)
row_cols = rows - cols
return tf.where(row_cols < 0, tf.ones([seqlen, seqlen], dtype),
                    tf.zeros([seqlen, seqlen], dtype))
else:
return 1.0 - tf.linalg.band_part(
tf.ones([seqlen, seqlen], dtype=dtype), -1, 0)
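# Editor's illustrative sketch (not part of the original module): for seqlen=3
# the returned padding is 1.0 strictly above the diagonal (future positions)
# and 0.0 on and below it:
#   [[0., 1., 1.],
#    [0., 0., 1.],
#    [0., 0., 0.]]
def _CausalPaddingExampleSketch():
  return CausalSelfAttenPadding(3, dtype=tf.float32)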
def outside_all_rewrites(): # pylint: disable=invalid-name
return tf.control_dependencies(None)
# TODO(jamesqin): remove once b/147439702 is fixed.
_OUTSIDE_COMPILATION = threading.local()
def RunOnTpuHost(func, *args, **kwargs):
r"""Runs the given function call on TPU host.
Invokes func(\*args, \*\*kwargs) directly if not running on tpu.
Args:
func: the function to invoke.
*args: args of func
**kwargs: kwargs of func
Returns:
The function return value.
"""
if use_tpu() and not getattr(_OUTSIDE_COMPILATION, 'on', False):
_OUTSIDE_COMPILATION.on = True
res = tf.tpu.outside_compilation(func, *args, **kwargs)
_OUTSIDE_COMPILATION.on = False
else:
res = func(*args, **kwargs)
return res
def tpu_host(func): # pylint: disable=invalid-name
r"""Decorates a python function to only run on TPU hosts.
This function has no effect when running on CPU/GPU.
Example::
@py_utils.tpu_host()
def ComputeWER(self):
# Call a custom op computing WER.
Args:
func: the function to invoke
Returns:
A TPU-host only function
"""
def Wrapped(*args, **kwargs):
return RunOnTpuHost(func, *args, **kwargs)
return Wrapped
# Maps a TPU job name ('/job:xxx') to the job's DeviceAssignment object.
# When there is only a single TPU job, the key could be None.
_tpu_device_assignment_dict = dict()
def SetTpuDeviceAssignment(tpu_device_assignment, job=None):
if job in _tpu_device_assignment_dict:
tf.logging.warning('tpu_device_assignment was already set, '
'overwriting with new assignment.')
_tpu_device_assignment_dict[job] = tpu_device_assignment
# This function should be called in unit tests only.
def ClearTpuDevice():
global _tpu_device_assignment_dict
_tpu_device_assignment_dict = dict()
def GetTpuDeviceAssignment(job=None):
return _tpu_device_assignment_dict[job]
# Whether it's running in eager mode. This is different than
# tf.executing_eagerly(), which will return False inside a tf.function.
_IS_EAGER_MODE = False
# If you get an error "tf.enable_eager_execution must be called at program
# startup." but you are calling this function at the start, check if your change
# adds type hints for "tf.data" and wrap those type hints in quotes.
def SetEagerMode(eager_mode=True, test_mode=False):
"""Switch between Eager and Graph mode. Use this instead of TF APIs."""
global _IS_EAGER_MODE
_IS_EAGER_MODE = eager_mode
# Only change the global flag.
# Used in tests. In those scenarios we might want to use Graph mode along with
# Eager mode. All we need is changing the flag `_IS_EAGER_MODE` without
# calling `enable_eager_execution`/`disable_eager_execution`.
if test_mode:
return
if eager_mode:
tf.enable_eager_execution()
tf.config.set_soft_device_placement(True)
else:
tf.disable_eager_execution()
def IsEagerMode():
return _IS_EAGER_MODE
# Maintains a tf.GradientTape stack.
_GRADIENT_TAPE_STACK = ThreadLocalStack()
@contextlib.contextmanager
def GradientTape(*args, **kwargs):
"""Creates a tf.GradientTape and use it for automatic differentiation."""
tape = tf.GradientTape(*args, **kwargs)
_GRADIENT_TAPE_STACK.stack.append(tape)
try:
with tape:
yield
finally:
_GRADIENT_TAPE_STACK.stack.pop()
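# Editor's illustrative sketch (not part of the original module): the wrapper
# behaves like tf.GradientTape but also pushes the tape onto a thread-local
# stack so other lingvo code can discover the active tape; the context manager
# itself yields nothing. The variable below is hypothetical.
def _GradientTapeExampleSketch():
  w = tf.Variable(2.0)
  with GradientTape(persistent=True):
    tape = _GRADIENT_TAPE_STACK.stack[-1]  # the tape created by the wrapper
    loss = w * w
  return tape.gradient(loss, w)  # == 4.0 in eager mode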
def CreateEMAForModel(model_params, global_step):
"""Creates an EMA object for model with param `model_params` if applicable."""
p = model_params
# Check that EMA settings for the model and subtasks match.
def CheckEMA(task_name, task_params):
for field in ['ema_decay', 'ema_decay_moving_vars']:
model_value = p.train.Get(field)
task_value = task_params.train.Get(field)
if task_value != model_value:
raise ValueError(
f'Params {field} does not match. Value in model: '
f'{model_value}, value in task {task_name}: {task_value}')
if 'task_params' in p:
# MultiTaskModel. All subtasks should use the same ema settings.
for task_name, task_params in p.task_params.IterParams():
CheckEMA(task_name, task_params)
else:
assert 'task' in p
# SingleTaskModel.
CheckEMA(p.task.name, p.task)
if p.train.ema_decay > 0:
return tf.train.ExponentialMovingAverage(
decay=p.train.ema_decay, num_updates=global_step)
return None
def SessionConfig(soft_placement=True,
inline=True,
cluster_def=None,
disable_meta_optimizer=False):
"""Returns a session config proto.
Args:
soft_placement: Turns allow_soft_placement on iff True.
inline: Turns do_function_inlining on iff True.
cluster_def: A tf.train.ClusterDef describing the cluster.
disable_meta_optimizer: Turns off grappler/metagraph optimizer.
Returns:
A TF session config proto.
"""
session_config = tf.config_pb2.ConfigProto(
allow_soft_placement=soft_placement,
graph_options=tf.GraphOptions(
optimizer_options=tf.OptimizerOptions(
opt_level=tf.OptimizerOptions.L1, do_function_inlining=inline)),
cluster_def=cluster_def)
session_config.share_cluster_devices_in_session = True
if disable_meta_optimizer:
# Useful if start-up time is critical.
session_config.graph_options.rewrite_options.disable_meta_optimizer = True
# Disable layout optimizer which increases GPU memory usage.
session_config.graph_options.rewrite_options.layout_optimizer = (
rewriter_config_pb2.RewriterConfig.OFF)
return session_config
def AssertIsCompatible(a, b):
assert a.IsCompatible(b), ('%s vs %s' % (a, b))
def SetShapes(dst_nmap, src_nmap):
"""Set shapes in dst_nmap using those in src_nmap."""
AssertIsCompatible(src_nmap, dst_nmap)
for src, dst in zip(src_nmap.Flatten(), dst_nmap.Flatten()):
dst.set_shape(src.shape)
def Dtypes(nmap_list):
"""Returns all tensors' data types in a list."""
return [v.dtype for v in Flatten(nmap_list)]
def Flatten(x):
"""Flattens 'x' by extracting tensors from nested structures to a list."""
return tf.nest.flatten(x)
def Pack(tmpl, values):
"""Packs 'values' according to 'tmpl'."""
return tf.nest.pack_sequence_as(tmpl, values)
def Transform(fn, *v):
"""Replaces every nested value x in 'v' with fn(x) and returns the result."""
return tf.nest.map_structure(fn, *v)
def ConvertNoneGradientToZeros(xs, dxs):
"""Sanitize dxs so that None becomes zeros appropriately.
Args:
xs: A list of tensors.
dxs: A list of tensors. dxs[i] corresponds to xs[i]'s gradient.
Returns:
A `.NestedMap` same as dxs with None replaced by a zero tensor.
"""
fn = lambda x, dx: tf.zeros_like(x) if dx is None else dx
return Transform(fn, xs, dxs)
def IsCompatible(lhs, rhs):
"""Returns true if lhs and rhs are compatible."""
try:
tf.nest.assert_same_structure(lhs, rhs)
return True
except (ValueError, TypeError):
return False
class _Unique:
"""A helper to uniqify variables in a NestedMap."""
def __init__(self):
self._vset = set()
def __call__(self, v):
if (v is None) or (id(v) in self._vset):
return False
else:
self._vset.add(id(v))
return True
def ToUniqueList(nmap):
"""Returns the flattened `nmap` with duplicates removed."""
return nmap.Filter(_Unique()).Flatten()
def ReadOnlyAttrDictView(backing):
"""Wraps a dict to provide a read-only view of its contents.
Dict keys can also be accessed by attribute.
Args:
backing: Dict-like object to wrap.
Returns:
Read-only Mapping that can be accessed by index (['foo']) or attr (d.foo).
"""
class Wrapper:
"""Wrapper object."""
# Disable pytype attribute checking.
_HAS_DYNAMIC_ATTRIBUTES = True
def __getitem__(self, key):
return backing[key]
def __len__(self):
return len(backing)
def __iter__(self):
return iter(backing)
def __getattr__(self, key):
return backing[key]
def __hasattr__(self, key):
return key in backing
def __setattr__(self, key, value):
raise AttributeError('Dictionary is read-only.')
def __setitem__(self, key, value):
raise AttributeError('Dictionary is read-only.')
return Wrapper()
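# Editor's illustrative sketch (not part of the original module): entries are
# readable by key or attribute, and any mutation attempt raises AttributeError.
# The dict contents are hypothetical.
def _ReadOnlyAttrDictViewExampleSketch():
  view = ReadOnlyAttrDictView({'lr': 0.1, 'clip': 5.0})
  assert view['lr'] == 0.1 and view.clip == 5.0
  try:
    view.lr = 0.2
  except AttributeError:
    pass  # read-only view rejects writes
  return view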
def ToStaticShape(shape):
"""Converts 'shape' to a static shape."""
if isinstance(shape, (list, tuple)):
shape = [
dim.value if isinstance(dim, tf.Dimension) else dim for dim in shape
]
static_shape = []
for dim in shape:
if symbolic.IsExpr(dim):
static_shape.append(symbolic.ToStatic(dim))
else:
static_shape.append(dim)
return static_shape
else:
return shape.value if isinstance(shape, tf.Dimension) else shape
def Zeros(shape, *args, **kwargs):
return tf.zeros(ToStaticShape(shape), *args, **kwargs)
class UniformSampler:
"""A reservoir sampler.
This class implements reservoir sampling: Given a limit of `num_samples` total
samples, this class maintains a uniform probability (1 / `num_samples`) of
keeping any item dynamically added to the sampler.
See https://en.wikipedia.org/wiki/Reservoir_sampling for details.
"""
def __init__(self, num_samples):
assert num_samples > 0
self._num_samples = num_samples
self._num_seen_items = 0
self._samples = []
def Add(self, item):
"""Add item to sampler."""
self._num_seen_items += 1
if len(self._samples) < self._num_samples:
self._samples.append(item)
return
index = np.random.randint(0, self._num_seen_items)
if index < self._num_samples:
self._samples[index] = item
@property
def samples(self):
"""Fetch the current samples from the sampler."""
return self._samples
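# Editor's illustrative sketch (not part of the original module): the sampler
# keeps at most 8 of the 100 streamed items, each retained with equal
# probability 8/100.
def _UniformSamplerExampleSketch():
  sampler = UniformSampler(num_samples=8)
  for item in range(100):
    sampler.Add(item)
  return sampler.samples  # a list of 8 items drawn uniformly from the stream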
class RNNCellStateInit:
"""State initialization functions for RNN cell init state."""
@staticmethod
def _Params(method, seed):
p = hyperparams.Params()
p.Define('method', method,
'Initialization method. Should be one of zeros, random_normal.')
p.Define('seed', seed, 'Random seed used to generate initial values.')
p.Freeze()
return p
@staticmethod
def Zeros():
"""tf.zeros()."""
return RNNCellStateInit._Params('zeros', seed=None)
@staticmethod
def RandomNormal(seed=None):
"""tf.random.normal()."""
return RNNCellStateInit._Params('random_normal', seed)
def DefaultRNNCellStateInit():
return RNNCellStateInit.Zeros()
def InitRNNCellState(shape, init=None, dtype=None, name=None, is_eval=False):
"""Initial state definitions for RNN cell implementations.
Args:
shape: A array of ints/symbols for specifying the shape of the state.
    init: Hyperparameters as returned by one of the static implementations in
RNNCellStateInit.
    dtype: The dtype of the states. Defaults to tf.float32.
name: A name for the operation. If --stateless_vars_init is set, this name
is used to generate a seed on a per-variable basis. Otherwise, this name
is optional.
is_eval: Bool, set to True if we need special behavior in eval mode.
Returns:
A Tensor of the specified shape, and sampled from the distribution as
defined by the init parameters.
"""
shape = ToStaticShape(shape)
if init is None:
init = DefaultRNNCellStateInit()
if dtype is None:
dtype = tf.float32
method = init.method
if ((method in ['zeros']) or (method in ['random_normal'] and is_eval)):
init_state = tf.zeros(shape=shape, dtype=dtype, name=name)
elif method in ['random_normal']:
if use_stateless_vars_init():
if name is None:
raise ValueError('InitRNNCellState() requires a `name` argument when '
'--stateless_vars_init is enabled.')
seed = _GenerateStatelessRngSeed(name, init.seed)
init_state = stateless_random_ops.stateless_random_normal(
shape=shape, dtype=dtype, name=name, seed=seed)
else:
init_state = tf.random.normal(
shape=shape, dtype=dtype, name=name, seed=init.seed)
else:
raise ValueError('Initialization method (%s) not supported.' % method)
return init_state
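# Editor's illustrative sketch (not part of the original module): a
# zero-initialized cell state for a hypothetical batch of 4 and cell dimension
# of 32; pass RNNCellStateInit.RandomNormal() instead for randomized init.
def _InitRNNCellStateExampleSketch():
  return InitRNNCellState([4, 32], init=RNNCellStateInit.Zeros(),
                          dtype=tf.float32, name='rnn_state_init')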
class WeightInit:
"""Static class providing weight initialization config params."""
@staticmethod
def _Params(method, scale, seed, custom_v_init=None):
"""Parameters of this class."""
p = hyperparams.Params()
p.Define('method', method, 'Initialization method.')
p.Define('scale', scale, 'Initialization scale.')
p.Define('seed', seed, 'Random seed used to generate initial values.')
p.Define('custom_v_init', custom_v_init,
'A custom tf.init_ops.Initializer instance.')
p.Freeze()
return p
@staticmethod
def Gaussian(scale=1.0, seed=None):
"""scale * tf.random.normal(0, 1.0)."""
return WeightInit._Params('gaussian', scale, seed)
@staticmethod
def Uniform(scale=1.0, seed=None):
"""scale * tf.random.uniform(-1.0, 1.0)."""
return WeightInit._Params('uniform', scale, seed)
@staticmethod
def UniformPositive(scale=1.0, seed=None):
"""scale * tf.random.uniform(0., 1.0)."""
return WeightInit._Params('uniform_positive', scale, seed)
@staticmethod
def Category(scale=2, seed=None):
"""tf.floor(scale * tf.random.uniform(0., 1.0))."""
return WeightInit._Params('category', scale, seed)
@staticmethod
def Xavier(scale=1.0, seed=None):
"""Xavier initialization (x = sqrt(6. / (in + out)); [-x, x])."""
return WeightInit._Params('xavier', scale, seed)
@staticmethod
def XavierWithFixupParams(scale=1.0,
depth=1.0,
layers_per_residual_block=1.0,
seed=None):
"""Xavier initialization with Fixup."""
scale = scale * math.pow(depth, (-1.0 / (2 * layers_per_residual_block)))
return WeightInit._Params('xavier', scale, seed)
@staticmethod
def GeoMeanXavier(scale=1.0, seed=None):
"""A variant of Xavier (x = sqrt(3. / sqrt(in * out)); [-x, x])."""
return WeightInit._Params('geo_mean_xavier', scale, seed)
@staticmethod
def Constant(scale=1.0):
"""scale."""
return WeightInit._Params('constant', scale, 0)
@staticmethod
def TruncatedGaussian(scale=1.0, seed=None):
"""scale * tf.random.truncated_normal(0, 1.0)."""
return WeightInit._Params('truncated_gaussian', scale, seed)
@staticmethod
def GaussianSqrtDim(scale=1.0, seed=None):
"""scale * tf.random.normal(0, 1 / sqrt(dim0))."""
return WeightInit._Params('gaussian_sqrt_dim', scale, seed)
@staticmethod
def GaussianSqrtFanIn(scale=1.0, seed=None):
"""scale * tf.random.normal(0, 1 / sqrt(fan_in))."""
return WeightInit._Params('gaussian_sqrt_fanin', scale, seed)
@staticmethod
def GaussianSqrtFanOut(scale=1.0, seed=None):
"""scale * tf.random.normal(0, 1 / sqrt(fan_out))."""
return WeightInit._Params('gaussian_sqrt_fanout', scale, seed)
@staticmethod
def GaussianSqrtFanAvg(scale=1.0, seed=None):
"""tf.random.normal(0, sqrt(2.0 / (in + out)))."""
return WeightInit._Params('gaussian_sqrt_fanavg', scale, seed)
@staticmethod
def UniformSqrtDim(scale=1.0, seed=None):
"""scale * tf.uniform(-1 / sqrt(dim0), 1 / sqrt(dim0))."""
return WeightInit._Params('uniform_sqrt_dim', scale, seed)
@staticmethod
def UniformUnitScaling(scale=1.0, seed=None):
"""scale * sqrt(3) / sqrt(dim0) * tf.uniform(-1, 1)."""
return WeightInit._Params('uniform_unit_scaling', scale, seed)
@staticmethod
def UniformUnitScalingFanAvg(scale=1.0, seed=None):
"""Same as tf.variance_scaling_initializer() ...
Samples are drawn from a uniform distribution within [-limit, limit], with
limit = sqrt(3 * scale / n)
where
n = max(1., (fan_in + fan_out) / 2).
See tf.keras.initializers.VarianceScaling for details.
Args:
scale: A Python float.
seed: A Python int or None.
Returns:
A WeightInit param.
"""
return WeightInit._Params('uniform_unit_scaling_fan_avg', scale, seed)
@staticmethod
def TruncatedGaussianSqrtDim(scale=1.0, seed=None):
"""scale * tf.random.truncated_normal(0, 1 / sqrt(dim0))."""
return WeightInit._Params('truncated_gaussian_sqrt_dim', scale, seed)
@staticmethod
def TruncatedGaussianSqrtFanIn(scale=1.0, seed=None):
"""scale * tf.random.truncated_normal(0, 1 / sqrt(fan_in))."""
return WeightInit._Params('truncated_gaussian_sqrt_fanin', scale, seed)
@staticmethod
def TruncatedGaussianSqrtFanOut(scale=1.0, seed=None):
"""scale * tf.random.truncated_normal(0, 1 / sqrt(fan_out))."""
return WeightInit._Params('truncated_gaussian_sqrt_fanout', scale, seed)
@staticmethod
def KaimingUniformFanInRelu(scale=1.0, seed=None):
return WeightInit._Params('kaiming_uniform_fanin_relu', scale, seed)
@staticmethod
def KaimingUniformFanInLeakyRelu(scale=np.sqrt(5.), seed=None):
return WeightInit._Params('kaiming_uniform_fanin_leakyrelu', scale, seed)
@staticmethod
def CustomVarInit(custom_v_init):
return WeightInit._Params('custom', 1.0, None, custom_v_init)
@staticmethod
def CustomConstantVarInit(custom_v_init):
return WeightInit._Params('custom_constant', 1.0, None, custom_v_init)
@staticmethod
def ScaledDeltaOrthogonal(scale=1.0, seed=None):
return WeightInit._Params('delta_orthogonal', scale, seed)
_DEFAULT_XAVIER_INIT = 1.000001
def DefaultParamInit():
# Here we use 1.000001 as a signature for user picking up the
# default param initializer.
return WeightInit.Xavier(_DEFAULT_XAVIER_INIT)
# TODO(rpang, jonathanasdf): explore adding _is_default to hyperparams.Param.
def IsDefaultParamInit(p):
return (p.method == 'xavier' and
abs(p.scale - _DEFAULT_XAVIER_INIT) < 1e-7 and p.seed is None)
def WeightParams(shape,
init=None,
dtype=None,
collections=None,
device_mesh=None,
tensor_split_dims_mapping=None):
"""Returns a hyperparams for a weight variable given the shape/init/dtype."""
if init is None:
init = WeightInit.Xavier(_DEFAULT_XAVIER_INIT)
if dtype is None:
dtype = tf.float32
if collections is None:
collections = []
if device_mesh is not None:
if tensor_split_dims_mapping is None:
tensor_split_dims_mapping = (-1,) * len(shape)
tf.logging.info(
'Sets tensor_split_dims_mapping of a param of shape {} to {}'.format(
shape, tensor_split_dims_mapping))
assert len(tensor_split_dims_mapping) == len(shape)
p = hyperparams.Params()
p.Define('dtype', dtype, 'The weight data type.')
p.Define('shape', shape, 'The weight shape.')
p.Define('init', init, 'Initialization method.')
p.Define('collections', collections,
'Variable collections this weight belongs to.')
p.Define(
'device_mesh', device_mesh,
'A numpy.ndarray describing the topology of a device mesh to partition'
' this variable onto. Each element in the np.ndarray is the ID of a'
' device in the topology. device_mesh and tensor_split_dims_mapping below'
' together specifies how this weight tensor should be sharded across'
' different tpu cores. If None, this variable is not sharded.'
' Here are examples: np.array([0, 1, 2, 3, 4, 5, 6, 7]) which is a 1d'
      ' mesh with 8 devices, np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) which is a'
      ' 2d matrix of 8 devices.')
p.Define(
'tensor_split_dims_mapping', tensor_split_dims_mapping,
'A list of integers that map each tensor axis to the device mesh axis'
' along which it is sharded. Its length is the tensor rank, and'
' split_dims_mapping[i] is device mesh axis for tensor dimension i. Use'
' -1 for tensor dimensions that are not sharded. If the list is set to'
' None and a device_mesh is specified, the sharding will be treated as'
' replicated. Here is a concrete examples: '
' device_mesh=np.array([[0, 1, 2, 3] [4, 5, 6, 7]]), of shape [2, 4]'
' shape=[x, y, z], so this is a 3d variable.'
' tensor_split_dims_mapping=[-1, -1, 1], in this case, the third dim'
' of the variable is split along the second dim of the mesh. Each '
' split of the variable is of the shape [x, y, z/4].')
# The following two flags are used in Jax only.
p.Define(
'repeat_prefix', None,
'If not None, the full shape of this var is repeat_prefix+shape. '
'For example, if repeat_prefix=[16, 2], and shape=[512, 1024], then '
'real shape of variable is [16, 2, 512, 1024]. "repeat_prefix" is '
'often used if a layer is to be used in a recurrent loop, where '
'logically there are n sub-layers, but for performance/hbm usage '
'reasons we stack all the variables in creating those n-layers.')
p.Define('repeat_prefix_split_dims_mapping', None,
'Tensor split dims mapping for the repeat_prefix dims.')
return p
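# Editor's illustrative sketch (not part of the original module): a
# [1024, 4096] weight sharded over a hypothetical 2x4 device mesh, splitting
# the second tensor dimension across the mesh axis of size 4, as described in
# the tensor_split_dims_mapping documentation above.
def _WeightParamsExampleSketch():
  mesh = np.arange(8).reshape([2, 4])
  return WeightParams(
      shape=[1024, 4096],
      init=WeightInit.Gaussian(scale=0.02),
      dtype=tf.float32,
      device_mesh=mesh,
      tensor_split_dims_mapping=[-1, 1])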
def FindNeeded(endpoints):
"""List names of tensors and operations required to compute endpoints."""
names_seen = set()
queue = []
for e in Flatten(endpoints):
if isinstance(e, tf.Operation):
queue.append(e)
else:
queue.append(e.op)
while queue:
op = queue.pop()
name = op.name
if name not in names_seen:
names_seen.add(name)
names_seen.update((o.name for o in op.outputs))
queue.extend(i.op for i in op.inputs)
queue.extend(op.control_inputs)
return names_seen
class _CollectionGetter:
"""Get graph local value from a defined collection."""
def __init__(self, key, default_factory):
self._key = key
self._default_factory = default_factory
def __call__(self):
collection = tf.get_collection(self._key)
if collection:
assert len(collection) == 1
return collection[0]
value = self._default_factory()
tf.add_to_collection(self._key, value)
return value
def SanitizeScopeKey(key):
"""Removes invalid symbols from name_scope keys."""
if key.startswith('_'):
key = key[1:]
return key.replace('[', '_').replace(']', '')
# Maintain a session for unit tests (initialized in test_utils.py).
_SESSION_SCOPE = ThreadLocalStack()
@contextlib.contextmanager
def UnitTestSessionScope(sess):
_SESSION_SCOPE.stack.append(sess)
try:
yield
finally:
_SESSION_SCOPE.stack.pop()
def GetUnitTestSession():
"""Get the current variable reuse setting."""
return _SESSION_SCOPE.stack[-1] if _SESSION_SCOPE.stack else None
# Global variable to control multitask variable reuse
# If False (default) the default tf.get_variable is used, that is:
# - Reusing scopes only allow getting existing variables
# - Non-reusing scopes only allow getting new variables
# With GetOpportunisticVariableReuse() == True:
# - Reusing scopes only allow getting existing variables, as usual
# - Non-reusing scopes reuse new variables or get new ones
_OPPORTUNISTIC_VARIABLE_REUSE = ThreadLocalStack()
@contextlib.contextmanager
def OpportunisticVariableReuseScope(enable_opportunistic_reuse=True):
_OPPORTUNISTIC_VARIABLE_REUSE.stack.append(enable_opportunistic_reuse)
try:
yield
finally:
_OPPORTUNISTIC_VARIABLE_REUSE.stack.pop()
def GetOpportunisticVariableReuse():
"""Get the current variable reuse setting."""
return (_OPPORTUNISTIC_VARIABLE_REUSE.stack[-1]
if _OPPORTUNISTIC_VARIABLE_REUSE.stack else False)
_DISABLE_VARIABLE_NAME_CHECKING = ThreadLocalStack()
@contextlib.contextmanager
def DisableVariableNameChecking(disable=True):
_DISABLE_VARIABLE_NAME_CHECKING.stack.append(disable)
try:
yield
finally:
_DISABLE_VARIABLE_NAME_CHECKING.stack.pop()
_VARIABLE_RENAME_RULES = ThreadLocalStack()
# Global variable to track task calling scope.
# Currently only used for TPU Embedding purposes as a TPUEmbeddingLayer
# may be shared across tasks and the calling task needs to be known
# for tracking embedding activations for backprop.
_TASK_CALL_SCOPE = ThreadLocalStack()
def TaskCallScopeName(task):
"""Get a unique string identifying a task."""
return f'{task.params.name}_{id(task)}'
@contextlib.contextmanager
def TaskCallScope(task):
_TASK_CALL_SCOPE.stack.append(TaskCallScopeName(task))
try:
yield
finally:
_TASK_CALL_SCOPE.stack.pop()
def GetTaskCallScope():
"""Get the current task call scope."""
return _TASK_CALL_SCOPE.stack[-1] if _TASK_CALL_SCOPE.stack else None
@contextlib.contextmanager
def VariableRenameScope(renames):
"""Append the renaming rules to the stack of renames.
Args:
renames: pairs of (regexp, new_name_format). If the regexp matches, the
new_name_format will be interpolated using the matched groups.
Yields:
scope in which the renaming rules are applied
"""
_VARIABLE_RENAME_RULES.stack.append(renames)
try:
yield
finally:
_VARIABLE_RENAME_RULES.stack.pop()
def GetVariableName(name):
"""Get variable name after application of all renaming rules.
Args:
name: untransformed variable name with scope_name prepended
Returns:
name possibly modified using renaming rules
"""
matched = False
new_name = name
for renames in _VARIABLE_RENAME_RULES.stack:
tf.logging.log_first_n(
tf.logging.WARN,
('Renaming variables is not supported in eager mode. '
'Please look into migrating away from variable renaming.'), 1)
for regexp, name_format in renames:
match = re.match(regexp, name)
if match:
if matched:
tf.logging.warning('Multiple matches for: %s', name)
matched = True
new_name = name_format % match.groups()
if new_name != name:
tf.logging.info("WARNING!!! Renaming variable '%s' to '%s'", name, new_name)
return new_name
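# Usage sketch for VariableRenameScope/GetVariableName; the regexp and variable
# names below are illustrative only:
#
#   with VariableRenameScope([(r'^old_tower/(.*)$', 'new_tower/%s')]):
#     GetVariableName('old_tower/fc/w')  # -> 'new_tower/fc/w'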
_LIST_REGEX_DTYPE = ThreadLocalStack()
@contextlib.contextmanager
def VariableListDtypeRegexScope(list_regex_dtypes):
"""Append the list of (regex, dtype) to override the dtype.
Args:
list_regex_dtypes: pairs of (regexp, dtype). If the regexp matches, the data
type of the variable will be changed by the corresponding dtype.
Yields:
scope in which the list of (regex, dtype) is applied.
"""
_LIST_REGEX_DTYPE.stack.append(list_regex_dtypes)
try:
yield
finally:
_LIST_REGEX_DTYPE.stack.pop()
def FindDataType(var_name):
"""Find the data type for var_name.
Args:
var_name: A string, name of the variable.
Returns:
The dtype of the first matched regex with var_name, or None if no matching
found.
"""
for regex_dtypes in _LIST_REGEX_DTYPE.stack:
for regex, data_type in regex_dtypes:
if re.match(regex, var_name):
return data_type
return None
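# Usage sketch for VariableListDtypeRegexScope/FindDataType; the regexp and
# variable names are illustrative only:
#
#   with VariableListDtypeRegexScope([(r'.*/ff_layer/.*', tf.bfloat16)]):
#     FindDataType('tower/ff_layer/w/var')   # -> tf.bfloat16
#     FindDataType('tower/attention/w/var')  # -> None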
def GenerateSeedFromName(name):
"""Generate a random seed from a name string.
Args:
name: A string.
Returns:
An integer seed in the range [0, 2**31 - 1).
"""
md5 = hashlib.md5()
md5.update(six.ensure_binary(name))
return np.int64(int(md5.hexdigest(), 16) % (2**31 - 1))
def MaybeGenerateSeedFromScope():
"""Generate a random seed from the current name of the scope.
If running in eager mode, this returns 0.
Returns:
An integer seed in the range [0, 2**31 - 1).
"""
if not tf.executing_eagerly():
return GenerateSeedFromName(tf.no_op(name='new_step_seed').name)
return 0
def GenerateSeedFromId(obj_id):
"""Generate a random seed from the id of an object.
If deterministic execution (i.e. unit test), generate the seed from a fixed
unique name instead.
Args:
obj_id: id(object).
Returns:
An integer seed in the range [0, 2**31 - 1).
"""
if tf.get_default_graph().seed is not None:
    # We are in a program/test which needs deterministic randomization.
with tf.name_scope(''):
return GenerateSeedFromName(tf.no_op(name='new_step_seed').name)
md5 = hashlib.md5()
md5.update(np.int64(obj_id))
return np.int64(int(md5.hexdigest(), 16) % (2**31 - 1))
_VARIABLE_SHAPE_PREFIXES = ThreadLocalStack()
def GetVarLeadingDimsAsCombinedLayers(var):
"""Gets the number of leading dimensions of `var` marked as combined layers.
Such dimensions represent variables from different layers stacked together,
  e.g., in RepeatLayer, and optimizers (which have shape-dependent behaviors)
  can adjust their behavior based on this information to match the behavior for
separate layer variables.
Args:
var: A variable.
Returns:
An integer representing the number of leading dimensions.
"""
try:
return var.op.get_attr('_num_leading_dims_for_combined_layers')
except ValueError:
return 0
except AttributeError:
# AttributeError: 'DistributedVarOp' object has no attribute 'get_attr'.
return 0
@contextlib.contextmanager
def VariableShapePrefixContext(shape_prefix):
"""Add a shape prefix to variable created by CreateVariable().
This new dimension will be marked as combined-layers. See also comments for
GetVarLeadingDimsAsCombinedLayers().
Args:
shape_prefix: a positive integer of shape prefix.
Yields:
None.
"""
assert shape_prefix > 0, ('%s' % shape_prefix)
_VARIABLE_SHAPE_PREFIXES.stack.append(shape_prefix)
try:
yield
finally:
_VARIABLE_SHAPE_PREFIXES.stack.pop()
def GetVariableShapePrefixes():
"""Return the list of shape prefixes for CreateVariable()."""
return _VARIABLE_SHAPE_PREFIXES.stack
def GetVariableNumLeadingDimsForCombinedLayersContext():
"""Return the number of leading combined-layers dims for CreateVariable()."""
return len(_VARIABLE_SHAPE_PREFIXES.stack)
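# Usage sketch: a prefix added via VariableShapePrefixContext becomes a leading
# dimension of variables created by CreateVariable() further below (the shape
# values here are illustrative):
#
#   with VariableShapePrefixContext(16):
#     GetVariableShapePrefixes()                           # -> [16]
#     GetVariableNumLeadingDimsForCombinedLayersContext()  # -> 1
#     # CreateVariable(..., shape=[512, 1024]) would create a [16, 512, 1024]
#     # variable whose leading dim is marked as combined layers.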
def GetFanInFanOut(shape, prefix_dims_to_skip):
"""Returns (fan_in, fan_out) of a weight variable of the give shape."""
if not shape:
return None, None
if len(shape) < prefix_dims_to_skip:
raise ValueError(f'Variable shape is {shape} but prefix_dims_to_skip is '
f'{prefix_dims_to_skip}, larger than the shape rank.')
adjusted_shape = shape[prefix_dims_to_skip:]
if len(adjusted_shape) < 1:
return 1, 1
elif len(adjusted_shape) == 1:
# Following _compute_fans() from TF's init_ops.py.
return adjusted_shape[0], adjusted_shape[0]
else:
receptive_field_size = 1
for s in adjusted_shape[:-2]:
receptive_field_size *= s
fan_in = adjusted_shape[-2] * receptive_field_size
fan_out = adjusted_shape[-1] * receptive_field_size
return fan_in, fan_out
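# Worked examples (illustrative):
#
#   GetFanInFanOut([3, 3, 8, 16], 0)    # conv kernel: receptive field 3*3=9,
#                                       # -> (fan_in=72, fan_out=144)
#   GetFanInFanOut([16, 512, 1024], 1)  # skip the stacked-layers prefix dim,
#                                       # -> (fan_in=512, fan_out=1024)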
@contextlib.contextmanager
def VariableStore(default_store=None):
"""Keeps track of {variable_name: (variable, var_params)}.
When CreateVariable would result in a variable name that exists in the store,
the existing variable is returned, or an error is raised, depending on whether
the variable scope supports reuse.
This mimics the behavior of tf.compat.v1.get_variable() with regards to
variable reuse, while functioning correctly in TF2 eager context. However, it
only applies to variables created via CreateVariable.
When there are nested VariableStore contexts, they all provide the same
variable store object. That is, the scope of the variable store is the
outermost context.
Args:
default_store: variable store dict. If set, and there is no store in the
stack, use this store instead of creating a new dict.
Yields:
A dictionary representing the variable store.
"""
old_store = _GetVariableStore()
default_store = default_store or {}
store = old_store if old_store is not None else default_store
graph = tf.get_default_graph()
while hasattr(graph, 'outer_graph') and graph.outer_graph:
graph = graph.outer_graph
graph.lingvo_variable_store = store
  try:
    yield store
  finally:
    graph.lingvo_variable_store = old_store
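# Usage sketch for VariableStore-based reuse with CreateVariable() (defined
# further below). `w_params` stands for a weight-params object such as one
# built by the weight-params factory earlier in this file; it is an assumption
# for illustration:
#
#   with VariableStore():
#     with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
#       w1 = CreateVariable('w', w_params)
#       w2 = CreateVariable('w', w_params)  # returns the cached w1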
def _GetVariableStore():
graph = tf.get_default_graph()
while hasattr(graph, 'outer_graph') and graph.outer_graph:
graph = graph.outer_graph
if hasattr(graph, 'lingvo_variable_store'):
return graph.lingvo_variable_store
return None
def _DefaultVariableCreator(**kwargs):
kwargs.pop('var_name')
kwargs.pop('var_params')
return tf.get_variable(**kwargs)
_VARIABLE_CREATOR_STACK = ThreadLocalStack()
def _GetVariableCreator():
fn = _DefaultVariableCreator
# Latest entry in _VARIABLE_CREATOR_STACK is called last.
for wrapper in reversed(_VARIABLE_CREATOR_STACK.stack):
fn = functools.partial(wrapper, fn)
return fn
@contextlib.contextmanager
def VariableCreatorScope(variable_creator):
"""Yields a context around a variable_creator, used by `CreateVariable()`.
The function must have the following signature::
def variable_creator(next_creator, **kwargs)
The function may delegate variable creation to the next variable creator, or
return its own tf.Variable.
This differs from tf.variable_creator_scope in that tf.variable_creator_scope
modifies a tf.Variable() call while this modifies a tf.get_variable() call. As
the code is migrated to TF2 and tf.get_variable() is deprecated, this may be
upgraded to using tf.variable_creator_scope instead.
This differs from tf.variable_scope(custom_getter=variable_creator) in that
the kwargs passed can be manipulated.
Variable creators are resolved from the outermost towards the innermost.
The innermost variable creator function is tf.get_variable.
The passed in kwargs must conform to what tf.get_variable accepts, with the
addition of `var_name` and `var_params`.
Args:
variable_creator: A variable creator function.
"""
_VARIABLE_CREATOR_STACK.stack.append(variable_creator)
try:
yield
finally:
_VARIABLE_CREATOR_STACK.stack.pop()
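# Illustrative sketch of a custom variable creator; the creator below simply
# pins creation to CPU and, like `w_params`, is an assumption for
# demonstration:
#
#   def _CreateOnCpu(next_creator, **kwargs):
#     with tf.device('/cpu:0'):
#       return next_creator(**kwargs)
#
#   with VariableCreatorScope(_CreateOnCpu):
#     v = CreateVariable('w', w_params)  # created under /cpu:0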
def PlaceOnTpuCore(core_id):
"""Returns a VariableCreatorScope that places variables on a given tpu core.
Only applies when running with TPUs.
Does not yet properly support model parallelism.
Args:
core_id: The tpu core id.
"""
def Creator(next_creator, **kwargs):
cluster = cluster_factory.Current()
if use_tpu():
device = cluster.WorkerDeviceInModelSplit(core_id)
elif (
tpu_compat() and
cluster.params.job in ('controller', 'trainer_client', 'executor_tpu')):
# The job is running in a fleet that uses tpu, but does not itself have
# access to the tpu, e.g. controller job. In this case, the returned
# device needs to be the cpu device on the tpu host for the given core.
# FIXME: the current implementation is wrong for large values of core_id.
device = cluster.ListDevices(cluster.params.worker)[0, 0]
else:
device = ''
with tf.device(device):
return next_creator(**kwargs)
return VariableCreatorScope(Creator)
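# Usage sketch (illustrative; `w_params` is an assumed weight-params object):
#
#   with PlaceOnTpuCore(2):
#     v = CreateVariable('w', w_params)  # placed on the device for TPU core 2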
# Variable creators.
def MaybeReuseFromVariableStore(next_creator, **kwargs):
"""Variable creator that attempts to reuse variables from variable store."""
var_name = kwargs['var_name']
p = kwargs['var_params']
store = _GetVariableStore()
if store is not None:
if var_name in store:
if tf.get_variable_scope().reuse:
var, cached_p = store[var_name]
tf.logging.info('Reusing var %s', var.name)
assert cached_p == p.ToText(), (
'Cached config:\n %s vs new config:\n %s' % (cached_p, p.ToText()))
return var
var = next_creator(**kwargs)
  if not (_DISABLE_VARIABLE_NAME_CHECKING.stack and
          _DISABLE_VARIABLE_NAME_CHECKING.stack[-1]):
    if var.name != f'{var_name}/var:0':
      raise ValueError(
          'Expected %s but created variable %s. Did you mean to set reuse=True '
          'or reuse=tf.AUTO_REUSE in VarScope, or did you forget to create a '
          'VariableStore for variable reuse?' % (f'{var_name}/var:0', var.name))
tf.logging.info('Creating var %s shape=%s on device %s', var.name, var.shape,
var.device)
for col in p.collections:
tf.add_to_collection(col, var)
if store is not None:
store[var_name] = (var, p.ToText())
return var
def MaybePinVarsToCpu(next_creator, **kwargs):
if _FromGlobal('pin_vars_to_cpu'):
with tf.device('/cpu:0'):
return next_creator(**kwargs)
return next_creator(**kwargs)
def MaybeOpportunisticVariableReuse(next_creator, **kwargs):
if GetOpportunisticVariableReuse():
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
return next_creator(**kwargs)
return next_creator(**kwargs)
def GetLingvoVariableCreator(name, var_name):
"""Returns a variable creator function."""
def LingvoVariableCreator(next_creator, **kwargs):
"""Lingvo variable creator."""
# TODO(yonghui): Possibly get away from variable_scope and implement our own
# variable sharing mechanism.
with tf.variable_scope(name) as scope:
var_scope = tf.VariableScope(
scope.reuse,
custom_getter=scope.custom_getter,
caching_device=scope.caching_device,
use_resource=True)
with tf.variable_scope(var_scope), tf.variable_scope(var_name):
return next_creator(**kwargs)
return LingvoVariableCreator
# TODO(yonghui): Add support for partitioned Variables.
def CreateVariable(name,
params,
trainable=True,
collections=None,
default_seed=None,
synchronization=tf.VariableSynchronization.AUTO,
aggregation=tf.VariableAggregation.NONE):
"""Creates tf.Variable according to param_config.
Args:
name: A string, name of the variable.
params: A WeightParams specifying the details of how this variable should be
constructed and initialized.
trainable: Whether or not the variable is trainable.
collections: Override the default variable collection (
tf.GraphKeys.GLOBAL_VARIABLES). Note that specifying a collections
argument in `params` does not override this collection; the caller must
set this field explicitly in the call to CreateVariable().
default_seed: Seed to use for initialization if not specified in params.
Used for deterministic initialization in tests.
    synchronization: Indicates when a distributed variable will be aggregated.
Accepted values are constants defined in the class
tf.VariableSynchronization. By default the synchronization is set to AUTO
and the current DistributionStrategy chooses when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class tf.VariableAggregation.
Returns:
The created variable.
"""
if use_stateless_vars_init():
return _CreateVariableStateless(name, params, trainable, collections,
default_seed, synchronization, aggregation)
else:
return _CreateVariableStateful(name, params, trainable, collections,
default_seed, synchronization, aggregation)
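# Minimal usage sketch for CreateVariable(). `WeightParams`/`WeightInit` refer
# to the weight-params helpers defined earlier in this file; their names and
# exact signatures are treated as assumptions here:
#
#   w_params = WeightParams(
#       shape=[1024, 4096],
#       init=WeightInit.Xavier(1.0),
#       dtype=tf.float32)
#   with VariableStore():
#     w = CreateVariable('ffn_w', w_params)  # a [1024, 4096] float32 variable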
def _CreateVariableStateful(name,
params,
trainable=True,
collections=None,
default_seed=None,
synchronization=tf.VariableSynchronization.AUTO,
aggregation=tf.VariableAggregation.NONE):
"""Creates tf.Variable using TF stateful RNGs according to param_config.
Args:
name: A string, name of the variable.
params: A WeightParams specifying the details of how this variable should be
constructed and initialized.
trainable: Whether or not the variable is trainable.
collections: Override the default variable collection (
tf.GraphKeys.GLOBAL_VARIABLES).
default_seed: Seed to use for initialization if not specified in params.
Used for deterministic initialization in tests.
    synchronization: Indicates when a distributed variable will be aggregated.
Accepted values are constants defined in the class
tf.VariableSynchronization. By default the synchronization is set to AUTO
and the current DistributionStrategy chooses when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class tf.VariableAggregation.
Returns:
The created variable.
"""
p = params.Copy()
shape = tf.TensorShape(ToStaticShape(p.shape)).as_list()
if shape:
assert all([dim_size > 0 for dim_size in shape]), shape
dim0 = shape[0]
else:
dim0 = 1
assert p.init.method == 'constant' or np.all(np.asarray(p.init.scale) >= 0)
method = p.init.method
scale = p.init.scale
seed = p.init.seed
if IsDefaultParamInit(p.init):
tf.logging.warning(
'WARNING!!! var %s is using the default xavier initializer.'
' Make sure this is intended.', name)
with tf.variable_scope(name) as scope:
var_name = GetVariableName(scope.name)
if tf.get_default_graph().seed is not None:
      # We are in a program/test which needs deterministic randomization.
if seed is None:
if default_seed is not None:
seed = default_seed
else:
# We are not given a per-variable random seed. We use hash of
# variable name as a stable random seed.
seed = GenerateSeedFromName(var_name)
# If var_name matches a regex, then set the var_dtype; else use p.dtype.
var_dtype = FindDataType(var_name)
if var_dtype is None:
var_dtype = p.dtype
init_dtype = var_dtype.real_dtype
# TODO(b/172827074): we do not natively support var initialization for
# int8 type except for constant initialization.
# NOTE: For int8, we initialize by scaling float32 random values to integer.
if init_dtype == tf.int8:
init_dtype = tf.float32
v_init = _CreateVarInitStateful(name, method, shape, dim0, seed, scale,
init_dtype, p.init.custom_v_init)
if var_dtype == tf.complex64:
def ComplexWrapper(init):
def _Wrapper(shape, dtype):
del dtype
# A more complex alternative may be to use the init function for
# magnitudes and uniform random for phases instead.
shape = [2] + shape
value = init(shape, init_dtype)
return tf.complex(value[0], value[1])
return _Wrapper
v_init = ComplexWrapper(v_init)
if var_dtype == tf.int8:
def FloatToInt8Wrapper(init):
def _Wrapper(shape, dtype):
del dtype
value = init(shape, init_dtype)
scale = tf.math.maximum(
tf.math.reduce_min(value) / -127,
tf.math.reduce_max(value) / 127)
value = tf.divide(value, scale)
return tf.cast(value, tf.int8)
return _Wrapper
v_init = FloatToInt8Wrapper(v_init)
with contextlib.ExitStack() as context_stack:
for variable_creator_fn in (GetLingvoVariableCreator(name, var_name),
MaybeOpportunisticVariableReuse,
MaybePinVarsToCpu, MaybeReuseFromVariableStore):
context_stack.enter_context(VariableCreatorScope(variable_creator_fn))
if method == 'custom_constant':
call_shape = None
else:
call_shape = GetVariableShapePrefixes() + list(shape)
var = _GetVariableCreator()(
var_name=var_name,
var_params=p,
name='var',
shape=call_shape,
dtype=var_dtype,
initializer=v_init,
collections=collections,
trainable=trainable,
validate_shape=True,
synchronization=synchronization,
aggregation=aggregation)
combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
if combined_layers_dims > 0:
# pylint: disable=protected-access
var.op._set_attr('_num_leading_dims_for_combined_layers',
attr_value_pb2.AttrValue(i=combined_layers_dims))
# Shard the variable according to the sharding spec.
tensor_split_dims_mapping = p.tensor_split_dims_mapping
if tensor_split_dims_mapping is not None:
count = (
len(GetVariableShapePrefixes()) + len(shape) -
len(tensor_split_dims_mapping) -
len(gshard_utils.GetMeshSplitDimPrefixContext()))
tensor_split_dims_mapping = [-1] * count + tensor_split_dims_mapping
var = gshard_utils.MeshSplit(
var, p.device_mesh, tensor_split_dims_mapping, use_sharding_op=False)
return var
def _CreateVariableStateless(name,
params,
trainable=True,
collections=None,
default_seed=None,
synchronization=tf.VariableSynchronization.AUTO,
aggregation=tf.VariableAggregation.NONE):
"""Creates tf.Variable using TF stateless RNGs according to `params`.
Args:
name: A string, name of the variable.
params: A WeightParams specifying the details of how this variable should be
constructed and initialized.
trainable: Whether or not the variable is trainable.
collections: Override the default variable collection (
tf.GraphKeys.GLOBAL_VARIABLES).
default_seed: Seed to use for initialization if not specified in params.
Used for deterministic initialization in tests.
    synchronization: Indicates when a distributed variable will be aggregated.
Accepted values are constants defined in the class
tf.VariableSynchronization. By default the synchronization is set to AUTO
and the current DistributionStrategy chooses when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class tf.VariableAggregation.
Returns:
The created variable.
"""
p = params.Copy()
shape = tf.TensorShape(ToStaticShape(p.shape)).as_list()
if shape:
assert all([dim_size > 0 for dim_size in shape]), shape
dim0 = shape[0]
else:
dim0 = 1
assert p.init.method == 'constant' or np.all(np.asarray(p.init.scale) >= 0)
method = p.init.method
scale = p.init.scale
seed = p.init.seed
if IsDefaultParamInit(p.init):
tf.logging.warning(
'WARNING!!! var %s is using the default xavier initializer.'
' Make sure this is intended.', name)
with tf.variable_scope(name) as scope:
var_name = GetVariableName(scope.name)
user_seed = seed if seed is not None else default_seed
seed = _GenerateStatelessRngSeed(var_name, user_seed)
# If var_name matches a regex, then set the var_dtype; else use p.dtype.
var_dtype = FindDataType(var_name)
if var_dtype is None:
var_dtype = p.dtype
init_dtype = var_dtype.real_dtype
v_init = _CreateVarInitStateless(name, method, shape, dim0, seed, scale,
init_dtype, p.init.custom_v_init)
if var_dtype == tf.complex64:
raise TypeError(
'Stateless variable initialization does not support tf.complex64.')
with contextlib.ExitStack() as context_stack:
for variable_creator_fn in (GetLingvoVariableCreator(name, var_name),
MaybeOpportunisticVariableReuse,
MaybeReuseFromVariableStore):
context_stack.enter_context(VariableCreatorScope(variable_creator_fn))
var = _GetVariableCreator()(
var_name=var_name,
var_params=p,
name='var',
shape=GetVariableShapePrefixes() + list(shape),
dtype=var_dtype,
initializer=v_init,
collections=collections,
trainable=trainable,
validate_shape=True,
synchronization=synchronization,
aggregation=aggregation)
combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
if combined_layers_dims > 0:
# pylint: disable=protected-access
var.op._set_attr('_num_leading_dims_for_combined_layers',
attr_value_pb2.AttrValue(i=combined_layers_dims))
# Shard the variable according to the sharding spec.
tensor_split_dims_mapping = p.tensor_split_dims_mapping
if tensor_split_dims_mapping is not None:
count = (
len(GetVariableShapePrefixes()) + len(shape) -
len(tensor_split_dims_mapping) -
len(gshard_utils.GetMeshSplitDimPrefixContext()))
tensor_split_dims_mapping = [-1] * count + tensor_split_dims_mapping
var = gshard_utils.MeshSplit(
var, p.device_mesh, tensor_split_dims_mapping, use_sharding_op=False)
return var
def _RandomXavierUniformInitializer(method, scale, seed):
"""Creates a random Xavier uniform initializer."""
combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
def XavierUniform(shape, dtype):
"""Xavier initialization (x = sqrt(6. / (in + out)); scale*[-x, x])."""
if not shape:
raise ValueError('\'shape\' must not be \'None\' or 0 for XavierUniform')
fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)
if method == 'xavier':
limit = math.sqrt(6. / (fan_in + fan_out))
elif method == 'geo_mean_xavier':
limit = math.sqrt(3. / math.sqrt(fan_in * fan_out))
return scale * tf.random.uniform(shape, -limit, limit, dtype, seed)
return XavierUniform
def _CreateVarInitStateful(name,
method,
shape,
dim0,
seed,
scale,
init_dtype,
custom_v_init=None):
"""Creates variable initialization function for a stateful RNG."""
if (method in [
'gaussian_sqrt_dim', 'uniform_sqrt_dim', 'truncated_gaussian_sqrt_dim'
]):
if len(shape) > 2:
# This is probably not the right method to use when len(shape) > 2,
# e.g. dim0 will be 3 with a 3x3 conv2d kernel.
tf.logging.warning(
'Initializing %s of shape %s with method %s: dim0=%s. '
'Make sure that it is intended.', name, shape, method, dim0)
scale *= 1.0 / math.sqrt(dim0)
combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
if method in ['gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanin']:
fan_in, _ = GetFanInFanOut(shape, combined_layers_dims)
if fan_in is not None:
scale *= 1.0 / math.sqrt(fan_in)
if method in ['gaussian_sqrt_fanout', 'truncated_gaussian_sqrt_fanout']:
_, fan_out = GetFanInFanOut(shape, combined_layers_dims)
if fan_out is not None:
scale *= 1.0 / math.sqrt(fan_out)
if method in ['gaussian_sqrt_fanavg']:
fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)
if fan_in is not None and fan_out is not None:
scale *= math.sqrt(2.0 / (fan_in + fan_out))
if method in [
'gaussian', 'gaussian_sqrt_dim', 'gaussian_sqrt_fanin',
'gaussian_sqrt_fanout', 'gaussian_sqrt_fanavg'
]:
v_init = init_ops.random_normal_initializer(
mean=0.0, stddev=scale, seed=seed, dtype=init_dtype)
elif method in ['uniform', 'uniform_sqrt_dim']:
v_init = init_ops.random_uniform_initializer(
minval=-scale, maxval=scale, seed=seed, dtype=init_dtype)
elif method in ['uniform_positive']:
v_init = init_ops.random_uniform_initializer(
minval=0.0, maxval=scale, seed=seed, dtype=init_dtype)
elif method == 'category':
uniform_init = init_ops.random_uniform_initializer(
minval=0.0, maxval=scale, seed=seed, dtype=init_dtype)
v_init = lambda *args, **kwargs: tf.floor(uniform_init(*args, **kwargs))
elif method in ['uniform_unit_scaling']:
v_init = init_ops.uniform_unit_scaling_initializer(
factor=scale, seed=seed, dtype=init_dtype)
elif method in ['uniform_unit_scaling_fan_avg']:
v_init = tf.variance_scaling_initializer(
scale=scale,
mode='fan_avg',
distribution='uniform',
seed=seed,
dtype=init_dtype)
elif method in [
'truncated_gaussian', 'truncated_gaussian_sqrt_dim',
'truncated_gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanout'
]:
v_init = init_ops.truncated_normal_initializer(
mean=0.0, stddev=scale, seed=seed, dtype=init_dtype)
elif method in ['constant']:
v_init = init_ops.constant_initializer(value=scale, dtype=init_dtype)
elif method in ['xavier', 'geo_mean_xavier']:
def XavierUniform(shape, dtype):
"""Xavier initialization (x = sqrt(6. / (in + out)); scale*[-x, x])."""
if not shape:
raise ValueError(
'\'shape\' must not be \'None\' or 0 for XavierUniform')
fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)
if method == 'xavier':
limit = math.sqrt(6. / (fan_in + fan_out))
elif method == 'geo_mean_xavier':
limit = math.sqrt(3. / math.sqrt(fan_in * fan_out))
return scale * tf.random.uniform(shape, -limit, limit, dtype, seed)
v_init = XavierUniform
elif method in [
'kaiming_uniform_fanin_relu', 'kaiming_uniform_fanin_leakyrelu'
]:
fan_in = np.prod(shape[:-1])
if method == 'kaiming_uniform_fanin_leakyrelu':
# Assume the 'a' parameter is the 'scale' argument.
gain = np.sqrt(2. / (1 + scale**2))
else:
gain = np.sqrt(2.)
std_dev = gain / np.sqrt(fan_in)
bound = np.sqrt(3.0) * std_dev
v_init = init_ops.random_uniform_initializer(
minval=-bound, maxval=bound, seed=seed, dtype=init_dtype)
elif method in ['custom', 'custom_constant']:
v_init = custom_v_init
else:
assert False, 'init_type `%s` not supported.' % method
return v_init
def _GenerateStatelessRngSeed(name, seed):
"""Generates a 2-tuple seed for a stateless variable initializer.
We want to ensure that different variables end up with different random values
even when they are passed the same seed and shape. To this aim, this function
generates a pseudo-unique seed by hashing the variable name and mapping it
into a scalar seed. More specifically, the returned value is a 2-tuple of
tf.int32 scalar, where the first element is the user-provided seed and the
second element is obtained by hashing the variable name.
Args:
name: The variable name for which to generate a stateless-like seed.
seed: The user-specified scalar seed.
Returns:
A 2-tuple seed of tf.int32 values (for TPU compatibility).
"""
seed0 = seed or 0
seed1 = GenerateSeedFromName(name)
return tf.constant([seed0, seed1], dtype=tf.int32)
def _DeterministicRandomNormalInitializer(seed, mean, stddev):
"""Creates a random normal initializer."""
def DeterministicNormal(shape, dtype):
return stateless_random_ops.stateless_random_normal(
shape=shape, seed=seed, mean=mean, stddev=stddev, dtype=dtype)
return DeterministicNormal
def _DeterministicRandomUniformInitializer(seed, minval, maxval):
"""Creates a random uniform initializer."""
def DeterministicUniform(shape, dtype):
return stateless_random_ops.stateless_random_uniform(
shape=shape, seed=seed, minval=minval, maxval=maxval, dtype=dtype)
return DeterministicUniform
def _DeterministicRandomTruncatedNormalInitializer(seed, mean, stddev):
"""Creates a random truncated normal initializer."""
def DeterministicTruncatedNormal(shape, dtype):
return stateless_random_ops.stateless_truncated_normal(
shape=shape, seed=seed, mean=mean, stddev=stddev, dtype=dtype)
return DeterministicTruncatedNormal
def _DeterministicRandomUniformUnitScalingInitializer(seed, factor):
"""Creates a random uniform unit scaling initializer."""
def DeterministicUniformUnitScaling(shape, dtype):
# The following logic is originally from (UniformUnitScaling.__call__())
# in TensorFlow: python/ops/init_ops.py
scale_shape = shape
input_size = 1.0
# Estimating input size is not possible to do perfectly, but we try.
# The estimate, obtained by multiplying all dimensions but the last one,
# is the right thing for matrix multiply and convolutions (see above).
for dim in scale_shape[:-1]:
input_size *= float(dim)
# Avoid errors when initializing zero-size tensors.
input_size = max(input_size, 1.0)
maxval = math.sqrt(3 / input_size) * factor
return stateless_random_ops.stateless_random_uniform(
shape=shape, seed=seed, minval=-maxval, maxval=maxval, dtype=dtype)
return DeterministicUniformUnitScaling
def _DeterministicRandomVarianceScalingInitializer(scale, mode, distribution,
seed):
"""Creates a variance scaling initializer."""
if scale <= 0.:
raise ValueError('`scale` must be positive float.')
if mode not in {'fan_in', 'fan_out', 'fan_avg'}:
raise ValueError('Invalid `mode` argument:', mode)
distribution = distribution.lower()
if distribution not in {
'normal', 'uniform', 'truncated_normal', 'untruncated_normal'
}:
raise ValueError('Invalid `distribution` argument:', distribution)
combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
def DeterministicVarianceScaling(shape, dtype):
# This is originally from TensorFlow: python/ops/init_ops.py
scale_shape = shape
# Handle special case of empty list as shape, since fan_in and fan_out
# are numerically added below. Without this, GetFanInFanOut() would
# return None, None instead.
if isinstance(scale_shape, (list, tuple)) and not scale_shape:
fan_in, fan_out = 1, 1
else:
fan_in, fan_out = GetFanInFanOut(scale_shape, combined_layers_dims)
if mode == 'fan_in':
scale_inner = scale / max(1., fan_in)
elif mode == 'fan_out':
scale_inner = scale / max(1., fan_out)
else:
scale_inner = scale / max(1., (fan_in + fan_out) / 2.)
if distribution == 'normal' or distribution == 'truncated_normal':
# constant taken from scipy.stats.truncnorm.std(
# a=-2, b=2, loc=0., scale=1.)
stddev = math.sqrt(scale_inner) / .87962566103423978
return stateless_random_ops.stateless_truncated_normal(
shape=shape, seed=seed, mean=0.0, stddev=stddev, dtype=dtype)
elif distribution == 'untruncated_normal':
stddev = math.sqrt(scale_inner)
return stateless_random_ops.stateless_random_normal(
shape=shape, seed=seed, mean=0.0, stddev=stddev, dtype=dtype)
else:
limit = math.sqrt(3.0 * scale_inner)
return stateless_random_ops.stateless_random_uniform(
shape=shape, seed=seed, minval=-limit, maxval=limit, dtype=dtype)
return DeterministicVarianceScaling
def _DeterministicRandomXavierUniformInitializer(method, scale, seed):
"""Creates a variance scaling initializer."""
combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
def XavierUniform(shape, dtype):
"""Xavier initialization (x = sqrt(6. / (in + out)); scale*[-x, x])."""
if not shape:
raise ValueError('\'shape\' must not be \'None\' or 0 for XavierUniform')
fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)
if method == 'xavier':
limit = math.sqrt(6. / (fan_in + fan_out))
elif method == 'geo_mean_xavier':
limit = math.sqrt(3. / math.sqrt(fan_in * fan_out))
return scale * stateless_random_ops.stateless_random_uniform(
shape, seed, -limit, limit, dtype)
return XavierUniform
def _CreateVarInitStateless(name,
method,
shape,
dim0,
seed,
scale,
init_dtype,
custom_v_init=None):
"""Creates variable initialization function for a stateless RNG."""
if (method in [
'gaussian_sqrt_dim', 'uniform_sqrt_dim', 'truncated_gaussian_sqrt_dim'
]):
if len(shape) > 2:
# This is probably not the right method to use when len(shape) > 2,
# e.g. dim0 will be 3 with a 3x3 conv2d kernel.
tf.logging.warning(
'Initializing %s of shape %s with method %s: dim0=%s. '
'Make sure that it is intended.', name, shape, method, dim0)
scale *= 1.0 / math.sqrt(dim0)
combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
if method in ['gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanin']:
fan_in, _ = GetFanInFanOut(shape, combined_layers_dims)
if fan_in is not None:
scale *= 1.0 / math.sqrt(fan_in)
if method in ['gaussian_sqrt_fanout', 'truncated_gaussian_sqrt_fanout']:
_, fan_out = GetFanInFanOut(shape, combined_layers_dims)
if fan_out is not None:
scale *= 1.0 / math.sqrt(fan_out)
if method in ['gaussian_sqrt_fanavg']:
fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)
if fan_in is not None and fan_out is not None:
scale *= math.sqrt(2.0 / (fan_in + fan_out))
if method in [
'gaussian', 'gaussian_sqrt_dim', 'gaussian_sqrt_fanin',
'gaussian_sqrt_fanout', 'gaussian_sqrt_fanavg'
]:
v_init = _DeterministicRandomNormalInitializer(
seed=seed, mean=0., stddev=scale)
elif method in ['uniform', 'uniform_sqrt_dim']:
v_init = _DeterministicRandomUniformInitializer(
seed=seed, minval=-scale, maxval=scale)
elif method in ['uniform_positive']:
v_init = _DeterministicRandomUniformInitializer(
seed=seed, minval=0., maxval=scale)
elif method in ['uniform_unit_scaling']:
v_init = _DeterministicRandomUniformUnitScalingInitializer(
seed=seed, factor=scale)
elif method in ['uniform_unit_scaling_fan_avg']:
v_init = _DeterministicRandomVarianceScalingInitializer(
scale=scale, mode='fan_avg', distribution='uniform', seed=seed)
elif method in [
'truncated_gaussian', 'truncated_gaussian_sqrt_dim',
'truncated_gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanout'
]:
v_init = _DeterministicRandomTruncatedNormalInitializer(
seed=seed, mean=0., stddev=scale)
elif method in ['constant']:
v_init = init_ops.constant_initializer(value=scale, dtype=init_dtype)
elif method in ['xavier', 'geo_mean_xavier']:
v_init = _DeterministicRandomXavierUniformInitializer(method, scale, seed)
elif method in [
'kaiming_uniform_fanin_relu', 'kaiming_uniform_fanin_leakyrelu'
]:
fan_in = np.prod(shape[:-1])
if method == 'kaiming_uniform_fanin_leakyrelu':
# Assume the 'a' parameter is the 'scale' argument.
gain = np.sqrt(2. / (1 + scale**2))
else:
gain = np.sqrt(2.)
std_dev = gain / np.sqrt(fan_in)
bound = np.sqrt(3.0) * std_dev
v_init = _DeterministicRandomUniformInitializer(
seed=seed, minval=-bound, maxval=bound)
elif method in ['custom', 'custom_constant']:
v_init = custom_v_init
else:
assert False, 'init_type %s not supported.' % method
return v_init
_global_variable_scope = None
def GetGlobalVariableScope():
"""Gets the global variable scope (as if no variable_scope has been set).
Returns:
The VariableScope corresponding to as if no tf.variable_scope is in effect.
"""
if not _global_variable_scope:
# Each thread gets its own default global variable scope, and we take
# advantage of that in order to get a top-level scope. This avoids the
# need to call tf.get_variable_scope() at the module level, which allows
# this module to be imported without modifying global state (i.e. creating
# the default graph). It is important to not mutate the global state at
    # module load time, because it lets us flip flags after import that affect
# core TensorFlow behavior.
def Initialize():
global _global_variable_scope
_global_variable_scope = tf.get_variable_scope()
t = threading.Thread(target=Initialize)
t.start()
t.join()
return _global_variable_scope
_GLOBAL_STEP_STACK = ThreadLocalStack()
@contextlib.contextmanager
def GlobalStepContext(global_step_tensor):
_GLOBAL_STEP_STACK.stack.append(global_step_tensor)
try:
yield
finally:
_GLOBAL_STEP_STACK.stack.pop()
def GetGlobalStep():
"""Return the global_step."""
if _GLOBAL_STEP_STACK.stack:
return _GLOBAL_STEP_STACK.stack[-1]
return tf.train.get_global_step()
def GetOrCreateGlobalStepVar():
"""Return the global_step variable, creating it if it does not exist.
Prefer GetGlobalStep if a tensor rather than a tf.Variable is sufficient.
Returns:
The global_step variable, or a new created one if it does not exist.
"""
with tf.variable_scope(GetGlobalVariableScope(), use_resource=True):
if _FromGlobal('pin_vars_to_cpu'):
with tf.device('/cpu:0'):
return tf.train.get_or_create_global_step()
else:
return tf.train.get_or_create_global_step()
def LogMultiLines(label, lines):
if not isinstance(lines, (list, tuple)):
lines = lines.split('\n')
for line in lines:
tf.logging.info('%s: %s', label, line)
def _LogPlacement(label, theta, copy):
"""Logs theta and its copy's device placement."""
def GetDevices(m):
"""Flatten a `.NestedMap` m and extracts each value's device."""
return [x.device for x in m.Flatten()]
tf.logging.info('=== %s ===', label)
LogMultiLines(
label,
theta.Pack([('%s -> %s' % (x[0], x[1]))
for x in zip(GetDevices(theta), GetDevices(copy))
]).DebugString())
tf.logging.info('==========')
def CreateLocalTheta(theta, device_list=None, label=None):
"""Creates local copy of theta and shards across devices device list.
Leaves variables intact.
Args:
theta: a `.NestedMap` of variables.
device_list: list of devices to shard across. If None, defaults to a list
[''].
label: Logging label.
Returns:
A `.NestedMap` of identity() wrapped theta
"""
class AddIdentity:
"""Helper class."""
def __init__(self, device_list):
self._list = device_list if device_list else ['']
self._index = 0
def __call__(self, x):
if isinstance(x, tf.Variable):
return x
with tf.device(self._list[self._index % len(self._list)]):
self._index += 1
return tf.identity(x)
copy = theta.Transform(AddIdentity(device_list))
_LogPlacement(label, theta, copy)
return copy
def _GetVarsToLoad(all_vars,
variable_loading_rules,
var_ignore_rules,
ckpt_path,
suppress_logging=False):
"""Determines variables to load and their names in checkpoint."""
# This list contains mappings from var names as they appear in the checkpoint
# to the vars in our model they correspond to.
unused_rules = {
regexp: name_format for regexp, name_format in variable_loading_rules
}
vars_to_load = []
for model_var in all_vars:
loaded = False
for regexp, name_format in variable_loading_rules:
match = re.match(regexp, model_var.name)
# Skip if var doesn't match the loading rules, or if it should be ignored.
if not match:
if not suppress_logging:
tf.logging.debug('Loading rules do not match %s.', model_var.name)
continue
elif any(re.match(r, model_var.name) for r in var_ignore_rules):
if not suppress_logging:
tf.logging.debug('Ignoring %s from loading.', model_var.name)
continue
checkpoint_var_name = name_format % match.groups()
if checkpoint_var_name.endswith(':0'):
checkpoint_var_name = checkpoint_var_name[:-2]
if not suppress_logging:
tf.logging.info('Loading %s from %s with regexp: %s', model_var.name,
checkpoint_var_name, regexp)
vars_to_load.append((checkpoint_var_name, model_var))
unused_rules.pop(regexp, None)
loaded = True
break
if not loaded and not suppress_logging:
tf.logging.info(
          'Not loading model variable %s from %s as it does not match any '
          'loading rules or matches an ignore rule.', model_var.name, ckpt_path)
if not suppress_logging:
for regexp, name_format in unused_rules.items():
tf.logging.warning(f'User provided rule matched no variables: ({regexp}, '
f'{name_format})')
return vars_to_load
def OverrideVarsFromCheckpoint(all_vars, checkpoint_path,
variable_loading_rules, var_ignore_rules):
"""Add TF graph ops to override variables from a provided checkpoint.
Args:
all_vars: List of all the parameters in the model.
checkpoint_path: A path to the checkpoints of a pretrained model.
variable_loading_rules: A list of tuples of strings defining (regex to match
parameter names in the model to override, format string to determine the
corresponding var in the checkpoint).
    var_ignore_rules: A list of regexes matching parameter names in the model
      which should not be overridden, even if they match the loading rules.
Returns:
A callable that, when called with a tf.Session, will restore the variables
from the provided checkpoint.
"""
vars_to_load = _GetVarsToLoad(all_vars, variable_loading_rules,
var_ignore_rules, checkpoint_path)
if not vars_to_load:
all_rules_text = '\n'.join(
[f'{k} --> {v}' for k, v in variable_loading_rules])
raise ValueError(f'Variable loading rules {all_rules_text} '
f'did not match any of {len(all_vars)} vars.')
load_var_names = '\n'.join(sorted([v.name for _, v in vars_to_load]))
tf.logging.info(f'Overriding {len(vars_to_load)} vars from '
f'{checkpoint_path}:\n{load_var_names}')
savers = []
while vars_to_load:
# When restoring, it's possible the same value in the checkpoint
# can be restored to multiple variables (e.g. during
# distillation). However, tf.train.Saver, since it's used for
# both saving and restoring, requires the name in the checkpoint
# to be unique for each variable. So, we call it multiple times
# with a unique set of names each time.
unique_vars_to_load = {}
remaining_vars_to_load = []
for k, v in vars_to_load:
if k not in unique_vars_to_load:
unique_vars_to_load[k] = v
else:
remaining_vars_to_load.append((k, v))
savers.append(tf.train.Saver(var_list=unique_vars_to_load, sharded=True))
vars_to_load = remaining_vars_to_load
def _Restore(sess):
for saver in savers:
saver.restore(sess, checkpoint_path)
return _Restore
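# Usage sketch for OverrideVarsFromCheckpoint; the checkpoint path, regexps and
# variable names below are illustrative only:
#
#   restore_fn = OverrideVarsFromCheckpoint(
#       all_vars=tf.global_variables(),
#       checkpoint_path='/path/to/pretrained/ckpt-100000',
#       variable_loading_rules=[(r'encoder/(.*)', 'encoder/%s')],
#       var_ignore_rules=[r'.*adam.*'])
#   # Later, with a tf.Session `sess` whose variables are initialized:
#   #   restore_fn(sess)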
def OverrideVarsFromCheckpoints(all_vars, ckpts_loading_rules):
"""Add TF graph ops to override model variables from checkpoints.
Args:
all_vars: List of all the parameters in the model.
ckpts_loading_rules: A dictionary of checkpoint path: loading rules.
Checkpoint path must be a path to a pretrained model, and loading rules is
expected to be a tuple of two lists. The first consisting of tuples of
strings defining (regex to match parameter names in the model to override,
format string to determine the corresponding var in the checkpoint), and
the second list consisting of a list of regexes to match parameter names
in the model which should not be overridden, even if they match those in
the loading rules.
Returns:
A callable that, when called with a tf.Session, will restore the variables
from checkpoint and return a list of overwritten variables.
Raises:
ValueError: if colliding vars exist or loading rules is not a list.
"""
if len(ckpts_loading_rules) > 1:
tf.logging.info('Overriding vars from multiple checkpoints.')
var_refs_overridden = set()
var_names_overridden = set()
restore_fns = []
for ckpt_path, loading_rules in ckpts_loading_rules.items():
tf.logging.info('Overriding vars from checkpoint: %s', ckpt_path)
if not isinstance(loading_rules, tuple):
raise ValueError('Loading rules for %s must be a tuple of two lists!' %
ckpt_path)
if len(loading_rules) != 2 or not all(
isinstance(l, list) for l in loading_rules):
raise ValueError('Loading rules for %s must be a tuple of two lists!' %
ckpt_path)
# Filter the model variables to be overridden.
to_load_vars = _GetVarsToLoad(
all_vars,
loading_rules[0],
loading_rules[1],
ckpt_path,
suppress_logging=True)
var_refs_to_override = [var[1].ref() for var in to_load_vars]
var_names_to_override = [var[1].name for var in to_load_vars]
overlap_refs = set.intersection(var_refs_overridden, var_refs_to_override)
if overlap_refs:
raise ValueError('Colliding variables to override: %s' % overlap_refs)
restore_fns.append(
OverrideVarsFromCheckpoint(all_vars, ckpt_path, loading_rules[0],
loading_rules[1]))
var_refs_overridden.update(var_refs_to_override)
var_names_overridden.update(var_names_to_override)
def _Restore(sess):
for fn in restore_fns:
fn(sess)
tf.logging.info('Model variables overridden: %s', var_names_overridden)
return var_names_overridden
return _Restore
def ComputeGradientsSimple(loss_or_activations,
all_vars,
grad_aggregation_method,
colocate_gradients_with_ops,
gate_gradients,
activations_grad=None):
"""Compute gradients."""
tape = _GRADIENT_TAPE_STACK.stack[-1] if _GRADIENT_TAPE_STACK.stack else None
if IsEagerMode() and tape:
tf.logging.info('ComputeGradientsSimple: using gradient tape.')
if activations_grad is not None:
raise ValueError('GradientTape does not accept gradient input values.')
if grad_aggregation_method or colocate_gradients_with_ops or gate_gradients:
tf.logging.warning(
          'When GradientTape is used, these fields will be ignored: '
f'grad_aggregation_method ({grad_aggregation_method}), '
f'colocate_gradients_with_ops ({colocate_gradients_with_ops}), '
f'gate_gradients ({gate_gradients}).')
return tape.gradient(
loss_or_activations,
all_vars,
unconnected_gradients=tf.UnconnectedGradients.NONE)
return tf.gradients(
loss_or_activations,
all_vars,
grad_ys=activations_grad,
aggregation_method=grad_aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
gate_gradients=gate_gradients)
def _ComputeGradientsTpu(loss_or_activations,
all_vars,
grad_aggregation_method,
colocate_gradients_with_ops,
gate_gradients,
skip_zero_gradients=None,
use_bf16_gradients_ar=False,
defer_crs_to_apply_grad=False,
activations_grad=None,
is_activations=False,
tpu_embedding_activations=None):
"""Computes gradients for local loss across whole TPU cluster.
  This implementation specializes for the case where weight params may be used
  a different number of times in the forward computation, so that gradients
  should be normalized by the actual number of times they are computed.
TODO(yonghui): Maybe merge this implementation with the _ComputeGradientsTpu
one.
Args:
loss_or_activations: The loss or activations to backprop from.
all_vars: Vars with respect to which gradients are to be computed.
grad_aggregation_method: aggregation method to use when calling
tf.gradients.
colocate_gradients_with_ops: boolean, whether or not to colocate gradient op
with the original op.
gate_gradients: boolean, flag to be passed to tf.gradients.
skip_zero_gradients: whether to skip zero gradients during aggregation.
use_bf16_gradients_ar: Whether to use bfloat16 dtype for gradients
all-reduce.
defer_crs_to_apply_grad: Whether to defer gradient cross replica sum to
apply_gradient. This helps reducing the number of gradient all-reduces
when doing gradient accumulation, which does gradient cross replica sum
only every k steps in a tf.cond. Currently this works only when
skip_zero_gradients is None.
activations_grad: The gradients computed for activations.
is_activations: A boolean, whether the input is loss or activations.
tpu_embedding_activations: A `.NestedMap` of tpu embedding feature name ->
embedding feature tensor.
Returns:
Gradients to be passed back. If tpu_embedding_activations is set, their
gradients will be placed at the end.
Raises:
ValueError: upon invalid arguments.
"""
if is_activations:
assert activations_grad is not None
if not skip_zero_gradients and not is_activations:
# Scale the loss to account for the full batch size.
shards = tpu_function.get_tpu_context().number_of_shards
assert shards
loss_or_activations *= tf.constant(
1.0 / shards, dtype=loss_or_activations.dtype)
else:
assert not tpu_embedding_activations, (
'Gradient computation for tpu embedding activations requires proper '
'loss scaling, and so is not compatible with skip_zero_gradients and '
'is_activations.')
# Computes the gradients.
# Sum the grads so that we can compute statistics across the whole batch.
all_grads = ComputeGradientsSimple(
loss_or_activations=loss_or_activations,
all_vars=all_vars +
(tpu_embedding_activations if tpu_embedding_activations else []),
grad_aggregation_method=grad_aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
gate_gradients=gate_gradients,
activations_grad=activations_grad)
if tpu_embedding_activations:
# Note we don't need to aggregate TPU embedding gradients below.
tpu_embedding_grads = all_grads[len(all_vars):]
all_grads = all_grads[:len(all_vars)]
else:
tpu_embedding_grads = []
# NOTE: We can't use tpu_optimizer.CrossShardOptimizer since
# we need to scale the grads *after* the cross_replica_sum to
# match GPU version!
# TODO(cwhipkey): should we do something different here? - we could do
# some operations on the gradients before the aggregation (see comments in
# tensorflow/contrib/tpu/python/tpu/tpu_optimizer.py - see compute_gradients -
# for some more details).
aggregated_grads = []
for g in all_grads:
if g is None:
aggregated_grads.append(None)
continue
if use_bf16_gradients_ar:
g = tf.cast(g, tf.bfloat16)
with tf.ops.colocate_with(g):
if skip_zero_gradients is None:
# loss is already scaled by 1/shards.
if defer_crs_to_apply_grad:
normalized_g = tf.convert_to_tensor(g)
else:
normalized_g = tf.tpu.cross_replica_sum(g)
else:
# Compute the cross-replica mean of 'g', skipping zero gradients.
# Q(yonghui): Is there a better way to detect a non-zero gradient?
# Note(yonghui): gradient of a weight can be zero if that
# weight is not used in the forward computation, e.g. as in
# switchable layers in neural architecture search, pruned by channel
# mask, or sparsified.
if skip_zero_gradients == 'weight':
# Same shape as 'g'.
g_is_non_zero = tf.cast(tf.math.abs(g) > 1e-8, g.dtype)
elif skip_zero_gradients == 'variable':
# A variable-wide 0/1 scalar.
g_is_non_zero = tf.cast(
tf.reduce_sum(tf.math.abs(g)) > 1e-24, g.dtype)
else:
raise ValueError('Unknown skip_zero_gradients: %s' %
skip_zero_gradients)
num_updates = tf.maximum(tf.tpu.cross_replica_sum(g_is_non_zero), 1.0)
normalized_g = tf.tpu.cross_replica_sum(g) / num_updates
aggregated_grads.append(normalized_g)
return aggregated_grads + tpu_embedding_grads
class _VarGrad(typing.NamedTuple):
var: tf.Tensor
grad: Union[tf.Tensor, tf.IndexedSlices]
scale: Optional[tf.Tensor] = None
class VarGrad:
"""A class that holds a variable and a gradient.
This does not inherit from namedtuple so that tf.nest operations do not
recurse into it.
"""
def __init__(self, *args, **kwargs):
self._var_grad = _VarGrad(*args, **kwargs)
def __getitem__(self, key):
return self._var_grad[key]
def __getattr__(self, key):
return getattr(self._var_grad, key)
def __iter__(self):
if self._var_grad.scale is None:
return iter((self._var_grad.var, self._var_grad.grad))
return iter(self._var_grad)
def __repr__(self):
return repr(self._var_grad)
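# A VarGrad unpacks like a (var, grad) pair when no scale is set, e.g.
# (illustrative): `v, g = VarGrad(v, g)`, or access fields as `vg.var`/`vg.grad`.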
def SkipNoneGradients(var_grads):
"""Removes pairs whose grad is None."""
for key, (_, g) in var_grads.FlattenItems():
if g is None:
tf.logging.info('ComputeGradients drops %s', key)
return var_grads.Filter(lambda var_grad: var_grad.grad is not None)
def ComputeGradients(
loss_or_activations,
vmap,
grad_aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE,
colocate_gradients_with_ops=True,
gate_gradients=False,
compute_gradients_fn=None,
skip_zero_gradients=None,
use_bf16_gradients_ar=False,
skip_none_gradients=True,
defer_crs_to_apply_grad=False,
activations_grad=None,
is_activations=False,
tpu_embedding_activations=None):
"""Computes gradients of variables in vmap w.r.t loss.
Args:
loss_or_activations: either the loss, which is a scalar tensor, or
activations, which could be a tensor or a list of tensors.
vmap: A `.NestedMap` of variables.
grad_aggregation_method: Specifies the method used to combine gradient
terms. Accepted values are constants defined in the class
AggregationMethod.
colocate_gradients_with_ops: If True, try colocating gradients with the
corresponding op.
    gate_gradients: If True, add a tuple around the gradients returned for an
      operation. This avoids some race conditions.
compute_gradients_fn: Function to use to compute gradients. If None, use
default. compute_gradients_fn should have the same signature as this
function, but without the last argument.
skip_zero_gradients: Whether to skip aggregating zero gradients. This helps
in case where some weights may not be used in forward computation, e.g.,
sparsely activated networks or switchable layers in neural architectural
search. Only applicable on TPU.
Possible values are:
- None: do not skip zero gradients;
- `variable`: skip if the entire variable's gradients are almost zero;
reduce_sum(abs(grads)) < 1e-8.
- `weight`: skip if the individual weight's gradients are almost zero:
abs(grad) < 1e-8.
use_bf16_gradients_ar: Whether to use bfloat16 dtype for gradients
all-reduce. This applies to TPU only.
skip_none_gradients: Whether to skip gradients that are None.
defer_crs_to_apply_grad: Whether to defer gradient cross replica sum to
apply_gradient. This applies to TPU only.
activations_grad: The gradients computed for activations.
is_activations: A boolean, whether the input is loss or activations.
tpu_embedding_activations: A `.NestedMap` of tpu embedding feature name ->
embedding feature tensor.
Returns:
var_grad - a `.NestedMap` of VarGrad. You can view
var_grad as an ordered list of (key, (var, grad)) tuples. Every
key of var_grad exists in vmap. Every variable in vmap that
contributes to loss must exist in var_grad. Every var of var_grad
must exist in vmap. grad is the corresponding gradient computed
for var. grad is guaranteed to be not None.
If tpu_embedding_activations is set, a sub `.NestedMap` named
tpu_embedding_var_grads will be used to store the VarGrads for the
activations. In this case, key is the feature name, and var in the VarGrad
is the activation tensor (not a real variable).
"""
if not is_activations:
loss_or_activations = HasRank(loss_or_activations, 0)
if not tpu_embedding_activations:
tpu_embedding_activations = NestedMap()
assert isinstance(tpu_embedding_activations, NestedMap)
assert isinstance(vmap, NestedMap)
assert skip_zero_gradients in (None, 'variable', 'weight')
# Uniqify and remove None.
filtered_vmap = vmap.Filter(_Unique())
assert filtered_vmap is not None
# Filter out variables not contributing to 'loss_or_activations'.
# This doesn't work if the training loop is wrapped inside a tf.function,
# since all variables will be lifted out and trainable_variables will be
# empty. In that case we skip the check.
trainable_variables = set([v.ref() for v in tf.trainable_variables()])
if trainable_variables:
def Needed(v):
if isinstance(v, tf.Variable):
if v.ref() not in trainable_variables:
# Skip non-trainable variables. Otherwise,
# tf.Optimizer.apply_gradients throws up an exception instead
# of skipping the update.
return False
return True
filtered_vmap = filtered_vmap.Filter(Needed)
assert filtered_vmap is not None
filtered_vlist = filtered_vmap.Flatten()
# Use caller-supplied gradient function if supplied.
if compute_gradients_fn is not None:
assert not tpu_embedding_activations
take_grad = compute_gradients_fn
else:
# tpu vs non-tpu is slightly different.
if use_tpu():
take_grad = functools.partial(
_ComputeGradientsTpu,
skip_zero_gradients=skip_zero_gradients,
use_bf16_gradients_ar=use_bf16_gradients_ar,
defer_crs_to_apply_grad=defer_crs_to_apply_grad,
activations_grad=activations_grad,
is_activations=is_activations,
tpu_embedding_activations=tpu_embedding_activations.Flatten())
else:
assert not tpu_embedding_activations
take_grad = ComputeGradientsSimple
grads = take_grad(loss_or_activations, filtered_vlist,
grad_aggregation_method, colocate_gradients_with_ops,
gate_gradients)
if tpu_embedding_activations:
tpu_embedding_grads = grads[len(filtered_vlist):]
grads = grads[:len(filtered_vlist)]
else:
tpu_embedding_grads = None
# Formulate pairs of (var, grad) and pack them into the same
# structure as filtered_vmap.
var_grads = filtered_vmap.Pack(
[VarGrad(v, g) for v, g in zip(filtered_vlist, grads)])
if skip_none_gradients:
var_grads = SkipNoneGradients(var_grads)
if tpu_embedding_grads:
# Create VarGrads for TPU embedding activations in a dedicated sub map.
assert 'tpu_embedding_var_grads' not in var_grads
tpu_embedding_activation_list = tpu_embedding_activations.Flatten()
tpu_embedding_var_grads = [
VarGrad(v, g)
for v, g in zip(tpu_embedding_activation_list, tpu_embedding_grads)
]
tpu_embedding_var_grads = tpu_embedding_activations.Pack(
tpu_embedding_var_grads)
    # Replace None gradients with zeros, since TPU embedding expects all
    # activations to have gradients.
def _NoneToZeros(key, var_grad):
if var_grad.grad is None:
tf.logging.warning(
f'TPU embedding gradient for feature {key} is None. Replacing with '
'zeros.')
return VarGrad(var_grad.var, tf.zeros_like(var_grad.var))
return var_grad
var_grads.tpu_embedding_var_grads = (
tpu_embedding_var_grads.TransformWithKey(_NoneToZeros))
return var_grads
def MaskGradients(var_grad, grad_mask):
"""Computes gradients of non-masked variables in vmap w.r.t loss.
Args:
var_grad: A `.NestedMap` of (variable, gradient)
grad_mask: A dict of (variable name, mask).
Returns:
var_grad - a `.NestedMap` of (variable, mask * gradient).
"""
def ApplyMask(entry):
var, grad = entry
mask = grad_mask[var.name]
if isinstance(grad, tf.IndexedSlices):
return VarGrad(var, tf.IndexedSlices(grad.values * mask, grad.indices))
else:
return VarGrad(var, grad * mask)
return var_grad.Transform(ApplyMask)
def ApplyGradMultiplier(vs_gs, grad_scale=None):
"""Scale gradients by grad_scale on same device as corresponding variables.
Args:
vs_gs: A `.NestedMap` of VarGrad.
grad_scale: If None, each vs_gs entry has the scale. Otherwise, grad_scale
applies to every entry.
Returns:
A `.NestedMap` of (variable, gradient * grad_scale). In particular, if
grad_scale is 0, the result gradient is always 0, even if the input
gradient is inf or nan.
"""
def ScaleOrZero(var: tf.Tensor, grad: tf.Tensor,
scale: tf.Tensor) -> tf.Tensor:
grad = CheckNumerics(grad, 'Gradient for %s is not finite.' % var.name)
return tf.where(
tf.equal(scale, 0.), tf.zeros_like(grad),
tf.cast(scale, grad.dtype) * grad)
def Scale(item: VarGrad) -> VarGrad:
"""Scales the gradient."""
var, grad = item
assert grad is not None, ('No grad found for ', var.name)
if grad_scale is None:
scale = item.scale
else:
scale = grad_scale
with tf.device(var.device):
if isinstance(grad, tf.IndexedSlices):
grad = tf.IndexedSlices(
ScaleOrZero(var, grad.values, scale), grad.indices,
grad.dense_shape)
else:
grad = ScaleOrZero(var, grad, scale)
return VarGrad(var, grad)
return vs_gs.Transform(Scale)
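# Illustrative sketch: scaling a NestedMap of VarGrad, assuming `var_grads`
# was produced by ComputeGradients above.
#   scaled_var_grads = ApplyGradMultiplier(var_grads, grad_scale=0.5)
#   # Each gradient is scaled on its variable's device; with grad_scale=0.0
#   # every resulting gradient is exactly 0, even for inf/nan inputs.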
def HasNanOrInf(x):
if isinstance(x, tf.IndexedSlices):
x = x.values
with tf.device(x.device):
if x.dtype.is_complex:
return tf.reduce_any(
[HasNanOrInf(tf.math.real(x)),
HasNanOrInf(tf.math.imag(x))])
return tf.reduce_any(
tf.math.logical_or(tf.math.is_nan(x), tf.math.is_inf(x)))
def HasNanOrInfGradient(var_grads):
"""Returns a bool tensor to indicate if `var_grads` contains NaNs or Infs.
Args:
var_grads: A `.NestedMap` with (var, grad) tuple as the map value.
Returns:
A bool scalar tensor to indicate if the `var_grads` contains NaNs or Infs.
"""
return tf.reduce_any([HasNanOrInf(g) for (_, g) in var_grads.Flatten()])
def ApplyGradNormClipping(vs_gs, norm=1.0):
"""Clip gradients to norm on same device as corresponding variables.
Args:
vs_gs: A `.NestedMap` of VarGrad.
norm: Each tensor's gradient will be scaled down to have a maximum L2-norm
value of `norm`.
Returns:
A `.NestedMap` of VarGrad(variable, clipped_gradient), where each gradient
has been clipped so that its L2-norm is at most `norm`.
"""
def ClipByNorm(var, grad, norm):
grad = CheckNumerics(grad, 'Gradient for %s is not finite.' % var.name)
return tf.clip_by_norm(grad, norm)
def Clip(item):
"""Scales the gradient."""
var, grad = item
assert grad is not None, ('No grad found for ', var.name)
with tf.device(var.device):
if isinstance(grad, tf.IndexedSlices):
grad = tf.IndexedSlices(
ClipByNorm(var, grad.values, norm), grad.indices, grad.dense_shape)
else:
grad = ClipByNorm(var, grad, norm)
return VarGrad(var, grad)
return vs_gs.Transform(Clip)
SKIP_LP_REGULARIZATION = '__lingvo_skip_lp_regularization'
def AdjustGradientsWithLpLoss(var_grads, lp_regularizer_weight, p=2.0):
"""Adjusts the map of (var, grad) with Lp regularization, where p=1.0 or 2.0.
Args:
var_grads: a `.NestedMap` or list of (variable, gradient).
lp_regularizer_weight: Lp regularization weight.
p: For now we support 1.0 or 2.0.
Returns:
A tuple (lp_loss, var_grads).
- lp_loss: A scalar. The lp loss.
- var_grads: a `.NestedMap` or list of (variable, gradient) regulated by Lp.
"""
# TODO(yuancao): For now we support p=1 or 2, but this can be extended to
# lp-norm in general.
assert p in [2.0, 1.0], 'For now we only support L1/L2 regularization.'
def GetVar(item):
var, grad = item
if isinstance(grad, tf.IndexedSlices):
with tf.device(var.device):
ids = HasRank(grad.indices, 1)
uniq_ids = tf.unique(ids).y
return tf.gather(var, uniq_ids)
else:
return var
def ShouldAdjust(v):
return not _VarInCollection(v, tf.get_collection(SKIP_LP_REGULARIZATION))
filtered_var_grads = [
var_grad for var_grad in Flatten(var_grads) if ShouldAdjust(var_grad.var)
]
filtered_vars = Transform(GetVar, filtered_var_grads)
for v in filtered_vars:
v_name = v.name if not tf.executing_eagerly() else '[eager]'
tf.logging.info('AdjustGradientsWithLpLoss: %s', v_name)
if p == 2.0:
lp_loss = 0.5 * lp_regularizer_weight * SumSquared(filtered_vars)
elif p == 1.0:
lp_loss = lp_regularizer_weight * SumAbs(filtered_vars)
def LpGrad(var_grad):
"""Adjusts item's grad w/ Lp loss term."""
var, grad = var_grad
if isinstance(grad, tf.IndexedSlices):
# Question(rpang): do we apply Lp loss here even if 'var' is in
# SKIP_LP_REGULARIZATION?
#
# Note: IndexedSlices appears for embedding lookups.
# Embedding lookup ids can contain duplicates. For duplicated ids, we
# only want to count each id once.
with tf.device(var.device):
emb = HasRank(var, 2)
vocab_size = tf.shape(emb)[0]
ids = HasRank(grad.indices, 1)
values = tf.gather(emb, ids) # [#ids, dims]
with tf.device(grad.device):
# Counts is a vector of size vocab_size. counts[i] is i-th words
# occurrences in 'ids'.
counts = tf.math.unsorted_segment_sum(
tf.ones_like(ids, dtype=values.dtype), ids, vocab_size)
# Gradients for duplicated ids will be summed when they get
# applied, and hence we account for that by first dividing
# gradient resulting from lp loss by how many times the id is
# duplicated.
#
# For each id in 'ids', we know counts[id] is non-zero,
# hence, it's always safe to take reciprocal.
weights = tf.math.reciprocal(tf.gather(counts, ids))
weights = tf.expand_dims(weights, -1) # [#ids, 1]
if p == 2.0:
grad_v = values
elif p == 1.0:
grad_v = tf.sign(values)
delta = lp_regularizer_weight * weights * grad_v
grad = tf.IndexedSlices(grad.values + delta, ids)
elif not _VarInCollection(var, tf.get_collection(SKIP_LP_REGULARIZATION)):
with tf.device(var.device):
if p == 2.0:
grad_v = var
elif p == 1.0:
grad_v = tf.sign(var)
delta = lp_regularizer_weight * grad_v
with tf.device(grad.device):
grad += delta
return VarGrad(var, grad)
return lp_loss, Transform(LpGrad, var_grads)
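# Illustrative sketch: adding L2 regularization to gradients, assuming
# `var_grads` was produced by ComputeGradients and a weight of 1e-4.
#   l2_loss, var_grads = AdjustGradientsWithLpLoss(
#       var_grads, lp_regularizer_weight=1e-4, p=2.0)
#   # l2_loss == 0.5 * 1e-4 * sum_of_squares(vars not in
#   # SKIP_LP_REGULARIZATION), and each dense gradient gets 1e-4 * var added.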
def SplitRecursively(x, num_splits, axis=-1):
"""Splits Tensors in 'x' recursively.
Args:
x: a Tensor, or a list or NestMap containing Tensors to split.
num_splits: number of splits per Tensor.
axis: the split axis.
Returns:
A list of split values of length 'num_splits'.
- If 'x' is a Tensor, a list of split Tensors.
- If 'x' is a list, a list of lists, where each sublist has the same length
as 'x' and the k'th element in each sublist corresponds to a split of the
k'th element from 'x'.
- If 'x' is a `.NestedMap`, a list of `.NestedMap`, where each field
corresponds to a split from the same field of 'x'.
"""
if isinstance(x, tf.Tensor):
return tf.split(x, num_splits, axis=axis)
elif isinstance(x, list):
splits = [SplitRecursively(element, num_splits, axis) for element in x]
splits = list(zip(*splits))
return [list(t) for t in splits]
elif isinstance(x, NestedMap):
results = [NestedMap() for _ in range(num_splits)]
for key, val in x.items():
val_splits = SplitRecursively(val, num_splits, axis)
for i in range(num_splits):
results[i][key] = val_splits[i]
return results
else:
raise TypeError('Unexpected type for SplitRecursively: %s' % type(x))
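# Illustrative sketch of SplitRecursively on a NestedMap (assumed shapes):
#   x = NestedMap(a=tf.zeros([2, 6]), b=[tf.ones([2, 4])])
#   splits = SplitRecursively(x, 2, axis=-1)
#   # splits is a list of 2 NestedMaps; splits[0].a has shape [2, 3] and
#   # splits[0].b is a list with one tensor of shape [2, 2].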
def ConcatRecursively(splits, axis=-1):
"""Concatenates tensors from 'splits'.
This is the inverse function of SplitRecursively.
Args:
splits: a list of splits to concatenate, where elements can be Tensors,
lists, or `.NestedMap`. The elements must share the same type and
structure. For example, list elements must have the same length;
`.NestedMap` must have the same set of fields.
axis: the concatenation axis.
Returns:
Concatenated data.
- If input 'splits' are Tensors, returns a concatenated Tensor.
- If input 'splits' are lists, returns a list of the same length where the
k'th element represents concatenated data of the k'th element from each
split.
- If input 'splits' are `.NestedMap`, returns a `.NestedMap` with each field
concatenated from corresponding fields of input splits.
Raises:
TypeError: if 'splits' is not a list or elements of 'splits' do not have
known or matching types.
ValueError: if 'splits' is empty or elements of 'splits' do not have
matching structures.
"""
if not isinstance(splits, list):
raise TypeError('Non-list inputs for ConcatRecursively: %s' % splits)
if not splits:
raise ValueError('Empty inputs for ConcatRecursively: %s' % splits)
tmpl = splits[0]
if isinstance(tmpl, tf.Tensor):
return tf.concat(splits, axis=axis)
elif isinstance(tmpl, list):
if not all(isinstance(split, list) for split in splits):
raise TypeError('Type mismatch for ConcatRecursively: %s' % splits)
if not all(len(split) == len(tmpl) for split in splits):
raise ValueError('Length mismatch for ConcatRecursively: %s' % splits)
return [
ConcatRecursively([split[i]
for split in splits], axis)
for i in range(len(tmpl))
]
elif isinstance(tmpl, NestedMap):
if not all(isinstance(split, NestedMap) for split in splits):
raise TypeError('Type mismatch for ConcatRecursively: %s' % splits)
results = NestedMap()
for key in tmpl:
results[key] = ConcatRecursively([split[key] for split in splits], axis)
return results
else:
raise TypeError('Unexpected type for ConcatRecursively: %s' % type(splits))
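# Illustrative sketch: ConcatRecursively inverts SplitRecursively (assumed
# input `x` as in the SplitRecursively sketch above).
#   splits = SplitRecursively(x, 2, axis=-1)
#   y = ConcatRecursively(splits, axis=-1)
#   # y has the same structure and shapes as x.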
def WeightedAvg(values, weights, sum_reduction_fn=tf.reduce_sum, name=''):
"""Computes weighted average of values from a tensor.
Args:
values: a tensor of values
weights: a tensor of weights
sum_reduction_fn: called to reduce the values and weights to a single value
name: name of metric.
Returns:
A tuple (avg, total_weight).
- avg: weighted average value
- total_weight: sum of all weights
"""
msg = 'shape of values and weights tensors must match for metric ' + name
values = with_dependencies(
[assert_equal(tf.shape(values), tf.shape(weights), message=msg)], values)
total_weight = sum_reduction_fn(weights)
# divide_no_nan only supports tf.{float,complex}*.
dtype = values.dtype if values.dtype is tf.float64 else tf.float32
avg = tf.math.divide_no_nan(
sum_reduction_fn(tf.cast(values, dtype) * tf.cast(weights, dtype)),
tf.cast(total_weight, dtype))
return tf.cast(avg, values.dtype), total_weight
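# Illustrative sketch (assumed values):
#   avg, total = WeightedAvg(tf.constant([1., 3.]), tf.constant([1., 1.]))
#   # avg == 2.0 and total == 2.0. With all-zero weights, avg is 0 because
#   # tf.math.divide_no_nan is used.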
def WeightedAvgOfMetrics(metrics):
"""Computes the weighted average of metrics in the list.
Args:
metrics: list of dictionaries of metrics
Returns:
ret_dict - dictionary of weighted averages of each metric.
"""
ret_dict = {}
lists_of_metrics = {}
for m in metrics:
for name, (value, weight) in m.items():
if name not in lists_of_metrics:
lists_of_metrics[name] = []
lists_of_metrics[name].append((value, weight))
for name, values_and_weights in sorted(lists_of_metrics.items()):
values = tf.stack([x[0] for x in values_and_weights])
weights = tf.stack([x[1] for x in values_and_weights])
ret_dict[name] = WeightedAvg(values, weights, tf.reduce_sum, name)
return ret_dict
def ConcatPerExampleTensors(per_example):
"""Concatenate per-example tensors from many hosts into one large block.
Args:
per_example: list of dictionaries of per-example tensors.
Returns:
ret_dict - string -> concatenated tensors.
"""
ret_dict = {}
lists_of_per_example = {}
for m in per_example:
for name, value in m.items():
if name not in lists_of_per_example:
lists_of_per_example[name] = []
lists_of_per_example[name].append(value)
for name, values in sorted(lists_of_per_example.items()):
ret_dict[name] = tf.concat(values, 0)
return ret_dict
def CombineMetrics(loss_metric_weight_pairs):
"""Combines metrics from `loss_metric_weight_pairs` according to weights.
Keys must either exist in all metrics, in which case they are processed as a
weighted sum, or exist in only one of the metrics, in which case they are
copied.
Args:
loss_metric_weight_pairs: a list of (metrics, weight) pairs, where each
weight is a float and each metrics is a dict with str keys and
(metric_value, target_weight) values.
Returns:
A dict with the same set of keys as input metrics and values of
(weighted_sum(metric_value), weighted_sum(target_weight)).
Raises:
ValueError: if there exists a metric that exists in more than one element
of `loss_metric_weight_pairs` but not in all of them.
"""
all_keys = set(
[k for loss_metrics, _ in loss_metric_weight_pairs for k in loss_metrics]) # pylint: disable=g-complex-comprehension
result = {}
for k in all_keys:
count = 0
for loss_metrics, weight in loss_metric_weight_pairs:
if k in loss_metrics:
count += 1
if count > 1 and count != len(loss_metric_weight_pairs):
raise ValueError('Found metric %s which exists in more than one '
'but not all loss metrics.' % k)
total_val = 0
total_target_weight = 0
for loss_metrics, weight in loss_metric_weight_pairs:
if k in loss_metrics:
val, target_weight = loss_metrics[k]
if count == 1:
# Single metric, don't multiply by weight.
total_val = val * target_weight
total_target_weight = target_weight
else:
# Total weighted sum of all predictions.
total_val += weight * val * target_weight
total_target_weight += weight * target_weight
result[k] = (total_val / total_target_weight, total_target_weight)
return result
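# Illustrative sketch (assumed metric name 'loss'):
#   combined = CombineMetrics([({'loss': (1.0, 10.)}, 0.25),
#                              ({'loss': (2.0, 10.)}, 0.75)])
#   # combined['loss'] == (17.5 / 10.0, 10.0) == (1.75, 10.0), i.e. a
#   # weighted sum of values and of target weights.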
def AddVN(p, x, per_step=False):
"""Add variational noise to x.
Args:
p: Layer params, with a `vn` subparam containing `VariationalNoiseParams`.
x: Input to add variational noise to.
per_step: Whether to add per_step noise.
Returns:
The input with variational noise added according to params.
"""
tensor_name = x.name if not tf.executing_eagerly() else '[eager]'
if per_step:
if not p.vn.per_step_vn:
tf.logging.info(
'p.vn.per_step_vn is not set. Not adding per-step vn to ' +
tensor_name)
return x
else:
if not p.vn.global_vn:
tf.logging.info('p.vn.global_vn is not set. Not adding global vn to ' +
tensor_name)
return x
tf.logging.info(
f"Add {'per-step' if per_step else 'global'} vn to {tensor_name}: {p.vn}")
if p.vn.scale is None:
raise ValueError('VN scale must be set.')
if p.vn.deterministic:
noises = DeterministicVN(p, tf.shape(x), mean=0.0, std=1.0)
noises = tf.cast(noises, x.dtype)
else:
if per_step:
# recurrent.py does not support stateful random ops in cell_fn due to
# rematerialization.
raise ValueError('per_step vn requires deterministic=True.')
noises = tf.random.normal(
tf.shape(x), stddev=1.0, seed=p.vn.seed, dtype=x.dtype)
scale = tf.where(GetGlobalStep() >= p.vn.start_step, p.vn.scale, 0.0)
return x + tf.cast(scale, x.dtype) * noises
def VariationalNoiseParams(scale,
global_vn=False,
per_step_vn=False,
seed=None,
deterministic=None,
start_step=0):
"""Returns a hyperparams for variational noise."""
if deterministic is None:
deterministic = cluster_factory.Current().in_unit_test
p = hyperparams.Params()
p.Define(
'scale', scale,
'Std of the variational noise to apply. This can be a scalar,'
' or a scalar tensor.')
p.Define('global_vn', global_vn,
'Adds global variational noise every training step iff True.')
p.Define('per_step_vn', per_step_vn,
'Adds per-timestep variational noise iff True.')
p.Define('seed', seed, 'Random seed used to generate noise.')
p.Define(
'deterministic', deterministic, 'If true, generate noise using '
'stateless random ops that are compatible with TF functional ops.')
p.Define(
'start_step', start_step,
'Step starting from which variational noise is added during training.')
return p
def DefaultVN():
return VariationalNoiseParams(scale=None)
# To disable VN for a layer, we use 1.0 as the first argument of the following
# function; otherwise the result would be the same as DefaultVN(), which gets
# overridden by the parent configuration in CopyBaseParams().
def DisableVN():
return VariationalNoiseParams(1.0, False, False)
# Step seed keyed by graph.
_STEP_SEED_DICT = ThreadLocalDict()
# The step seed will increment by np.prod(_STEP_SEED_INCREMENT.stack)
_STEP_SEED_INCREMENT = ThreadLocalStack()
@contextlib.contextmanager
def StepSeedIncrementContext(step):
"""Adds an element to _STEP_SEED_INCREMENT."""
assert step > 0, ('%s' % step)
_STEP_SEED_INCREMENT.stack.append(step)
try:
yield
finally:
_STEP_SEED_INCREMENT.stack.pop()
def GetStepSeed():
"""Gets step_seed."""
key = id(tf.get_default_graph())
if key not in _STEP_SEED_DICT.dict:
ResetStepSeed()
return _STEP_SEED_DICT.dict[key]
def ResetStepSeed(seed=0):
"""Resets step_seed to specified value."""
key = id(tf.get_default_graph())
_STEP_SEED_DICT.dict[key] = tf.convert_to_tensor(seed, dtype=tf.int64)
def MaybeResetStepSeedFromScope():
"""In graph mode, resets step_seed according to the current named scope.
This is used in graph mode to avoid "tensor is from a different graph"
errors that happen when we share random seed tensors too much.
See b/129159299 for more context.
Eager mode does not have this problem, so in eager mode we do nothing.
"""
if not tf.executing_eagerly():
ResetStepSeed(GenerateSeedFromName(tf.no_op(name='new_step_seed').name))
def MaybeResetStepSeed(seed):
"""If we're in graph mode, reset the step seed."""
if not tf.executing_eagerly():
ResetStepSeed(seed)
def GetIncStepSeed():
"""Returns and increments the step_seed."""
step_seed = GetStepSeed()
# TODO(lepikhin): introduce a routine filling a queue of uint32 random seeds
# independent of underlying PRNG used by tensorflow.
inc = np.prod(_STEP_SEED_INCREMENT.stack)
ResetStepSeed(step_seed + inc)
return step_seed
def GenerateStepSeedPair(p, op_seed=None):
"""Generates a seed pair for deterministic random operations in ...
functional loops.
This function retrieves a unique seed pair on each call, based off the current
global step and step seed. The step seed ensures this function returns a
unique seed pair on each call: calling this function automatically increments
the step seed. The step seed is automatically reset at the beginning of each
global step in the model's FProp and works transparently through recurrent.py.
Args:
p: A hyperparams.Params object, containing keys 'random_seed' and
'is_inference'.
op_seed: An additional operation-level seed to apply.
Returns:
A size 2 tensor of op seeds to use for stateless_random ops.
"""
seed_dtype = tf.int32 if use_tpu() else tf.int64
if p.is_inference and p.random_seed is None:
# Ensure GetIncStepSeed is called even inside the shortcut.
# This ensures if p.random_seed is set for other ops that use this function
# that they will get the same seed pair whether or not p.random_seed is set
# for this specific call.
GetIncStepSeed()
# Unlike tf.random*, stateless random ops are completely determined by the
# passed-in seeds. This means at inference time the same inputs will produce
# the same outputs, even if the model is supposed to have randomness such as
# dropout during inference. We inject additional randomness only during
# inference if the graph is exported with random_seed=None as a workaround.
return tf.random.uniform([2], maxval=seed_dtype.max, dtype=seed_dtype)
global_step = tf.cast(GetGlobalStep(), seed_dtype)
step_seed = tf.cast(GetIncStepSeed(), seed_dtype)
seeds = tf.stack([global_step, step_seed])
if p.random_seed is not None:
seeds += p.random_seed
if op_seed is not None:
op_seed = tf.cast(op_seed, seed_dtype)
seeds += op_seed
return seeds
def DeterministicDropout(x, keep_prob, seeds, noise_shape=None, name=None):
"""Similar to `tf.nn.dropout()`, but fully deterministic.
Args:
x: A float Tensor on which to apply dropout.
keep_prob: A scalar `Tensor` of keep probability.
seeds: A Tensor of shape [2]. 2 seeds for deterministic random number
generator.
noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
randomly generated keep/drop flags.
name: An optional name for this operation.
Returns:
A Tensor with the same shape as `x`.
Raises:
InvalidArgumentError: if keep_prob is invalid.
"""
if isinstance(keep_prob, numbers.Real):
if keep_prob <= 0 or keep_prob > 1:
raise tf.errors.InvalidArgumentError(
'keep_prob must be in range (0, 1]. Value: {}'.format(keep_prob))
if keep_prob == 1:
return x
with tf.name_scope(name, 'dropout', [x]) as name:
if use_tpu():
seeds = tf.cast(seeds, tf.int32)
keep_prob = tf.convert_to_tensor(
keep_prob, dtype=tf.float32, name='keep_prob')
# uniform in [keep_prob, 1.0 + keep_prob)
# StatelessRandomUniform op does not support non-float (e.g. bfloat16) dtype
# and non-int32 seed types.
noise_shape = noise_shape or GetShape(x)
random_tensor = keep_prob + tf.random.stateless_uniform(
noise_shape, seed=seeds, dtype=tf.float32)
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = tf.floor(random_tensor)
if x.dtype != tf.float32:
binary_tensor = tf.cast(binary_tensor, x.dtype)
keep_prob = tf.cast(keep_prob, dtype=x.dtype)
result = tf.div(x, keep_prob) * binary_tensor
result.set_shape(x.get_shape())
return result
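# Illustrative sketch: deterministic dropout driven by a seed pair, assuming
# `p` is a layer's params with `random_seed` and `is_inference` fields.
#   seeds = GenerateStepSeedPair(p)
#   y = DeterministicDropout(x, keep_prob=0.9, seeds=seeds)
#   # The same (global step, step seed) pair reproduces the same mask, which
#   # is what makes this safe inside recurrent.py / functional loops.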
def DeterministicVN(params, noise_shape, mean=0.0, std=1.0, name=None):
"""Produces Fully deterministic Gaussian noise from shape, mean and std.
Args:
params: Nested map of params.
noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
randomly generated Gaussian noise.
mean: Mean for the Gaussian noise.
std: Standard deviation for noise.
name: An optional name for this operation.
Returns:
A Tensor with the shape noise_shape and type fprop_dtype.
"""
with tf.name_scope(name, 'gaussian_noise') as name:
seeds = GenerateStepSeedPair(params, params.vn.seed)
random_tensor = mean + (
std * tf.random.stateless_normal(noise_shape, seed=seeds))
if FPropDtype(params) != tf.float32:
random_tensor = tf.cast(random_tensor, FPropDtype(params))
return random_tensor
BATCH_NORM_UPDATES = 'batch_norm_updates'
_BATCH_NORM_UPDATES_DICT = '__batch_norm_update_dict'
_get_batch_norm_updates_dict = _CollectionGetter(_BATCH_NORM_UPDATES_DICT,
lambda: {})
def UpdateBatchNormVars(batch_norm_var, batch_norm_stats, decay):
"""Update batch normalization moving averages."""
with tf.name_scope(
'AssignMovingAvg', values=[
batch_norm_var,
batch_norm_stats,
decay,
]) as scope:
with tf.ops.colocate_with(batch_norm_var):
decay = tf.convert_to_tensor(
1.0 - decay, dtype=batch_norm_var.dtype.base_dtype)
update_delta = (batch_norm_var - tf.cast(
batch_norm_stats, batch_norm_var.dtype.base_dtype)) * decay
has_nan_or_inf = tf.reduce_any(
tf.math.logical_or(
tf.math.is_nan(update_delta), tf.math.is_inf(update_delta)))
update_delta = tf.where(has_nan_or_inf, tf.zeros_like(update_delta),
update_delta)
bn_update = tf.assign_sub(batch_norm_var, update_delta, name=scope)
tf.add_to_collection(BATCH_NORM_UPDATES, bn_update)
if not tf.executing_eagerly_outside_functions():
bn_update_dict = _get_batch_norm_updates_dict()
if bn_update.name in bn_update_dict:
raise ValueError(f'BN update {bn_update.name} already exists.')
bn_update_dict[bn_update.name] = (batch_norm_var, batch_norm_stats)
return bn_update
def FindRelevantBatchNormUpdates(loss, batch_norm_updates):
"""Finds and returns a list of relevant batch-normalization updates.
Args:
loss: The loss that is being optimized for. A tensor or a list of tensors.
batch_norm_updates: A list of batch normalization updates.
Returns:
A pair of lists. The first list contains all the batch normalization updates
that are relevant to the loss being optimized, and the second list contains
the remaining updates from batch_norm_updates that are not in the first list.
"""
if tf.executing_eagerly_outside_functions():
return [], []
dependent_ops_and_tensors = set(FindNeeded(loss))
relevant_updates = []
irrelevant_updates = []
bn_update_dict = _get_batch_norm_updates_dict()
for bn_update in batch_norm_updates:
assert bn_update.name in bn_update_dict, (
f'{bn_update.name} is probably not a valid batch normalization update '
'op. Make sure batch normalization is done through calling'
' the py_utils.UpdateBatchNormVars helper routine.')
bn_stat_name = bn_update_dict[bn_update.name][1].name
if bn_stat_name in dependent_ops_and_tensors:
# If a batch normalization stat is computed in the forward pass in
# computing loss, then the corresponding batch normalization update is
# relevant. Otherwise, it is not.
relevant_updates.append(bn_update)
else:
irrelevant_updates.append(bn_update)
return relevant_updates, irrelevant_updates
_SAMPLE_STEP_STACK = ThreadLocalStack()
@contextlib.contextmanager
def SampleStep(step):
"""A context for a sample step during decoding.
Example usage::
with py_utils.SampleStep(step):
sample = self.DecodeOneStep()
Args:
step: the step tensor.
Yields:
a context manager for the step scope.
"""
try:
_SAMPLE_STEP_STACK.stack.append(step)
yield step
finally:
_SAMPLE_STEP_STACK.stack.pop()
def _GetSampleStep():
return _SAMPLE_STEP_STACK.stack[-1] if _SAMPLE_STEP_STACK.stack else None
def AddDebugTensor(tensor, summarize=None, name=None):
"""Adds `tensor` to the debug collection.
Prints the tensor if `--print_debug_tensors` is True.
Args:
tensor: A tensor.
summarize: Only print this many entries of each tensor. If None, then a
maximum of 3 elements are printed per input tensor.
name: An optional name for the tensor.
Returns:
A Tensor that evaluates to the same value as the input tensor.
"""
if _FromGlobal('print_debug_tensors'):
step = _GetSampleStep()
tensors_to_print = ([] if step is None else [step]) + [tensor]
with tf.name_scope(name) as s:
tensor = tf.Print(
tensor,
tensors_to_print,
message='DEBUG tensor %s' % s,
name=name,
summarize=summarize)
return tensor
def ArgMax(inputs):
"""tf.argmax wrapper.
Args:
inputs: A tensor, whose last dimension is being reduced on.
Returns:
A tensor of rank tf.rank(inputs)-1. If i == ret[indices], then
inputs[indices, i] is the maximum among inputs[indices, :].
"""
if use_tpu():
return tf.argmax(inputs, axis=-1, output_type=tf.int32)
else:
return tf.argmax(inputs, axis=-1)
def _EnsureMatrixShape(x):
if x.shape.ndims is None:
x.set_shape([None, None])
else:
assert x.shape.ndims == 2
return x
def Matmul(x, y, *args, **kwargs):
"""tf.matmul wrapper expecting x and y are actually matrices."""
x = _EnsureMatrixShape(x)
y = _EnsureMatrixShape(y)
return tf.matmul(x, y, *args, **kwargs)
def clip_by_value(t, clip_value_min, clip_value_max, name=None): # pylint: disable=invalid-name
if t.dtype.is_complex:
return tf.complex(
tf.clip_by_value(
tf.math.real(t), clip_value_min, clip_value_max, '%s_real' % name),
tf.clip_by_value(
tf.math.imag(t), clip_value_min, clip_value_max, '%s_imag' % name))
return tf.clip_by_value(t, clip_value_min, clip_value_max, name)
def _TransformAndSum(tensor_list, transform):
"""Apply a transform then sum the list."""
with tf.name_scope('TransformAndSum'):
sum_transform = []
for t in tensor_list:
with tf.device(t.device):
if isinstance(t, tf.IndexedSlices):
sum_transform += [tf.reduce_sum(transform(t.values))]
else:
sum_transform += [tf.reduce_sum(transform(t))]
if not sum_transform:
return tf.constant(0.0)
return tf.add_n(sum_transform)
def SumSquared(tensor_list):
return _TransformAndSum(tensor_list, lambda v: v**2)
def SumAbs(tensor_list):
return _TransformAndSum(tensor_list, tf.abs)
def ReduceRms(x: tf.Tensor) -> tf.Tensor:
"""Computes root mean square of tensor x with numerical stability."""
if not x.shape.is_fully_defined():
raise ValueError('Shape of x must be fully defined.')
if not x.shape.as_list():
return x
denom = functools.reduce((lambda x, y: x * y), x.shape.as_list())
if denom <= 1e8:
return tf.math.sqrt(tf.math.reduce_mean(tf.math.square(x)))
tf.logging.info('reduce_rms %s denom=%d', x, denom)
sum_square_x = tf.math.reduce_sum(tf.math.reduce_sum(tf.math.square(x), -1))
avg_square_x = sum_square_x / tf.constant(denom, dtype=sum_square_x.dtype)
return tf.math.sqrt(avg_square_x)
def PiecewiseConstant(x_in, boundaries, values, vdtype):
"""Returns the piecewise value of x_in."""
x_in = tf.cast(tf.convert_to_tensor(x_in), tf.float32)
assert len(values) == len(boundaries) + 1
assert sorted(boundaries) == list(boundaries)
bs = tf.convert_to_tensor(boundaries, dtype=tf.float32)
vs = tf.convert_to_tensor(values, dtype=vdtype)
# The following is equivalent to 'return vs[index]'.
index = tf.reduce_sum(tf.cast(tf.greater_equal(x_in, bs), tf.int32))
one_hot_vec = tf.one_hot(
tf.expand_dims(index, 0), depth=len(values), dtype=vdtype)
return Matmul(tf.reshape(vs, (1, -1)), tf.transpose(one_hot_vec))[0][0]
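# Illustrative sketch: a piecewise-constant learning-rate schedule (assumed
# step tensor `global_step`).
#   lr = PiecewiseConstant(global_step, boundaries=[1000, 2000],
#                          values=[1e-3, 1e-4, 1e-5], vdtype=tf.float32)
#   # lr is 1e-3 for step < 1000, 1e-4 for 1000 <= step < 2000, else 1e-5.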
def PadSequenceDimension(x, length, pad_val, shape=None, axis=1):
"""Pads x to `length` using `pad_val` along the axis dim.
Assumes `x` is a tensor with rank >= 2, and it only pads `x` to `length`
along the axis dim. Explicitly sets the returned tensor shape to `shape` if
given. Raises runtime errors if x.shape[axis] > length or
x.shape[i] != shape[i] where i != axis.
Args:
x: the tensor to be padded with axis dimension being the time. E.g., x
usually has shape [batch, seq_len, ...], when axis=1.
length: an int to specify the length to pad x to.
pad_val: an int or float used to pad x.
shape: an int array specifying the shape of the padded tensor if specified.
axis: The dimension that x will be padded, default to 1.
Returns:
The padded tensor with shape [batch, seq_len, ...], where
ret[:, :seq_len, ...] == x, when axis=1, and similarly for other axes.
"""
if x.shape.ndims is not None:
rank = x.shape.ndims
assert rank >= 2
slen = GetShape(x, rank)[axis]
pad_len = length - slen
pad = [[0, 0] for _ in range(rank)]
pad[axis][1] = pad_len
else:
rank = tf.rank(x)
with tf.control_dependencies([assert_greater_equal(rank, 2)]):
slen = tf.shape(x)[axis]
pad_len = length - slen
pad = tf.scatter_nd([[axis, 1]], [pad_len], [rank, 2])
x = tf.pad(x, pad, constant_values=pad_val)
if x.shape.ndims is not None and isinstance(length, int):
static_shape = x.shape.as_list()
static_shape[axis] = length
x.set_shape(static_shape)
if shape:
if not isinstance(shape, (list, tuple)):
raise TypeError('Shape must be a list or tuple.')
x = HasRank(x, len(shape))
x = tf.ensure_shape(x, shape)
return x
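# Illustrative sketch (assumed shapes):
#   x = tf.ones([4, 7, 16])
#   y = PadSequenceDimension(x, length=10, pad_val=0.)
#   # y has shape [4, 10, 16]; y[:, :7, :] == x and y[:, 7:, :] == 0.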
def PadSequenceTo(xs, padding, length, pad_val):
"""Pads `xs` and `padding` to `length` using `pad_val` along the 2nd dim.
Pads `xs` to `length` using `pad_val`, and `padding` using 1.
Raises an error if `x.shape[:2]` and `padding.shape` are not the same.
Args:
xs: A Tensor or a list of Tensors of shape [batch, seqlen] or [batch,
seqlen, ...].
padding: A 0/1 Tensor of shape [batch, seqlen]. 1 is for padded locations.
length: A Python int, the length to pad to.
pad_val: A Python numeric, used for padding x.
Returns:
A tuple of padded xs and padding.
"""
if not isinstance(xs, (list, tuple)):
new_xs = [xs]
else:
new_xs = xs
res = []
for x in new_xs:
batch, slen = GetShape(x, 2)
padding = HasRank(padding, 2)
padding = HasShape(padding, [batch, slen])
new_x = PadSequenceDimension(x, length, pad_val)
res.append(new_x)
padding = PadSequenceDimension(padding, length, tf.cast(1, padding.dtype))
if not isinstance(xs, (list, tuple)):
assert len(res) == 1
return res[0], padding
else:
return tuple(res), padding
def ApplyPadding(padding, x, padded=None, use_select=True, ensure_shape=True):
"""Applies padding to a tensor.
This is preferable to using arithmetic means for masking out padded values
such as::
# Equiv to ApplyPadding(padding, x)
x *= 1.0 - padding
# Equiv to ApplyPadding(padding, new, old)
new = old * padding + new * (1 - padding)
Aside from just being easier to read and reason about, using this function
is friendly to quantized representations because it does not mix arithmetic
on the padding values with the values in the tensor being padded (which can
have a very different range than the 0..1 padding tensor).
In addition, this works around issues in quantized schemes where we are
guaranteed to have an exact 0 but not necessarily any other number (i.e. 1).
Args:
padding: Tensor of padding values where 0 == keep and 1 == pad.
x: Tensor to apply padding to.
padded: Optional. Values to include for padded elements. Defaults to zeros.
Must have a shape broadcastable to 'x' if specified.
use_select: Controls whether padding is applied with a select-mask
(True/default) or arithmetically (False). Some platforms have a
sensitivity to one or the other and this is used to work around such
issues.
ensure_shape: If true, ensures the shape of the result is the same as of x.
Returns:
A tensor with the same shape as x with padded values masked.
"""
padding = with_dependencies([
Assert(
tf.reduce_all(
tf.math.logical_or(
tf.equal(padding, tf.zeros([], padding.dtype)),
tf.equal(padding, tf.ones([], padding.dtype)))), [padding])
], padding)
if use_select:
if padded is None:
padded = tf.zeros([], x.dtype)
if padding.dtype != tf.bool:
padding = padding > tf.zeros([], padding.dtype)
result = tf.where_v2(padding, padded, x)
else:
result = x * tf.cast(1.0 - tf.cast(padding, tf.float32), x.dtype)
if padded is not None:
result += padded * tf.cast(padding, padded.dtype)
if ensure_shape:
result = tf.ensure_shape(result, x.shape)
return result
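# Illustrative sketch (assumed values):
#   x = tf.constant([[1., 2.], [3., 4.]])
#   padding = tf.constant([[0., 1.], [0., 0.]])
#   ApplyPadding(padding, x)  # -> [[1., 0.], [3., 4.]]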
def LengthsFromPaddings(paddings, dtype=None):
"""Computes lengths of each sequence in a batch, ignoring trailing padding.
Note the following isn't guaranteed due to leading paddings.
PaddingsFromLengths(LengthsFromPaddings(x)) == x
Args:
paddings: a tensor with shape [batch, length].
dtype: A type to optionally cast the result to.
Returns:
lengths tensor shaped [batch] containing the unpadded length of each
sequence in the batch.
"""
paddings = HasRank(paddings, 2)
mask = 1 - tf.cast(paddings, tf.int32)
# We cannot just use `tf.reduce_sum(mask, axis=1)` to compute the number of
# elements prior to the trailing padding, because there might be leading
# padding. Thus, to identify the amount of trailing padding, we notice that
# the mask values for all the trailing padding will be zero, and thus in the
# cumsum below they will all be equal to the last element of the cumsum. Note
# that the final unpadded value will also be equal to the final cumsum value.
cumsum = tf.cumsum(mask, axis=1)
same_as_last_element = tf.equal(cumsum, cumsum[:, -1:])
# Counting the number of elements with the same value as the last cumsum value
# gives us num_trailing_paddings + 1, and so counting the number of elements
# that *differ* from the last cumsum value gives us the unpadded_length - 1.
unpadded_length = tf.reduce_sum(
1 - tf.cast(same_as_last_element, tf.int32), axis=1) + 1
# Special case for when the mask is all zeros.
# In this case, all the entries in the cumsum will be equal to the last
# element, so the number that differ would be zero, and thus the
# unpadded_length value would be 1 (which is incorrect). We thus set it to 0.
all_zero_mask = tf.equal(tf.reduce_sum(mask, axis=1), 0)
result = tf.where(all_zero_mask, tf.zeros_like(unpadded_length),
unpadded_length)
if dtype and result.dtype != dtype:
result = tf.cast(result, dtype)
return result
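# Illustrative sketch (assumed values):
#   paddings = tf.constant([[0., 0., 1., 1.],
#                           [0., 0., 0., 0.]])
#   LengthsFromPaddings(paddings)  # -> [2, 4]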
def PaddingsFromLengths(lengths, maxlen=None):
"""Computes paddings Tensor from lengths.
Note the following isn't guaranteed due to leading paddings.
PaddingsFromLengths(LengthsFromPaddings(x)) == x.
This method does not generate leading paddings.
Args:
lengths: A int32 Tensor of shape [B].
maxlen: None or a Python int or a scalar Tensor.
Returns:
A 0/1 valued Tensor of shape [B, maxlen or ?] where 1s are padded positions.
"""
lengths = HasRank(lengths, 1)
if maxlen is not None:
lengths = with_dependencies(
[assert_less_equal(tf.cast(tf.reduce_max(lengths), tf.int32), maxlen)],
lengths)
return 1. - tf.sequence_mask(lengths, maxlen=maxlen, dtype=tf.float32)
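# Illustrative sketch (assumed values):
#   PaddingsFromLengths(tf.constant([2, 4]), maxlen=5)
#   # -> [[0., 0., 1., 1., 1.],
#   #     [0., 0., 0., 0., 1.]]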
def TrimTrailingPaddings(inputs, paddings):
"""Trims trailing paddings from inputs.
Since the number of dimensions is not fixed, this will not work on TPU.
Args:
inputs: a tensor with shape [batch, length, ...].
paddings: a tensor with shape [batch, length].
Returns:
Trimmed inputs and paddings. For compatibility reasons, the trimmed tensors
will always have length at least 1.
"""
paddings = HasRank(paddings, 2)
max_length = tf.maximum(tf.reduce_max(LengthsFromPaddings(paddings)), 1)
output_shape = tf.shape(inputs)
output_shape = tf.concat([[output_shape[0], max_length], output_shape[2:]],
axis=0)
outputs = tf.slice(inputs, tf.zeros_like(output_shape), output_shape)
out_paddings = tf.slice(paddings, [0, 0],
tf.stack([output_shape[0], max_length]))
return outputs, out_paddings
def ReversePaddedSequence(inputs, paddings):
"""Reverse inputs based on paddings.
Only reverse the unpadded portion of `inputs`. It assumes inputs are only
padded in the end.
Args:
inputs: a tensor of [seq_length, batch_size, num_input_nodes].
paddings: a tensor of float32/float64 zero or one of shape [seq_length,
batch_size, 1].
Returns:
A reversed tensor of the same shape as `inputs`.
"""
inversed_paddings = 1.0 - tf.squeeze(paddings, 2)
inputs_length = tf.cast(
tf.math.rint(tf.reduce_sum(inversed_paddings, axis=0)), tf.int32)
return tf.reverse_sequence(inputs, inputs_length, seq_axis=0, batch_axis=1)
def ConcatenatePaddedSequences(input0, input1, padding0, padding1, seq_dim=1):
"""Concatenates input sequences with varying lengths as defined by paddings.
This is a helper function for concatenating 2 batches of input sequences,
where each example in the batch can have different lengths, as defined by
the corresponding paddings. To concatenate correctly, it makes use of
tf.reverse_sequence to partially reverse the sequences before
concatenating them together.
NOTE: We assume that the tensors have no leading paddings.
Args:
input0: A tensor of size [batch, max_length, ...] or [max_length, batch,
...] depending on the value set for axis.
input1: A tensor of size [batch, max_length, ...] or [max_length, batch,
...] depending on the value set for axis.
padding0: A Tensor of size [batch, max_length] or [max_length, batch]
corresponding to the padding for input0.
padding1: A Tensor of size [batch, max_length] or [max_length, batch]
corresponding to the padding for input1.
seq_dim: int, the time axis along which the tensors will be concatenated.
Should be 0 or 1. Assumes that batch_dim is 1 - seq_dim.
Returns:
The concatenation of input0 and input1, and the corresponding padding.
Raises:
tf.errors.InvalidArgumentError when seq_dim is not 0 or 1.
"""
if seq_dim != 0 and seq_dim != 1:
raise tf.errors.InvalidArgumentError(None, None, 'seq_dim must be 0 or 1.')
batch_dim = 1 - seq_dim
# input0 and input1 should have the same batch size and the same rank.
input0 = with_dependencies([
assert_equal(GetShape(input0)[batch_dim],
GetShape(input1)[batch_dim]),
assert_equal(GetRank(input0), GetRank(input1))
], input0)
batch_size = GetShape(padding0)[batch_dim]
# batch dimension of inputs and paddings should match.
input0 = with_dependencies([
assert_equal(GetShape(input0)[batch_dim], batch_size),
assert_equal(GetShape(padding1)[batch_dim], batch_size)
], input0)
input0_seq_dim = tf.cast(
tf.tile([tf.shape(padding0)[seq_dim]], [batch_size]), dtype=tf.int32)
input1_seq_dim = tf.cast(
tf.tile([tf.shape(padding1)[seq_dim]], [batch_size]), dtype=tf.int32)
# LengthsFromPaddings assumes that paddings is of size [batch, max_length].
if seq_dim == 1:
seq_length0 = LengthsFromPaddings(padding0)
seq_length1 = LengthsFromPaddings(padding1)
else:
seq_length0 = LengthsFromPaddings(tf.transpose(padding0))
seq_length1 = LengthsFromPaddings(tf.transpose(padding1))
# We assume that the tensors have no leading paddings.
# TODO(arunnt): Concatenate tensors with leading paddings correctly.
seq_length0 = with_dependencies([
assert_equal(
seq_length0,
tf.cast(tf.reduce_sum(1.0 - padding0, seq_dim), dtype=tf.int32))
], seq_length0)
seq_length1 = with_dependencies([
assert_equal(
seq_length1,
tf.cast(tf.reduce_sum(1.0 - padding1, seq_dim), dtype=tf.int32))
], seq_length1)
# Concatenate input sequences.
reversed_input0 = tf.reverse_sequence(
input0, seq_length0, seq_axis=seq_dim, batch_axis=batch_dim)
reversed_input1 = tf.reverse_sequence(
input1, input1_seq_dim, seq_axis=seq_dim, batch_axis=batch_dim)
reversed_concat = tf.concat([reversed_input1, reversed_input0], axis=seq_dim)
concat_inputs = tf.reverse_sequence(
reversed_concat,
seq_length0 + input1_seq_dim,
seq_axis=seq_dim,
batch_axis=batch_dim)
# Concatenate paddings. Note that paddings are always a Tensor of 0s and 1s,
# so, unlike the inputs, we don't have to reverse padding1, we can simply
# concatenate reversed padding0 and padding1.
reversed_padding0 = tf.reverse_sequence(
padding0, input0_seq_dim, seq_axis=seq_dim, batch_axis=batch_dim)
reversed_concat_padding = tf.concat([reversed_padding0, padding1],
axis=seq_dim)
concat_paddings = tf.reverse_sequence(
reversed_concat_padding,
input0_seq_dim + seq_length1,
seq_axis=seq_dim,
batch_axis=batch_dim)
return concat_inputs, concat_paddings
def ShiftLeft(tensor, shift_size, pad_val=0, axis=1):
"""Shifts the values in a tensor to the left along the axis dimension.
The first shift_size values are dropped, and the tensor is padded on the
right with pad_val.
Args:
tensor: the input tensor with the axis dim being time.
shift_size: the number of frames >= 0 to shift.
pad_val: the value to pad on the right of the tensor.
axis: The dimension along which the tensor will be shifted, default to 1.
Returns:
A left shifted tensor on dimension axis.
"""
rank = tensor.shape.rank
with tf.control_dependencies(
[assert_greater_equal(rank, 2),
assert_greater_equal(shift_size, 0)]):
time = GetShape(tensor)[axis]
begin = tf.scatter_nd([[axis]], [shift_size], [rank])
return PadSequenceDimension(
tf.slice(tensor, begin, size=[-1] * rank), time, pad_val, axis=axis)
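# Illustrative sketch (assumed values):
#   x = tf.constant([[1, 2, 3, 4]])
#   ShiftLeft(x, shift_size=2, pad_val=0)  # -> [[3, 4, 0, 0]]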
def CreateIdsAndLabels(ids, paddings, sos_id=1, eos_id=2, trim=False):
"""Creates ids and labels to be used as decoder targets.
Args:
ids: int Tensor of shape [batch, maxlen], without sos or eos.
paddings: float Tensor of shape [batch, maxlen].
sos_id: ID for the sos special token.
eos_id: ID for the eos special token.
trim: Whether to trim the last elements in the output Tensors, so that the
lengths of the output Tensors are the same as the input Tensors. Otherwise,
the output Tensors are longer than the input Tensors by one because of
the added sos / eos.
Returns:
A NestedMap with the following fields, where maxlen' equals maxlen when
trim=True, otherwise maxlen + 1:
- ids: int Tensor of shape [batch, maxlen'], with sos prepended.
- labels: int Tensor of shape [batch, maxlen'], with eos appended.
- paddings: float Tensor of shape [batch, maxlen'].
- weights: float Tensor of shape [batch, maxlen'].
"""
ids = tf.where(
tf.equal(paddings, 0.0), ids, tf.broadcast_to([[eos_id]], GetShape(ids)))
targets = NestedMap()
targets.ids = tf.pad(ids, [[0, 0], [1, 0]], constant_values=sos_id)
targets.labels = tf.pad(ids, [[0, 0], [0, 1]], constant_values=eos_id)
targets.paddings = tf.pad(paddings, [[0, 0], [1, 0]])
targets.weights = 1.0 - targets.paddings
if trim:
targets = targets.Transform(lambda v: v[:, :-1])
return targets
def Retry(*args, **kwargs):
return retry.Retry(*args, **kwargs)
# FailedPreconditionError: variables are not initialized.
# AbortedError: processes restarts.
# UnavailableError: Bad hardware status: 0x1
transient_tf_errors = (tf.errors.FailedPreconditionError,
tf.errors.AbortedError, tf.errors.UnavailableError)
def RetryOnTransientTfError(*args, **kwargs):
return Retry(transient_tf_errors, *args, **kwargs)
def PadOrTrimTo(x, shape, pad_val=0, pad_after_contents=True):
"""Pad and slice x to the given shape.
Args:
x: A tensor.
shape: The shape of the returned tensor.
pad_val: An int or float used to pad x.
pad_after_contents: Whether to pad and trim after the original contents of
each dimension.
Returns:
'x' is padded with pad_val and sliced so that the result has the given
shape.
Raises:
ValueError: if shape is a tf.TensorShape and not fully defined.
"""
if isinstance(shape, (list, tuple)):
expected_rank = len(shape)
elif isinstance(shape, tf.TensorShape):
if not shape.is_fully_defined():
raise ValueError('shape %s padding %s must be fully defined.' %
(shape, x))
expected_rank = shape.rank
else:
shape = HasRank(shape, 1)
expected_rank = tf.size(shape)
x = HasRank(x, expected_rank)
pad = shape - tf.minimum(tf.shape(x), shape)
zeros = tf.zeros_like(pad)
if pad_after_contents:
# If dim_i is less than shape[i], pads after contents.
paddings = tf.stack([zeros, pad], axis=1)
# If dim_i is larger than shape[i], we slice [0:shape[i]] for dim_i.
slice_begin = zeros
else:
# If dim_i is less than shape[i], pads before contents.
paddings = tf.stack([pad, zeros], axis=1)
# If dim-i is larger than shape[i], we slice [dim_i - shape[i]:dim_i]
# for dim_i.
slice_begin = tf.shape(x) + pad - shape
x = tf.pad(x, paddings, constant_values=pad_val)
x = tf.slice(x, slice_begin, shape)
return tf.reshape(x, shape)
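# Illustrative sketch (assumed shapes):
#   x = tf.ones([2, 5])
#   PadOrTrimTo(x, [3, 4])
#   # -> shape [3, 4]: one all-pad_val row appended, last column trimmed.
#   # With pad_after_contents=False, padding/trimming happens at the front.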
def ExpandTo(x, target_rank):
"""Expands the last dimension of x until it has rank target_rank."""
if x is None:
return None
shape = GetShape(x)
rank = GetRank(x)
rank_diff = target_rank - rank
if isinstance(rank_diff, tf.Tensor):
new_shape = tf.concat([shape, tf.ones([rank_diff], tf.int32)], -1)
else:
new_shape = shape + [1] * rank_diff
new_x = tf.reshape(x, new_shape)
if not isinstance(target_rank, tf.Tensor):
new_x.shape.with_rank(target_rank)
return new_x
def ExpandAndPadOrTrimTo(x, target_shape, pad_val=0):
"""Ensures that x is broadcast compatible with target_shape.
x is first expanded to the target rank. Thereafter, if x is not broadcast
compatible with target_shape the non-broadcast compatible dimensions are
either padded or trimmed to the target shape.
Args:
x: A tensor.
target_shape: A tensor shape either as a list or Tensor.
pad_val: The value to pad.
Returns:
A tensor which is broadcast compatible with target_shape.
"""
if x is None:
return None
target_rank = None
if isinstance(target_shape, tf.Tensor):
target_rank = GetShape(target_shape)[0]
else:
target_rank = len(target_shape)
x = ExpandTo(x, target_rank)
x_shape = GetShape(x)
is_static = (not isinstance(x_shape, tf.Tensor) and
all(not isinstance(d, tf.Tensor) for d in x_shape))
if is_static:
masked_target_shape = [
1 if x_shape[i] == 1 else target_shape[i] for i in range(len(x_shape))
]
else:
masked_target_shape = tf.where(
tf.equal(x_shape, 1), tf.ones_like(target_shape), target_shape)
new_x = PadOrTrimTo(x, masked_target_shape, pad_val)
return tf.reshape(new_x, masked_target_shape)
def RepeatDim(tensor, multiple, axis):
"""Copies elements in tensor's axis "multiple" times, like np.repeat."""
# x = [[1, 2, 3], [4, 5, 6]]
# RepeatDim(x, multiple=2, axis=1) gives:
# [[1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6]]
# As a comparison, tf.tile(x, multiples=[1, 2]) gives:
# [[1, 2, 3, 1, 2, 3], [4, 5, 6, 4, 5, 6]]
if multiple == 1:
return tensor
t_shape = tf.shape(tensor)
tensor_dims = tf.concat(
[t_shape[:axis], [t_shape[axis] * multiple], t_shape[axis + 1:]], 0)
multiple_dims = tf.concat([
tf.fill([axis + 1], 1), [multiple],
tf.fill([tf.rank(tensor) - axis - 1], 1)
], 0)
return tf.reshape(
tf.tile(tf.expand_dims(tensor, axis + 1), multiple_dims), tensor_dims)
def StackTensorsRecursively(values):
"""Recursively stacks Tensors in a list of `.NestedMap`.
Args:
values: a list of `.NestedMap` or Tensors to stacks.
Returns:
A `.NestedMap` with stacked values or a stacked Tensor.
"""
flatten = [w.Flatten() for w in values]
stacked = []
for i in range(len(flatten[0])):
stacked += [tf.stack([flatten[j][i] for j in range(len(flatten))])]
ret = values[0].Pack(stacked)
return ret
def MixByWeight(inputs, weights, seed=None):
"""Returns a weighted random choice and bprop type from the give inputs.
Args:
inputs: a list of callables, where each callable returns a tf.Tensor or a
nested structure containing tf.Tensor. Function return types must be
consistent across elements. The tf.Operation to compute the result tensor
will only be invoked for one input at a time. For example, if each fn
represents an input record stream, a record will be drawn only from a
selected stream while the other streams will remain unchanged.
weights: a 1D tensor of float > 0 of the same length as inputs.
seed: random seed.
Returns:
A probabilistic sample from the inputs proportional to the weights. The
return type will be the same as return type of individual 'fn' from the
inputs.
A one-hot vector of the source selected.
"""
weights = tf.convert_to_tensor(weights, dtype=tf.float32)
weights = with_dependencies([
assert_equal(tf.shape(weights), [len(inputs)]),
assert_greater_equal(tf.reduce_min(weights), 0.0)
], weights)
lower = tf.cumsum(weights, exclusive=True)
upper = tf.cumsum(weights, exclusive=False)
r = tf.random.uniform(shape=[], maxval=upper[-1], seed=seed)
return_input = tf.case(
[(tf.math.logical_and(lower[i] <= r, r < upper[i]), inputs[i])
for i in range(len(inputs))],
exclusive=True)
selected_index = tf.case(
[(tf.math.logical_and(lower[i] <= r, r < upper[i]), lambda i=i: i)
for i in range(len(inputs))],
exclusive=True)
bprop_index = tf.one_hot(selected_index, len(inputs), dtype=tf.float32)
return return_input, bprop_index
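# Illustrative sketch (assumed callables):
#   sample, one_hot = MixByWeight(
#       [lambda: tf.constant(1), lambda: tf.constant(2)], weights=[0.9, 0.1])
#   # sample is 1 with probability ~0.9 and 2 with probability ~0.1;
#   # one_hot marks the selected source, e.g. [1., 0.].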
def CheckShapes(shapes):
"""Asserts that shapes is a tuple of NestedMap or tshape.Shape."""
assert isinstance(shapes, tuple), str(shapes)
for s in shapes:
if isinstance(s, NestedMap):
assert all([isinstance(t, tshape.Shape) for t in Flatten(s)
]), '{} contains non-tensor value.'.format(s)
else:
assert isinstance(s, tshape.Shape), '{}: {}'.format(type(s), s)
def FPropDtype(params):
return params.fprop_dtype if params.fprop_dtype is not None else params.dtype
def UpdateFpropDtype(params, fprop_dtype):
"""Recursively update the fprop_dtype of the Params."""
# Handle the case when the input "params" is not an instance of hyperparams
# For example, when UpdateDtype is called recursively for all the items in
# the "sub" list of SequentialLayer (see 1st elif below)
if not isinstance(params, hyperparams.Params):
return
for key, val in params.IterParams():
if isinstance(val, hyperparams.Params):
UpdateFpropDtype(val, fprop_dtype)
elif isinstance(val, (list, tuple)):
for item in val:
UpdateFpropDtype(item, fprop_dtype)
elif key == 'fprop_dtype':
params.fprop_dtype = fprop_dtype
def UpdateDtype(params, dtype):
"""Recursively update the dtype of the Params."""
# Handle the case when the input "params" is not an instance of hyperparams
# For example, when UpdateDtype is called recursively for all the items in
# the "sub" list of SequentialLayer (see 1st elif below)
if not isinstance(params, hyperparams.Params):
return
for key, val in params.IterParams():
if isinstance(val, hyperparams.Params):
UpdateDtype(val, dtype)
elif isinstance(val, (list, tuple)):
for item in val:
UpdateDtype(item, dtype)
elif key == 'dtype':
params.dtype = dtype
def NameScopeDecorator(name_scope):
"""Decorates a python function to introduce a tf.name_scope.
Example::
@py_utils.NameScopeDecorator('foobar')
def MyFoobarMethod(self):
# ... Do TF things
Args:
name_scope: The name scope to introduce.
Returns:
A function decorator.
"""
def Decorator(f):
def Wrapped(*args, **kwargs):
with tf.name_scope(name_scope):
return f(*args, **kwargs)
return Wrapped
return Decorator
def SequencesToDebugStrings(ids, lens, summarize=5):
"""Returns debug strings for the given sequences.
Args:
ids: int32 of [batch, len].
lens: int32 of [batch].
summarize: number of ids to summarize per sequence.
Returns:
A string tensor of [batch].
"""
num_seqs = tf.shape(lens)[0]
def _Body(i, result):
line = tf.strings.format('{}', ids[i, :lens[i]], summarize=summarize)
return i + 1, tf.concat([result, tf.reshape(line, [1])], axis=0)
i0 = tf.zeros(shape=[], dtype=tf.int32)
result0 = tf.constant('', shape=[0], dtype=tf.string)
_, strs = tf.while_loop(
lambda i, result: i < num_seqs,
_Body, (i0, result0),
shape_invariants=(i0.shape, tf.TensorShape([None])))
return strs
# TODO(jamesqin): follow suggestions in
# b/167460492#comment16
def RematerializeFn(fn, *xs):
"""Calls fn and rematerializes fn in the backward pass.
`fn(*xs) -> ys`, where xs and ys can be a single tensor or a tuple of tensors.
Args:
fn: A python function to be rematerialized in the backprop pass.
*xs: A single tensor or a list/tuple of tensors. `xs` are input args to the
fn function.
Returns:
`fn(*xs)`
"""
initial_step_seed = GetStepSeed()
final_step_seed = MaybeGenerateSeedFromScope()
def Backward(fwd_xs, fwd_ys, d_fwd_ys):
"""The backward function that rematerializes forward outputs."""
del fwd_ys
always_true = tf.random.uniform([]) < 2.0
# Alternatively, can do this:
# tf.where(tf.math.is_nan(x),
# tf.constant(float('nan'), dtype=x.dtype) * tf.ones_like(x),
# x)
bak_xs = [tf.where(always_true, x, tf.zeros_like(x)) for x in fwd_xs.xs]
for dst, src in zip(bak_xs, xs):
dst.set_shape(src.shape)
ResetStepSeed(initial_step_seed)
ys = fn(*bak_xs)
MaybeResetStepSeed(final_step_seed)
dxs = tf.gradients(ys, bak_xs, grad_ys=d_fwd_ys)
dxs_final = []
for dx, x in zip(dxs, bak_xs):
if dx is None:
dxs_final.append(tf.zeros_like(x))
else:
dxs_final.append(dx)
assert len(dxs_final) == len(bak_xs)
return NestedMap(
initial_step_seed=tf.zeros_like(initial_step_seed), xs=dxs_final)
ys_shapes = []
# TODO(huangyp, yonghui): Check Forward doesn't use any stateful random ops.
def Forward(fwd_xs):
"""Forward function plus sanity checks."""
for dst, src in zip(fwd_xs.xs, xs):
dst.set_shape(src.shape)
ResetStepSeed(fwd_xs.initial_step_seed)
ys = fn(*fwd_xs.xs)
# Some sanity check.
assert not GetExtraInputs()
assert not GetExtraArgs()
assert not GetExtraVars()
if isinstance(ys, tuple):
for y in ys:
assert isinstance(y, tf.Tensor)
ys_shapes.append(y.shape)
else:
assert isinstance(ys, tf.Tensor)
ys_shapes.append(ys.shape)
return ys
ys = CallDefun(
Forward,
NestedMap(initial_step_seed=initial_step_seed, xs=xs),
bak=Backward)
if isinstance(ys, tuple):
for y, s in zip(ys, ys_shapes):
y.set_shape(s)
else:
ys.set_shape(ys_shapes[0])
# TODO(b/129159299): The ResetStepSeed below is needed to work around this
# bug, which is a problem with global tensors being shared by different
# inference graphs. It should be replaced with the new step seed value
# returned from the Forward function when the bug is fixed.
MaybeResetStepSeed(final_step_seed)
return ys
# A set of names of stateful random number generator ops.
# See tensorflow/core/ops/random_ops.cc
_STATEFUL_RANDOM_OPS = frozenset({
# pyformat: disable
'RandomUniform',
'RandomUniformInt',
'RandomStandardNormal',
'ParameterizedTruncatedNormal',
'TruncatedNormal',
'RandomShuffle',
'Multinomial',
'RandomGamma',
'RandomPoisson',
'RandomPoissonV2',
# pyformat: enable
})
def StatefulRandomOpsInDefun(func, graph=None):
"""Checks whether the Defun depends on stateful random number ops.
Stateful random number generator ops should be avoided in Recurrent() calls.
Otherwise, these ops produce inconsistent values between FProp and BProp.
Args:
func: a _DefinedFunction or ConcreteFunction to check.
graph: a Graph. Set None to use the default graph.
Returns:
A list of names of the stateful random ops.
Raises:
InvalidArgumentError: if the input func/graph is invalid.
"""
if graph is None:
graph = tf.get_default_graph()
func.add_to_graph(graph)
graph_def = graph.as_graph_def()
# A dict from function name to FunctionDef.
func_defs = {x.signature.name: x for x in graph_def.library.function}
if isinstance(func, function._DefinedFunction): # pylint: disable=protected-access
if func.definition.signature.name not in func_defs:
raise tf.errors.InvalidArgumentError(
None, None, 'Defun {} is not in the graph.'.format(
func.definition.signature.name))
nodes = py_collections.deque(func.definition.node_def)
else:
nodes = py_collections.deque(func.function_def.node_def)
stateful_ops = []
# Recursively search for stateful random op.
while nodes:
node = nodes.pop()
assert isinstance(node, node_def_pb2.NodeDef), node
if node.op in _STATEFUL_RANDOM_OPS:
stateful_ops.append(node.name)
continue
def _AddDefunNodes(func_name):
"""If the given func_name is a Defun, add its sub-nodes into nodes."""
if func_name in func_defs:
nodes.extend(func_defs[func_name].node_def)
# For functional.{While|For|If} ops, add their Defun attr into search.
if node.op == 'While':
_AddDefunNodes(node.attr['body'].func.name)
_AddDefunNodes(node.attr['cond'].func.name)
elif node.op == 'For':
_AddDefunNodes(node.attr['body'].func.name)
elif node.op == 'If':
_AddDefunNodes(node.attr['then_branch'].func.name)
_AddDefunNodes(node.attr['else_branch'].func.name)
elif node.op == 'StatefulPartitionedCall':
_AddDefunNodes(node.attr['f'].func.name)
elif node.op != 'PartitionedCall':
# For other op, check whether itself is a Defun op.
_AddDefunNodes(node.op)
return stateful_ops
def ToPlaceholders(nmap, dtype=None):
"""Converts every Tensor in nmap to a placeholder."""
def _ToPlacerholder(x):
shape = [None for _ in x.shape[:-1]] + [x.shape[-1]]
return tf.placeholder(dtype=dtype or x.dtype, shape=shape)
return nmap.Transform(_ToPlacerholder)
def Softmax(logits, axis=None, extra_logit=None, name=None):
"""Softmax with extra_logits, might be useful for large xformer LM."""
if extra_logit is None:
return tf.nn.softmax(logits, axis=axis, name=name)
axis = -1 if axis is None else axis
def ReduceLogSumExp(x):
max_logit = tf.math.reduce_max(
tf.stop_gradient(x), axis=axis, keepdims=True)
base_logit = tf.math.maximum(max_logit, extra_logit)
x -= base_logit
exp_x = tf.math.exp(x)
sum_exp_x = tf.math.reduce_sum(exp_x, axis=axis, keepdims=True)
sum_exp_x += tf.math.exp(extra_logit - base_logit)
return tf.math.log(sum_exp_x) + base_logit
def LogSoftmax(x):
return x - ReduceLogSumExp(x)
with tf.name_scope(name):
return tf.math.exp(LogSoftmax(logits))
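# Example usage of Softmax with an extra logit (an illustrative sketch added
# here for clarity; assumes eager execution or a session to evaluate tensors):
#
#   logits = tf.constant([[1.0, 2.0, 3.0]])
#   # Probabilities are normalized as if an additional class with logit
#   # `extra_logit` existed, so the returned [1, 3] tensor sums to < 1.
#   probs = Softmax(logits, extra_logit=0.0)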
def SoftmaxCrossEntropyFocalLoss(logits,
label_ids=None,
label_probs=None,
alpha=None,
gamma=None,
stop_gradient_on_focal_loss_coefficient=False):
u"""Focal loss for multinomial (softmax) logistic loss.
[1] Focal loss https://arxiv.org/abs/1708.02002
Args:
logits: [..., C]. Logits for the multinomial logistic regression. C is the
number of classes.
label_ids: [...]. Each entry in labels must be an index in [0, C).
label_probs: [..., C]. Each vector along last dimension must be a valid
probability distribution.
alpha: [C]. The weighting factor alpha. Eq (3) in [1].
gamma: []. Tunable focusing parameter. Eq (4) in [1].
stop_gradient_on_focal_loss_coefficient: If true, stops gradient on the
focal loss coefficient (1-p)^gamma to stabilize the gradient.
Returns:
loss[i..., j] = FL(pₜ) = - αₜ(1-pₜ)ˠlog(pₜ) Eq (5) in [1].
"""
def _ApplyFocalLossCoefficient(loss, log_probs):
if gamma is not None and gamma != 0:
probs = tf.exp(log_probs)
coefficient = tf.pow(1.0 - probs, gamma)
if stop_gradient_on_focal_loss_coefficient:
coefficient = tf.stop_gradient(coefficient)
loss *= coefficient
return loss
if label_probs is not None:
log_probs = tf.nn.log_softmax(logits)
loss = -(label_probs * log_probs)
loss = _ApplyFocalLossCoefficient(loss, log_probs)
if alpha is not None:
loss *= tf.reshape(
alpha, tf.concat([tf.ones(tf.rank(loss) - 1, tf.int32), [-1]],
axis=0))
loss = tf.reduce_sum(loss, axis=-1)
else:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=label_ids, logits=logits)
loss = _ApplyFocalLossCoefficient(loss, -loss)
if alpha is not None:
loss *= tf.gather(alpha, label_ids)
return loss
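# Illustrative sketch of SoftmaxCrossEntropyFocalLoss with hypothetical shapes
# (added for clarity; not part of the original module):
#
#   logits = tf.random.normal([8, 16, 10])      # [batch, time, num_classes]
#   label_ids = tf.zeros([8, 16], tf.int32)
#   loss = SoftmaxCrossEntropyFocalLoss(logits, label_ids=label_ids, gamma=2.0)
#   # loss has shape [8, 16]; gamma=None (or 0) recovers plain cross entropy.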
def SigmoidCrossEntropyFocalLoss(logits, labels, alpha=None, gamma=None):
u"""Focal loss for binary (sigmoid) logistic loss.
[1] Focal loss https://arxiv.org/abs/1708.02002
Args:
logits: [..., C]. Logits for the sigmoid logistic regression.
labels: [..., C]. 0/1 labels.
alpha: The weighting factor alpha. Eq (3) in [1].
gamma: Tunable focusing parameter. Eq (4) in [1].
Returns:
loss[i..., j] = FL(pₜ) = - αₜ(1-pₜ)ˠlog(pₜ) Eq (5) in [1].
"""
# [1] Eq (4).
#
# The numerically-stable way to compute
# log(p) for positives;
# log(1 - p) for negatives.
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
if gamma is not None and gamma != 0:
# The modulating factor. Note that
# (1 - p)ˠ = [1 - σ(x)]ˠ = [σ(-x)]ˠ, for positives.
# pˠ = [σ(x)]ˠ, for negatives.
loss *= tf.pow(tf.sigmoid(logits * (1 - labels * 2)), gamma)
if alpha is not None:
# [1] Eq (3)
loss *= (alpha * labels + (1 - alpha) * (1 - labels))
return loss
_RECORD_FORMAT_RE = re.compile('(^[A-Za-z_]+):(.*)')
def RecordFormatFromFilePattern(file_pattern):
"""Return the record format string for a Lingvo file pattern.
Lingvo file patterns take the form of:
tfrecord:/path/to/bar -> tfrecord is the record_format.
This function takes a file pattern and returns a string indicating
which format the filepattern implies.
Args:
file_pattern: String file pattern.
Returns:
Tuple (string, string):
- record_format: String record format, e.g., "tfrecord", etc.
- file_pattern: The file pattern without any prefixes.
"""
result = re.match(_RECORD_FORMAT_RE, file_pattern)
if result is None:
# TODO(vrv): Fix all callers so that file_pattern must contain
# the record format prefix.
return 'sstable', file_pattern
# regexp ensures that a match implies there are two groups:
# the record format and then the file pattern.
return result.groups()
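# Illustrative examples for RecordFormatFromFilePattern (hypothetical paths,
# added for clarity):
#
#   RecordFormatFromFilePattern('tfrecord:/data/train-*')
#   # -> ('tfrecord', '/data/train-*')
#   RecordFormatFromFilePattern('/data/train-*')
#   # -> ('sstable', '/data/train-*')  # legacy default when no prefix is given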
def ReadFileLines(file_path):
"""Read a text file and return the lines.
If the file cannot be found at the given path, attempt to load it from the
Lingvo package (useful for data dependencies in par files).
Args:
file_path: path to file, either absolute or relative to the bazel workspace.
Returns:
A list of lines from the file.
"""
if not tf.io.gfile.exists(file_path):
try:
lines = pkgutil.get_data(
'lingvo', file_path.replace('lingvo/', '', 1))
if lines:
lines = lines.splitlines(True)
except IOError:
# If pkgutil can't find the file, continue and let GFile raise the error.
lines = None
else:
lines = None
if not lines:
with tf.io.gfile.GFile(file_path, 'r') as f:
lines = f.readlines()
return lines
# Partially borrowed from
# https://github.com/tensorflow/tensor2tensor/blob/32929305e1a4ec926eff24123758b794df35492b/tensor2tensor/layers/common_layers.py#L349
def CumSum(x, axis=0, exclusive=False, use_einsum=False):
"""A TPU efficient implementation of tf.cumsum().
This is equivalent to tf.cumsum and is faster on TPU as of 08/2019 unless
the axis dimension is very large. The current Tensorflow implementation is
based on scanning and reducing which is not efficient on TPU.
Args:
x: An input Tensor.
axis: An int for the axis.
exclusive: A bool for performing exclusive cumsum.
use_einsum: If true, use einsum on TPU.
Returns:
A Tensor of the same shape as x.
Raises:
ValueError: if the input axis is invalid.
"""
if x.dtype not in (tf.float32, tf.bfloat16) or not use_tpu():
# Fallback to tf.cumsum when inputs are not floats or not running on TPU.
return tf.cumsum(x, axis=axis, exclusive=exclusive)
rank = GetRank(x)
# Needs to know the rank for the final transpose if axis is not the last
# dimension. Otherwise, falls back to tf.cumsum.
if not isinstance(rank, int) and axis != -1:
return tf.cumsum(x, axis=axis, exclusive=exclusive)
if axis < -1:
if axis + rank < 0:
raise ValueError('Unexpected axis: %d (rank = %d)' % (axis, rank))
axis += rank
if use_einsum:
assert isinstance(rank, int) and rank < 26, rank
# Use einsum to avoid data formatting overhead.
a2z = ''.join([chr(i) for i in range(97, 123)]) # abc...xyz
src = a2z[:rank]
if axis == -1:
tgt = src[:-1] + 'z'
else:
tgt = src[:axis] + 'z' + src[axis + 1:]
length = GetShape(x)[axis]
causal_mask = tf.linalg.band_part(
tf.ones([length, length], dtype=x.dtype), 0, -1)
return tf.einsum(f'{src},{src[axis]}z->{tgt}', x, causal_mask)
length = GetShape(x)[axis]
my_range = tf.range(length)
comparator = tf.less if exclusive else tf.less_equal
mask = tf.cast(
comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
x.dtype)
result = tf.tensordot(x, mask, axes=[[axis], [0]])
if axis != -1 and axis != rank - 1:
result = tf.transpose(
result,
list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
return result
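# Illustrative sketch of CumSum (added for clarity; off TPU, or for non-float
# dtypes, it simply falls back to tf.cumsum):
#
#   x = tf.constant([[1., 2., 3.], [4., 5., 6.]])
#   CumSum(x, axis=-1)   # -> [[1., 3., 6.], [4., 9., 15.]]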
def ProjectLastDim(inputs, weight, input_dim, output_dim):
"""Linear projection on the last dim of the input tensor.
This is a TPU efficient implementation to avoid reshaping inputs to Rank-2
tensor by using Einsum for the compute.
Args:
inputs: An input Tensor, the last dimension of which is input_dim.
weight: A weight matrix with shape [input_dim, output_dim].
input_dim: An integer or a symbolic dim, the last dimension of the inputs.
output_dim: An integer or a symbolic dim, the last dimension of the outputs.
Returns:
An output Tensor of the same rank as inputs, the last dimension is
output_dim.
"""
input_dim = int(
symbolic.ToStatic(input_dim) if symbolic.IsExpr(input_dim) else input_dim)
output_dim = int(
symbolic.ToStatic(output_dim) if symbolic.IsExpr(output_dim
) else output_dim)
# Assert input_dim and output_dim
inputs = with_dependencies([assert_equal(GetShape(inputs)[-1], input_dim)],
inputs)
weight = with_dependencies([
assert_equal(GetShape(weight)[0], input_dim),
assert_equal(GetShape(weight)[-1], output_dim)
], weight)
if (use_tpu() and inputs.shape is not None and
inputs.shape.rank is not None and inputs.shape.rank < 26):
# Avoids reshape if feasible and uses Einsum.
if inputs.shape.rank == 2:
outputs = tf.matmul(inputs, weight)
else:
# This is equivalent to:
# outputs = tf.einsum('...y,yz->...z', inputs, weight)
# Unfortunately ... in einsum() leads to extra HBM usage.
s = ''.join([chr(x) for x in range(97, 123)]) # abc...xyz
r = inputs.shape.rank
outputs = tf.einsum('{0}y,yz->{0}z'.format(s[:r - 1]), inputs, weight)
else:
outputs = Matmul(tf.reshape(inputs, ToStaticShape([-1, input_dim])), weight)
outputs = tf.reshape(
outputs,
tf.concat([
tf.cast(GetShape(inputs)[:-1], tf.int32),
ToStaticShape([output_dim])
],
axis=0))
return outputs
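# Illustrative sketch of ProjectLastDim with hypothetical shapes (added for
# clarity):
#
#   inputs = tf.random.normal([4, 16, 128])   # [..., input_dim]
#   weight = tf.random.normal([128, 256])     # [input_dim, output_dim]
#   outputs = ProjectLastDim(inputs, weight, input_dim=128, output_dim=256)
#   # outputs has shape [4, 16, 256]; on TPU an einsum avoids the reshape.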
@contextlib.contextmanager
def RemoveAssertContext(remove=True):
"""Hacks to replace certain unwanted tensorflow ops."""
  # TODO(zhifengc/huangyp): Consider implementing an assert_equal op
  # replacement for lingvo, as assert_equal doesn't support strings on GPUs.
# Hack to replace tf.assert_equal
# TODO(b/136040013): Remove this after migration to tf.function.
if remove:
saved_assert_equal = tf.check_ops.assert_equal
def NoOP(*args, **kwargs): # pylint: disable=unused-argument
return tf.no_op()
tf.check_ops.assert_equal = NoOP # Make assert_equal a no op.
try:
yield
finally:
tf.check_ops.assert_equal = saved_assert_equal
else:
yield
def _AssertInputsMatch(op, args, implicit_captures):
"""Assert that op's inputs match with args and implicit_captures.
Args:
op: The operation to check.
args: A nested structure representing the explicit arguments of 'op'.
implicit_captures: A nested structure representing the implicitly captured
inputs of 'op'.
Raises:
ValueError: if the number of inputs mismatch.
"""
expected_inputs = Flatten([args, implicit_captures])
expected_num_inputs = len(expected_inputs)
if len(op.inputs) > expected_num_inputs:
raise ValueError(('Too many inputs. The most likely cause is that fwd '
'captures additional tensors: extra inputs %r vs %r '
'captures=%r') % (list(op.inputs), list(expected_inputs),
list(Flatten(implicit_captures))))
if len(op.inputs) < expected_num_inputs:
raise ValueError(('Mismatched inputs to fwd: Found %d vs expected %d: %r'
'. Implicit captures(%d) = %r') %
(len(op.inputs), expected_num_inputs, list(op.inputs),
len(Flatten(implicit_captures)), implicit_captures))
def TensorSpecs(nmap, keep_shape=True):
"""Transforms tensors in the input nested structure to TensorSpecs."""
if nmap is None:
return None
fn = lambda t: tf.TensorSpec(t.shape if keep_shape else None, t.dtype)
return Transform(fn, nmap)
def _DefineDefun(fwd, fwd_sig, bak=None, bak_as_function=False, device=None):
"""Wraps fwd in a defun with custom gradient bak.
Args:
fwd: A callable xs: Nested Structure -> ys: Nested Structure.
fwd_sig: A Nested Structure of tf.TensorSpec representing the input
signature of `fwd`, or None (meaning that fwd takes no inputs).
bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested
Structure. The custom backprop function for `fwd`. bak needs to return
dcapture if fwd uses any implicitly captured tensors, whose gradients are
dcapture.
bak_as_function: Whether to create a TF graph function for `bak`.
device: the device on which to run `fwd` and `bak`.
Returns:
A NestedMap containing:
- call: A callable that will execute `fwd`. It has the same input and output
signatures as `fwd`.
- func: The underlying TF function that `call` calls. If not None, it will
be a _DefinedFunction or ConcreteFunction that takes flat inputs and
returns flat outputs, and can be used by routines that require a TF
function object (e.g. tf.If, tf.While, etc).
Always not None when `bak` is None.
- output_dtypes: A nested structure compatible with the outputs of `fwd`
containing the corresponding output dtypes.
- stateful_ops: A list of (op_name, op_type) tuples representing the
stateful ops used by `fwd`.
- captured_inputs: Implicit inputs captured by `fwd`.
"""
assert fwd is not None
noinline = False
if fwd_sig is None:
fwd_sig = []
get_dtype = lambda x: x.dtype
arg_dtypes = Flatten(Transform(get_dtype, fwd_sig))
get_shape = lambda x: x.shape
arg_shapes = Flatten(Transform(get_shape, fwd_sig))
# Used to hold the backward function used by Grad, which will be defined if
# bak is set.
sigs = NestedMap()
# Output of this method.
res = NestedMap()
python_grad_func = None
if bak:
def Grad(op, *args):
"""Gradient function for the forward function.
Args:
op: The forward operation.
*args: Gradients wrt op.outputs.
Returns:
Tuple of derivatives.
"""
_AssertInputsMatch(op, fwd_sig, res.captured_inputs)
# Ensure dys contains no None.
args = ConvertNoneGradientToZeros(list(op.outputs), list(args))
xs = op.inputs[:len(arg_dtypes)] # The rest are captures.
return sigs.backward(*Flatten([xs, op.outputs, args]))
python_grad_func = Grad
def _SetShape(dst_list, shape_list):
for dst, shape in zip(dst_list, shape_list):
if isinstance(dst, tf.Tensor):
dst.set_shape(shape)
@tf.Defun(*arg_dtypes, python_grad_func=python_grad_func, noinline=noinline)
def Forward(*args):
"""The forward function."""
_SetShape(args, arg_shapes)
with RemoveAssertContext(remove=noinline):
call = lambda: fwd(Pack(fwd_sig, args)) if args else fwd()
if device is None:
# Defun will handle the device assignment.
rets = call()
else:
with tf.device(device):
rets = call()
res.outputs = rets
return Flatten(rets)
forward = Forward
if not arg_dtypes:
# In this case Forward is an _OverloadedFunction, we need to instantiate it.
forward = Forward.instantiate([])
# Invokes fwd() to get res.outputs.
forward.add_to_graph(tf.get_default_graph())
res.func = forward
res.stateful_ops = forward.stateful_ops
res.captured_inputs = forward.captured_inputs
output_dtypes = Transform(get_dtype, res.outputs)
output_shapes = Transform(get_shape, res.outputs)
def Call(args=None):
"""Wrapper of fwd."""
if args is None:
flat_rets = forward()
else:
flat_rets = forward(*Flatten(args))
if not isinstance(flat_rets, (tuple, list)):
flat_rets = [flat_rets]
_SetShape(flat_rets, Flatten(output_shapes))
return Pack(output_dtypes, flat_rets)
res.call = Call
if bak:
def Backward(*args):
"""The backward function."""
_SetShape(args, Flatten([arg_shapes, output_shapes, output_shapes]))
xs, ys, dys = Pack([fwd_sig, output_dtypes, output_dtypes], args)
with RemoveAssertContext(remove=noinline):
if device is None:
# Defun will handle the device assignment.
dxs = bak(xs, ys, dys)
else:
with tf.device(device):
dxs = bak(xs, ys, dys)
return Flatten(dxs)
if bak_as_function:
sigs.backward = tf.Defun(
*Flatten([arg_dtypes, output_dtypes, output_dtypes]),
noinline=noinline)(
Backward)
sigs.backward.add_to_graph(tf.get_default_graph())
else:
sigs.backward = Backward
return res
# Global variable to control rendezvous sharing in tf.function.
# If False (default) rendezvous sharing is disabled in tf.function, that is, the
# function body use a separate rendezvous and can't communicate with parent
# graph via send/recv.
# With _GetSharedRendezvous() == True, the function body share the same
# rendezvous with the parent graph and can talk to it using send/recv. This is
# useful for layers like StackedRecurrent.
_SHARED_RENDEZVOUS = ThreadLocalStack()
@contextlib.contextmanager
def _SharedRendezvousScope(shared_rendezvous=True):
_SHARED_RENDEZVOUS.stack.append(shared_rendezvous)
try:
yield
finally:
_SHARED_RENDEZVOUS.stack.pop()
def _GetSharedRendezvous():
"""Get the current rendezvous sharing setting."""
return _SHARED_RENDEZVOUS.stack[-1] if _SHARED_RENDEZVOUS.stack else False
def _ApplySharedRendezvous(func):
"""Apply the rendezvous sharing setting on the given tf.function func."""
# pylint: disable=protected-access
func._shared_rendezvous = _GetSharedRendezvous()
# pylint: enable=protected-access
def _WrapFunction(func=None, input_signature=None):
"""Wraps func as a tf.function."""
if input_signature is None:
input_signature = []
def Decorated(fn):
@tf.function(input_signature=input_signature, autograph=False)
def Fn(*args):
# TODO(b/163904067): mimic Defun' behavior and reset the step seed to
# avoid it being used as an implicit capture. This is not a desired
# behavior, it should take the step seed from parent graph instead.
ResetStepSeed()
# Mimic Defun and disable collection sharing.
graph = tf.get_default_graph()
# Don't share summaries collection with parent graph (b/168745134).
graph.clear_collection(tf.GraphKeys.SUMMARIES)
return fn(*args)
_ApplySharedRendezvous(Fn)
# Add the function to the graph so it'll be traced under the current
# context. This is necessary if the function body captures any non-tensor
# values from the environment, like symbolic maps.
cf = Fn.get_concrete_function()
cf.add_to_graph()
return cf
# For the `foo = _WrapFunction(foo, ...)` use case.
if func is not None:
return Decorated(func)
# For the `@_WrapFunction(...)` use case.
return Decorated
def _DefineFunction(fwd, fwd_sig, bak=None, bak_as_function=False, device=None):
"""Wraps fwd in a defun with custom gradient bak.
Args:
fwd: A callable xs: Nested Structure -> ys: Nested Structure.
fwd_sig: A Nested Structure of tf.TensorSpec representing the input
signature of `fwd`, or None (meaning that fwd takes no inputs).
bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested
Structure. The custom backprop function for `fwd`. bak needs to return
dcapture if fwd uses any implicitly captured tensors, whose gradients are
dcapture.
bak_as_function: Whether to create a TF graph function for `bak`.
device: the device on which to run `fwd` and `bak`.
Returns:
A NestedMap containing:
- call: A callable that will execute `fwd`. It has the same input and output
signatures as `fwd`.
- func: The underlying TF function that `call` calls. If not None, it will
be a _DefinedFunction or ConcreteFunction that takes flat inputs and
returns flat outputs, and can be used by routines that require a TF
function object (e.g. tf.If, tf.While, etc).
Always not None when `bak` is None.
- outputs: The outputs of `fwd`. Used for reflection only (e.g. to get the
output dtypes, shapes, etc).
- stateful_ops: A list of (op_name, op_type) tuples representing the
stateful ops used by `fwd`.
- captured_inputs: Implicit inputs captured by `fwd`.
"""
assert fwd is not None
noinline = not use_xla()
if fwd_sig is None:
fwd_sig = []
if device is None:
# Get the current device to mimic Defun's behavior.
# pylint: disable=protected-access
device_funcs = tf.get_default_graph()._device_functions_outer_to_inner
device = device_funcs[-1] if device_funcs else None
# pylint: enable=protected-access
# Output of this method.
res = NestedMap()
@_WrapFunction(input_signature=Flatten(fwd_sig))
def Forward(*args):
"""The forward function."""
with RemoveAssertContext(remove=noinline), tf.device(device):
if args:
xs = Pack(fwd_sig, args)
rets = fwd(xs)
else:
rets = fwd()
res.outputs = rets
return Flatten(rets)
res.captured_inputs = Forward.captured_inputs
# Get the stateful ops used in cell_fn. Logic borrowed from
# _EagerDefinedFunction.__init__().
graph = Forward.graph
input_ops = set(arg.op for arg in graph.inputs)
operations = [op for op in graph.get_operations() if op not in input_ops]
res.stateful_ops = [(o.name, o.type) for o in operations if o._is_stateful] # pylint: disable=protected-access
def Call(func, args=None):
"""Wrapper of fwd."""
if args is None:
flat_rets = func()
else:
flat_rets = func(*Flatten(args))
if not isinstance(flat_rets, (tuple, list)):
flat_rets = [flat_rets]
return Pack(res.outputs, flat_rets)
if not bak:
res.func = Forward
res.call = lambda args=None: Call(Forward, args)
return res
shared_rendezvous = _GetSharedRendezvous()
ret_specs = TensorSpecs(res.outputs)
def Backward(*args):
xs, ys, dys = Pack([fwd_sig, ret_specs, ret_specs], args)
with RemoveAssertContext(remove=noinline), tf.device(device):
dxs = bak(xs, ys, dys)
return Flatten(dxs)
if bak_as_function:
backward_cf = _WrapFunction(
Backward, input_signature=Flatten([fwd_sig, ret_specs, ret_specs]))
else:
def BackwardWithSharedRendezvous(*args):
with _SharedRendezvousScope(shared_rendezvous):
return Backward(*args)
backward_cf = BackwardWithSharedRendezvous
@tf.custom_gradient
def ForwardWithGrad(*args):
"""Forward function and its custom gradient."""
# Note that `args` includes implicit captures. This is required by
# tf.custom_gradient so that when the Grad() outputs include gradients to
# implicit captures, they match the inputs to ForwardWithGrad().
#
# However, Forward doesn't take implicit captures as input, so we exclude
# them here.
fwd_args = args[:(len(args) - len(Flatten(res.captured_inputs)))]
op = NestedMap(inputs=args, outputs=Forward(*fwd_args))
def Grad(*args, **kwargs):
"""Gradient function for the forward function.
Args:
*args: Gradients wrt op.outputs.
**kwargs: Additional arguments from tf.custom_gradient.
Returns:
Tuple of derivatives.
"""
if kwargs:
tf.logging.warning(
'Ignoring additional arguments used by tf.custom_gradient: %s',
str(kwargs))
_AssertInputsMatch(op, fwd_sig, res.captured_inputs)
# Ensure dys contains no None.
args = ConvertNoneGradientToZeros(list(op.outputs), list(args))
xs, _ = Pack([fwd_sig, res.captured_inputs], op.inputs)
return backward_cf(*Flatten([xs, op.outputs, args]))
return op.outputs, Grad
res.func = None
forward = lambda *xs: ForwardWithGrad(*Flatten([xs, res.captured_inputs]))
res.call = lambda args=None: Call(forward, args)
return res
# Global variable to control whether to use tf.function.
# If not set, the result is determined by tf2 status. See _UseTfFunction for
# details.
# TODO(laigd): remove after b/169869929 is fixed.
_USE_TF_FUNCTION = ThreadLocalStack()
# Constants for propagating framework tensors through Function.
_FRAMEWORK_TENSOR_GLOBAL_STEP = '_global_step'
@contextlib.contextmanager
def TfFunctionScope(use_tf_function=True):
_USE_TF_FUNCTION.stack.append(use_tf_function)
try:
yield
finally:
_USE_TF_FUNCTION.stack.pop()
def _UseTfFunction():
"""Whether to use tf.function instead of tf.Defun."""
if _USE_TF_FUNCTION.stack:
return _USE_TF_FUNCTION.stack[-1]
return tf2_enabled()
class Function(object):
"""Function builds a TensorFlow graph function from a callable.
In the high level this is similar to tf.Defun and tf.function. In fact this
relies on those as underlying implementations, but with specific configuration
so it's easier to use and can work well in some extreme cases in Lingvo.
Example usage:
- No inputs:
>>> @Function()
... def foo():
... return tf.constant(1.0)
>>> y = foo()
- Scalar input:
>>> @Function(fwd_sig=tf.TensorSpec(None, tf.float32))
... def foo(x):
... return x * 2
>>> y = foo(1.0)
- List input:
>>> @Function(fwd_sig=[tf.TensorSpec(None, tf.float32) for _ in range(2)])
... def foo(xs):
... return xs[0] + xs[1]
>>> y = foo([1.0, 2.0])
- Nested input:
>>> @Function(fwd_sig=NestedMap(x=tf.TensorSpec(None, tf.float32)))
... def foo(nmap):
... return nmap.x * 2
>>> y = foo(NestedMap(x=1.0))
- With custom gradient function (other input types mentioned above are also
supported):
>>> def bar(x, y, dy):
... del y, dy
... return 4.0 * x * dy
>>>
>>> @Function(fwd_sig=tf.TensorSpec(None, tf.float32), bak=bar)
... def foo(x):
... return 2.0 * x * x
- Used in control flow ops:
>>> then_branch = Function(tf.TensorSpec([], tf.int32))(lambda x: x / 2)
>>> else_branch = Function(tf.TensorSpec([], tf.int32))(lambda x: 3 * x + 1)
>>> y = tf.If(cond, inputs, then_branch.func, else_branch.func)
"""
# TODO(laigd): the use_tf_function option is added for backward compatibility
# reasons. Remove it after the migration.
def __init__(self,
fwd_sig=None,
bak=None,
bak_as_function=False,
device=None,
use_tf_function=None):
"""Constructor.
Below we assume `fwd` is the input to `__call__` that is used to build the
TensorFlow graph function encapsulated by this object.
Args:
fwd_sig: A Nested Structure of tf.TensorSpec representing the input
signature of `fwd`, or None (meaning that `fwd` takes no inputs). The
actual inputs should be compatible with this (have same shapes and
dtypes).
bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested
Structure. The custom backprop function for `fwd`. bak needs to return
dcapture if `fwd` uses any implicitly captured tensors, whose gradients
are dcapture.
bak_as_function: Whether to create a TF graph function for `bak`.
device: The device on which to run `fwd` and `bak`. Defaults to the
current device.
      use_tf_function: If True, use tf.function. Defaults to _UseTfFunction().
"""
self._fwd_sig = fwd_sig
self._bak = bak
self._bak_as_function = bak_as_function
self._device = device
self._use_tf_function = use_tf_function
def __call__(self, fwd):
"""Creates a graph function.
Args:
fwd: a callable xs: Nested Structure -> ys: Nested Structure.
Returns:
A DefinedFunction object encapsulating `fwd` as a graph function.
"""
assert callable(fwd)
return DefinedFunction(fwd, self._fwd_sig, self._bak, self._bak_as_function,
self._device, self._use_tf_function)
class DefinedFunction(object):
"""Encapsulates a TensorFlow graph function and its properties."""
def __init__(self,
fwd,
fwd_sig=None,
bak=None,
bak_as_function=False,
device=None,
use_tf_function=None):
"""Constructor.
Args:
fwd: A callable xs: Nested Structure -> ys: Nested Structure. Used to
build the TensorFlow graph function that this object encapsulates.
fwd_sig: A Nested Structure of tf.TensorSpec representing the input
signature of `fwd`, or None (meaning that `fwd` takes no inputs). The
actual inputs should be compatible with this (have same shapes and
dtypes).
bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested
Structure. The custom backprop function for `fwd`. bak needs to return
dcapture if `fwd` uses any implicitly captured tensors, whose gradients
are dcapture.
bak_as_function: Whether to create a TF graph function for `bak`.
device: The device on which to run `fwd` and `bak`. Defaults to the
current device.
      use_tf_function: If True, use tf.function. Defaults to _UseTfFunction().
"""
self._fwd_sig = fwd_sig
wrapped_fwd_sig = fwd_sig
fwd_fn = fwd
bak_fn = bak
graph_random_seed = None
if tf.get_default_graph().seed is not None:
graph_random_seed = tf.get_default_graph().seed
# Wrap the forward function to propagate framework tensors like step_seed
# and global_step.
wrapped_fwd_sig = NestedMap()
self._added_global_step = False
if GetGlobalStep() is not None:
wrapped_fwd_sig[_FRAMEWORK_TENSOR_GLOBAL_STEP] = (
tf.TensorSpec([], tf.int64))
self._added_global_step = True
if fwd_sig is not None:
wrapped_fwd_sig.inputs = fwd_sig
elif not wrapped_fwd_sig:
wrapped_fwd_sig = None
def ForwardWrapped(wrapped_inputs=None):
if graph_random_seed is not None:
tf.random.set_seed(graph_random_seed)
global_step = None
if wrapped_inputs:
assert isinstance(wrapped_inputs, NestedMap)
global_step = wrapped_inputs.get(_FRAMEWORK_TENSOR_GLOBAL_STEP, None)
with GlobalStepContext(global_step):
if wrapped_inputs and 'inputs' in wrapped_inputs:
result = fwd(wrapped_inputs.inputs)
else:
result = fwd()
return result
fwd_fn = ForwardWrapped
if bak:
# Wrap the backward function to return zero gradients for framework
# tensors like step_seed and global_step.
def BackwardWrapped(wrapped_xs, ys, dys):
if graph_random_seed is not None:
tf.random.set_seed(graph_random_seed)
with GlobalStepContext(
wrapped_xs.get(_FRAMEWORK_TENSOR_GLOBAL_STEP, None)):
result = bak(wrapped_xs.inputs, ys, dys)
dxs = Transform(tf.zeros_like, wrapped_xs)
if isinstance(result, tuple) and len(result) == 2:
dxs.inputs, dcapture = result
return dxs, dcapture
else:
dxs.inputs = result
return dxs
bak_fn = BackwardWrapped
if use_tf_function is None:
use_tf_function = _UseTfFunction()
fn = _DefineFunction if use_tf_function else _DefineDefun
self._data = fn(
fwd=fwd_fn,
fwd_sig=wrapped_fwd_sig,
bak=bak_fn,
bak_as_function=bak_as_function,
device=device)
def __call__(self, args=None):
"""Invokes the graph function.
Args:
args: the inputs to the graph function, must be compatible with `fwd_sig`.
Returns:
The output tensors with the same structure as the output of `fwd`,
returned by a call to the graph function.
"""
assert IsCompatible(args,
self._fwd_sig), '{} vs {}'.format(args, self._fwd_sig)
return self._data.call(self.AddFrameworkInputs(args))
@property
def func(self):
"""The underlying TensorFlow graph function that this object encapsulates.
The returned graph function is created by tracing `fwd` during construction.
If not None, it will be a _DefinedFunction or ConcreteFunction that takes
flat inputs and returns flat outputs, and can be used by routines that
require a TensorFlow function object (e.g. tf.If, tf.While, etc).
If no backprop function is provided during construction, the result is
always not None.
"""
return self._data.func
def AddFrameworkInputs(self, inputs):
"""Add framework tensors like step_seed and global_step to inputs.
This is only necessary when using `func`, as wrapping is handled
automatically in __call__.
Args:
inputs: inputs to the function.
Returns:
Inputs wrapped with framework tensors suitable for use with `func`.
"""
result = NestedMap()
if self._added_global_step:
global_step = GetGlobalStep()
assert global_step is not None
result[_FRAMEWORK_TENSOR_GLOBAL_STEP] = tf.cast(global_step, tf.int64)
if inputs is not None:
result.inputs = inputs
return result if result else None
@property
def output_dtypes(self):
"""Output dtypes of the graph function.
The result will have the same structure as the outputs of `fwd` but contain
the corresponding output dtypes.
"""
return Transform(lambda x: x.dtype, self._data.outputs)
@property
def stateful_ops(self):
"""Stateful ops used by `fwd`, as a list of (op_name, op_type) tuples."""
return self._data.stateful_ops
@property
def captured_inputs(self):
"""Implicit input tensors captured by `fwd`."""
return self._data.captured_inputs
def CallDefun(fwd, args=None, bak=None, bak_as_function=False, device=None):
"""Wraps fwd in a defun with custom gradient bak and calls it with args.
Args:
fwd: A callable xs: Nested Structure -> ys: Nested Structure.
args: A Nested Structure of tf.Tensor or None.
bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested
Structure. The custom backprop function for fwd. bak needs to return
dcapture if fwd uses any implicitly captured tensors, whose gradients are
dcapture.
bak_as_function: Whether to create a TF graph function for bak.
device: the device on which to run fwd and bak.
Returns:
A Nested Structure equivalent to what fwd(args) computes.
"""
if args is not None:
args = Transform(tf.convert_to_tensor, args)
sigs = Function(
fwd_sig=TensorSpecs(args),
bak=bak,
bak_as_function=bak_as_function,
device=device)(
fwd=fwd)
if args is None:
return sigs()
else:
return sigs(args)
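# Illustrative sketch of CallDefun (added for clarity; assumes a context where
# Defun/tf.function tracing is available):
#
#   ys = CallDefun(
#       fwd=lambda xs: NestedMap(z=xs.a + xs.b),
#       args=NestedMap(a=tf.constant(1.0), b=tf.constant(2.0)))
#   # ys.z evaluates to 3.0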
def If(cond, inputs, then_branch, else_branch):
"""Helper to construct an if/else statement.
Args:
cond: A scalar `Tensor` that can be converted to boolean.
inputs: A flattenable representing the input tensors of the if/else
statement. Can be None to represent no inputs.
then_branch: A callable 'inputs' -> flattenable. The returned value should
be compatible with what 'else_branch' returns.
else_branch: A callable 'inputs' -> flattenable. The returned value should
be compatible with what 'then_branch' returns.
Returns:
Output returned by the call to either 'then_branch' or 'else_branch'.
"""
fwd_sig = TensorSpecs(inputs)
then_sigs = Function(fwd_sig=fwd_sig)(fwd=then_branch)
else_sigs = Function(fwd_sig=fwd_sig)(fwd=else_branch)
assert IsCompatible(then_sigs.output_dtypes, else_sigs.output_dtypes), (
'Outputs of then_branch and else_branch are not compatible: {} vs {}'
.format(then_sigs.output_dtypes, else_sigs.output_dtypes))
if then_sigs.captured_inputs != else_sigs.captured_inputs:
raise ValueError('Differing captured inputs in then and else. '
'Ensure the same tensors are captured in the same order.')
ret = tf.If(
cond=cond,
inputs=Flatten(then_sigs.AddFrameworkInputs(inputs)) +
then_sigs.captured_inputs,
then_branch=then_sigs.func,
else_branch=else_sigs.func)
return Pack(then_sigs.output_dtypes, ret)
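# Illustrative sketch of If (added for clarity):
#
#   result = If(
#       cond=tf.constant(True),
#       inputs=NestedMap(x=tf.constant(4)),
#       then_branch=lambda nmap: NestedMap(y=nmap.x // 2),
#       else_branch=lambda nmap: NestedMap(y=3 * nmap.x + 1))
#   # result.y evaluates to 2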
def _Itype():
"""Loop iterator data type."""
return tf.int32 if use_xla() else tf.int64
def WhileLoop(cond, body, loop_state):
"""Helper to construct a while loop.
Args:
cond: A callable NestedMap -> tf.bool.
body: A callable NestedMap -> NestedMap.
loop_state: A flattenable (NestedMap, list, tuple, etc.) representing the
loop state.
Returns:
The final loop state in the same structure as loop_state.
"""
fwd_sig = TensorSpecs(loop_state)
cond_sigs = Function(fwd_sig=fwd_sig)(fwd=cond)
def BodyWrapped(loop_state):
result = body(loop_state)
# loop_state is augmented with global tensors inside of DefinedFunction.
# WhileLoop needs to return the same structure as the inputs, so we augment
# the return value here to match.
result = cond_sigs.AddFrameworkInputs(result)
return result
body_sigs = Function(fwd_sig=fwd_sig)(fwd=BodyWrapped)
wrapped_inputs = body_sigs.AddFrameworkInputs(loop_state)
new_state = tf.While(
Flatten(wrapped_inputs), cond=cond_sigs.func, body=body_sigs.func)
# The functional `While` used above does not have a registered gradient.
# This was not a problem in Graph mode, however in Eager mode,
# GradientTape will attempt to call the gradient of the While op in the
# forward pass. `stop_gradient` is used to pretend the op is a constant
# in the forward pass. This also avoids calling the gradient of other ops in
# `While` in the forward pass.
# Details in https://www.tensorflow.org/api_docs/python/tf/custom_gradient.
# Guarded by 'IsEagerMode' to limit impact.
if IsEagerMode():
new_state = [tf.stop_gradient(t) for t in new_state]
return Pack(wrapped_inputs, new_state).inputs
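# Illustrative sketch of WhileLoop (added for clarity):
#
#   final = WhileLoop(
#       cond=lambda s: tf.less(s.i, 10),
#       body=lambda s: NestedMap(i=s.i + 1, acc=s.acc + s.i),
#       loop_state=NestedMap(i=tf.constant(0), acc=tf.constant(0)))
#   # final.acc evaluates to 45 (the sum of 0..9) and final.i to 10.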
def ForLoop(body, start, limit, delta, loop_state):
"""Helper to construct a for loop.
Args:
body: A callable (tf.int, NestedMap) -> NestedMap.
start: Loop variable's initial value.
limit: Loop variable's limit value.
delta: Loop variable's change per iteration.
loop_state: A flattenable (NestedMap, list, tuple, etc.) representing the
loop state.
Returns:
The final loop state in the same structure as loop_state.
"""
state = NestedMap(
iter=tf.cast(start, _Itype()),
limit=tf.cast(limit, _Itype()),
delta=tf.cast(delta, _Itype()),
loop_state=loop_state)
def LoopCond(state):
return tf.less(state.iter, state.limit)
def LoopBody(state):
state.loop_state = body(state.iter, state.loop_state)
state.iter = tf.add(state.iter, state.delta)
return state
return WhileLoop(LoopCond, LoopBody, state).loop_state
def TopK(x_in, k):
"""Equivalent to tf.math.top_k(x_in, k) but more efficient on tpu."""
assert k <= 2, 'This implementation is only efficient for small k.'
# TODO(yonghui): Try out an alternative idea where we first reshape x_in as a
# 2d tensor, then call tf.math.top_k, and then reshape back.
x_in_shape = x_in.shape
x_rank = x_in_shape.rank
assert x_rank and x_in_shape.as_list()[x_rank - 1] > 0
last_dim_size = x_in_shape.as_list()[x_rank - 1]
min_value = tf.math.reduce_min(x_in) - 1.0
out_indices = []
out_values = []
for unused_i in range(k):
index_i = tf.math.argmax(x_in, axis=-1, output_type=tf.int32)
mask_i = tf.one_hot(index_i, last_dim_size)
# TODO(yonghui): Would tf.gather be more efficient and numerically stable
# here?
value_i = tf.reduce_sum(mask_i * x_in, -1, keepdims=True)
x_in = (1.0 - mask_i) * x_in + mask_i * min_value
out_indices.append(tf.expand_dims(index_i, -1))
out_values.append(value_i)
if k == 1:
return out_values[0], out_indices[0]
else:
return tf.concat(out_values, x_rank - 1), tf.concat(out_indices, x_rank - 1)
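# Illustrative sketch of TopK (added for clarity; only small k is supported):
#
#   values, indices = TopK(tf.constant([[0.1, 0.7, 0.2]]), k=1)
#   # values -> [[0.7]], indices -> [[1]]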
def ReadVariable(var_op):
"""Returns the value of the given variable operation.
Args:
var_op: the `Operation` object for a VarHandleOp.
Raises:
TypeError: if var_op is not a VarHandleOp.
Returns:
A `Tensor` containing the value of the variable.
"""
if var_op.type != 'VarHandleOp':
raise TypeError('var_op should be a VarHandleOp, got %s' % str(var_op.type))
# Filter out the ReadVariableOps that have control dependencies to avoid
# side-effects when the user runs it.
filter_fn = lambda op: op.type == 'ReadVariableOp' and not op.control_inputs
var_readers = list(filter(filter_fn, var_op.outputs[0].consumers()))
assert var_readers
return var_readers[0].outputs[0]
_TPU_SUMMARY_TENSORS_KEY = ('__lingvo_tpu_summary_tensors')
_TPU_SUMMARY_CONTEXTS = ThreadLocalStack()
def _GetTpuSummaryTensor():
if _TPU_SUMMARY_CONTEXTS.stack:
return _TPU_SUMMARY_CONTEXTS.stack[-1]
return _CollectionGetter(_TPU_SUMMARY_TENSORS_KEY, lambda: [])()
@contextlib.contextmanager
def TpuSummaryTensorContext():
"""Creates a context where AddTpuSummaryTensor() will add tensors."""
_TPU_SUMMARY_CONTEXTS.stack.append([])
try:
yield
finally:
_TPU_SUMMARY_CONTEXTS.stack.pop()
def AddTpuSummaryTensor(name, value, weight=1.0):
"""Adds tensor to global collection of summaries, or a local context if any.
  This is meant for situations where tf.summary() would normally be used but
  is currently not supported. Use py_utils.AddTpuSummaryTensor() in low-level
  code to add summary tensors to the global collection of summaries, then
  recover all summary tensors from that collection by calling
  py_utils.GetTpuSummaryTensors() from top-level code (for example, from the
  ComputeLoss method of BaseTask).
  In addition to the 'name' argument, the current tensorflow name scope is
  also captured and added to the metric name. This way, for example, summaries
  from a repeated layer will appear as separate graphs in tensorboard.
Weight argument is optional and defaults to 1.0. See BaseTask.ComputeLoss for
the exact definition of weight for eval metrics.
Args:
name: metric name
value: metric value tensor
weight: weight tensor for weighted metrics
"""
tpu_summary_tensors = _GetTpuSummaryTensor()
x = NestedMap()
x.name = name
x.value = value, tf.convert_to_tensor(weight)
x.name_scope = tf.get_default_graph().get_name_scope()
tpu_summary_tensors.append(x)
def GetTpuSummaryTensors():
"""Returns summary tensors from global collection.
Returns:
A dict containing str keys and (metric, weight) pairs as values
"""
tpu_summary_tensors = _GetTpuSummaryTensor()
return {
'%s/%s' % (x.name, SanitizeScopeKey(x.name_scope)): x.value
for x in tpu_summary_tensors
}
def ClearTpuSummaryTensors():
tpu_summary_tensors = _GetTpuSummaryTensor()
del tpu_summary_tensors[:]
def ComputationShape(split_size, topology=None):
"""Decides the computation shape based on the split_size.
Args:
split_size: number of accelerators to use per split.
topology: a serialized string of `tensorflow.tpu.TopologyProto`, or a
`tf.tpu.experimental.Topology` object, that describes the TPU cluster
topology. If not set, it'll use a default setting based on split_size.
Returns:
A 4-element list that describes the computation shape.
"""
if topology:
if isinstance(topology, tf.tpu.experimental.Topology):
topology_info = topology
else:
topology_info = tf_topology.Topology(serialized=topology)
computation_shape = None
if topology and functools.reduce(lambda a, b: a * b,
topology_info.mesh_shape) == split_size:
computation_shape = topology_info.mesh_shape
elif split_size == 1:
computation_shape = [1, 1, 1, 1]
elif topology and topology_info.mesh_shape[
-1] == 1 and split_size in topology_info.mesh_shape:
# For Megacore, if we find exact match on mesh shape, map split_size to it
computation_shape = [1, 1, 1, 1]
computation_shape[topology_info.mesh_shape.tolist().index(
split_size)] = split_size
else:
if topology:
cores_per_chip = topology_info.mesh_shape[-1]
else:
cores_per_chip = 2
assert split_size % cores_per_chip == 0
split_chips = split_size // cores_per_chip
if split_chips == 1:
computation_shape = [1, 1, 1, cores_per_chip]
elif split_chips == 2:
computation_shape = [1, 2, 1, cores_per_chip]
elif split_chips == 4:
computation_shape = [2, 2, 1, cores_per_chip]
elif split_chips == 8:
computation_shape = [4, 2, 1, cores_per_chip]
elif split_chips == 12:
computation_shape = [1, 1, 12, cores_per_chip]
elif split_chips == 16:
computation_shape = [4, 4, 1, cores_per_chip]
elif split_chips == 24:
computation_shape = [1, 2, 12, cores_per_chip]
elif split_chips == 32:
if topology and topology_info.mesh_shape[1] == 32:
# Fwd within-replica all-reduces is performed along column;
# Bwd gradient cross-replica all-reduces is performed along row.
        # This currently has better performance than the strided pattern.
computation_shape = [1, 32, 1, cores_per_chip]
else:
computation_shape = [4, 8, 1, cores_per_chip]
elif split_chips == 64:
computation_shape = [8, 8, 1, cores_per_chip]
elif split_chips == 128:
computation_shape = [8, 16, 1, cores_per_chip]
elif split_chips == 256:
computation_shape = [16, 16, 1, cores_per_chip]
elif split_chips == 512:
computation_shape = [16, 32, 1, cores_per_chip]
elif split_chips == 1024:
computation_shape = [32, 32, 1, cores_per_chip]
elif split_chips == 2048:
computation_shape = [64, 32, 1, cores_per_chip]
elif split_chips == 4096:
computation_shape = [128, 32, 1, cores_per_chip]
else:
assert False, ('Model parallelism with %d devices is currently not'
' supported.' % split_size)
assert computation_shape is not None
return computation_shape
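# Illustrative examples for ComputationShape without an explicit topology
# (added for clarity; 2 cores per chip is assumed in that case):
#
#   ComputationShape(2)   # -> [1, 1, 1, 2]  (one chip)
#   ComputationShape(8)   # -> [2, 2, 1, 2]  (four chips)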
def GetExtraVars():
"""Returns the captured variables by the function."""
g = tf.get_default_graph()
if isinstance(g, func_graph.FuncGraph):
return g.variable_captures
return function.get_extra_vars()
def GetExtraInputs():
"""Returns the captured input tensors by the function."""
g = tf.get_default_graph()
if isinstance(g, func_graph.FuncGraph):
return g.external_captures
return function.get_extra_inputs()
def GetExtraArgs():
"""Returns the corresponding function arguments for the captured inputs."""
g = tf.get_default_graph()
if isinstance(g, func_graph.FuncGraph):
return g.internal_captures
return function.get_extra_args()
def ShardedFilePatternToGlob(file_pattern):
"""Converts a file pattern path@shards to path-?????-of-shards."""
if ',' in file_pattern:
raise ValueError(
'ShardedFilePatternToGlob does not support multiple file patterns.')
if '@' not in file_pattern:
return file_pattern
path, shards = file_pattern.split('@')
if shards == '*':
return f'{path}-?????-of-*'
return f'{path}-?????-of-{int(shards):05}'
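# Illustrative examples for ShardedFilePatternToGlob (hypothetical paths,
# added for clarity):
#
#   ShardedFilePatternToGlob('/data/train@8')
#   # -> '/data/train-?????-of-00008'
#   ShardedFilePatternToGlob('/data/train@*')
#   # -> '/data/train-?????-of-*'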
def ComputeNceAndAuc(probs, targets, mask):
"""Compute normalized cross entropy and AUC of the PR curve for a batch.
Args:
probs: a tensor of shape [batch, time].
targets: a tensor of shape [batch, time], where each element is either 0 or
1 indicating wrong or correct.
mask: a tensor of shape [batch, time], a mask for hyp sequence.
Returns:
nce: a tensor of shape [1], the normalized cross entropy value.
auc: a tensor of shape [1], the AUC value.
"""
def LogWithClip(tensor, clip_value_min=1e-8):
"""Clip all elements of a tensor to a minimum before taking log."""
return tf.math.log(tf.clip_by_value(tensor, clip_value_min, 1.0))
bce = -targets * LogWithClip(probs) - (1 - targets) * LogWithClip(1 - probs)
num_cor = tf.reduce_sum(targets * mask)
num_tokens = tf.reduce_sum(mask)
wcr = num_cor / num_tokens
entropy = -wcr * LogWithClip(wcr) - (1 - wcr) * LogWithClip(1 - wcr)
avg_conditional_entropy = tf.reduce_mean(tf.boolean_mask(bce, mask))
nce = (entropy - avg_conditional_entropy) / entropy
auc = tf.metrics.auc(targets, probs, mask, curve='PR')[1]
return nce, auc
def GatherTensorValuesBySeqIndices(tensor, class_indices, keepdims=False):
"""Gather values from a 3d tensor according to sequences of indices.
Args:
tensor: a 3d tensor of [dim0, dim1, num_class], e.g. output from softmax.
class_indices: a 2d tensor of [dim0, dim1], where the second dim is a
sequence of class indices between 0 to num_class - 1, inclusive.
keepdims: bool, expand the last dimension of the returned tensor if True.
Returns:
A tensor ret of [dim0, dim1], where
ret[b, t] = tensor[b, t, indices[b, t]].
If keepdims is True, then ret has shape [dim0, dim1, 1].
"""
tensor = HasRank(tensor, 3)
class_indices = HasRank(class_indices, 2)
tensor = HasShape(tensor, GetShape(class_indices), 2)
dim0 = GetShape(class_indices)[0]
dim1 = GetShape(class_indices)[1]
dim0_indices = tf.tile(tf.expand_dims(tf.range(dim0), axis=-1), [1, dim1])
dim1_indices = tf.tile(tf.expand_dims(tf.range(dim1), axis=0), [dim0, 1])
gather_indices = tf.stack([
tf.cast(dim0_indices, dtype=class_indices.dtype),
tf.cast(dim1_indices, dtype=class_indices.dtype), class_indices
],
axis=-1)
ret = tf.gather_nd(tensor, gather_indices)
if keepdims:
ret = tf.expand_dims(ret, axis=-1)
return ret
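# Illustrative sketch of GatherTensorValuesBySeqIndices with hypothetical
# shapes (added for clarity):
#
#   tensor = tf.random.uniform([2, 5, 10])       # [dim0, dim1, num_class]
#   class_indices = tf.zeros([2, 5], tf.int32)   # one class id per position
#   ret = GatherTensorValuesBySeqIndices(tensor, class_indices)
#   # ret[b, t] == tensor[b, t, class_indices[b, t]]; ret has shape [2, 5].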
def GetSoftmaxProbsBySeqIndices(logits, indices, keepdims=False):
"""Get softmax probabilities from index sequences given logits sequences.
Args:
logits: a tensor of [batch, time, num_class] or [time, batch, num_class].
indices: a tensor of [batch, time] or [time, batch].
keepdims: bool, expand the last dimension of the returned tensor if True.
Returns:
a tensor of [batch, time] or [time, batch] for the corresponding softmax
probabilities. If keepdims is True, returned tensor has a third dimension
of size 1.
"""
probs = tf.nn.softmax(logits)
return GatherTensorValuesBySeqIndices(probs, indices, keepdims)
def DivideNoNan(x, y):
"""Equivalent to tf.math.divide_no_nan but supports bfloat16."""
safe_y = tf.where(tf.equal(y, 0.), tf.ones_like(y), y)
return tf.where(tf.equal(y, 0.0), tf.zeros_like(x), x / safe_y)
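# Illustrative sketch of DivideNoNan (added for clarity):
#
#   DivideNoNan(tf.constant([1., 2.]), tf.constant([0., 4.]))
#   # -> [0., 0.5]; a zero denominator yields 0 instead of inf/nan.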
def SequencePaddings(seqlen, maxlen=None):
mask = tf.sequence_mask(seqlen, maxlen, dtype=tf.float32)
return 1 - mask
def AppendDims(x, ndims):
return tf.reshape(x, GetShape(x) + [1] * ndims)
def MaybeSoftCapLogits(x, cap=0.0):
"""Caps logits x to be within a certain range.
Args:
x: A float tensor, the logit values to be capped.
cap: a float, the limit to cap x within. If cap <= 0.0, x is not capped.
Returns:
logits after capping.
"""
if cap <= 0.0:
return x
else:
return cap * tf.math.tanh(x / cap)
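# Illustrative sketch of MaybeSoftCapLogits (added for clarity):
#
#   x = tf.constant([-100., 0., 100.])
#   MaybeSoftCapLogits(x, cap=50.0)   # -> approximately [-48.2, 0., 48.2]
#   MaybeSoftCapLogits(x, cap=0.0)    # -> x, unchanged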
def GetTpuEmbeddingGraphCollection():
"""Return the graph collection that stores the TpuEmbeddingCollection."""
tpu_emb_graph_collection = tf.get_collection_ref('__tpu_embedding_collection')
assert len(tpu_emb_graph_collection) <= 1
return tpu_emb_graph_collection
class AuxLossContext:
"""Context that holds a list of aux-losses.
By default it is non-reentrant, but can be specified as reentrant explicitly
when creating an inner context.
"""
_global_stack = []
@classmethod
def Current(cls):
"""Returns current context or None."""
if cls._global_stack:
return cls._global_stack[-1]
else:
return None
def __init__(self, reentrant=False):
self.aux_loss_tensors = []
self._reentrant = reentrant
def AddLoss(self, loss):
self.aux_loss_tensors.append(loss)
@property
def aux_losses(self):
return self.aux_loss_tensors
def __enter__(self):
if not self._reentrant:
assert not self._global_stack, 'no re-entry'
self._global_stack.append(self)
return self
def __exit__(self, *args):
self._global_stack.pop()
def GetTrainableVariables(scope, bprop_variable_filter,
bprop_variable_exclusion, vmap):
"""Returns trainable vars.
Args:
scope: A Python str.
bprop_variable_filter: see BaseTask.Params().bprop_variable_filter.
bprop_variable_exclusion: see BaseTask.Params().bprop_variable_exclusion.
vmap: A NestedMap of var_path(str) -> tf Variable.
Returns:
A filtered NestedMap of var_path(str) -> trainable tf Variable.
"""
pos = re.compile(bprop_variable_filter) if bprop_variable_filter else None
neg = re.compile(
bprop_variable_exclusion) if bprop_variable_exclusion else None
def VariableFilter(v):
"""Returns True if variable v should be optimized by this learner."""
if not v.trainable:
return False
if pos and not pos.search(v.name):
tf.logging.info('%s: disabled by bprop_variable_filter: %s', scope,
v.name)
return False
if neg and neg.search(v.name):
tf.logging.info('%s: disabled by bprop_variable_exclusion: %s', scope,
v.name)
return False
return True
return vmap.Filter(VariableFilter)
def BlockDiagonalMatmul(inputs, w, input_num_blocks):
"""Block diagonal matmul.
Args:
inputs: a tf.Tensor with the last dimension being the dimension for matmul.
w: an order-3 tf.Tensor of shape (input_num_blocks, input_dim //
input_num_blocks, output_dim // input_num_blocks)
input_num_blocks: an int specifying number of blocks for the input.
Returns:
A tf.Tensor of shape: inputs.shape[:-1] + [w.shape[-1]].
"""
input_splitted = tf.split(inputs, input_num_blocks, axis=-1)
output_splitted = []
for i, input_i in enumerate(input_splitted):
output_splitted.append(tf.matmul(input_i, w[i, :, :]))
return tf.concat(output_splitted, axis=-1)
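# Illustrative sketch of BlockDiagonalMatmul with hypothetical shapes (added
# for clarity):
#
#   inputs = tf.random.normal([4, 64])   # last dim 64, split into 4 blocks
#   w = tf.random.normal([4, 16, 32])    # each block maps 16 -> 32 dims
#   out = BlockDiagonalMatmul(inputs, w, input_num_blocks=4)
#   # out has shape [4, 128]: block outputs concatenated on the last dim.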
def BlockDiagonalMatmulWithMix(inputs, w, mix_kernel, input_num_blocks):
"""Block diagonal matmul with mix.
With mix, the results from the blocked matmul are (linearly) mixed with
trainable weights in mix_kernel.
Args:
inputs: a tf.Tensor with the last dimension being the dimension for matmul.
w: an order-3 tf.Tensor of shape (input_num_blocks, input_dim //
input_num_blocks, output_dim // input_num_blocks).
mix_kernel: an order-2 tf.Tensor of shape (input_num_blocks,
input_num_blocks).
input_num_blocks: an int specifying number of blocks for the input.
Returns:
A tf.Tensor of shape: inputs.shape[:-1] + [w.shape[-1]].
"""
input_splitted = tf.split(inputs, input_num_blocks, axis=-1)
output_splitted = []
for i, input_i in enumerate(input_splitted):
output_splitted.append(tf.matmul(input_i, w[i, :, :]))
output_mixed = [0.0] * input_num_blocks
for i in range(input_num_blocks):
for j in range(input_num_blocks):
output_mixed[i] += mix_kernel[i, j] * output_splitted[j]
output_splitted = output_mixed
return tf.concat(output_splitted, axis=-1)
def BlockDiagonalProjectLastDim(inputs,
weight,
input_dim,
output_dim,
num_blocks=1):
"""Block diagonal linear projection on the last dim of the input tensor.
This is a TPU efficient implementation to avoid reshaping inputs to Rank-2
tensor by using Einsum for the compute.
Args:
inputs: An input Tensor, the last dimension of which is input_dim.
weight: A weight matrix with shape [input_dim, output_dim].
input_dim: An integer or a symbolic dim, the last dimension of the inputs.
output_dim: An integer or a symbolic dim, the last dimension of the outputs.
num_blocks: An integer or a symbolic dim, the number of blocks.
Returns:
An output Tensor of the same rank as inputs, the last dimension is
output_dim.
"""
input_dim = int(
symbolic.ToStatic(input_dim) if symbolic.IsExpr(input_dim) else input_dim)
output_dim = int(
symbolic.ToStatic(output_dim) if symbolic.IsExpr(output_dim
) else output_dim)
# Assert input_dim and output_dim
inputs = with_dependencies([assert_equal(GetShape(inputs)[-1], input_dim)],
inputs)
weight = with_dependencies([
assert_equal(GetShape(weight)[0], num_blocks),
assert_equal(GetShape(weight)[1], input_dim // num_blocks),
assert_equal(GetShape(weight)[-1], output_dim // num_blocks)
], weight)
if (use_tpu() and inputs.shape is not None and
inputs.shape.rank is not None and inputs.shape.rank < 26):
# Avoids reshape if feasible and uses Einsum.
if inputs.shape.rank == 2:
# outputs = tf.matmul(inputs, weight)
outputs = BlockDiagonalMatmul(inputs, weight, num_blocks)
else:
# This is equivalent to:
# outputs = tf.einsum('...y,yz->...z', inputs, weight)
# Unfortunately ... in einsum() leads to extra HBM usage.
s = ''.join([chr(x) for x in range(97, 123)]) # abc...xyz
r = inputs.shape.rank
input_splitted = tf.split(inputs, num_blocks, axis=-1)
output_splitted = []
for i, input_i in enumerate(input_splitted):
output_splitted.append(
tf.einsum('{0}y,yz->{0}z'.format(s[:r - 1]), input_i,
weight[i, :, :]))
outputs = tf.concat(output_splitted, axis=-1)
else:
outputs = BlockDiagonalMatmul(
tf.reshape(inputs, ToStaticShape([-1, input_dim])), weight, num_blocks)
outputs = tf.reshape(
outputs,
tf.concat([
tf.cast(GetShape(inputs)[:-1], tf.int32),
ToStaticShape([output_dim])
],
axis=0))
return outputs
def BlockDiagonalProjectLastDimWithMix(inputs,
weight,
input_dim,
output_dim,
mix_kernel,
num_blocks=1):
"""Block diagonal linear projection on the last dim with mix.
This is a TPU efficient implementation to avoid reshaping inputs to Rank-2
tensor by using Einsum for the compute.
Args:
inputs: An input Tensor, the last dimension of which is input_dim.
weight: A weight matrix with shape [input_dim, output_dim].
input_dim: An integer or a symbolic dim, the last dimension of the inputs.
output_dim: An integer or a symbolic dim, the last dimension of the outputs.
mix_kernel: an order-2 tf.Tensor of shape (num_blocks, num_blocks).
num_blocks: An integer or a symbolic dim, the number of blocks.
Returns:
An output Tensor of the same rank as inputs, the last dimension is
output_dim.
"""
input_dim = int(
symbolic.ToStatic(input_dim) if symbolic.IsExpr(input_dim) else input_dim)
output_dim = int(
symbolic.ToStatic(output_dim) if symbolic.IsExpr(output_dim
) else output_dim)
# Assert input_dim and output_dim
inputs = with_dependencies([assert_equal(GetShape(inputs)[-1], input_dim)],
inputs)
weight = with_dependencies([
assert_equal(GetShape(weight)[0], num_blocks),
assert_equal(GetShape(weight)[1], input_dim // num_blocks),
assert_equal(GetShape(weight)[-1], output_dim // num_blocks)
], weight)
if (use_tpu() and inputs.shape is not None and
inputs.shape.rank is not None and inputs.shape.rank < 26):
# Avoids reshape if feasible and uses Einsum.
if inputs.shape.rank == 2:
outputs = BlockDiagonalMatmulWithMix(inputs, weight, mix_kernel,
num_blocks)
else:
# This is equivalent to:
# outputs = tf.einsum('...y,yz->...z', inputs, weight)
# Unfortunately ... in einsum() leads to extra HBM usage.
s = ''.join([chr(x) for x in range(97, 123)]) # abc...xyz
r = inputs.shape.rank
input_splitted = tf.split(inputs, num_blocks, axis=-1)
output_splitted = []
for i, input_i in enumerate(input_splitted):
output_splitted.append(
tf.einsum('{0}y,yz->{0}z'.format(s[:r - 1]), input_i,
weight[i, :, :]))
output_mixed = [0.0] * num_blocks
for i in range(num_blocks):
for j in range(num_blocks):
output_mixed[i] += mix_kernel[i, j] * output_splitted[j]
output_splitted = output_mixed
outputs = tf.concat(output_splitted, axis=-1)
else:
outputs = BlockDiagonalMatmulWithMix(
tf.reshape(inputs, ToStaticShape([-1, input_dim])), weight, mix_kernel,
num_blocks)
outputs = tf.reshape(
outputs,
tf.concat([
tf.cast(GetShape(inputs)[:-1], tf.int32),
ToStaticShape([output_dim])
],
axis=0))
return outputs
def GetProcessedCheckpoints(runner_dir):
"""Returns the list of checkpoints previously processed by this runner."""
# Set up (or reload) a file storing the list of previously processed
# checkpoints. This caching allows jobs to run on VMs which may be
# interrupted without duplicating work.
processed_ckpts_path = os.path.join(runner_dir, 'processed_ckpts.txt')
if not tf.io.gfile.exists(processed_ckpts_path):
with tf.io.gfile.GFile(processed_ckpts_path, 'w') as f:
f.write('')
with tf.io.gfile.GFile(processed_ckpts_path, 'r') as f:
processed_ckpts = list(line.strip() for line in f.readlines())
return processed_ckpts
def UpdateProcessedCheckpoints(runner_dir, ckpt_path):
"""Denotes 'ckpt_path' as having been processed by this runner."""
processed_ckpts_path = os.path.join(runner_dir, 'processed_ckpts.txt')
  # Some file systems don't support append operations, so we rewrite the whole
  # file to append the latest checkpoint.
processed_ckpts = GetProcessedCheckpoints(runner_dir)
processed_ckpts.append(ckpt_path)
with tf.io.gfile.GFile(processed_ckpts_path, 'w') as f:
f.write('\n'.join(processed_ckpts) + '\n')
def MergeDictsWithValueCheck(dict1, dict2):
"""Merges two dictionaries with same-key values."""
common_keys = set(dict1.keys()) & set(dict2.keys())
for key in common_keys:
# The values must be the same object
if dict1[key] is not dict2[key]:
raise RuntimeError(f'The same key {key} corresponds to different values '
f'in the dictionaries: {dict1[key]} vs {dict2[key]}')
dict1.update(dict2)
return dict1
def MergeDuplicateIds(ids, paddings, extra_tensors=None):
"""Merge consecutive duplicated ids.
  Given ids = [4, 4, 5, 6, 6, 5, 0, 0] and paddings = [0, 0, 0, 0, 0, 0, 1, 1],
this function returns ret_ids = [4, 5, 6, 5, 0, 0, 0, 0] and paddings = [
0, 0, 0, 0, 1, 1, 1, 1] by merging consecutive duplicated ids.
Args:
ids: A non-negative tensor of shape [batch, time].
    paddings: A padding tensor of shape [batch, time], with "0" for non-padded
      positions and "1" for padded positions.
extra_tensors: A `.NestedMap` containing tensors that need to be
deduplicated according to ids, each tensor at least has two dimensions.
Returns:
ret_ids: same as ids.
ret_paddings: same as paddings.
ret_tensors: same as extra_tensors.
"""
ids = with_dependencies([assert_greater_equal(ids, 0)], ids)
prev_ids = tf.pad(ids, [[0, 0], [1, 0]], constant_values=-1)[:, :-1]
keep = tf.cast(tf.math.not_equal(ids, prev_ids), tf.int32) * tf.cast(
1 - paddings, tf.int32)
b, t = GetShape(ids)
# Generate descend_keep in descending order for each row and set elements in
# the matrix to 0 if they are duplicated ids.
descend_keep = keep * tf.range(t, 0, -1, dtype=tf.int32)
# Get the indices of non-duplicated ids along the time axis.
sorted_indices = tf.argsort(descend_keep, stable=True, direction='DESCENDING')
# Get the batch indices.
batch_indices = tf.tile(tf.expand_dims(tf.range(b), -1), [1, t])
# Stack them to get 2-d indices.
ids_indices = tf.stack([batch_indices, sorted_indices], axis=2)
seq_mask = tf.sequence_mask(tf.reduce_sum(keep, axis=-1), t, paddings.dtype)
ret_paddings = 1. - seq_mask
ret_ids = tf.gather_nd(ids, ids_indices) * tf.cast(seq_mask, ids.dtype)
ret_tensors = NestedMap()
if extra_tensors:
for key, tensor in extra_tensors.items():
tensor_mask = ExpandTo(seq_mask, GetRank(tensor))
ret_tensors[key] = tf.gather_nd(tensor, ids_indices) * tf.cast(
tensor_mask, tensor.dtype)
return ret_ids, ret_paddings, ret_tensors
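# Editor's sketch (not part of the original module): reproduces the docstring
# example of MergeDuplicateIds; assumes TF eager execution and that the helpers
# used above (GetShape, NestedMap, ...) are available in this module's scope.
def _ExampleMergeDuplicateIds():
  """Illustrative only."""
  ids = tf.constant([[4, 4, 5, 6, 6, 5, 0, 0]], dtype=tf.int32)
  paddings = tf.constant([[0., 0., 0., 0., 0., 0., 1., 1.]])
  ret_ids, ret_paddings, _ = MergeDuplicateIds(ids, paddings)
  # Expected per the docstring: ret_ids == [[4, 5, 6, 5, 0, 0, 0, 0]] and
  # ret_paddings == [[0, 0, 0, 0, 1, 1, 1, 1]].
  return ret_ids, ret_paddings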
def DecodeProtoField(serialized_protos, message_type, field_name, output_type):
"""Decodes a non-repeated field in a proto.
Args:
serialized_protos: A string Tensor of shape [batch].
message_type: Name of the proto message type. Since tf.io.decode_proto() is
called with the default descriptor_source='local://', the C++ (not Python!)
proto definition(s) must be linked to the binary. You can link in a proto
descriptor by creating a cc_library target with alwayslink=1.
field_name: Name of the field.
output_type: A DType for the output.
Returns:
A Tensor of shape [batch].
"""
_, [output] = tf.io.decode_proto(serialized_protos, message_type,
[field_name], [output_type])
return tf.squeeze(output, -1)
def DecodeRepeatedProtoField(serialized_protos, message_type, field_name,
output_type):
"""Decodes a repeated field in a proto.
Args:
serialized_protos: A string Tensor of shape [batch].
message_type: Name of the proto message type. Since tf.io.decode_proto() is
called with the default descriptor_source='local://', the C++ (not Python!)
proto definition(s) must be linked to the binary. You can link in a proto
descriptor by creating a cc_library target with alwayslink=1.
field_name: Name of the field.
output_type: A DType for the output.
Returns:
A Tensor of shape [batch, field_name_size].
"""
[output] = tf.io.decode_proto(serialized_protos, message_type, [field_name],
[output_type]).values
return output
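# Editor's sketch (not part of the original module): the message type and field
# names below are hypothetical and only illustrate the calling convention of the
# two decoders above; the proto's C++ descriptor must be linked into the binary.
def _ExampleDecodeEventProtos(serialized_events):
  """Illustrative only, assuming a proto `my.package.Event`."""
  timestamps = DecodeProtoField(serialized_events, 'my.package.Event',
                                'timestamp', tf.int64)
  tags = DecodeRepeatedProtoField(serialized_events, 'my.package.Event',
                                  'tags', tf.string)
  return timestamps, tags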
|
tensorflow/lingvo
|
lingvo/core/py_utils.py
|
Python
|
apache-2.0
| 235,298
|
[
"Gaussian"
] |
76e4301473db9965712e30c37e8e4bd9775fd17c44f1861add66620459fbe01c
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
from antlr4 import *
from antlr4.InputStream import InputStream
from antlr4.error.ErrorStrategy import DefaultErrorStrategy
from UnitXLexer import UnitXLexer
from UnitXParser import UnitXParser
from eval_visitor import EvalVisitor
from eval_error_strategy import EvalErrorStrategy
from eval_error_listener import EvalErrorIOListener
from eval_error_listener import EvalErrorIntaractiveListener
from eval_error_listener import EvalErrorStringCodeListener
from util import Util
from constants import Constants
from cmd import Cmd
import readline
import rlcompleter
class Example(Cmd):
"""A class running a parser on each mode.
Attributes:
is_intaractive_run: A bool indicating whether an intaractive mode.
stock_line: A string stocking a code which is a block statement
on the intaractive mode.
errhandler: An instance of EvalErrorStrategy for reporting all errors.
visitor: An instance of EvalVisitor called by a parser.
parser: An instance of UnitXParser for parsing codes.
Cmd.prompt: A string displaying against every code line.
"""
    #
    # The prompt string displayed before every input line.
    #
Cmd.prompt = 'unitx> '
def __init__(self, is_intaractive_run):
"""Inits attributes of a Unit class."""
Cmd.__init__(self)
self.is_intaractive_run = is_intaractive_run
self.stock_line = ""
self.errhandler = EvalErrorStrategy(self.is_intaractive_run)
self.visitor = EvalVisitor(self.is_intaractive_run, self.errhandler)
self.parser = UnitXParser(None)
self.parser._errHandler = self.errhandler
self.visitor.set_parser(self.parser)
if is_intaractive_run:
a_listener = EvalErrorIntaractiveListener(self.visitor)
else:
a_listener = EvalErrorIOListener(self.visitor)
self.parser._listeners = [a_listener]
self.visitor.set_errlistener(a_listener)
def do_demo(self, arg_line):
"""Executes demo programs. This is called, when 'demo' is typed by a user.
Args:
arg_line: A string which is given by a user.
"""
if arg_line.isdigit(): print "Xdemo", int(arg_line)
else: pass #error
def do_help(self, arg_line):
"""Prints a help. This is called, when 'help' is typed by a user.
Args:
arg_line: A string which is given by a user.
"""
print "I don't want to help you."
def do_quit(self, arg_line):
"""Quits this program. This is called, when 'quit' is typed by a user.
Args:
arg_line: A string which is given by a user.
"""
sys.exit(Constants.EXIT_SUCCESS)
def do_EOF(self, arg_line):
"""Prints a new line for a new command line. This is called, when <ctrl+D> or <EOF> are called by a user.
Args:
arg_line: A string which is given by a user.
        Returns:
            True, which tells the command loop to exit.
"""
print
return True
def emptyline(self):
"""Executes a code when '\n' is typed by a user."""
self.talk('' + '\n')
def default(self, a_line):
"""Executes a code when there is a string, except a command string
which is defined in this class.
Attributes:
a_line: a string which is typed by a user, except a command stirng
which is defined in this class.
"""
self.talk(a_line + '\n')
def talk_loop(self):
"""Repeatedly issue a prompt, accept input, parse an initial prefix off the received input, and dispatch to action methods, passing them the remainder of the line as argument."""
try: Cmd.cmdloop(self)
except KeyboardInterrupt as e:
print 'KeyboardInterrupt!'
self.talk_loop()
return
def eat_string(self, code_str):
"""
"""
        #TODO(Tasuku): Changed a_listener forcibly because it's difficult to fix now
a_listener = EvalErrorStringCodeListener(self.visitor)
self.parser._listeners = [a_listener]
self.visitor.set_errlistener(a_listener)
code_str = code_str.decode('utf-8')
lines = code_str.split('\n')
self.visitor.get_errlistener().set_codelines(lines)
a_stream = InputStream(code_str)
self.parse(a_stream)
return
def eat_code(self, a_path):
"""Executes a code indicated as a_path on the IO mode.
In this version, we just support UTF-8. As the next vision, we need to support UTF-8.
Attributes:
a_path: a string indicating a path of the source code.
"""
self.visitor.get_errlistener().set_codepath(a_path)
a_stream = FileStream(a_path, encoding='utf-8')
self.parse(a_stream)
return
def talk(self, a_line):
"""Executes a code indicated as a_path on the IO mode.
In this version, we just support UTF-8. As the next vision, we need to support UTF-8.
Attributes:
a_line: a string which is typed by a user, except a command string
which is defined in this class.
"""
if self.errhandler.is_ignored_block:
# Errors of block statement never happen until coming empty char.
if not a_line.strip():
self.errhandler.is_ignored_block = False
codeline = self.stock_line + a_line
else:
codeline = a_line
codeline = codeline.decode('utf-8')
lines = codeline.split('\n')
self.visitor.get_errlistener().set_codelines(lines)
a_stream = InputStream(codeline)
self.parse(a_stream)
if self.errhandler.is_ignored_block:
Cmd.prompt = '...... '
self.stock_line = self.stock_line + a_line
else:
Cmd.prompt = 'unitx> '
self.stock_line = ""
return
def parse(self, a_stream):
"""Parses a stream which is FileStream(the IO mode) or InputStream(the intaractive mode).
Attributes:
a_stream: an instance of FileStream(the IO mode) or InputStream(the intaractive mode).
"""
a_lexer = UnitXLexer(a_stream)
token_stream = CommonTokenStream(a_lexer)
self.parser.setTokenStream(token_stream)
a_tree = self.parser.program() #Bug
self.visitor.visit(a_tree)
return
def main(argv):
"""Run an example for a Unit class."""
if len(argv) > 1:
cmd = Example(is_intaractive_run=False)
cmd.eat_code(argv[1])
else:
cmd = Example(is_intaractive_run=True)
import intro_line
print intro_line.get_line()
cmd.talk_loop()
return Constants.EXIT_SUCCESS
if __name__ == '__main__':
sys.exit(main(sys.argv))
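# Editor's note (not part of the original script): usage sketch.
#   $ python example.py path/to/code.unitx   # IO mode: parses and runs the file
#   $ python example.py                      # interactive mode with the 'unitx> ' prompt
# Both modes assume the ANTLR-generated UnitXLexer/UnitXParser modules are importable.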
|
0ED/UnitX
|
unitx/example.py
|
Python
|
mit
| 6,946
|
[
"VisIt"
] |
38f47bf213d215d864cf031a9e65c2ca2b51f529a32df955b5e3a87478d71224
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.core.kernel Contains the ConvolutionKernel class.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import standard modules
import math
import numpy as np
from scipy import ndimage
from scipy.ndimage.interpolation import shift
# Import astronomical modules
from astropy.modeling import models, fitting
from photutils.morphology import centroid_com, centroid_1dg, centroid_2dg
# Import the relevant PTS classes and modules
from .frame import Frame
from ...core.tools.logging import log
from ..tools import statistics
# -----------------------------------------------------------------
class ConvolutionKernel(Frame):
"""
This class ...
"""
# -----------------------------------------------------------------
def __init__(self, data, *args, **kwargs):
"""
This function ...
"""
# Call the constructor of the base class
super(ConvolutionKernel, self).__init__(data, *args, **kwargs)
# Check the FWHM
if self.fwhm is None: raise ValueError("FWHM must be specified if not present in header")
# Set the WCS to None, but keep the pixelscale
if self._wcs is not None:
if self._pixelscale is None: self._pixelscale = self.wcs.pixelscale
elif not np.isclose(self._pixelscale, self.wcs.pixelscale): raise ValueError("Pixelscale in the header does not correspond to the specified pixelscale")
self._wcs = None
elif self._pixelscale is None: raise ValueError("Pixelscale must be specified if not present in header")
# Prepared
self._prepared = False
if "prepared" in self.meta: self._prepared = self.meta["prepared"]
# Make sure that the data is in 64 bit floating-point precision
self._data = self._data.astype("float64")
# -----------------------------------------------------------------
@property
def prepared(self):
"""
This function ...
:return:
"""
return self._prepared
# -----------------------------------------------------------------
@property
def normalized(self):
"""
This function ...
:return:
"""
        #return np.abs(self.sum() - 1.) < 1e-7 # criterion as in the astropy.convolution module = 1e-8, but the same FITS file had a slightly different sum on different systems (laptop and nancy)
        # The cause was astropy issue #5176: the machine epsilon of 32-bit floats
        # is 1.1920928955078125e-07. The kernel data is now always kept in 64-bit
        # representation, so 1e-8 can be used here.
return np.abs(self.sum() - 1.) < 1e-8
# -----------------------------------------------------------------
def prepare_for(self, image, sigma_level=10.0):
"""
This function ...
:param image:
:param sigma_level:
:return:
"""
# Prepare
self.prepare(image.pixelscale, sigma_level)
# -----------------------------------------------------------------
def prepare(self, pixelscale, sigma_level=10.0):
"""
This function ...
:param pixelscale:
:param sigma_level:
:return:
"""
# Inform the user
log.info("Preparing the kernel ...")
# Truncate
self.truncate(sigma_level)
# Adjust pixelscale
self.adjust_pixelscale(pixelscale)
# Recenter
self.recenter()
# Normalize
self.normalize()
# Set prepared flag to True
self._prepared = True
# -----------------------------------------------------------------
def truncate(self, sigma_level=10.0):
"""
This function ...
:return:
"""
# Debugging
log.debug("Truncating the kernel to a sigma level of " + str(sigma_level) + " ...")
# Determine the radius in number of pixels
sigma_pix = statistics.fwhm_to_sigma * self.fwhm_pix
radius = sigma_level * sigma_pix
center_x = 0.5 * (self.xsize - 1.)
center_y = 0.5 * (self.ysize - 1.)
min_x = int(round(center_x - radius))
max_x = int(round(center_x + radius))
min_y = int(round(center_y - radius))
max_y = int(round(center_y + radius))
# Crop
self.crop(min_x, max_x, min_y, max_y)
# -----------------------------------------------------------------
def adjust_pixelscale(self, pixelscale):
"""
This function ...
:return:
"""
# Debugging
log.debug("Adjusting the pixelscale of the kernel to match that of the image ...")
average_pixelscale = 0.5 * (pixelscale.x + pixelscale.y)
# Calculate the zooming factor
factor = (average_pixelscale / self.average_pixelscale).to("").value
# Rebin to the pixelscale
new_data = ndimage.interpolation.zoom(self._data, zoom=1.0 / factor)
# Set the new data and pixelscale
self._data = new_data
self._pixelscale = pixelscale
# -----------------------------------------------------------------
def recenter(self, centroid_method="2dg"):
"""
This function ...
:return:
"""
center_x = 0.5 * (self.xsize - 1)
center_y = 0.5 * (self.ysize - 1)
if centroid_method == "com": x_centroid, y_centroid = self.centroid_com()
elif centroid_method == "fit": x_centroid, y_centroid = self.centroid_fit()
elif centroid_method == "2dg": x_centroid, y_centroid = self.centroid_2dg()
elif centroid_method == "aniano": x_centroid, y_centroid = self.get_maximum_aniano()
else: raise ValueError("Invalid centroid method")
# Debugging
log.debug("The centroid coordinate of the kernel was found to be " + str(x_centroid) + ", " + str(y_centroid))
log.debug("The center of the kernel image is " + str(center_x) + ", " + str(center_y))
# Calculate shift
shift_x = center_x - x_centroid
shift_y = center_y - y_centroid
# If the shift is less than 0.2 pixel, don't shift
        if abs(shift_x) < 0.2 and abs(shift_y) < 0.2:
log.debug("Kernel is already perfectly aligned with the center: skipping recentering ...")
return
# Debugging
log.debug("Shifting the kernel center by (" + str(shift_x) + ", " + str(shift_y) + " pixels ...")
# Shift
self._data = shift(self._data, [shift_x, shift_y])
# CHECK AGAIN
if centroid_method == "com": x_centroid, y_centroid = self.centroid_com()
elif centroid_method == "fit": x_centroid, y_centroid = self.centroid_fit()
elif centroid_method == "2dg": x_centroid, y_centroid = self.centroid_2dg()
elif centroid_method == "aniano": x_centroid, y_centroid = self.get_maximum_aniano()
else: raise ValueError("Invalid centroid method")
new_shift_x = center_x - x_centroid
new_shift_y = center_y - y_centroid
new_shift_x_relative = abs(new_shift_x) / abs(shift_x)
new_shift_y_relative = abs(new_shift_y) / abs(shift_y)
#print("new shift x relative " + str(new_shift_x_relative))
#print("new shift y relative " + str(new_shift_y_relative))
if new_shift_x_relative >= 0.1: raise RuntimeError("The recentering of the kernel failed: new x shift = " + str(new_shift_x) + ", previous x shift = " + str(shift_x))
if new_shift_y_relative >= 0.1: raise RuntimeError("The recentering of the kernel failed: new y shift = " + str(new_shift_y) + ", previous y shift = " + str(shift_y))
# -----------------------------------------------------------------
def centroid_com(self):
"""
This function ...
:return:
"""
return centroid_com(self._data)
# -----------------------------------------------------------------
def centroid_2dg(self):
"""
This function ...
:return:
"""
return centroid_2dg(self._data)
# -----------------------------------------------------------------
def center_fit(self):
"""
This function ...
:return:
"""
from .box import Box
from ..basics.vector import Position
# Box
box = Box(self._data, 0, self.xsize, 0, self.ysize)
# Fit model
model = box.fit_model(Position(0.5*(self.xsize-1), 0.5*(self.ysize-1)), "Gaussian")
# Get x and y mean
x_mean = model.x_mean.value
y_mean = model.y_mean.value
return x_mean, y_mean
# -----------------------------------------------------------------
def center_aniano(self):
"""
This function ...
:return:
"""
# Debugging
log.debug("Centering the kernel ...")
# FROM CONVOLVE_IMAGE.PRO (G. Aniano)
center_x = int(0.5 * (self.xsize - 1))
center_y = int(0.5 * (self.ysize - 1))
# get_maximun,image,x_max,y_max
x_max, y_max = self.get_maximum()
# ; determine the needed shifts
shift_x = center_x - x_max
shift_y = center_y - y_max
# ; make the shift if nonzero
if (shift_x != 0) or (shift_y != 0):
# Debugging
log.debug("Shifting the kernel center by (" + str(shift_x) + ", " + str(shift_y) + " pixels ...")
self._data = shift(self._data, [shift_x,shift_y])
# Y
self._data[:abs(shift_y),:] = 0.0
self._data[self.ysize-1-abs(shift_y):self.ysize,:] = 0.0
# X
self._data[:,:abs(shift_x)] = 0.0
self._data[:,self.xsize-1-abs(shift_x):] = 0.0
# CHECK
# Calculate shift again
x_max, y_max = self.get_maximum()
new_shift_x = center_x - x_max
new_shift_y = center_y - y_max
# Raise exception if there is still a shift
if (new_shift_x != 0) or (new_shift_y != 0): raise RuntimeError("Something went wrong during the kernel centering: "
"new shift x = " + str(new_shift_x) + ", new shift y = "
+ str(new_shift_y) + " (previous shift x = " + str(shift_x)
+ ", previous shift y = " + str(shift_y))
# -----------------------------------------------------------------
def normalize(self):
"""
This function ...
:return:
"""
self.__idiv__(self.sum())
# -----------------------------------------------------------------
def get_maximum_aniano(self):
"""
This function ...
:return:
"""
rad_to_mean = 5
data_copy = self._data.copy()
#
mean_im = data_copy * 0.0
i_range = range(-int(rad_to_mean), int(rad_to_mean)+1)
#print("irange", i_range)
for i in i_range:
j_range = range(-int(math.sqrt(rad_to_mean ** 2 - i ** 2)), int(math.sqrt(rad_to_mean ** 2 - i ** 2))+1)
#print("jrange", j_range)
for j in j_range:
mean_im += shift(data_copy, [i, j])
mean_im_sum = np.sum(mean_im)
#mx = max(mean_im, location)
#index = ARRAY_INDICES(mean_im, location)
#x_max = index[0]
#y_max = index[1]
# Get x and y max
max_index = np.argmax(mean_im)
c = (max_index // len(mean_im[0]), max_index % len(mean_im[0]))
x_max = c[1]
y_max = c[0]
max_value = mean_im[y_max, x_max]
where = np.abs(mean_im - max_value) < (5e-4 * mean_im_sum)
count = np.sum(where)
if count > 1:
log.debug("WARNING: The PSF has " + str(count) + "pixels with values similar to its maximum... we will take their centroid...")
xsize = data_copy.shape[1]
ysize = data_copy.shape[0]
xv, yv = np.meshgrid(np.arange(xsize), np.arange(ysize))
# Average x max
x_max = np.sum(xv[where]) / float(count)
# Average y max
            y_max = np.sum(yv[where]) / float(count)
# Return xmax and ymax position
return x_max, y_max
# -----------------------------------------------------------------
def save(self, path):
"""
This function ...
:param path:
:return:
"""
# Call the save function of the base class
super(ConvolutionKernel, self).save(path, extra_header_info={"PREPARED": self.prepared})
# -----------------------------------------------------------------
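# Editor's note (not part of PTS): schematic usage of the class above; `data`,
# `image` and the output path are placeholders, and the constructor arguments
# are assumptions about the underlying Frame class.
#   kernel = ConvolutionKernel(data, fwhm=fwhm, pixelscale=pixelscale)
#   kernel.prepare_for(image, sigma_level=10.0)   # truncate, rebin, recenter, normalize
#   kernel.save("kernel_prepared.fits")           # stores the PREPARED flag in the header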
|
Stargrazer82301/CAAPR
|
CAAPR/CAAPR_AstroMagic/PTS/pts/magic/core/kernel.py
|
Python
|
mit
| 13,284
|
[
"Gaussian"
] |
b65766b5496e3019247fc382896bd6293e1ccb7962f8f1a51b504ffc636b1167
|
#!/usr/bin/env python3
# This script assumes the ASTE binaries and python scripts are in $PATH
# Furthermore, execute in ./contrib/timestep-demo or copy precice.xml from there to $PWD
import os, subprocess
def run(cmd):
print("+ " + cmd)
subprocess.run(cmd, shell = True, check = True)
# Get bunny and red blood cell
if not os.path.isfile("rbc.dt0.vtk"):
run("wget --quiet https://people.sc.fsu.edu/~jburkardt/data/vtk/rbc_001.vtk -O rbc.dt0.vtk")
if not os.path.isfile("bunny.vtk"):
run("wget --quiet https://www.ece.lsu.edu/xinli/Meshing/Data/bunny.vtk -O bunny.vtk")
# Generate 20 timesteps of bunny.vtk
files = ["colored.dt" + str(i) + ".vtk" for i in range(20)]
for t, f in enumerate(files):
if not os.path.isfile(f):
run("eval_mesh.py bunny.vtk -o " + f + " " + str(t))
if not all([os.path.isdir("colored.dt" + str(i)) for i in range(20)]):
run("partition_mesh.py -n 2 " + " ".join(files))
# Partition output mesh as well
run("partition_mesh.py -n 2 rbc.dt0.vtk ")
# Run preCICE. Note that the meshes are named colored.dt1, colored.dt2, ...
os.makedirs("vtkA", exist_ok = True)
os.makedirs("vtkB", exist_ok = True)
run("mpirun -n 2 preciceMap -v -c precice.xml -p A --mesh colored &")
run("mpirun -n 2 preciceMap -v -c precice.xml -p B --mesh rbc --output mapped")
|
precice/aste
|
contrib/timestep-demo/demo.py
|
Python
|
gpl-3.0
| 1,334
|
[
"VTK"
] |
661908582d8076892e99b698367ceccea585cf93056492ad87d81b0a20b0d270
|
import requests
import numpy as np
import h5py
baseUrl = 'http://www.illustris-project.org/api/'
headers = {"api-key":"cc4ff6392e79c9e08c158e5ae5493718"}
# Routine to pull data from online
def get(path, params=None, fName='temp'): # gets data from url, saves to file
# make HTTP GET request to path
r = requests.get(path, params=params, headers=headers)
# raise exception if response code is not HTTP SUCCESS (200)
r.raise_for_status()
if r.headers['content-type'] == 'application/json':
return r.json() # parse json responses automatically
dataFile=fName+'.hdf5'
    # Saves the response content to a .hdf5 file when a download is returned
if 'content-disposition' in r.headers:
filename = r.headers['content-disposition'].split("filename=")[1]
with open(dataFile, 'wb') as f:
f.write(r.content)
return dataFile # return the filename string
return r
# For a chosen galaxy pulls out all the particle data for a set of fields
particleTypeNames=['gas','dm','error','tracers','stars','bhs']
def getGalaxy(whichGalaxy, fields, # index of a galaxy and the 2d list of fields (particle type and name of fields)
simulation='Illustris-1', snapshot=135, # which simulation and snapshot
fileName='temp',rewriteFile=1): # name of the file where .hdf5 data is stored and whether to rewrite or just read
fields=np.array(fields) # converts to array
order=np.argsort(fields[:,0])
disorder=np.argsort(order) # needed to unsort the fields later...
fields=fields[order,:] # orders by particle type
nFields=order.size
if rewriteFile==1: # redownloads file from the internet
url='http://www.illustris-project.org/api/'+simulation+'/snapshots/'+str(snapshot)+'/subhalos/'+str(whichGalaxy)+'/cutout.hdf5?'
thisParticle=0
thisEntry=0
firstParticle=1
while thisParticle<6: # cycles through all particle type
if (int(fields[thisEntry,0])!=thisParticle): # checks there is at least one field for this particle
thisParticle+=1
continue
if firstParticle==1: # first particle requires no ampersand
firstParticle=0
else: # all later particles do
url+='&'
url+=particleTypeNames[thisParticle]+'=' # adds the name of the particle type
firstEntry=1
while int(fields[thisEntry,0])==thisParticle:
if firstEntry==1: #first entry requires no comma
firstEntry=0
else: # all later entries do
url+=','
url+=fields[thisEntry,1] # adds every associated field
thisEntry+=1
if thisEntry==nFields:
break
if thisEntry==nFields:
break
thisParticle+=1
dataFile=get(url,fName=fileName)
# end of "if rewriteFile==1:"
if rewriteFile == 0: # if we're not redownloading need to set path to the file
dataFile=fileName+'.hdf5'
# actually get the data (saved to .hdf5 file)
data=[] # initially empty list that we will fill up with the data
with h5py.File(dataFile,'r') as f:
for i in range(disorder.size):
thisField=fields[disorder[i],:] # ensures data returned in original order of fields
data.append(np.array(f['PartType'+thisField[0]][thisField[1]]))
# returns all particle data of each field as a numpy array
return data # returns all the particle fields as a list of numpy arrays in the same order as initial fields
def getSubhaloField(field,simulation='Illustris-1',snapshot=135,fileName='temp',rewriteFile=1):
if rewriteFile==1: # redownloads file from the internet
url='http://www.illustris-project.org/api/'+simulation+'/files/groupcat-'+str(snapshot)+'/?Subhalo='+field
dataFile=get(url,fName=fileName)
if rewriteFile == 0: # if we're not redownloading need to set path to the file
dataFile=fileName+'.hdf5'
with h5py.File(dataFile,'r') as f:
data=np.array(f['Subhalo'][field])
return data
def getHaloField(field,simulation='Illustris-1',snapshot=135,fileName='temp',rewriteFile=1):
if rewriteFile==1: # redownloads file from the internet
url='http://www.illustris-project.org/api/'+simulation+'/files/groupcat-'+str(snapshot)+'/?Group='+field
dataFile=get(url,fName=fileName)
if rewriteFile == 0: # if we're not redownloading need to set path to the file
dataFile=fileName+'.hdf5'
with h5py.File(dataFile,'r') as f:
data=np.array(f['Group'][field])
return data
def getSim(simName):
r = get(baseUrl)
names = [sim['name'] for sim in r['simulations']]
i = names.index(simName)
sim = get( r['simulations'][i]['url'] )
return sim
def getSnap(sim,whichSnap):
snaps = get( sim['snapshots'] )
snap = get( snaps[whichSnap]['url'] )
return snap
def getSub(subs,whichSub):
sub_url=subs['results'][whichSub]['url']
sub=get(sub_url)
return sub
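# Editor's sketch (not part of the original module): example calls. The field
# names ('Coordinates', 'Masses', 'SubhaloMassType') are assumptions about the
# Illustris catalogue, and a network connection plus a valid API key are required.
def exampleUsage(whichGalaxy=0):
    # Gas (type 0) positions and stellar (type 4) masses of one subhalo.
    gasPos, starMass = getGalaxy(whichGalaxy, [['0', 'Coordinates'], ['4', 'Masses']])
    # One field of the group catalogue, returned for every subhalo in the snapshot.
    subhaloMass = getSubhaloField('SubhaloMassType')
    return gasPos, starMass, subhaloMass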
|
zpenoyre/illustris
|
illustrisAPI/data.py
|
Python
|
mit
| 5,130
|
[
"Galaxy"
] |
124b3e425ccd6de6d58bca8806cf41e66296ac3ecb197b6095543a24caeafb35
|
#!/usr/bin/python
import matplotlib.pyplot as plt
import numpy
from scipy.special import erfc
import math
import glob
import re
def setAxLinesBW(ax):
"""
Take each Line2D in the axes, ax, and convert the line style to be
suitable for black and white viewing.
"""
MARKERSIZE = 3
COLORMAP = {
'b': {'marker': None, 'dash': (None,None)},
'g': {'marker': None, 'dash': [5,5]},
'r': {'marker': None, 'dash': [5,3,1,3]},
'c': {'marker': None, 'dash': [1,3]},
'm': {'marker': None, 'dash': [5,2,5,2,5,10]},
'y': {'marker': None, 'dash': [5,3,1,2,1,10]},
'k': {'marker': 'o', 'dash': (None,None)} #[1,2,1,10]}
}
for line in ax.get_lines():
origColor = line.get_color()
line.set_color('black')
line.set_dashes(COLORMAP[origColor]['dash'])
line.set_marker(COLORMAP[origColor]['marker'])
line.set_markersize(MARKERSIZE)
def setFigLinesBW(fig):
"""
Take each axes in the figure, and for each line in the axes, make the
line viewable in black and white.
"""
for ax in fig.get_axes():
setAxLinesBW(ax)
def plot_histogram(filename):
f = open(filename,'r')
elements = [int(x) for x in f.readline().split()[1:]]
x = numpy.arange(0,1,0.01)
#plt.bar(x,elements,width=0.01)
plt.plot(x,elements)
plt.xlabel('bin coordinate')
plt.ylabel('# of particles')
#plt.show()
def plot_compart_histogram(filename):
f = open(filename,'r')
elements = [int(x) for x in f.readline().split()[1:]]
h = 1.0/len(elements)
x = numpy.arange(0.0+0.5*h,1.0,h)
#plt.bar(x,numpy.array(elements)*len(elements)/100.0,width=h)
plt.plot(x,numpy.array(elements)*len(elements)/100.0)
plt.xlabel('bin coordinate')
plt.ylabel('# of particles')
def plot_mi_compart_histogram(filename):
f = open(filename,'r')
elements = [int(x) for x in f.readline().split()[1:]]
h = 1.0/(len(elements))
x = numpy.arange(0.0+0.5*h,1.0,h)
print x
#plt.bar(x-0.5*h,numpy.array(elements)*(len(elements))/100.0,width=h)
plt.plot(x,numpy.array(elements)*(len(elements))/100.0)
plt.xlabel('bin coordinate')
plt.ylabel('# of particles')
def plot_mi_multifile(base_filename):
list_of_mol_files = glob.glob(base_filename+"_mols*.dat");
list_of_compart_files = glob.glob(base_filename+"_compart*.dat");
list_of_mol_files.sort()
list_of_compart_files.sort()
pattern = re.compile("time_[0-9]*.[0-9]*")
fig = plt.figure()
plt.xlabel('bin coordinate');
plt.ylabel('# of particles')
alpha = 1.0
D = 0.1
beta = math.sqrt(alpha/D)
num_particles = 10000.0
h_in = 0.1
s0 = num_particles
maxc = s0/(D*beta)/100
plt.ylim([0,maxc])
plt.xlim([0.0,1.0])
x = numpy.arange(0,1,0.01)
yexact = numpy.zeros(len(x))
pexact, = plt.plot(x,yexact,'r')
xmol = numpy.arange(0,1,0.01)
ymol = numpy.zeros(len(xmol))
xcomp = numpy.arange(0,1,0.1)
ycomp = numpy.zeros(len(xcomp))
pmol = plt.bar(xmol,ymol,width=0.01)
pcomp = plt.bar(xcomp,ycomp,width=0.1,color='green')
plt.legend(['exact','Molecules','Compartments'],loc='upper left')
plt.plot(x,[0.3*maxc]*len(x),'--')
plt.plot(x,[0.25*maxc]*len(x),'--')
for i in range(0,len(list_of_mol_files)):
print str(i)+' of '+str(len(list_of_mol_files))
mol_fn = list_of_mol_files[i]
compart_fn = list_of_compart_files[i]
f = open(mol_fn,'r')
for line,rect in zip(f,pmol):
split = line.split();
rect.set_height(float(split[3]))
f = open(compart_fn,'r')
for line,rect in zip(f,pcomp):
split = line.split();
rect.set_height(float(split[3])*0.1)
m = pattern.search(mol_fn)
plt.title('time = '+m.string[m.start()+5:m.end()])
time = float(m.string[m.start()+5:m.end()])
if time == 0:
yexact = numpy.zeros(len(x))
else:
yexact = numpy.exp(-beta*(1.0-x)) - 0.5*numpy.exp(-beta*(1.0-x))*erfc((2.0*beta*D*time-(1.0-x))/numpy.sqrt(4.0*D*time)) - 0.5*numpy.exp(beta*(1.0-x))*erfc((2.0*beta*D*time+(1.0-x))/numpy.sqrt(4.0*D*time))
yexact = (s0/(D*beta)) * yexact / 100
pexact.set_ydata(yexact)
plt.savefig(base_filename + '%04d'%i + ".png")
plt.close()
def plot_mi_combined_multifile(base_filename):
list_of_files = glob.glob(base_filename+"_time_*.dat");
list_of_files.sort()
pattern = re.compile("time_[0-9]*.[0-9]*")
fig = plt.figure()
plt.xlabel('bin coordinate');
plt.ylabel('# of particles')
alpha = 1.0
D = 0.1
beta = math.sqrt(alpha/D)
num_particles = 10000.0
res = 0.1
s0 = num_particles
maxc = s0/(D*beta)*res
plt.ylim([0,maxc])
plt.xlim([0.0,1.0])
x = numpy.arange(0,1,0.01)
yexact = numpy.zeros(len(x))
pexact, = plt.plot(x,yexact,'r')
xall = numpy.arange(0,1,res)
yall = numpy.zeros(len(xall))
pall = plt.bar(xall,yall,width=res,color='green')
plt.legend(['exact','RD_3D'],loc='upper left')
plt.plot(x,[0.3*maxc]*len(x),'--')
plt.plot(x,[0.25*maxc]*len(x),'--')
for i in range(0,len(list_of_files)):
print str(i)+' of '+str(len(list_of_files))
fn = list_of_files[i]
f = open(fn,'r')
for line,rect in zip(f,pall):
split = line.split();
rect.set_height(float(split[3]))
m = pattern.search(fn)
plt.title('time = '+m.string[m.start()+5:m.end()])
time = float(m.string[m.start()+5:m.end()])
if time == 0:
yexact = numpy.zeros(len(x))
else:
yexact = numpy.exp(-beta*(1.0-x)) - 0.5*numpy.exp(-beta*(1.0-x))*erfc((2.0*beta*D*time-(1.0-x))/numpy.sqrt(4.0*D*time)) - 0.5*numpy.exp(beta*(1.0-x))*erfc((2.0*beta*D*time+(1.0-x))/numpy.sqrt(4.0*D*time))
yexact = (s0/(D*beta)) * yexact * res
pexact.set_ydata(yexact)
plt.savefig(base_filename + '%04d'%i + ".png")
plt.close()
def plot_compart_couple_histogram(filename):
f = open(filename,'r')
elements = [int(x) for x in f.readline().split()[1:]]
h = 0.5/(len(elements)-2)
print h
print len(elements)
x = numpy.arange(0.25-0.5*h,0.75+0.500001*h,h)
print x
plt.plot(x,numpy.array(elements)*0.01/h)
plt.xlabel('bin coordinate')
plt.ylabel('# of particles')
def plot_small_compart_couple_histogram(filename):
f = open(filename,'r')
elements = [int(x) for x in f.readline().split()[1:]]
h = 0.3/(len(elements)-2)
print h
print len(elements)
x = numpy.arange(0.25-0.5*h,0.55+0.500001*h,h)
print x
plt.plot(x,numpy.array(elements)*0.01/h)
plt.xlabel('bin coordinate')
plt.ylabel('# of particles')
def plot_finite_difference(add):
import pickle
c_file = open(add+'c_dump.pkl')
x_file = open(add+'x_dump.pkl')
c = pickle.load(c_file)
x = pickle.load(x_file)
plt.plot(x,c*0.01)
plt.xlabel('bin coordinate')
plt.ylabel('# of particles')
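# Editor's note (not part of the original script): the `yexact` curves computed in
# plot_mi_multifile / plot_mi_combined_multifile follow the analytic profile
#   c(x, t) = (s0 / (D * beta)) * ( exp(-beta*(1-x))
#             - 0.5*exp(-beta*(1-x))*erfc((2*beta*D*t - (1-x)) / sqrt(4*D*t))
#             - 0.5*exp( beta*(1-x))*erfc((2*beta*D*t + (1-x)) / sqrt(4*D*t)) )
# with beta = sqrt(alpha / D), scaled to per-bin particle counts in each plot.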
plot_mi_combined_multifile("../c/output/simpleReact")
#plot_mi_multifile("../c/simpleReact_moving_interface")
#plot_mi_multifile("../c/simpleReact_mi_after_growth")
#plot_mi_multifile("../c/simpleReact_mi_after_shrink")
fig = plt.figure()
plot_histogram('../c/output/simpleReact_mi_mols_10000_0.1.dat')
plot_mi_compart_histogram('../c/output/simpleReact_mi_comparts_10000_0.1.dat')
x = numpy.arange(0,1,0.01)
y = 100.0*numpy.sinh(x)/(numpy.cosh(1)-1)
plt.plot(x,y)
plt.title('3D Unimolecular Reaction - moving interface');
#setFigLinesBW(fig)
plt.legend(['Molecules','Compartments','exact'],loc='upper left')
plt.savefig('simpleReact_mi.eps')
plt.show()
fig = plt.figure()
plot_histogram('../c/output/simpleBD_100000.dat')
plot_compart_histogram('../c/output/simpleBD_compart_100000_0.1.dat')
plot_histogram('../smoldyn/simpleBD_100000.dat')
plt.title('3D Brownian Diffusion');
#setFigLinesBW(fig)
plt.legend(['martin M','martin C','smoldyn'],loc='upper left')
plt.savefig('simpleBD.eps')
plt.show()
fig = plt.figure()
plot_histogram('../c/output/simpleBD_couple_single_mols_0.1.dat')
plot_compart_couple_histogram('../c/output/simpleBD_couple_single_comparts_0.1.dat')
plt.title('3D Brownian Diffusion - single');
#setFigLinesBW(fig)
plt.legend(['Molecules','Compartments'],loc='upper left')
plt.savefig('simpleBD_couple_single.eps')
plt.show()
fig = plt.figure()
plot_histogram('../c/output/simpleBD_couple_mols_5000_0.1.dat')
plot_compart_couple_histogram('../c/output/simpleBD_couple_comparts_5000_0.1.dat')
plt.title('3D Brownian Diffusion');
#setFigLinesBW(fig)
plt.legend(['Molecules','Compartments'],loc='upper left')
plt.savefig('simpleBD_couple.eps')
plt.show()
fig = plt.figure()
plot_histogram('../c/output/simpleBD_couple_mols_100000_0.1.dat')
plot_compart_couple_histogram('../c/output/simpleBD_couple_comparts_100000_0.1.dat')
plt.title('3D Brownian Diffusion');
#setFigLinesBW(fig)
plt.legend(['Molecules','Compartments'],loc='upper left')
plt.savefig('simpleBD_couple.eps')
plt.show()
fig = plt.figure()
plot_histogram('../c/output/simpleReact_couple_mols_10000_0.1.dat')
plot_compart_couple_histogram('../c/output/simpleReact_couple_comparts_10000_0.1.dat')
x = numpy.arange(0,1,0.01)
y = 100.0*numpy.sinh(x)/(numpy.cosh(1)-1)
plt.plot(x,y)
plt.title('3D Unimolecular Reaction');
#setFigLinesBW(fig)
plt.legend(['Molecules','Compartments','exact'],loc='upper left')
plt.savefig('simpleReact_couple.eps')
plt.show()
fig = plt.figure()
plot_histogram('../c/output/simpleReact_couple_mols_100000_0.1.dat')
plot_compart_couple_histogram('../c/output/simpleReact_couple_comparts_100000_0.1.dat')
x = numpy.arange(0,1,0.01)
y = 1000.0*numpy.sinh(x)/(numpy.cosh(1)-1)
plt.plot(x,y)
plt.title('3D Unimolecular Reaction');
#setFigLinesBW(fig)
plt.legend(['Molecules','Compartments','exact'],loc='upper left')
plt.savefig('simpleReact_couple.eps')
plt.show()
fig = plt.figure()
plot_histogram('../c/output/biMolarReact_couple_mols_100000_0.1.dat')
plot_compart_couple_histogram('../c/output/biMolarReact_couple_comparts_100000_0.1.dat')
#plot_finite_difference('mu0.1_')
plot_finite_difference('')
plt.title('3D Bimolecular Reaction');
#setFigLinesBW(fig)
plt.legend(['Molecules','Compartments','fd mu=0.02'],loc='upper left')
plt.savefig('biMolarReact_couple.eps')
plt.show()
fig = plt.figure()
plot_histogram('../c/output/simpleReact_1000000.dat')
plot_histogram('../c/output/simpleReactWithJumpCorrection_1000000.dat')
#plot_histogram('../smoldyn/simpleReact_100000.dat')
x = numpy.arange(0,1,0.01)
y = 10000.0*numpy.sinh(x)/(numpy.cosh(1)-1)
plt.plot(x,y)
plot_finite_difference('uni_')
#plt.legend(['exact','finite difference'],loc='upper left')
#plt.legend(['martin','martin - with correction', 'smoldyn','exact'],loc='upper left')
plt.title('3D Unimolecular Reaction');
#setFigLinesBW(fig)
plt.legend(['martin M','martin M - with correction','exact','finite difference'],loc='upper left')
#plt.legend(['martin M','martin C','martin M - with correction'],loc='upper left')
plt.savefig('simpleReact.eps')
plt.show()
fig = plt.figure()
plot_histogram('../c/output/simpleReactWithJumpCorrection_10000.dat')
plot_compart_histogram('../c/output/simpleReact_compart_10000_0.1.dat')
plot_histogram('../smoldyn/simpleReact_10000.dat')
x = numpy.arange(0,1,0.01)
y = 100.0*numpy.sinh(x)/(numpy.cosh(1)-1)
plt.plot(x,y)
#plot_finite_difference('uni_')
#plt.legend(['exact','finite difference'],loc='upper left')
#plt.legend(['martin','martin - with correction', 'smoldyn','exact'],loc='upper left')
plt.title('3D Unimolecular Reaction');
#setFigLinesBW(fig)
plt.legend(['martin M','martin C', 'smoldyn','exact'],loc='upper left')
plt.savefig('simpleReact2.eps')
plt.show()
fig = plt.figure()
plot_histogram('../c/output/biMolarReact_100000.dat')
plot_compart_histogram('../c/output/biMolarReact_compart_100000_0.1.dat')
plot_histogram('../smoldyn/biMolarReact_100000.dat')
plot_finite_difference('mu0.1_')
plot_finite_difference('')
#x = numpy.arange(0,1,0.01)
#y = 50.0*numpy.sinh(x)/(numpy.cosh(1)-1)
#plt.plot(x,y)
plt.title('3D Bimolecular Reaction');
#setFigLinesBW(fig)
plt.legend(['martin M', 'martin C','smoldyn','finite difference - mu = 0.01','finite difference - mu = 0.02'],loc='upper left')
plt.savefig('biMolarReact.eps')
plt.show()
|
martinjrobins/RD_3D
|
scripts/plot.py
|
Python
|
lgpl-3.0
| 12,174
|
[
"Smoldyn"
] |
81ffd7b4cc9bde770361b51164971bbad8124b5454eabd8382fc0231225574d2
|
#!/usr/bin/env python
# PythonJS to C++ Translator
# by Brett Hartshorn - copyright 2014
# License: "New BSD"
import os, sys
import ast
import pythonjs_to_rust
JVM_HEADER = '''
#include <jni.h>
JavaVM* __create_javavm__() {
JavaVM* jvm = new JavaVM();
JNIEnv* env;
JavaVMInitArgs args;
JavaVMOption options[2];
args.version = JNI_VERSION_1_4;
args.nOptions = 2;
options[0].optionString = const_cast<char*>("-Djava.class.path=.%s");
options[1].optionString = const_cast<char*>("-Xcheck:jni");
args.options = options;
args.ignoreUnrecognized = JNI_FALSE;
JNI_CreateJavaVM(&jvm, (void **)&env, &args);
return jvm;
}
static JavaVM* __javavm__ = __create_javavm__();
'''
def gen_jvm_header( jars ):
if jars:
a = ':' + ':'.join(jars)
return JVM_HEADER %a
else:
return JVM_HEADER %''
NIM_HEADER = '''
extern "C" {
void PreMain();
void NimMain();
}
'''
def gen_nim_header():
return NIM_HEADER
class CppGenerator( pythonjs_to_rust.RustGenerator ):
def _visit_call_helper_new(self, node):
'''
low level `new` for interfacing with external c++.
		Also used for code inside a `with pointers:` block
		to create a class without having to create a temp variable:
		`f( new(MyClass(x,y)) )` directly calls the constructor;
		if MyClass is a Rusthon class then __init__ will be called.
TODO fix mixing with std::shared_ptr by keeping a weak_ptr
in each object that __init__ returns (also fixes the _ref_hacks)
'''
if isinstance(node.args[0], ast.BinOp): # makes an array or map
a = self.visit(node.args[0])
if type(a) is not tuple:
raise SyntaxError(self.format_error('TODO some extended type'))
atype, avalue = a
if atype.endswith('*'): atype = atype[:-1]
else: pass ## this should never happen
return '(new %s %s)' %(atype, avalue)
## Rusthon class ##
elif isinstance(node.args[0], ast.Call) and isinstance(node.args[0].func, ast.Name) and node.args[0].func.id in self._classes:
classname = node.args[0].func.id
args = [self.visit(arg) for arg in node.args[0].args ]
if self._classes[classname]._requires_init:
return '(new %s)->__init__(%s)' %(classname, ','.join(args))
else:
if args:
raise SyntaxError('class %s: takes no init args' %classname)
return '(new %s)' %classname
## external c++ class ##
else:
return '(new %s)' %self.visit(node.args[0])
def __init__(self, source=None, requirejs=False, insert_runtime=False):
pythonjs_to_rust.RustGenerator.__init__(self, source=source, requirejs=False, insert_runtime=False)
self._cpp = True
self._rust = False ## can not be true at the same time self._cpp is true, conflicts in switch/match hack.
self._shared_pointers = True
self._noexcept = False
self._polymorphic = False ## by default do not use polymorphic classes (virtual methods)
self._has_jvm = False
self._jvm_classes = dict()
self._has_nim = False
def visit_Delete(self, node):
targets = [self.visit(t) for t in node.targets]
if len(targets)==0:
raise RuntimeError('no delete targets')
r = []
if self._shared_pointers:
for t in targets:
## shared_ptr.reset only releases if no there are no other references,
## is there a way to force the delete on all shared pointers to something?
#r.append('delete %s;' %t) ## only works on pointers
r.append('%s.reset();' %t)
else:
for t in targets:
if t in self._known_arrays:
r.append('delete[] %s;')
else:
r.append('delete %s;')
return '\n'.join(r)
def visit_Str(self, node):
s = node.s.replace("\\", "\\\\").replace('\n', '\\n').replace('\r', '\\r').replace('"', '\\"')
return 'std::string("%s")' % s
def visit_Print(self, node):
r = []
for e in node.values:
s = self.visit(e)
if isinstance(e, ast.List) or isinstance(e, ast.Tuple):
for sube in e.elts:
r.append('std::cout << %s;' %self.visit(sube))
if r:
r[-1] += 'std::cout << std::endl;'
else:
r.append('std::cout << std::endl;')
else:
r.append('std::cout << %s << std::endl;' %s)
return '\n'.join(r)
def visit_TryExcept(self, node, finallybody=None):
out = []
out.append( 'try {' )
self.push()
for b in node.body:
out.append( self.indent()+self.visit(b) )
self.pull()
out.append( self.indent() + '} catch (const std::exception& e) {' )
self.push()
for h in node.handlers:
out.append(self.indent()+self.visit(h))
self.pull()
if finallybody:
## wrap in another try that is silent, always throw e
out.append('try { // finally block')
for b in finallybody:
out.append(self.visit(b))
out.append('} throw e;')
out.append( '}' )
out.append( self.indent() + 'catch (const std::overflow_error& e) { std::cout << "OVERFLOW ERROR" << std::endl; }' )
out.append( self.indent() + 'catch (const std::runtime_error& e) { std::cout << "RUNTIME ERROR" << std::endl; }' )
out.append( self.indent() + 'catch (...) { std::cout << "UNKNOWN ERROR" << std::endl; }' )
return '\n'.join( out )
def visit_Import(self, node):
r = [alias.name.replace('__SLASH__', '/') for alias in node.names]
includes = []
if r:
for name in r:
if name == 'jvm':
self._has_jvm = True
elif name == 'nim':
self._has_nim = True
else:
includes.append('#include <%s>' %name)
return '\n'.join(includes)
def visit_Module(self, node):
header = [
'#include <cmath>',
'#include <memory>',
'#include <vector>',
'#include <array>',
'#include <iostream>',
'#include <fstream>',
'#include <string>',
'#include <map>',
'#include <algorithm>', ## c++11
'#include <functional>', ## c++11
#'#include <sstream>', ## c++11
'#include <thread>', ## c++11
'#include <chrono>', ## c++11
]
lines = []
for b in node.body:
line = self.visit(b)
if line is not None:
for sub in line.splitlines():
if sub==';':
#raise SyntaxError('bad semicolon')
pass
else:
lines.append( sub )
else:
if isinstance(b, ast.Import):
header.append( self.visit(b) )
else:
raise SyntaxError(b)
if self._has_channels:
## https://github.com/ahorn/cpp-channel
#header.append('#include <channel>')
## instead of including, just directly inline cpp-channel source
dirname = os.path.dirname(os.path.abspath(__file__))
header.append(
open( os.path.join(dirname, 'runtime/c++/cpp-channel.h') ).read()
)
if self._has_jvm:
header.append( gen_jvm_header(self._java_classpaths) )
if self._has_nim:
header.append( gen_nim_header() )
## forward declare all classes
for classname in self._classes:
header.append('class %s;' %classname)
if len(self._kwargs_type_.keys()):
impl = []
header.append('class _KwArgs_;') ## forward declare
header.append('class _KwArgs_ {')
header.append(' public:')
for name in self._kwargs_type_:
type = self._kwargs_type_[name]
header.append( ' %s _%s_;' %(type,name))
header.append( ' bool __use__%s;' %name)
for name in self._kwargs_type_:
type = self._kwargs_type_[name]
header.append( ' _KwArgs_* %s(%s %s);' %(name, type, name))
impl.append( ' _KwArgs_* _KwArgs_::%s(%s %s) {' %(name, type, name))
impl.append( ' this->__use__%s = true;' %name)
impl.append( ' this->_%s_ = %s;' %(name, name))
impl.append( ' return this;')
impl.append('};')
header.append('};')
header.extend( impl )
self.output_pak = pak = {'c_header':'', 'cpp_header':'', 'main':''}
cheader = None
cppheader = None
if len(self._cheader):
cheader = []
cppheader = ['extern "C" {']
for line in self._cheader:
cheader.append(line)
cppheader.append('\t'+line)
cppheader.append('}')
if cheader:
pak['header.c'] = '\n'.join( cheader )
if cppheader:
pak['header.cpp'] = '\n'.join( cppheader )
if 'int main() {' in lines:
main_index = lines.index('int main() {')
for idef in self._cpp_class_impl:
lines.insert(main_index,idef)
else:
## might want to warn user there is no main
pass
lines = header + list(self._imports) + lines
pak['main'] = '\n'.join( lines )
return pak['main']
def main(script, insert_runtime=True):
#raise SyntaxError(script)
if insert_runtime:
dirname = os.path.dirname(os.path.abspath(__file__))
dirname = os.path.join(dirname, 'runtime')
runtime = open( os.path.join(dirname, 'cpp_builtins.py') ).read()
script = runtime + '\n' + script
try:
tree = ast.parse(script)
except SyntaxError as err:
e = ['%s: %s'%(i+1, line) for i,line in enumerate(script.splitlines())]
sys.stderr.write('\n'.join(e))
raise err
g = CppGenerator( source=script )
g.visit(tree) # first pass gathers classes
pass2 = g.visit(tree)
g.reset()
pass3 = g.visit(tree)
#open('/tmp/pass3.cpp', 'wb').write( pass3 )
return g.output_pak
def command():
scripts = []
if len(sys.argv) > 1:
for arg in sys.argv[1:]:
if arg.endswith('.py'):
scripts.append( arg )
if len(scripts):
a = []
for script in scripts:
a.append( open(script, 'rb').read() )
data = '\n'.join( a )
else:
data = sys.stdin.read()
out = main( data )
print( out )
if __name__ == '__main__':
command()
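# Editor's note (not part of the original module): usage sketch. `main()` returns
# the `output_pak` dict assembled in visit_Module; the generated C++ source is
# under its 'main' key. The file name and the one-line script are placeholders.
#   $ python pythonjs_to_cpp.py hello.py        # translate a file and print the pak
#   pak = main('print("hello")')                # or call the translator directly
#   cpp_source = pak['main']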
|
kustomzone/Rusthon
|
pythonjs/pythonjs_to_cpp.py
|
Python
|
bsd-3-clause
| 9,155
|
[
"VisIt"
] |
7f878dc45e62c77ea0e0f5571452ffc2662274232e69c6b91c5552d43dd7ec84
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from typing import Tuple
import numpy as np
from ...quadrature.interfaces.base_gp import IBaseGaussianProcess
from .warped_bq_model import WarpedBayesianQuadratureModel
from .warpings import IdentityWarping
class VanillaBayesianQuadrature(WarpedBayesianQuadratureModel):
"""Vanilla Bayesian quadrature.
Vanilla Bayesian quadrature uses a Gaussian process as surrogate for the integrand.
"""
def __init__(self, base_gp: IBaseGaussianProcess, X: np.ndarray, Y: np.ndarray):
"""
:param base_gp: The underlying Gaussian process model.
:param X: The initial locations of integrand evaluations, shape (n_points, input_dim).
:param Y: The values of the integrand at X, shape (n_points, 1).
"""
super(VanillaBayesianQuadrature, self).__init__(base_gp=base_gp, warping=IdentityWarping(), X=X, Y=Y)
def predict_base(self, X_pred: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Compute predictive means and variances of the warped GP as well as the base GP.
:param X_pred: Locations at which to predict, shape (n_points, input_dim).
:returns: Predictive mean and variances of warped GP, and predictive mean and variances of base-GP in that
order all shapes (n_points, 1).
"""
m, cov = self.base_gp.predict(X_pred)
return m, cov, m, cov
def predict_base_with_full_covariance(
self, X_pred: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Compute predictive means and covariance of the warped GP as well as the base GP.
:param X_pred: Locations at which to predict, shape (n_points, input_dim).
:returns: Predictive mean and covariance of warped GP, predictive mean and covariance of base-GP in that order.
mean shapes both (n_points, 1) and covariance shapes both (n_points, n_points).
"""
m, cov = self.base_gp.predict_with_full_covariance(X_pred)
return m, cov, m, cov
def integrate(self) -> Tuple[float, float]:
"""Compute an estimator of the integral as well as its variance.
:returns: Estimator of integral and its variance.
"""
kernel_mean_X = self.base_gp.kern.qK(self.X)
integral_mean = np.dot(kernel_mean_X, self.base_gp.graminv_residual())[0, 0]
integral_var = self.base_gp.kern.qKq() - (kernel_mean_X @ self.base_gp.solve_linear(kernel_mean_X.T))[0, 0]
return integral_mean, integral_var
def get_prediction_gradients(self, X: np.ndarray) -> Tuple:
"""Compute predictive gradients of mean and variance at given points.
:param X: Points to compute gradients at, shape (n_points, input_dim).
:returns: Tuple of gradients of mean and variance, shapes of both (n_points, input_dim).
"""
return self.base_gp.get_prediction_gradients(X)
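# Editor's note (not part of EmuKit): in the notation of `integrate` above, with
# kernel mean q(X) = \int k(x, X) p(x) dx (base_gp.kern.qK) and initial error
# qKq = \int \int k(x, x') p(x) p(x') dx dx' (base_gp.kern.qKq), the estimator is
#     E[F]   = q(X) K^{-1} (Y - m(X))        # base_gp.graminv_residual()
#     Var[F] = qKq - q(X) K^{-1} q(X)^T      # via base_gp.solve_linear()
# where K is the Gram matrix of the base Gaussian process at the nodes X.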
|
EmuKit/emukit
|
emukit/quadrature/methods/vanilla_bq.py
|
Python
|
apache-2.0
| 3,029
|
[
"Gaussian"
] |
c4766f9a5c032903358ba6e22364f48683df307d5ed19c4f5be2749e48cf0489
|