| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| stringlengths 12-1.05M | stringlengths 5-86 | stringlengths 4-191 | stringclasses 1 value | stringclasses 15 values | int32 12-1.05M | listlengths 1-23 | stringlengths 64-64 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pathlib
import pytest
from cloudevents.http import CloudEvent, to_binary, to_structured
from functions_framework import create_app
TEST_FUNCTIONS_DIR = pathlib.Path(__file__).resolve().parent / "test_functions"
TEST_DATA_DIR = pathlib.Path(__file__).resolve().parent / "test_data"
# Python 3.5: ModuleNotFoundError does not exist
try:
_ModuleNotFoundError = ModuleNotFoundError
except NameError:
_ModuleNotFoundError = ImportError
@pytest.fixture
def data_payload():
return {"name": "john"}
@pytest.fixture
def cloud_event_1_0():
attributes = {
"specversion": "1.0",
"id": "my-id",
"source": "from-galaxy-far-far-away",
"type": "cloud_event.greet.you",
"time": "2020-08-16T13:58:54.471765",
}
data = {"name": "john"}
return CloudEvent(attributes, data)
@pytest.fixture
def cloud_event_0_3():
attributes = {
"id": "my-id",
"source": "from-galaxy-far-far-away",
"type": "cloud_event.greet.you",
"specversion": "0.3",
"time": "2020-08-16T13:58:54.471765",
}
data = {"name": "john"}
return CloudEvent(attributes, data)
@pytest.fixture
def create_headers_binary():
return lambda specversion: {
"ce-id": "my-id",
"ce-source": "from-galaxy-far-far-away",
"ce-type": "cloud_event.greet.you",
"ce-specversion": specversion,
"time": "2020-08-16T13:58:54.471765",
}
@pytest.fixture
def create_structured_data():
return lambda specversion: {
"id": "my-id",
"source": "from-galaxy-far-far-away",
"type": "cloud_event.greet.you",
"specversion": specversion,
"time": "2020-08-16T13:58:54.471765",
}
@pytest.fixture
def background_event():
with open(TEST_DATA_DIR / "pubsub_text-legacy-input.json", "r") as f:
return json.load(f)
@pytest.fixture
def client():
source = TEST_FUNCTIONS_DIR / "cloud_events" / "main.py"
target = "function"
return create_app(target, source, "cloudevent").test_client()
@pytest.fixture
def empty_client():
source = TEST_FUNCTIONS_DIR / "cloud_events" / "empty_data.py"
target = "function"
return create_app(target, source, "cloudevent").test_client()
@pytest.fixture
def converted_background_event_client():
source = TEST_FUNCTIONS_DIR / "cloud_events" / "converted_background_event.py"
target = "function"
return create_app(target, source, "cloudevent").test_client()
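# A hedged, illustrative sketch of the two CloudEvents HTTP content modes the
# tests below exercise (exact headers and body layout come from the cloudevents
# SDK, so treat the values shown here as approximate):
#
#     headers, body = to_structured(event)
#     # headers ~ {"content-type": "application/cloudevents+json"}
#     # body    ~ JSON carrying "specversion", "id", "source", "type", ... plus "data"
#
#     headers, body = to_binary(event)
#     # headers ~ {"ce-specversion": ..., "ce-id": ..., "ce-source": ..., "ce-type": ...}
#     # body    ~ the event data serialized on its own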
def test_event(client, cloud_event_1_0):
headers, data = to_structured(cloud_event_1_0)
resp = client.post("/", headers=headers, data=data)
assert resp.status_code == 200
assert resp.data == b"OK"
def test_binary_event(client, cloud_event_1_0):
headers, data = to_binary(cloud_event_1_0)
resp = client.post("/", headers=headers, data=data)
assert resp.status_code == 200
assert resp.data == b"OK"
def test_event_0_3(client, cloud_event_0_3):
headers, data = to_structured(cloud_event_0_3)
resp = client.post("/", headers=headers, data=data)
assert resp.status_code == 200
assert resp.data == b"OK"
def test_binary_event_0_3(client, cloud_event_0_3):
headers, data = to_binary(cloud_event_0_3)
resp = client.post("/", headers=headers, data=data)
assert resp.status_code == 200
assert resp.data == b"OK"
@pytest.mark.parametrize("specversion", ["0.3", "1.0"])
def test_cloud_event_missing_required_binary_fields(
client, specversion, create_headers_binary, data_payload
):
headers = create_headers_binary(specversion)
for remove_key in headers:
if remove_key == "time":
continue
invalid_headers = {key: headers[key] for key in headers if key != remove_key}
resp = client.post("/", headers=invalid_headers, json=data_payload)
assert resp.status_code == 400
assert "MissingRequiredFields" in resp.get_data().decode()
@pytest.mark.parametrize("specversion", ["0.3", "1.0"])
def test_cloud_event_missing_required_structured_fields(
client, specversion, create_structured_data
):
headers = {"Content-Type": "application/cloudevents+json"}
data = create_structured_data(specversion)
for remove_key in data:
if remove_key == "time":
continue
invalid_data = {key: data[key] for key in data if key != remove_key}
resp = client.post("/", headers=headers, json=invalid_data)
assert resp.status_code == 400
assert "MissingRequiredFields" in resp.data.decode()
def test_invalid_fields_binary(client, create_headers_binary, data_payload):
# Testing that an invalid specversion fails
headers = create_headers_binary("not a spec version")
resp = client.post("/", headers=headers, json=data_payload)
assert resp.status_code == 400
assert "InvalidRequiredFields" in resp.data.decode()
def test_unparsable_cloud_event(client):
resp = client.post("/", headers={}, data="")
assert resp.status_code == 400
assert "MissingRequiredFields" in resp.data.decode()
@pytest.mark.parametrize("specversion", ["0.3", "1.0"])
def test_empty_data_binary(empty_client, create_headers_binary, specversion):
headers = create_headers_binary(specversion)
resp = empty_client.post("/", headers=headers, json="")
assert resp.status_code == 200
assert resp.get_data() == b"OK"
@pytest.mark.parametrize("specversion", ["0.3", "1.0"])
def test_empty_data_structured(empty_client, specversion, create_structured_data):
headers = {"Content-Type": "application/cloudevents+json"}
data = create_structured_data(specversion)
resp = empty_client.post("/", headers=headers, json=data)
assert resp.status_code == 200
assert resp.get_data() == b"OK"
@pytest.mark.parametrize("specversion", ["0.3", "1.0"])
def test_no_mime_type_structured(empty_client, specversion, create_structured_data):
data = create_structured_data(specversion)
resp = empty_client.post("/", headers={}, json=data)
assert resp.status_code == 200
assert resp.get_data() == b"OK"
def test_background_event(converted_background_event_client, background_event):
resp = converted_background_event_client.post(
"/", headers={}, json=background_event
)
assert resp.status_code == 200
assert resp.get_data() == b"OK"
| (file text above) | GoogleCloudPlatform/functions-framework-python | tests/test_cloud_event_functions.py | Python | apache-2.0 | 6,917 | ["Galaxy"] | e71584641d989d2629385b605948fdb3042e59d6ad712d36faebaa3f626d162d |
#-------------------------------------------------------------------------------
# Name: utils
# Purpose: Helper functions for unit tests
#
# Author: Brian Skinn
# [email protected]
#
# Created: 12 Mar 2016
# Copyright: (c) Brian Skinn 2016
# License: The MIT License; see "license.txt" for full license terms
# and contributor agreement.
#
# This file is part of opan (Open Anharmonic), a system for automated
# computation of anharmonic properties of molecular systems via wrapper
# calls to computational/quantum chemical software packages.
#
# http://www.github.com/bskinn/opan
#
#-------------------------------------------------------------------------------
def assertErrorAndTypecode(self, errtype, cobj, tc, *args, **kwargs):
""" Wrapper for asserting correct OpanErrors and proper typecodes.
Function tests (using testclass.assertX methods) whether 'cobj' raises
'errtype' with typecode 'tc' when instantiated/called with *args and
**kwargs.
If imported into a :class:`~unittest.TestCase` subclass, this function
is treated by Python as an instance method, and the initial `self`
argument is bound automatically, like that of any other method.
In particular, this means that the :class:`~unittest.TestCase` instance
should NOT be passed explicitly as the first argument in this usage situation.
Parameters
----------
self : referenced object
Subclass of unittest.TestCase (or related), from which the .assertX
methods should be called
errtype : object reference
Subclass of OpanError expected to be raised
cobj : object reference
Callable object to be instantiated or called
tc : str / typecode "enum"
Typecode to check for
*args and **kwargs are passed to the instantiation of 'cobj'
Returns
-------
(none)
"""
# Assert the proper error
self.assertRaises(errtype, cobj, *args, **kwargs)
# Ensure correct typecode; suppress repeated error, and ignore any
# other error raised, as it will have been reported by the above
# .assertRaises call
try:
out = cobj(*args, **kwargs)
except errtype as err:
self.assertEqual(err.tc, tc)
except Exception: # pragma: no cover
pass
## end def assertErrorAndTypecode
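# A minimal, hedged usage sketch for assertErrorAndTypecode. SomeOpanError,
# make_broken_object, and some_typecode are hypothetical stand-ins, not names
# from opan itself:
#
#     import unittest
#     from opan.test.utils import assertErrorAndTypecode
#
#     class TestErrors(unittest.TestCase):
#         # Bind at class level so the initial argument is supplied as `self`
#         assertErrorAndTypecode = assertErrorAndTypecode
#
#         def test_bad_input(self):
#             self.assertErrorAndTypecode(SomeOpanError, make_broken_object,
#                                         some_typecode, "bad argument")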
def setUpTestDir(dirname):
""" Create and change working directory to test directory.
Parameters
----------
dirname : str
Name of desired working directory
"""
import os, time
# Wait 10ms for folder access to clear
time.sleep(0.01)
# Check if test directory already exists (or file of same name);
# error if so
if os.path.isdir(dirname) or os.path.isfile(dirname): # pragma: no cover
raise IOError("Cannot create new test directory!")
# Create and change to test directory
os.mkdir(dirname)
os.chdir(dirname)
def tearDownTestDir(dirname):
""" Exit and attempt removal of test directory
Parameters
----------
dirname: str
Name of working directory
"""
import os
# Switch to parent directory
os.chdir(os.path.pardir)
# Try to remove the temp directory
os.rmdir(dirname)
def inject_tests(ns, data, namestr, f_template):
""" Function to automate test method creation/injection
Parameters
----------
ns
|dict| -- Namespace into which functions are to be injected
data
|dict| -- Lookup dictionary with data to be passed to `f_template`
namestr
|str| -- String template for the names of the test methods to be
injected. Must contain exactly one substitution field ("{0}" is
preferred), into which each key from `data` will be substituted
to define the unique method name
f_template
`callable` -- Function to call to carry out the desired test
"""
for k, d in data.items():
fxnname = namestr.format(k)
fxn = lambda self, k=k, d=d: f_template(self, k, d)
ns.update({fxnname: fxn})
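# A hedged usage sketch for inject_tests; the data, template function, and class
# names here are hypothetical, not part of opan:
#
#     import unittest
#
#     def _length_template(self, key, datum):
#         self.assertEqual(len(datum), 3)
#
#     cases = {'alpha': 'abc', 'beta': 'xyz'}
#     ns = {}
#     inject_tests(ns, cases, 'test_len_{0}', _length_template)
#     # ns now contains test_len_alpha and test_len_beta, each a lambda that
#     # forwards (self, key, datum) to _length_template
#     InjectedCase = type('InjectedCase', (unittest.TestCase,), ns)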
if __name__ == '__main__': # pragma: no cover
print("Module not executable.")
| (file text above) | bskinn/opan | opan/test/utils.py | Python | mit | 4,217 | ["Brian"] | c8a5f8ec48deb3ca8733b92004e4ec4b335085f49aaf40d3f989485bd1e3a8af |
""" Models for the shopping cart and assorted purchase types """
from collections import namedtuple
from datetime import datetime
from datetime import timedelta
from decimal import Decimal
import json
import analytics
from io import BytesIO
from django.db.models import Q
import pytz
import logging
import smtplib
import StringIO
import csv
from boto.exception import BotoServerError # this is a super-class of SESError and catches connection errors
from django.dispatch import receiver
from django.db import models
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _, ugettext_lazy
from django.db import transaction
from django.db.models import Sum, Count
from django.db.models.signals import post_save, post_delete
from django.core.urlresolvers import reverse
from model_utils.managers import InheritanceManager
from model_utils.models import TimeStampedModel
from django.core.mail.message import EmailMessage
from xmodule.modulestore.django import modulestore
from eventtracking import tracker
from courseware.courses import get_course_by_id
from config_models.models import ConfigurationModel
from course_modes.models import CourseMode
from edxmako.shortcuts import render_to_string
from student.models import CourseEnrollment, UNENROLL_DONE
from util.query import use_read_replica_if_available
from xmodule_django.models import CourseKeyField
from .exceptions import (
InvalidCartItem,
PurchasedCallbackException,
ItemAlreadyInCartException,
AlreadyEnrolledInCourseException,
CourseDoesNotExistException,
MultipleCouponsNotAllowedException,
InvalidStatusToRetire,
UnexpectedOrderItemStatus,
ItemNotFoundInCartException
)
from microsite_configuration import microsite
from shoppingcart.pdf import PDFInvoice
log = logging.getLogger("shoppingcart")
ORDER_STATUSES = (
# The user is selecting what he/she wants to purchase.
('cart', 'cart'),
# The user has been sent to the external payment processor.
# At this point, the order should NOT be modified.
# If the user returns to the payment flow, he/she will start a new order.
('paying', 'paying'),
# The user has successfully purchased the items in the order.
('purchased', 'purchased'),
# The user's order has been refunded.
('refunded', 'refunded'),
# The user's order went through, but the order was erroneously left
# in 'cart'.
('defunct-cart', 'defunct-cart'),
# The user's order went through, but the order was erroneously left
# in 'paying'.
('defunct-paying', 'defunct-paying'),
)
# maps order statuses to their defunct states
ORDER_STATUS_MAP = {
'cart': 'defunct-cart',
'paying': 'defunct-paying',
}
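# A hedged sketch of the intended order lifecycle, using methods defined on the
# models below (illustrative only; argument values are placeholders):
#
#     cart = Order.get_cart_for_user(user)                 # status == 'cart'
#     PaidCourseRegistration.add_to_order(cart, course_id) # add a line item
#     cart.start_purchase()                                # status -> 'paying'
#     cart.purchase(first='...', last='...')               # status -> 'purchased'
#     # Orders stranded in 'cart'/'paying' after payment can later be moved to
#     # 'defunct-cart'/'defunct-paying' via Order.retire().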
# we need a tuple to represent the primary key of various OrderItem subclasses
OrderItemSubclassPK = namedtuple('OrderItemSubclassPK', ['cls', 'pk']) # pylint: disable=invalid-name
class OrderTypes(object):
"""
This class specifies the purchase OrderTypes.
"""
PERSONAL = 'personal'
BUSINESS = 'business'
ORDER_TYPES = (
(PERSONAL, 'personal'),
(BUSINESS, 'business'),
)
class Order(models.Model):
"""
This is the model for an order. Before purchase, an Order and its related OrderItems are used
as the shopping cart.
FOR ANY USER, THERE SHOULD ONLY EVER BE ZERO OR ONE ORDER WITH STATUS='cart'.
"""
user = models.ForeignKey(User, db_index=True)
currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES)
purchase_time = models.DateTimeField(null=True, blank=True)
refunded_time = models.DateTimeField(null=True, blank=True)
# Now we store data needed to generate a reasonable receipt
# These fields only make sense after the purchase
bill_to_first = models.CharField(max_length=64, blank=True)
bill_to_last = models.CharField(max_length=64, blank=True)
bill_to_street1 = models.CharField(max_length=128, blank=True)
bill_to_street2 = models.CharField(max_length=128, blank=True)
bill_to_city = models.CharField(max_length=64, blank=True)
bill_to_state = models.CharField(max_length=8, blank=True)
bill_to_postalcode = models.CharField(max_length=16, blank=True)
bill_to_country = models.CharField(max_length=64, blank=True)
bill_to_ccnum = models.CharField(max_length=8, blank=True) # last 4 digits
bill_to_cardtype = models.CharField(max_length=32, blank=True)
# a JSON dump of the CC processor response, for completeness
processor_reply_dump = models.TextField(blank=True)
# bulk purchase registration code workflow billing details
company_name = models.CharField(max_length=255, null=True, blank=True)
company_contact_name = models.CharField(max_length=255, null=True, blank=True)
company_contact_email = models.CharField(max_length=255, null=True, blank=True)
recipient_name = models.CharField(max_length=255, null=True, blank=True)
recipient_email = models.CharField(max_length=255, null=True, blank=True)
customer_reference_number = models.CharField(max_length=63, null=True, blank=True)
order_type = models.CharField(max_length=32, default='personal', choices=OrderTypes.ORDER_TYPES)
@classmethod
def get_cart_for_user(cls, user):
"""
Always use this to preserve the property that at most 1 order per user has status = 'cart'
"""
# find the newest element in the db
try:
cart_order = cls.objects.filter(user=user, status='cart').order_by('-id')[:1].get()
except ObjectDoesNotExist:
# if nothing exists in the database, create a new cart
cart_order, _created = cls.objects.get_or_create(user=user, status='cart')
return cart_order
@classmethod
def does_user_have_cart(cls, user):
"""
Returns a boolean indicating whether a shopping cart (Order) exists for the specified user
"""
return cls.objects.filter(user=user, status='cart').exists()
@classmethod
def user_cart_has_items(cls, user, item_types=None):
"""
Returns True if the user (anonymous user OK) has
a cart with items in it (which means it should be displayed).
If item_types is passed in, then we check to see if the cart has at least one
OrderItem of those types
"""
if not user.is_authenticated():
return False
cart = cls.get_cart_for_user(user)
if not item_types:
# check to see if the cart has at least some item in it
return cart.has_items()
else:
# if the caller is explicitly asking to check for particular types
for item_type in item_types:
if cart.has_items(item_type):
return True
return False
@classmethod
def remove_cart_item_from_order(cls, item, user):
"""
Removes the item from the cart if the item.order.status == 'cart'.
Also removes any code redemption associated with the order_item
"""
if item.order.status == 'cart':
log.info("order item %s removed for user %s", str(item.id), user)
item.delete()
# remove any redemption entry associated with the item
CouponRedemption.remove_code_redemption_from_item(item, user)
@property
def total_cost(self):
"""
Return the total cost of the cart. If the order has been purchased, returns total of
all purchased and not refunded items.
"""
return sum(i.line_cost for i in self.orderitem_set.filter(status=self.status)) # pylint: disable=no-member
def has_items(self, item_type=None):
"""
Does the cart have any items in it?
If an item_type is passed in then we check to see if there are any items of that class type
"""
if not item_type:
return self.orderitem_set.exists() # pylint: disable=no-member
else:
items = self.orderitem_set.all().select_subclasses() # pylint: disable=no-member
for item in items:
if isinstance(item, item_type):
return True
return False
def reset_cart_items_prices(self):
"""
Reset the items price state in the user cart
"""
for item in self.orderitem_set.all(): # pylint: disable=no-member
if item.is_discounted:
item.unit_cost = item.list_price
item.save()
def clear(self):
"""
Clear out all the items in the cart
"""
self.orderitem_set.all().delete() # pylint: disable=no-member
@transaction.commit_on_success
def start_purchase(self):
"""
Start the purchase process. This will set the order status to "paying",
at which point it should no longer be modified.
Future calls to `Order.get_cart_for_user()` will filter out orders with
status "paying", effectively creating a new (empty) cart.
"""
if self.status == 'cart':
self.status = 'paying'
self.save()
for item in OrderItem.objects.filter(order=self).select_subclasses():
item.start_purchase()
def update_order_type(self):
"""
Updates the order type. This method will inspect the quantity associated with each OrderItem.
In the application, it is implied that when qty > 1, then the user is to purchase
'RegistrationCodes' which are randomly generated strings that users can distribute to
others in order for them to enroll in paywalled courses.
The UI/UX may change in the future to make the switching between PaidCourseRegistration
and CourseRegCodeItems a more explicit UI gesture from the purchaser
"""
cart_items = self.orderitem_set.all() # pylint: disable=no-member
is_order_type_business = False
for cart_item in cart_items:
if cart_item.qty > 1:
is_order_type_business = True
items_to_delete = []
old_to_new_id_map = []
if is_order_type_business:
for cart_item in cart_items:
if hasattr(cart_item, 'paidcourseregistration'):
course_reg_code_item = CourseRegCodeItem.add_to_order(self, cart_item.paidcourseregistration.course_id, cart_item.qty)
# update the discounted prices if coupon redemption applied
course_reg_code_item.list_price = cart_item.list_price
course_reg_code_item.unit_cost = cart_item.unit_cost
course_reg_code_item.save()
items_to_delete.append(cart_item)
old_to_new_id_map.append({"oldId": cart_item.id, "newId": course_reg_code_item.id})
else:
for cart_item in cart_items:
if hasattr(cart_item, 'courseregcodeitem'):
paid_course_registration = PaidCourseRegistration.add_to_order(self, cart_item.courseregcodeitem.course_id)
# update the discounted prices if coupon redemption applied
paid_course_registration.list_price = cart_item.list_price
paid_course_registration.unit_cost = cart_item.unit_cost
paid_course_registration.save()
items_to_delete.append(cart_item)
old_to_new_id_map.append({"oldId": cart_item.id, "newId": paid_course_registration.id})
for item in items_to_delete:
item.delete()
self.order_type = OrderTypes.BUSINESS if is_order_type_business else OrderTypes.PERSONAL
self.save()
return old_to_new_id_map
def generate_pdf_receipt(self, order_items):
"""
Generates the pdf receipt for the given order_items
and returns the pdf_buffer.
"""
items_data = []
for item in order_items:
item_total = item.qty * item.unit_cost
items_data.append({
'item_description': item.pdf_receipt_display_name,
'quantity': item.qty,
'list_price': item.get_list_price(),
'discount': item.get_list_price() - item.unit_cost,
'item_total': item_total
})
pdf_buffer = BytesIO()
PDFInvoice(
items_data=items_data,
item_id=str(self.id), # pylint: disable=no-member
date=self.purchase_time,
is_invoice=False,
total_cost=self.total_cost,
payment_received=self.total_cost,
balance=0
).generate_pdf(pdf_buffer)
return pdf_buffer
def generate_registration_codes_csv(self, orderitems, site_name):
"""
This function generates the CSV file of registration codes and their redemption URLs.
"""
course_info = []
csv_file = StringIO.StringIO()
csv_writer = csv.writer(csv_file)
csv_writer.writerow(['Course Name', 'Registration Code', 'URL'])
for item in orderitems:
course_id = item.course_id
course = get_course_by_id(getattr(item, 'course_id'), depth=0)
registration_codes = CourseRegistrationCode.objects.filter(course_id=course_id, order=self)
course_info.append((course.display_name, ' (' + course.start_datetime_text() + '-' + course.end_datetime_text() + ')'))
for registration_code in registration_codes:
redemption_url = reverse('register_code_redemption', args=[registration_code.code])
url = '{base_url}{redemption_url}'.format(base_url=site_name, redemption_url=redemption_url)
csv_writer.writerow([unicode(course.display_name).encode("utf-8"), registration_code.code, url])
return csv_file, course_info
def send_confirmation_emails(self, orderitems, is_order_type_business, csv_file, pdf_file, site_name, courses_info):
"""
send confirmation e-mail
"""
recipient_list = [(self.user.username, getattr(self.user, 'email'), 'user')] # pylint: disable=no-member
if self.company_contact_email:
recipient_list.append((self.company_contact_name, self.company_contact_email, 'company_contact'))
joined_course_names = ""
if self.recipient_email:
recipient_list.append((self.recipient_name, self.recipient_email, 'email_recipient'))
courses_names_with_dates = [course_info[0] + course_info[1] for course_info in courses_info]
joined_course_names = " " + ", ".join(courses_names_with_dates)
if not is_order_type_business:
subject = _("Order Payment Confirmation")
else:
subject = _('Confirmation and Registration Codes for the following courses: {course_name_list}').format(
course_name_list=joined_course_names
)
dashboard_url = '{base_url}{dashboard}'.format(
base_url=site_name,
dashboard=reverse('dashboard')
)
try:
from_address = microsite.get_value(
'email_from_address',
settings.PAYMENT_SUPPORT_EMAIL
)
# Send a unique email for each recipient. Don't put all email addresses in a single email.
for recipient in recipient_list:
message = render_to_string(
'emails/business_order_confirmation_email.txt' if is_order_type_business else 'emails/order_confirmation_email.txt',
{
'order': self,
'recipient_name': recipient[0],
'recipient_type': recipient[2],
'site_name': site_name,
'order_items': orderitems,
'course_names': ", ".join([course_info[0] for course_info in courses_info]),
'dashboard_url': dashboard_url,
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'order_placed_by': '{username} ({email})'.format(username=self.user.username, email=getattr(self.user, 'email')), # pylint: disable=no-member
'has_billing_info': settings.FEATURES['STORE_BILLING_INFO'],
'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
'payment_support_email': microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL),
'payment_email_signature': microsite.get_value('payment_email_signature'),
}
)
email = EmailMessage(
subject=subject,
body=message,
from_email=from_address,
to=[recipient[1]]
)
# Only the business order is HTML formatted. A single seat order confirmation is plain text.
if is_order_type_business:
email.content_subtype = "html"
if csv_file:
email.attach(u'RegistrationCodesRedemptionUrls.csv', csv_file.getvalue(), 'text/csv')
if pdf_file is not None:
email.attach(u'Receipt.pdf', pdf_file.getvalue(), 'application/pdf')
else:
file_buffer = StringIO.StringIO(_('pdf download unavailable right now, please contact support.'))
email.attach(u'pdf_not_available.txt', file_buffer.getvalue(), 'text/plain')
email.send()
except (smtplib.SMTPException, BotoServerError): # sadly need to handle diff. mail backends individually
log.error('Failed sending confirmation e-mail for order %d', self.id) # pylint: disable=no-member
def purchase(self, first='', last='', street1='', street2='', city='', state='', postalcode='',
country='', ccnum='', cardtype='', processor_reply_dump=''):
"""
Call to mark this order as purchased. Iterates through its OrderItems and calls
their purchased_callback
`first` - first name of person billed (e.g. John)
`last` - last name of person billed (e.g. Smith)
`street1` - first line of a street address of the billing address (e.g. 11 Cambridge Center)
`street2` - second line of a street address of the billing address (e.g. Suite 101)
`city` - city of the billing address (e.g. Cambridge)
`state` - code of the state, province, or territory of the billing address (e.g. MA)
`postalcode` - postal code of the billing address (e.g. 02142)
`country` - country code of the billing address (e.g. US)
`ccnum` - last 4 digits of the credit card number of the credit card billed (e.g. 1111)
`cardtype` - 3-digit code representing the card type used (e.g. 001)
`processor_reply_dump` - all the parameters returned by the processor
"""
if self.status == 'purchased':
log.error(
u"`purchase` method called on order {}, but order is already purchased.".format(self.id) # pylint: disable=no-member
)
return
self.status = 'purchased'
self.purchase_time = datetime.now(pytz.utc)
self.bill_to_first = first
self.bill_to_last = last
self.bill_to_city = city
self.bill_to_state = state
self.bill_to_country = country
self.bill_to_postalcode = postalcode
if settings.FEATURES['STORE_BILLING_INFO']:
self.bill_to_street1 = street1
self.bill_to_street2 = street2
self.bill_to_ccnum = ccnum
self.bill_to_cardtype = cardtype
self.processor_reply_dump = processor_reply_dump
# save these changes on the order, then we can tell when we are in an
# inconsistent state
self.save()
# this should return all of the objects with the correct types of the
# subclasses
orderitems = OrderItem.objects.filter(order=self).select_subclasses()
site_name = microsite.get_value('SITE_NAME', settings.SITE_NAME)
if self.order_type == OrderTypes.BUSINESS:
self.update_order_type()
for item in orderitems:
item.purchase_item()
csv_file = None
courses_info = []
if self.order_type == OrderTypes.BUSINESS:
#
# Generate the CSV file that contains all of the RegistrationCodes that have already been
# generated when the purchase has transacted
#
csv_file, courses_info = self.generate_registration_codes_csv(orderitems, site_name)
try:
pdf_file = self.generate_pdf_receipt(orderitems)
except Exception: # pylint: disable=broad-except
log.exception('Exception at creating pdf file.')
pdf_file = None
try:
self.send_confirmation_emails(
orderitems, self.order_type == OrderTypes.BUSINESS,
csv_file, pdf_file, site_name, courses_info
)
except Exception: # pylint: disable=broad-except
# Catch all exceptions here, since the Django view implicitly
# wraps this in a transaction. If the order completes successfully,
# we don't want to roll back just because we couldn't send
# the confirmation email.
log.exception('Error occurred while sending payment confirmation email')
self._emit_order_event('Completed Order', orderitems)
def refund(self):
"""
Refund the given order. As of right now, this just marks the order as refunded.
"""
self.status = 'refunded'
self.save()
orderitems = OrderItem.objects.filter(order=self).select_subclasses()
self._emit_order_event('Refunded Order', orderitems)
def _emit_order_event(self, event_name, orderitems):
"""
Emit an analytics event with the given name for this Order. Will iterate over all associated
OrderItems and add them as products in the event as well.
"""
try:
if settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
analytics.track(self.user.id, event_name, { # pylint: disable=no-member
'orderId': self.id, # pylint: disable=no-member
'total': str(self.total_cost),
'currency': self.currency,
'products': [item.analytics_data() for item in orderitems]
}, context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
})
except Exception: # pylint: disable=broad-except
# Capturing all exceptions thrown while tracking analytics events. We do not want
# an operation to fail because of an analytics event, so we will capture these
# errors in the logs.
log.exception(
u'Unable to emit {event} event for user {user} and order {order}'.format(
event=event_name, user=self.user.id, order=self.id) # pylint: disable=no-member
)
def add_billing_details(self, company_name='', company_contact_name='', company_contact_email='', recipient_name='',
recipient_email='', customer_reference_number=''):
"""
This function is called after the user selects a purchase type of "Business" and
is asked to enter the optional billing details. The billing details are updated
for that order.
company_name - Name of purchasing organization
company_contact_name - Name of the key contact at the company the sale was made to
company_contact_email - Email of the key contact at the company the sale was made to
recipient_name - Name of the contact at the company the invoice should be sent to
recipient_email - Email of the contact at the company the invoice should be sent to
customer_reference_number - purchase order number of the organization associated with this Order
"""
self.company_name = company_name
self.company_contact_name = company_contact_name
self.company_contact_email = company_contact_email
self.recipient_name = recipient_name
self.recipient_email = recipient_email
self.customer_reference_number = customer_reference_number
self.save()
def generate_receipt_instructions(self):
"""
Call to generate specific instructions for each item in the order. This gets displayed on the receipt
page, typically. Instructions are something like "visit your dashboard to see your new courses".
This will return two things in a pair. The first will be a dict with keys=OrderItemSubclassPK corresponding
to an OrderItem and values=a set of html instructions they generate. The second will be a set of de-duped
html instructions
"""
instruction_set = set([]) # heh. not ia32 or alpha or sparc
instruction_dict = {}
order_items = OrderItem.objects.filter(order=self).select_subclasses()
for item in order_items:
item_pk_with_subclass, set_of_html = item.generate_receipt_instructions()
instruction_dict[item_pk_with_subclass] = set_of_html
instruction_set.update(set_of_html)
return instruction_dict, instruction_set
def retire(self):
"""
Method to "retire" orders that have gone through to the payment service
but have (erroneously) not had their statuses updated.
This method only works on orders that satisfy the following conditions:
1) the order status is either "cart" or "paying" (otherwise we raise
an InvalidStatusToRetire error)
2) the order's order item's statuses match the order's status (otherwise
we throw an UnexpectedOrderItemStatus error)
"""
# if an order is already retired, no-op:
if self.status in ORDER_STATUS_MAP.values():
return
if self.status not in ORDER_STATUS_MAP.keys():
raise InvalidStatusToRetire(
"order status {order_status} is not 'paying' or 'cart'".format(
order_status=self.status
)
)
for item in self.orderitem_set.all(): # pylint: disable=no-member
if item.status != self.status:
raise UnexpectedOrderItemStatus(
"order_item status is different from order status"
)
self.status = ORDER_STATUS_MAP[self.status]
self.save()
for item in self.orderitem_set.all(): # pylint: disable=no-member
item.retire()
def find_item_by_course_id(self, course_id):
"""
course_id: Course id of the item to find
Returns OrderItem from the Order given a course_id
Raises exception ItemNotFoundException when the item
having the given course_id is not present in the cart
"""
cart_items = OrderItem.objects.filter(order=self).select_subclasses()
found_items = []
for item in cart_items:
if getattr(item, 'course_id', None):
if item.course_id == course_id:
found_items.append(item)
if not found_items:
raise ItemNotFoundInCartException
return found_items
class OrderItem(TimeStampedModel):
"""
This is the basic interface for order items.
Order items are line items that fill up the shopping carts and orders.
Each implementation of OrderItem should provide its own purchased_callback as
a method.
"""
objects = InheritanceManager()
order = models.ForeignKey(Order, db_index=True)
# this is denormalized, but convenient for SQL queries for reports, etc. user should always be = order.user
user = models.ForeignKey(User, db_index=True)
# this is denormalized, but convenient for SQL queries for reports, etc. status should always be = order.status
status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES, db_index=True)
qty = models.IntegerField(default=1)
unit_cost = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
list_price = models.DecimalField(decimal_places=2, max_digits=30, null=True)
line_desc = models.CharField(default="Misc. Item", max_length=1024)
currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
fulfilled_time = models.DateTimeField(null=True, db_index=True)
refund_requested_time = models.DateTimeField(null=True, db_index=True)
service_fee = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
# general purpose field, not user-visible. Used for reporting
report_comments = models.TextField(default="")
@property
def line_cost(self):
""" Return the total cost of this OrderItem """
return self.qty * self.unit_cost
@classmethod
def add_to_order(cls, order, *args, **kwargs):
"""
A suggested convenience function for subclasses.
NOTE: This does not add anything to the cart. That is left up to the
subclasses to implement for themselves
"""
# this is a validation step to verify that the currency of the item we
# are adding is the same as the currency of the order we are adding it
# to
currency = kwargs.get('currency', 'usd')
if order.currency != currency and order.orderitem_set.exists():
raise InvalidCartItem(_("Trying to add a different currency into the cart"))
@transaction.commit_on_success
def purchase_item(self):
"""
This is basically a wrapper around purchased_callback that handles
modifying the OrderItem itself
"""
self.purchased_callback()
self.status = 'purchased'
self.fulfilled_time = datetime.now(pytz.utc)
self.save()
def start_purchase(self):
"""
Start the purchase process. This will set the order item status to "paying",
at which point it should no longer be modified.
"""
self.status = 'paying'
self.save()
def purchased_callback(self):
"""
This is called on each inventory item in the shopping cart when the
purchase goes through.
"""
raise NotImplementedError
def generate_receipt_instructions(self):
"""
This is called on each item in a purchased order to generate receipt instructions.
This should return a list of `ReceiptInstruction`s in HTML string
Default implementation is to return an empty set
"""
return self.pk_with_subclass, set([])
@property
def pk_with_subclass(self):
"""
Returns a named tuple that annotates the pk of this instance with its class, to fully represent
a pk of a subclass (inclusive) of OrderItem
"""
return OrderItemSubclassPK(type(self), self.pk)
@property
def is_discounted(self):
"""
Returns True if a discount coupon has been applied to the OrderItem, and False otherwise.
Earlier, OrderItems were stored with an empty list_price if a discount had not been applied.
Now we consider the item to be non-discounted if list_price is None or list_price == unit_cost;
conversely, an item is discounted if list_price is non-None and differs from unit_cost.
This should work with both new and old records.
"""
return self.list_price and self.list_price != self.unit_cost
def get_list_price(self):
"""
Returns the list_price if it is defined; otherwise returns the unit_cost.
"""
return self.list_price if self.list_price else self.unit_cost
@property
def single_item_receipt_template(self):
"""
The template that should be used when there's only one item in the order
"""
return 'shoppingcart/receipt.html'
@property
def single_item_receipt_context(self):
"""
Extra variables needed to render the template specified in
`single_item_receipt_template`
"""
return {}
def additional_instruction_text(self, **kwargs): # pylint: disable=unused-argument
"""
Individual instructions for this order item.
Currently, only used for emails.
"""
return ''
@property
def pdf_receipt_display_name(self):
"""
How to display this item on a PDF printed receipt file.
This can be overridden by the subclasses of OrderItem
"""
course_key = getattr(self, 'course_id', None)
if course_key:
course = get_course_by_id(course_key, depth=0)
return course.display_name
else:
raise Exception(
"Not Implemented. OrderItems that are not Course specific should have"
" a overridden pdf_receipt_display_name property"
)
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
The default implementation returns defaults for most attributes. When no name or
category is specified by the implementation, the string 'N/A' is placed for the
name and category. This should be handled appropriately by all implementations.
Returns
A dictionary containing analytics data for this OrderItem.
"""
return {
'id': self.id, # pylint: disable=no-member
'sku': type(self).__name__,
'name': 'N/A',
'price': str(self.unit_cost),
'quantity': self.qty,
'category': 'N/A',
}
def retire(self):
"""
Called by the `retire` method defined in the `Order` class. Retires
an order item if its (and its order's) status was erroneously not
updated to "purchased" after the order was processed.
"""
self.status = ORDER_STATUS_MAP[self.status]
self.save()
class Invoice(TimeStampedModel):
"""
This table captures all the information needed to support "invoicing",
which is when a user wants to purchase Registration Codes,
but will not do so via a Credit Card transaction.
"""
company_name = models.CharField(max_length=255, db_index=True)
company_contact_name = models.CharField(max_length=255)
company_contact_email = models.CharField(max_length=255)
recipient_name = models.CharField(max_length=255)
recipient_email = models.CharField(max_length=255)
address_line_1 = models.CharField(max_length=255)
address_line_2 = models.CharField(max_length=255, null=True, blank=True)
address_line_3 = models.CharField(max_length=255, null=True, blank=True)
city = models.CharField(max_length=255, null=True)
state = models.CharField(max_length=255, null=True)
zip = models.CharField(max_length=15, null=True)
country = models.CharField(max_length=64, null=True)
# This field has been deprecated.
# The total amount can now be calculated as the sum
# of each invoice item associated with the invoice.
# For backwards compatibility, this field is maintained
# and written to during invoice creation.
total_amount = models.FloatField()
# This field has been deprecated in order to support
# invoices for items that are not course-related.
# Although this field is still maintained for backwards
# compatibility, you should use CourseRegistrationCodeInvoiceItem
# to look up the course ID for purchased redeem codes.
course_id = CourseKeyField(max_length=255, db_index=True)
internal_reference = models.CharField(
max_length=255,
null=True,
blank=True,
help_text=ugettext_lazy("Internal reference code for this invoice.")
)
customer_reference_number = models.CharField(
max_length=63,
null=True,
blank=True,
help_text=ugettext_lazy("Customer's reference code for this invoice.")
)
is_valid = models.BooleanField(default=True)
@classmethod
def get_invoice_total_amount_for_course(cls, course_key):
"""
Returns the invoice total amount generated by the course.
"""
result = cls.objects.filter(course_id=course_key, is_valid=True).aggregate(total=Sum('total_amount'))
total = result.get('total', 0)
return total if total else 0
def generate_pdf_invoice(self, course, course_price, quantity, sale_price):
"""
Generates the pdf invoice for the given course
and returns the pdf_buffer.
"""
discount_per_item = float(course_price) - sale_price / quantity
list_price = course_price - discount_per_item
items_data = [{
'item_description': course.display_name,
'quantity': quantity,
'list_price': list_price,
'discount': discount_per_item,
'item_total': quantity * list_price
}]
pdf_buffer = BytesIO()
PDFInvoice(
items_data=items_data,
item_id=str(self.id), # pylint: disable=no-member
date=datetime.now(pytz.utc),
is_invoice=True,
total_cost=float(self.total_amount),
payment_received=0,
balance=float(self.total_amount)
).generate_pdf(pdf_buffer)
return pdf_buffer
def snapshot(self):
"""Create a snapshot of the invoice.
A snapshot is a JSON-serializable representation
of the invoice's state, including its line items
and associated transactions (payments/refunds).
This is useful for saving the history of changes
to the invoice.
Returns:
dict
"""
return {
'internal_reference': self.internal_reference,
'customer_reference': self.customer_reference_number,
'is_valid': self.is_valid,
'contact_info': {
'company_name': self.company_name,
'company_contact_name': self.company_contact_name,
'company_contact_email': self.company_contact_email,
'recipient_name': self.recipient_name,
'recipient_email': self.recipient_email,
'address_line_1': self.address_line_1,
'address_line_2': self.address_line_2,
'address_line_3': self.address_line_3,
'city': self.city,
'state': self.state,
'zip': self.zip,
'country': self.country,
},
'items': [
item.snapshot()
for item in InvoiceItem.objects.filter(invoice=self).select_subclasses()
],
'transactions': [
trans.snapshot()
for trans in InvoiceTransaction.objects.filter(invoice=self)
],
}
def __unicode__(self):
label = (
unicode(self.internal_reference)
if self.internal_reference
else u"No label"
)
created = (
self.created.strftime("%Y-%m-%d") # pylint: disable=no-member
if self.created
else u"No date"
)
return u"{label} ({date_created})".format(
label=label, date_created=created
)
INVOICE_TRANSACTION_STATUSES = (
# A payment/refund is in process, but money has not yet been transferred
('started', 'started'),
# A payment/refund has completed successfully
# This should be set ONLY once money has been successfully exchanged.
('completed', 'completed'),
# A payment/refund was promised, but was cancelled before
# money had been transferred. An example would be
# cancelling a refund check before the recipient has
# a chance to deposit it.
('cancelled', 'cancelled')
)
class InvoiceTransaction(TimeStampedModel):
"""Record payment and refund information for invoices.
There are two expected use cases:
1) We send an invoice to someone, and they send us a check.
We then manually create an invoice transaction to represent
the payment.
2) We send an invoice to someone, and they pay us. Later, we
need to issue a refund for the payment. We manually
create a transaction with a negative amount to represent
the refund.
"""
invoice = models.ForeignKey(Invoice)
amount = models.DecimalField(
default=0.0, decimal_places=2, max_digits=30,
help_text=ugettext_lazy(
"The amount of the transaction. Use positive amounts for payments"
" and negative amounts for refunds."
)
)
currency = models.CharField(
default="usd",
max_length=8,
help_text=ugettext_lazy("Lower-case ISO currency codes")
)
comments = models.TextField(
null=True,
blank=True,
help_text=ugettext_lazy("Optional: provide additional information for this transaction")
)
status = models.CharField(
max_length=32,
default='started',
choices=INVOICE_TRANSACTION_STATUSES,
help_text=ugettext_lazy(
"The status of the payment or refund. "
"'started' means that payment is expected, but money has not yet been transferred. "
"'completed' means that the payment or refund was received. "
"'cancelled' means that payment or refund was expected, but was cancelled before money was transferred. "
)
)
created_by = models.ForeignKey(User)
last_modified_by = models.ForeignKey(User, related_name='last_modified_by_user')
@classmethod
def get_invoice_transaction(cls, invoice_id):
"""
Returns the InvoiceTransaction object for the given invoice_id if found;
otherwise returns None.
"""
try:
return cls.objects.get(Q(invoice_id=invoice_id), Q(status='completed') | Q(status='refunded'))
except InvoiceTransaction.DoesNotExist:
return None
@classmethod
def get_total_amount_of_paid_course_invoices(cls, course_key):
"""
returns the total amount of the paid invoices.
"""
result = cls.objects.filter(amount__gt=0, invoice__course_id=course_key, status='completed').aggregate(
total=Sum('amount')
)
total = result.get('total', 0)
return total if total else 0
def snapshot(self):
"""Create a snapshot of the invoice transaction.
The returned dictionary is JSON-serializable.
Returns:
dict
"""
return {
'amount': unicode(self.amount),
'currency': self.currency,
'comments': self.comments,
'status': self.status,
'created_by': self.created_by.username, # pylint: disable=no-member
'last_modified_by': self.last_modified_by.username # pylint: disable=no-member
}
class InvoiceItem(TimeStampedModel):
"""
This is the basic interface for invoice items.
Each invoice item represents a "line" in the invoice.
For example, in an invoice for course registration codes,
there might be an invoice item representing 10 registration
codes for the DemoX course.
"""
objects = InheritanceManager()
invoice = models.ForeignKey(Invoice, db_index=True)
qty = models.IntegerField(
default=1,
help_text=ugettext_lazy("The number of items sold.")
)
unit_price = models.DecimalField(
default=0.0,
decimal_places=2,
max_digits=30,
help_text=ugettext_lazy("The price per item sold, including discounts.")
)
currency = models.CharField(
default="usd",
max_length=8,
help_text=ugettext_lazy("Lower-case ISO currency codes")
)
def snapshot(self):
"""Create a snapshot of the invoice item.
The returned dictionary is JSON-serializable.
Returns:
dict
"""
return {
'qty': self.qty,
'unit_price': unicode(self.unit_price),
'currency': self.currency
}
class CourseRegistrationCodeInvoiceItem(InvoiceItem):
"""
This is an invoice item that represents a payment for
a course registration.
"""
course_id = CourseKeyField(max_length=128, db_index=True)
def snapshot(self):
"""Create a snapshot of the invoice item.
This is the same as a snapshot for other invoice items,
with the addition of a `course_id` field.
Returns:
dict
"""
snapshot = super(CourseRegistrationCodeInvoiceItem, self).snapshot()
snapshot['course_id'] = unicode(self.course_id)
return snapshot
class InvoiceHistory(models.Model):
"""History of changes to invoices.
This table stores snapshots of invoice state,
including the associated line items and transactions
(payments/refunds).
Entries in the table are created, but never deleted
or modified.
We use Django signals to save history entries on change
events. These signals are fired within a database
transaction, so the history record is created only
if the invoice change is successfully persisted.
"""
timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
invoice = models.ForeignKey(Invoice)
# JSON-serialized representation of the current state
# of the invoice, including its line items and
# transactions (payments/refunds).
snapshot = models.TextField(blank=True)
@classmethod
def save_invoice_snapshot(cls, invoice):
"""Save a snapshot of the invoice's current state.
Arguments:
invoice (Invoice): The invoice to save.
"""
cls.objects.create(
invoice=invoice,
snapshot=json.dumps(invoice.snapshot())
)
@staticmethod
def snapshot_receiver(sender, instance, **kwargs): # pylint: disable=unused-argument
"""Signal receiver that saves a snapshot of an invoice.
Arguments:
sender: Not used, but required by Django signals.
instance (Invoice, InvoiceItem, or InvoiceTransaction)
"""
if isinstance(instance, Invoice):
InvoiceHistory.save_invoice_snapshot(instance)
elif hasattr(instance, 'invoice'):
InvoiceHistory.save_invoice_snapshot(instance.invoice)
class Meta(object): # pylint: disable=missing-docstring
get_latest_by = "timestamp"
# Hook up Django signals to record changes in the history table.
# We record any change to an invoice, invoice item, or transaction.
# We also record any deletion of a transaction, since users can delete
# transactions via Django admin.
# Note that we need to include *each* InvoiceItem subclass
# here, since Django signals do not fire automatically for subclasses
# of the "sender" class.
post_save.connect(InvoiceHistory.snapshot_receiver, sender=Invoice)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=CourseRegistrationCodeInvoiceItem)
post_save.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
post_delete.connect(InvoiceHistory.snapshot_receiver, sender=InvoiceTransaction)
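# Hedged illustration of the snapshotting flow wired up above (field values are
# placeholders): saving any of the connected models appends a history row.
#
#     invoice = Invoice.objects.create(company_name="Acme", ..., total_amount=0)
#     # post_save fires -> InvoiceHistory.snapshot_receiver
#     #                 -> InvoiceHistory.save_invoice_snapshot(invoice)
#     # which stores json.dumps(invoice.snapshot()) in InvoiceHistory.snapshot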
class CourseRegistrationCode(models.Model):
"""
This table contains registration codes.
With a registration code, a user can register for a course for free.
"""
code = models.CharField(max_length=32, db_index=True, unique=True)
course_id = CourseKeyField(max_length=255, db_index=True)
created_by = models.ForeignKey(User, related_name='created_by_user')
created_at = models.DateTimeField(auto_now_add=True)
order = models.ForeignKey(Order, db_index=True, null=True, related_name="purchase_order")
mode_slug = models.CharField(max_length=100, null=True)
is_valid = models.BooleanField(default=True)
# For backwards compatibility, we maintain the FK to "invoice"
# In the future, we will remove this in favor of the FK
# to "invoice_item" (which can be used to look up the invoice).
invoice = models.ForeignKey(Invoice, null=True)
invoice_item = models.ForeignKey(CourseRegistrationCodeInvoiceItem, null=True)
@classmethod
def order_generated_registration_codes(cls, course_id):
"""
Returns the registration codes that were generated
via bulk purchase scenario.
"""
return cls.objects.filter(order__isnull=False, course_id=course_id)
@classmethod
def invoice_generated_registration_codes(cls, course_id):
"""
Returns the registration codes that were generated
via invoice.
"""
return cls.objects.filter(invoice__isnull=False, course_id=course_id)
class RegistrationCodeRedemption(models.Model):
"""
This model contains the registration-code redemption info
"""
order = models.ForeignKey(Order, db_index=True, null=True)
registration_code = models.ForeignKey(CourseRegistrationCode, db_index=True)
redeemed_by = models.ForeignKey(User, db_index=True)
redeemed_at = models.DateTimeField(auto_now_add=True, null=True)
course_enrollment = models.ForeignKey(CourseEnrollment, null=True)
@classmethod
def registration_code_used_for_enrollment(cls, course_enrollment):
"""
Returns the RegistrationCodeRedemption object if a registration code
has been used during the course enrollment; otherwise returns None.
"""
# theoretically there could be more than one (e.g. someone self-unenrolls
# then re-enrolls with a different regcode)
reg_codes = cls.objects.filter(course_enrollment=course_enrollment).order_by('-redeemed_at')
if reg_codes:
# return the first one. In all normal use cases of registration codes
# the user will only have one
return reg_codes[0]
return None
@classmethod
def is_registration_code_redeemed(cls, course_reg_code):
"""
Checks the existence of the registration code
in the RegistrationCodeRedemption
"""
return cls.objects.filter(registration_code__code=course_reg_code).exists()
@classmethod
def get_registration_code_redemption(cls, code, course_id):
"""
Returns the registration code redemption object if found else returns None.
"""
try:
code_redemption = cls.objects.get(registration_code__code=code, registration_code__course_id=course_id)
except cls.DoesNotExist:
code_redemption = None
return code_redemption
@classmethod
def create_invoice_generated_registration_redemption(cls, course_reg_code, user): # pylint: disable=invalid-name
"""
This function creates a RegistrationCodeRedemption entry in case the registration codes were invoice generated
and thus the order_id is missing.
"""
code_redemption = RegistrationCodeRedemption(registration_code=course_reg_code, redeemed_by=user)
code_redemption.save()
return code_redemption
class SoftDeleteCouponManager(models.Manager):
""" Use this manager to get objects that have a is_active=True """
def get_active_coupons_query_set(self):
"""
filter the is_active = True Coupons only
"""
return super(SoftDeleteCouponManager, self).get_query_set().filter(is_active=True)
def get_query_set(self):
"""
get all the coupon objects
"""
return super(SoftDeleteCouponManager, self).get_query_set()
class Coupon(models.Model):
"""
This table contains coupon codes.
A user can get a discount on a course by providing a coupon code.
"""
code = models.CharField(max_length=32, db_index=True)
description = models.CharField(max_length=255, null=True, blank=True)
course_id = CourseKeyField(max_length=255)
percentage_discount = models.IntegerField(default=0)
created_by = models.ForeignKey(User)
created_at = models.DateTimeField(auto_now_add=True)
is_active = models.BooleanField(default=True)
expiration_date = models.DateTimeField(null=True, blank=True)
def __unicode__(self):
return "[Coupon] code: {} course: {}".format(self.code, self.course_id)
objects = SoftDeleteCouponManager()
@property
def display_expiry_date(self):
"""
Return the coupon expiration date in a readable format.
"""
return (self.expiration_date - timedelta(days=1)).strftime("%B %d, %Y") if self.expiration_date else None
class CouponRedemption(models.Model):
"""
This table contains coupon redemption info
"""
order = models.ForeignKey(Order, db_index=True)
user = models.ForeignKey(User, db_index=True)
coupon = models.ForeignKey(Coupon, db_index=True)
@classmethod
def remove_code_redemption_from_item(cls, item, user):
"""
If an item is removed from the shopping cart, then we also remove
the corresponding coupon code redemption info.
"""
order_item_course_id = getattr(item, 'course_id')
try:
# Try to remove redemption information of coupon code, If exist.
coupon_redemption = cls.objects.get(
user=user,
coupon__course_id=order_item_course_id if order_item_course_id else CourseKeyField.Empty,
order=item.order_id
)
coupon_redemption.delete()
log.info(
u'Coupon "%s" redemption entry removed for user "%s" for order item "%s"',
coupon_redemption.coupon.code,
user,
str(item.id),
)
except CouponRedemption.DoesNotExist:
log.debug(u'Code redemption does not exist for order item id=%s.', str(item.id))
@classmethod
def remove_coupon_redemption_from_cart(cls, user, cart):
"""
This method deletes the coupon redemption.
"""
coupon_redemption = cls.objects.filter(user=user, order=cart)
if coupon_redemption:
coupon_redemption.delete()
log.info(u'Coupon redemption entry removed for user %s for order %s', user, cart.id)
@classmethod
def get_discount_price(cls, percentage_discount, value):
"""
Return the discounted price after applying the given percentage discount.
"""
discount = Decimal("{0:.2f}".format(Decimal(percentage_discount / 100.00) * value))
return value - discount
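# Worked example for get_discount_price (hedged, illustrative values only):
# a 25% coupon on a Decimal('200.00') item yields discount = Decimal('50.00'),
# so the call returns Decimal('150.00').
#
#     CouponRedemption.get_discount_price(25, Decimal('200.00'))  # -> Decimal('150.00')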
@classmethod
def add_coupon_redemption(cls, coupon, order, cart_items):
"""
add coupon info into coupon_redemption model
"""
is_redemption_applied = False
coupon_redemptions = cls.objects.filter(order=order, user=order.user)
for coupon_redemption in coupon_redemptions:
if coupon_redemption.coupon.code != coupon.code or coupon_redemption.coupon.id == coupon.id:
log.exception(
u"Coupon redemption already exist for user '%s' against order id '%s'",
order.user.username,
order.id,
)
raise MultipleCouponsNotAllowedException
for item in cart_items:
if getattr(item, 'course_id'):
if item.course_id == coupon.course_id:
coupon_redemption = cls(order=order, user=order.user, coupon=coupon)
coupon_redemption.save()
discount_price = cls.get_discount_price(coupon.percentage_discount, item.unit_cost)
item.list_price = item.unit_cost
item.unit_cost = discount_price
item.save()
log.info(
u"Discount generated for user %s against order id '%s'",
order.user.username,
order.id,
)
is_redemption_applied = True
return is_redemption_applied
return is_redemption_applied
@classmethod
def get_top_discount_codes_used(cls, course_id):
"""
Returns the top discount codes used.
QuerySet = [
{
'coupon__percentage_discount': 22,
'coupon__code': '12',
'coupon__used_count': '2',
},
{
...
}
]
"""
return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).values(
'coupon__code', 'coupon__percentage_discount'
).annotate(coupon__used_count=Count('coupon__code')).order_by('-coupon__used_count')
@classmethod
def get_total_coupon_code_purchases(cls, course_id):
"""
returns the total number of seats purchased using coupon codes
"""
return cls.objects.filter(order__status='purchased', coupon__course_id=course_id).aggregate(Count('coupon'))
class PaidCourseRegistration(OrderItem):
"""
This is an inventory item for paying for a course registration
"""
course_id = CourseKeyField(max_length=128, db_index=True)
mode = models.SlugField(default=CourseMode.DEFAULT_MODE_SLUG)
course_enrollment = models.ForeignKey(CourseEnrollment, null=True)
@classmethod
def get_self_purchased_seat_count(cls, course_key, status='purchased'):
"""
returns the count of paid_course items filtered by course_id and status.
"""
return cls.objects.filter(course_id=course_key, status=status).count()
@classmethod
def get_course_item_for_user_enrollment(cls, user, course_id, course_enrollment):
"""
Returns the PaidCourseRegistration object if the user has paid for
the course enrollment; otherwise returns None
"""
try:
return cls.objects.filter(course_id=course_id, user=user, course_enrollment=course_enrollment,
status='purchased').latest('id')
except PaidCourseRegistration.DoesNotExist:
return None
@classmethod
def contained_in_order(cls, order, course_id):
"""
Is the course defined by course_id contained in the order?
"""
return course_id in [
item.course_id
for item in order.orderitem_set.all().select_subclasses("paidcourseregistration")
if isinstance(item, cls)
]
@classmethod
def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
"""
This will return the total amount of money that a purchased course generated
"""
total_cost = 0
result = cls.objects.filter(course_id=course_key, status=status).aggregate(
total=Sum('unit_cost', field='qty * unit_cost')
)
if result['total'] is not None:
total_cost = result['total']
return total_cost
@classmethod
@transaction.commit_on_success
def add_to_order(cls, order, course_id, mode_slug=CourseMode.DEFAULT_MODE_SLUG, cost=None, currency=None):
"""
A standardized way to create these objects, with sensible defaults filled in.
Will update the cost if called on an order that already carries the course.
Returns the order item
"""
# First a bunch of sanity checks:
# actually fetch the course to make sure it exists, use this to
# throw errors if it doesn't.
course = modulestore().get_course(course_id)
if not course:
log.error("User {} tried to add non-existent course {} to cart id {}"
.format(order.user.email, course_id, order.id))
raise CourseDoesNotExistException
if cls.contained_in_order(order, course_id):
log.warning(
u"User %s tried to add PaidCourseRegistration for course %s, already in cart id %s",
order.user.email,
course_id,
order.id,
)
raise ItemAlreadyInCartException
if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
log.warning("User {} trying to add course {} to cart id {}, already registered"
.format(order.user.email, course_id, order.id))
raise AlreadyEnrolledInCourseException
### Validations done, now proceed
### handle default arguments for mode_slug, cost, currency
course_mode = CourseMode.mode_for_course(course_id, mode_slug)
if not course_mode:
# user could have specified a mode that's not set, in that case return the DEFAULT_MODE
course_mode = CourseMode.DEFAULT_MODE
if not cost:
cost = course_mode.min_price
if not currency:
currency = course_mode.currency
super(PaidCourseRegistration, cls).add_to_order(order, course_id, cost, currency=currency)
item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id)
item.status = order.status
item.mode = course_mode.slug
item.qty = 1
item.unit_cost = cost
item.list_price = cost
item.line_desc = _(u'Registration for Course: {course_name}').format(
course_name=course.display_name_with_default)
item.currency = currency
order.currency = currency
item.report_comments = item.csv_report_comments
order.save()
item.save()
log.info("User {} added course registration {} to cart: order {}"
.format(order.user.email, course_id, order.id))
return item
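# Sketch of typical usage (mirrors the CertificateItem example further below;
# the course key is hypothetical):
#   cart = Order.get_cart_for_user(user)
#   item = PaidCourseRegistration.add_to_order(cart, course_key)
# The returned item carries the mode, qty, unit_cost and line description set above.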
def purchased_callback(self):
"""
When purchased, this should enroll the user in the course. We are assuming that
course settings for enrollment date are configured such that only if the (user.email, course_id) pair is found
in CourseEnrollmentAllowed will the user be allowed to enroll. Otherwise requiring payment
would in fact be quite silly since there's a clear back door.
"""
if not modulestore().has_course(self.course_id):
msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
log.error(msg)
raise PurchasedCallbackException(msg)
# enroll in course and link to the enrollment_id
self.course_enrollment = CourseEnrollment.enroll(user=self.user, course_key=self.course_id, mode=self.mode)
self.save()
log.info("Enrolled {0} in paid course {1}, paid ${2}"
.format(self.user.email, self.course_id, self.line_cost)) # pylint: disable=no-member
def generate_receipt_instructions(self):
"""
Generates instructions when the user has purchased a PaidCourseRegistration.
Basically tells the user to visit the dashboard to see their new classes
"""
notification = _(
u"Please visit your {link_start}dashboard{link_end} "
u"to see your new course."
).format(
link_start=u'<a href="{url}">'.format(url=reverse('dashboard')),
link_end=u'</a>',
)
return self.pk_with_subclass, set([notification])
@property
def csv_report_comments(self):
"""
Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
Otherwise returns the annotation
"""
try:
return PaidCourseRegistrationAnnotation.objects.get(course_id=self.course_id).annotation
except PaidCourseRegistrationAnnotation.DoesNotExist:
return u""
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the Order Item is associated with a course, additional fields will be populated with
course information. If there is a mode associated, the mode data is included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(PaidCourseRegistration, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
if self.mode:
data['sku'] = sku + u'.' + unicode(self.mode)
return data
class CourseRegCodeItem(OrderItem):
"""
This is an inventory item for paying for
generating course registration codes
"""
course_id = CourseKeyField(max_length=128, db_index=True)
mode = models.SlugField(default=CourseMode.DEFAULT_MODE_SLUG)
@classmethod
def get_bulk_purchased_seat_count(cls, course_key, status='purchased'):
"""
returns the sum of bulk-purchased seats.
"""
total = 0
result = cls.objects.filter(course_id=course_key, status=status).aggregate(total=Sum('qty'))
if result['total'] is not None:
total = result['total']
return total
@classmethod
def contained_in_order(cls, order, course_id):
"""
Is the course defined by course_id contained in the order?
"""
return course_id in [
item.course_id
for item in order.orderitem_set.all().select_subclasses("courseregcodeitem")
if isinstance(item, cls)
]
@classmethod
def get_total_amount_of_purchased_item(cls, course_key, status='purchased'):
"""
This will return the total amount of money that a purchased course generated
"""
total_cost = 0
result = cls.objects.filter(course_id=course_key, status=status).aggregate(
total=Sum('unit_cost', field='qty * unit_cost')
)
if result['total'] is not None:
total_cost = result['total']
return total_cost
@classmethod
@transaction.commit_on_success
def add_to_order(cls, order, course_id, qty, mode_slug=CourseMode.DEFAULT_MODE_SLUG, cost=None, currency=None): # pylint: disable=arguments-differ
"""
A standardized way to create these objects, with sensible defaults filled in.
Will update the cost if called on an order that already carries the course.
Returns the order item
"""
# First a bunch of sanity checks:
# actually fetch the course to make sure it exists, use this to
# throw errors if it doesn't.
course = modulestore().get_course(course_id)
if not course:
log.error("User {} tried to add non-existent course {} to cart id {}"
.format(order.user.email, course_id, order.id))
raise CourseDoesNotExistException
if cls.contained_in_order(order, course_id):
log.warning("User {} tried to add PaidCourseRegistration for course {}, already in cart id {}"
.format(order.user.email, course_id, order.id))
raise ItemAlreadyInCartException
if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
log.warning("User {} trying to add course {} to cart id {}, already registered"
.format(order.user.email, course_id, order.id))
raise AlreadyEnrolledInCourseException
### Validations done, now proceed
### handle default arguments for mode_slug, cost, currency
course_mode = CourseMode.mode_for_course(course_id, mode_slug)
if not course_mode:
# user could have specified a mode that's not set, in that case return the DEFAULT_MODE
course_mode = CourseMode.DEFAULT_MODE
if not cost:
cost = course_mode.min_price
if not currency:
currency = course_mode.currency
super(CourseRegCodeItem, cls).add_to_order(order, course_id, cost, currency=currency)
item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id) # pylint: disable=unused-variable
item.status = order.status
item.mode = course_mode.slug
item.unit_cost = cost
item.list_price = cost
item.qty = qty
item.line_desc = _(u'Enrollment codes for Course: {course_name}').format(
course_name=course.display_name_with_default)
item.currency = currency
order.currency = currency
item.report_comments = item.csv_report_comments
order.save()
item.save()
log.info("User {} added course registration {} to cart: order {}"
.format(order.user.email, course_id, order.id))
return item
def purchased_callback(self):
"""
When the purchase is completed, this OrderItem type generates Registration Codes that will
be redeemed by users
"""
if not modulestore().has_course(self.course_id):
msg = u"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id)
log.error(msg)
raise PurchasedCallbackException(msg)
total_registration_codes = int(self.qty)
# we need to import here because of a circular dependency
# we should ultimately refactor code to have save_registration_code in this models.py
# file, but there's also a shared dependency on a random string generator which
# is in another PR (for another feature)
from instructor.views.api import save_registration_code
for i in range(total_registration_codes): # pylint: disable=unused-variable
save_registration_code(self.user, self.course_id, self.mode, order=self.order)
log.info("Enrolled {0} in paid course {1}, paid ${2}"
.format(self.user.email, self.course_id, self.line_cost)) # pylint: disable=no-member
@property
def csv_report_comments(self):
"""
Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
Otherwise returns the annotation
"""
try:
return CourseRegCodeItemAnnotation.objects.get(course_id=self.course_id).annotation
except CourseRegCodeItemAnnotation.DoesNotExist:
return u""
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the OrderItem is associated with a course, additional fields will be populated with
course information. If a mode is available, it will be included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(CourseRegCodeItem, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
if self.mode:
data['sku'] = sku + u'.' + unicode(self.mode)
return data
class CourseRegCodeItemAnnotation(models.Model):
"""
A model that maps course_id to an additional annotation. This is specifically needed because when Stanford
generates reports for the paid courses, each report item must contain the payment account associated with a course.
And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association,
so this is to retrofit it.
"""
course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
annotation = models.TextField(null=True)
def __unicode__(self):
# pylint: disable=no-member
return u"{} : {}".format(self.course_id.to_deprecated_string(), self.annotation)
class PaidCourseRegistrationAnnotation(models.Model):
"""
A model that maps course_id to an additional annotation. This is specifically needed because when Stanford
generates reports for the paid courses, each report item must contain the payment account associated with a course.
And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association,
so this is to retrofit it.
"""
course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
annotation = models.TextField(null=True)
def __unicode__(self):
# pylint: disable=no-member
return u"{} : {}".format(self.course_id.to_deprecated_string(), self.annotation)
class CertificateItem(OrderItem):
"""
This is an inventory item for purchasing certificates
"""
course_id = CourseKeyField(max_length=128, db_index=True)
course_enrollment = models.ForeignKey(CourseEnrollment)
mode = models.SlugField()
@receiver(UNENROLL_DONE)
def refund_cert_callback(sender, course_enrollment=None, skip_refund=False, **kwargs): # pylint: disable=no-self-argument,unused-argument
"""
When a CourseEnrollment object calls its unenroll method, this function checks to see if that unenrollment
occurred in a verified certificate that was within the refund deadline. If so, it actually performs the
refund.
Returns the refunded certificate on a successful refund; else, it returns nothing.
"""
# Only refund verified cert unenrollments that are within bounds of the expiration date
if (not course_enrollment.refundable()) or skip_refund:
return
target_certs = CertificateItem.objects.filter(course_id=course_enrollment.course_id, user_id=course_enrollment.user, status='purchased', mode='verified')
try:
target_cert = target_certs[0]
except IndexError:
log.error(
u"Matching CertificateItem not found while trying to refund. User %s, Course %s",
course_enrollment.user,
course_enrollment.course_id,
)
return
target_cert.status = 'refunded'
target_cert.refund_requested_time = datetime.now(pytz.utc)
target_cert.save()
target_cert.order.refund()
order_number = target_cert.order_id
# send billing an email so they can handle refunding
subject = _("[Refund] User-Requested Refund")
message = "User {user} ({user_email}) has requested a refund on Order #{order_number}.".format(user=course_enrollment.user,
user_email=course_enrollment.user.email,
order_number=order_number)
to_email = [settings.PAYMENT_SUPPORT_EMAIL]
from_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
try:
send_mail(subject, message, from_email, to_email, fail_silently=False)
except Exception as exception: # pylint: disable=broad-except
err_str = ('Failed sending email to billing to request a refund for verified certificate'
' (User {user}, Course {course}, CourseEnrollmentID {ce_id}, Order #{order})\n{exception}')
log.error(err_str.format(
user=course_enrollment.user,
course=course_enrollment.course_id,
ce_id=course_enrollment.id,
order=order_number,
exception=exception,
))
return target_cert
@classmethod
@transaction.commit_on_success
def add_to_order(cls, order, course_id, cost, mode, currency='usd'):
"""
Add a CertificateItem to an order
Returns the CertificateItem object after saving
`order` - an order that this item should be added to, generally the cart order
`course_id` - the course that we would like to purchase as a CertificateItem
`cost` - the amount the user will be paying for this CertificateItem
`mode` - the course mode that this certificate is going to be issued for
This item also creates a new enrollment if none exists for this user and this course.
Example Usage:
cart = Order.get_cart_for_user(user)
CertificateItem.add_to_order(cart, 'edX/Test101/2013_Fall', 30, 'verified')
"""
super(CertificateItem, cls).add_to_order(order, course_id, cost, currency=currency)
course_enrollment = CourseEnrollment.get_or_create_enrollment(order.user, course_id)
# do some validation on the enrollment mode
valid_modes = CourseMode.modes_for_course_dict(course_id)
if mode in valid_modes:
mode_info = valid_modes[mode]
else:
msg = u"Mode {mode} does not exist for {course_id}".format(mode=mode, course_id=course_id)
log.error(msg)
raise InvalidCartItem(
_(u"Mode {mode} does not exist for {course_id}").format(mode=mode, course_id=course_id)
)
item, _created = cls.objects.get_or_create(
order=order,
user=order.user,
course_id=course_id,
course_enrollment=course_enrollment,
mode=mode,
)
item.status = order.status
item.qty = 1
item.unit_cost = cost
item.list_price = cost
course_name = modulestore().get_course(course_id).display_name
# Translators: In this particular case, mode_name refers to a
# particular mode (i.e. Honor Code Certificate, Verified Certificate, etc)
# by which a user could enroll in the given course.
item.line_desc = _("{mode_name} for course {course}").format(
mode_name=mode_info.name,
course=course_name
)
item.currency = currency
order.currency = currency
order.save()
item.save()
return item
def purchased_callback(self):
"""
When purchase goes through, activate and update the course enrollment for the correct mode
"""
self.course_enrollment.change_mode(self.mode)
self.course_enrollment.activate()
def additional_instruction_text(self):
verification_reminder = ""
is_enrollment_mode_verified = self.course_enrollment.is_verified_enrollment() # pylint: disable=E1101
if is_enrollment_mode_verified:
domain = microsite.get_value('SITE_NAME', settings.SITE_NAME)
path = reverse('verify_student_verify_now', kwargs={'course_id': unicode(self.course_id)})
verification_url = "http://{domain}{path}".format(domain=domain, path=path)
verification_reminder = _(
"If you haven't verified your identity yet, please start the verification process ({verification_url})."
).format(verification_url=verification_url)
refund_reminder = _(
"You have up to two weeks into the course to unenroll and receive a full refund."
"To receive your refund, contact {billing_email}. "
"Please include your order number in your email. "
"Please do NOT include your credit card information."
).format(
billing_email=settings.PAYMENT_SUPPORT_EMAIL
)
# Need this to be unicode in case the reminder strings
# have been translated and contain non-ASCII unicode
return u"{verification_reminder} {refund_reminder}".format(
verification_reminder=verification_reminder,
refund_reminder=refund_reminder
)
@classmethod
def verified_certificates_count(cls, course_id, status):
"""Return a queryset of CertificateItem for every verified enrollment in course_id with the given status."""
return use_read_replica_if_available(
CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status).count())
# TODO combine these three methods into one
@classmethod
def verified_certificates_monetary_field_sum(cls, course_id, status, field_to_aggregate):
"""
Returns a Decimal indicating the total sum of field_to_aggregate for all verified certificates with a particular status.
Sample usages:
- status 'refunded' and field_to_aggregate 'unit_cost' will give the total amount of money refunded for course_id
- status 'purchased' and field_to_aggregate 'service_fees' gives the sum of all service fees for purchased certificates
etc
"""
query = use_read_replica_if_available(
CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status)).aggregate(Sum(field_to_aggregate))[field_to_aggregate + '__sum']
if query is None:
return Decimal(0.00)
else:
return query
@classmethod
def verified_certificates_contributing_more_than_minimum(cls, course_id):
return use_read_replica_if_available(
CertificateItem.objects.filter(
course_id=course_id,
mode='verified',
status='purchased',
unit_cost__gt=(CourseMode.min_course_price_for_verified_for_currency(course_id, 'usd')))).count()
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the CertificateItem is associated with a course, additional fields will be populated with
course information. If there is a mode associated with the certificate, it is included in the SKU.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(CertificateItem, self).analytics_data()
sku = data['sku']
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
if self.mode:
data['sku'] = sku + u'.' + unicode(self.mode)
return data
class DonationConfiguration(ConfigurationModel):
"""Configure whether donations are enabled on the site."""
pass
class Donation(OrderItem):
"""A donation made by a user.
Donations can be made for a specific course or to the organization as a whole.
Users can choose the donation amount.
"""
# Types of donations
DONATION_TYPES = (
("general", "A general donation"),
("course", "A donation to a particular course")
)
# The type of donation
donation_type = models.CharField(max_length=32, default="general", choices=DONATION_TYPES)
# If a donation is made for a specific course, then store the course ID here.
# If the donation is made to the organization as a whole,
# set this field to CourseKeyField.Empty
course_id = CourseKeyField(max_length=255, db_index=True)
@classmethod
@transaction.commit_on_success
def add_to_order(cls, order, donation_amount, course_id=None, currency='usd'):
"""Add a donation to an order.
Args:
order (Order): The order to add this donation to.
donation_amount (Decimal): The amount the user is donating.
Keyword Args:
course_id (CourseKey): If provided, associate this donation with a particular course.
currency (str): The currency used for the donation.
Raises:
InvalidCartItem: The provided course ID is not valid.
Returns:
Donation
"""
# This will validate the currency but won't actually add the item to the order.
super(Donation, cls).add_to_order(order, currency=currency)
# Create a line item description, including the name of the course
# if this is a per-course donation.
# This will raise an exception if the course can't be found.
description = cls._line_item_description(course_id=course_id)
params = {
"order": order,
"user": order.user,
"status": order.status,
"qty": 1,
"unit_cost": donation_amount,
"currency": currency,
"line_desc": description
}
if course_id is not None:
params["course_id"] = course_id
params["donation_type"] = "course"
else:
params["donation_type"] = "general"
return cls.objects.create(**params)
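# Sketch of typical usage (the amount and course key are hypothetical):
#   cart = Order.get_cart_for_user(user)
#   Donation.add_to_order(cart, Decimal("25.00"))                        # general donation
#   Donation.add_to_order(cart, Decimal("25.00"), course_id=course_key)  # per-course donation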
def purchased_callback(self):
"""Donations do not need to be fulfilled, so this method does nothing."""
pass
def generate_receipt_instructions(self):
"""Provide information about tax-deductible donations in the receipt.
Returns:
tuple of (Donation, unicode)
"""
return self.pk_with_subclass, set([self._tax_deduction_msg()])
def additional_instruction_text(self, **kwargs): # pylint: disable=unused-argument
"""Provide information about tax-deductible donations in the confirmation email.
Returns:
unicode
"""
return self._tax_deduction_msg()
def _tax_deduction_msg(self):
"""Return the translated version of the tax deduction message.
Returns:
unicode
"""
return _(
u"We greatly appreciate this generous contribution and your support of the {platform_name} mission. "
u"This receipt was prepared to support charitable contributions for tax purposes. "
u"We confirm that neither goods nor services were provided in exchange for this gift."
).format(platform_name=settings.PLATFORM_NAME)
@classmethod
def _line_item_description(cls, course_id=None):
"""Create a line-item description for the donation.
Includes the course display name if provided.
Keyword Arguments:
course_id (CourseKey)
Raises:
CourseDoesNotExistException: The course ID is not valid.
Returns:
unicode
"""
# If a course ID is provided, include the display name of the course
# in the line item description.
if course_id is not None:
course = modulestore().get_course(course_id)
if course is None:
msg = u"Could not find a course with the ID '{course_id}'".format(course_id=course_id)
log.error(msg)
raise CourseDoesNotExistException(
_(u"Could not find a course with the ID '{course_id}'").format(course_id=course_id)
)
return _(u"Donation for {course}").format(course=course.display_name)
# The donation is for the organization as a whole, not a specific course
else:
return _(u"Donation for {platform_name}").format(platform_name=settings.PLATFORM_NAME)
@property
def single_item_receipt_context(self):
return {
'receipt_has_donation_item': True,
}
def analytics_data(self):
"""Simple function used to construct analytics data for the OrderItem.
If the donation is associated with a course, additional fields will be populated with
course information. When no name or category is specified by the implementation, the
platform name is used as a default value for required event fields, to declare that
the Order is specific to the platform, rather than a specific product name or category.
Returns
A dictionary containing analytics data for this OrderItem.
"""
data = super(Donation, self).analytics_data()
if self.course_id != CourseKeyField.Empty:
data['name'] = unicode(self.course_id)
data['category'] = unicode(self.course_id.org)
else:
data['name'] = settings.PLATFORM_NAME
data['category'] = settings.PLATFORM_NAME
return data
@property
def pdf_receipt_display_name(self):
"""
How to display this item on a PDF printed receipt file.
"""
return self._line_item_description(course_id=self.course_id)
|
adoosii/edx-platform
|
lms/djangoapps/shoppingcart/models.py
|
Python
|
agpl-3.0
| 89,116
|
[
"VisIt"
] |
ac8ad8f7f39b4fe96f3190fce12b2356c63d0d6748d82f9a27e7124c70f193bb
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RMzid(RPackage):
"""A parser for mzIdentML files implemented using the XML package. The
parser tries to be general and able to handle all types of mzIdentML
files with the drawback of having less 'pretty' output than a vendor
specific parser. Please contact the maintainer with any problems and
supply an mzIdentML file so the problems can be fixed quickly."""
homepage = "https://www.bioconductor.org/packages/mzID/"
url = "https://git.bioconductor.org/packages/mzID"
version('1.14.0', git='https://git.bioconductor.org/packages/mzID', commit='1c53aa6523ae61d3ebb13381381fc119d6cc6115')
depends_on('r-xml', type=('build', 'run'))
depends_on('r-plyr', type=('build', 'run'))
depends_on('r-doparallel', type=('build', 'run'))
depends_on('r-foreach', type=('build', 'run'))
depends_on('r-iterators', type=('build', 'run'))
depends_on('r-protgenerics', type=('build', 'run'))
depends_on('[email protected]:3.4.9', when='@1.14.0')
|
skosukhin/spack
|
var/spack/repos/builtin/packages/r-mzid/package.py
|
Python
|
lgpl-2.1
| 2,260
|
[
"Bioconductor"
] |
787bd08e8d1d346671cf7f4f242d045a2e70f5d810399094cfcfdccc54c4ca91
|
# Copyright (C) 2015, Carlo de Franchis <[email protected]>
# Copyright (C) 2015, Gabriele Facciolo <[email protected]>
# Copyright (C) 2015, Enric Meinhardt <[email protected]>
# Copyright (C) 2015, Julien Michel <[email protected]>
import os
import numpy as np
from s2plib import common
from s2plib.config import cfg
def rectify_secondary_tile_only(algo):
if algo in ['tvl1_2d']:
return True
else:
return False
def compute_disparity_map(im1, im2, disp, mask, algo, disp_min=None,
disp_max=None, extra_params=''):
"""
Runs a block-matching binary on a pair of stereo-rectified images.
Args:
im1, im2: rectified stereo pair
disp: path to the output disparity map
mask: path to the output rejection mask
algo: string used to indicate the desired binary. Currently it can be
one among 'hirschmuller02', 'hirschmuller08',
'hirschmuller08_laplacian', 'hirschmuller08_cauchy', 'sgbm',
'msmw', 'tvl1', 'mgm', 'mgm_multi' and 'micmac'
disp_min : smallest disparity to consider
disp_max : biggest disparity to consider
extra_params: optional string with algorithm-dependent parameters
"""
if rectify_secondary_tile_only(algo) is False:
disp_min = [disp_min]
disp_max = [disp_max]
# limit disparity bounds
assert len(disp_min) == len(disp_max)
for dim in range(len(disp_min)):
if disp_min[dim] is not None and disp_max[dim] is not None:
image_size = common.image_size_gdal(im1)
if disp_max[dim] - disp_min[dim] > image_size[dim]:
center = 0.5 * (disp_min[dim] + disp_max[dim])
disp_min[dim] = int(center - 0.5 * image_size[dim])
disp_max[dim] = int(center + 0.5 * image_size[dim])
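# Worked example (hypothetical numbers): with disp_min=-200, disp_max=300 and an
# image 400 pixels wide, the 500-pixel search range exceeds the image size, so the
# bounds are recentred on 50 and clamped to [-150, 250].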
# round disparity bounds
if disp_min[dim] is not None:
disp_min[dim] = int(np.floor(disp_min[dim]))
if disp_max[dim] is not None:
disp_max[dim] = int(np.ceil(disp_max[dim]))
if rectify_secondary_tile_only(algo) is False:
disp_min = disp_min[0]
disp_max = disp_max[0]
# define environment variables
env = os.environ.copy()
env['OMP_NUM_THREADS'] = str(cfg['omp_num_threads'])
# call the block_matching binary
if algo == 'hirschmuller02':
bm_binary = 'subpix.sh'
common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1, im2, disp, mask, disp_min,
disp_max, extra_params))
# extra_params: LoG(0) regionRadius(3)
# LoG: Laplacian of Gaussian preprocess 1:enabled 0:disabled
# regionRadius: radius of the window
if algo == 'hirschmuller08':
bm_binary = 'callSGBM.sh'
common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1, im2, disp, mask, disp_min,
disp_max, extra_params))
# extra_params: regionRadius(3) P1(default) P2(default) LRdiff(1)
# regionRadius: radius of the window
# P1, P2 : regularization parameters
# LRdiff: maximum difference between left and right disparity maps
if algo == 'hirschmuller08_laplacian':
bm_binary = 'callSGBM_lap.sh'
common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1, im2, disp, mask, disp_min,
disp_max, extra_params))
if algo == 'hirschmuller08_cauchy':
bm_binary = 'callSGBM_cauchy.sh'
common.run('{0} {1} {2} {3} {4} {5} {6} {7}'.format(bm_binary, im1, im2, disp, mask, disp_min,
disp_max, extra_params))
if algo == 'sgbm':
# opencv sgbm function implements a modified version of Hirschmuller's
# Semi-Global Matching (SGM) algorithm described in "Stereo Processing
# by Semiglobal Matching and Mutual Information", PAMI, 2008
p1 = 8 # penalizes disparity changes of 1 between neighbor pixels
p2 = 32 # penalizes disparity changes of more than 1
# it is required that p2 > p1. The larger p1, p2, the smoother the disparity
win = 3 # matched block size. It must be a positive odd number
lr = 1 # maximum difference allowed in the left-right disparity check
cost = common.tmpfile('.tif')
common.run('sgbm {} {} {} {} {} {} {} {} {} {}'.format(im1, im2,
disp, cost,
disp_min,
disp_max,
win, p1, p2, lr))
# create rejection mask (0 means rejected, 1 means accepted)
# keep only the points that are matched and present in both input images
common.run('plambda {0} "x 0 join" | backflow - {2} | plambda {0} {1} - "x isfinite y isfinite z isfinite and and" -o {3}'.format(disp, im1, im2, mask))
if algo == 'tvl1':
tvl1 = 'callTVL1.sh'
common.run('{0} {1} {2} {3} {4}'.format(tvl1, im1, im2, disp, mask),
env)
if algo == 'tvl1_2d':
tvl1 = 'callTVL1.sh'
common.run('{0} {1} {2} {3} {4} {5}'.format(tvl1, im1, im2, disp, mask,
1), env)
if algo == 'msmw':
bm_binary = 'iip_stereo_correlation_multi_win2'
common.run('{0} -i 1 -n 4 -p 4 -W 5 -x 9 -y 9 -r 1 -d 1 -t -1 -s 0 -b 0 -o 0.25 -f 0 -P 32 -m {1} -M {2} {3} {4} {5} {6}'.format(bm_binary, disp_min, disp_max, im1, im2, disp, mask))
if algo == 'msmw2':
bm_binary = 'iip_stereo_correlation_multi_win2_newversion'
common.run('{0} -i 1 -n 4 -p 4 -W 5 -x 9 -y 9 -r 1 -d 1 -t -1 -s 0 -b 0 -o -0.25 -f 0 -P 32 -D 0 -O 25 -c 0 -m {1} -M {2} {3} {4} {5} {6}'.format(
bm_binary, disp_min, disp_max, im1, im2, disp, mask), env)
if algo == 'msmw3':
bm_binary = 'msmw'
common.run('{0} -m {1} -M {2} -il {3} -ir {4} -dl {5} -kl {6}'.format(
bm_binary, disp_min, disp_max, im1, im2, disp, mask))
if algo == 'mgm':
env['MEDIAN'] = '1'
env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
env['TSGM'] = '3'
conf = '{}_confidence.tif'.format(os.path.splitext(disp)[0])
common.run('{0} -r {1} -R {2} -s vfit -t census -O 8 {3} {4} {5} -confidence_consensusL {6}'.format('mgm',
disp_min,
disp_max,
im1, im2,
disp, conf),
env)
# produce the mask: rejected pixels are marked with nan or inf in the disp map
common.run('plambda {0} "isfinite" -o {1}'.format(disp, mask))
if algo == 'mgm_multi_lsd':
ref = im1
sec = im2
wref = common.tmpfile('.tif')
wsec = common.tmpfile('.tif')
# TODO TUNE LSD PARAMETERS TO HANDLE DIRECTLY 12 bits images?
# image dependent weights based on lsd segments
image_size = common.image_size_gdal(ref)
common.run('qauto %s | \
lsd - - | \
cut -d\' \' -f1,2,3,4 | \
pview segments %d %d | \
plambda - "255 x - 255 / 2 pow 0.1 fmax" -o %s'%(ref,image_size[0], image_size[1],wref))
# image dependent weights based on lsd segments
image_size = common.image_size_gdal(sec)
common.run('qauto %s | \
lsd - - | \
cut -d\' \' -f1,2,3,4 | \
pview segments %d %d | \
plambda - "255 x - 255 / 2 pow 0.1 fmax" -o %s'%(sec,image_size[0], image_size[1],wsec))
env['REMOVESMALLCC'] = str(cfg['stereo_speckle_filter'])
env['SUBPIX'] = '2'
env['MEDIAN'] = '1'
env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
# it is required that p2 > p1. The larger p1, p2, the smoother the disparity
regularity_multiplier = cfg['stereo_regularity_multiplier']
# increasing these numbers compensates the loss of regularity after incorporating LSD weights
P1 = 12*regularity_multiplier # penalizes disparity changes of 1 between neighbor pixels
P2 = 48*regularity_multiplier # penalizes disparity changes of more than 1
conf = disp+'.confidence.tif'
common.run('{0} -r {1} -R {2} -S 6 -s vfit -t census -O 8 -P1 {7} -P2 {8} -wl {3} -wr {4} -confidence_consensusL {10} {5} {6} {9}'.format('mgm_multi',
disp_min,
disp_max,
wref,wsec,
im1, im2,
P1, P2,
disp, conf),
env)
# produce the mask: rejected pixels are marked with nan or inf in the disp map
common.run('plambda {0} "isfinite" -o {1}'.format(disp, mask))
if algo == 'mgm_multi':
env['REMOVESMALLCC'] = str(cfg['stereo_speckle_filter'])
env['MINDIFF'] = '1'
env['CENSUS_NCC_WIN'] = str(cfg['census_ncc_win'])
env['SUBPIX'] = '2'
# it is required that p2 > p1. The larger p1, p2, the smoother the disparity
regularity_multiplier = cfg['stereo_regularity_multiplier']
P1 = 8*regularity_multiplier # penalizes disparity changes of 1 between neighbor pixels
P2 = 32*regularity_multiplier # penalizes disparity changes of more than 1
conf = '{}_confidence.tif'.format(os.path.splitext(disp)[0])
common.run('{0} -r {1} -R {2} -S 6 -s vfit -t census {3} {4} {5} -confidence_consensusL {6}'.format('mgm_multi',
disp_min,
disp_max,
im1, im2,
disp, conf),
env)
# produce the mask: rejected pixels are marked with nan or inf in the disp map
common.run('plambda {0} "isfinite" -o {1}'.format(disp, mask))
if (algo == 'micmac'):
# add micmac binaries to the PATH environment variable
s2p_dir = os.path.dirname(os.path.dirname(os.path.realpath(os.path.abspath(__file__))))
micmac_bin = os.path.join(s2p_dir, 'bin', 'micmac', 'bin')
os.environ['PATH'] = os.environ['PATH'] + os.pathsep + micmac_bin
# prepare micmac xml params file
micmac_params = os.path.join(s2p_dir, '3rdparty', 'micmac_params.xml')
work_dir = os.path.dirname(os.path.abspath(im1))
common.run('cp {0} {1}'.format(micmac_params, work_dir))
# run MICMAC
common.run('MICMAC {0:s}'.format(os.path.join(work_dir, 'micmac_params.xml')))
# copy output disp map
micmac_disp = os.path.join(work_dir, 'MEC-EPI',
'Px1_Num6_DeZoom1_LeChantier.tif')
disp = os.path.join(work_dir, 'rectified_disp.tif')
common.run('cp {0} {1}'.format(micmac_disp, disp))
# compute mask by rejecting the 10% of pixels with lowest correlation score
micmac_cost = os.path.join(work_dir, 'MEC-EPI',
'Correl_LeChantier_Num_5.tif')
mask = os.path.join(work_dir, 'rectified_mask.png')
common.run('plambda {0} "x x%q10 < 0 255 if" -o {1}'.format(micmac_cost, mask))
|
dyoussef/s2p
|
s2plib/block_matching.py
|
Python
|
agpl-3.0
| 12,358
|
[
"Gaussian"
] |
1681b330d49e8d3df77d2397225927f9029564fc5c293b64df3852a0f87c45ec
|
__author__ = 'bptripp'
from os import listdir
from os.path import isfile, join
import time
import numpy as np
import matplotlib.pyplot as plt
import cPickle
from PIL import Image
from scipy.optimize import bisect
from quaternion import angle_between_quaterions, to_quaternion
def get_random_points(n, radius, surface=False):
point_directions = np.random.randn(3, n)
norms = np.sum(point_directions**2, axis=0)**.5
points = radius * point_directions / norms
if not surface:
# points = points * np.random.rand(n)**(1./3.)
palm = .035 #TODO: whoops, this is in metres, not fraction of radius
points = points * (palm + (1-palm)*np.random.rand(n))
return points
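# Minimal usage sketch (parameters are illustrative; the radius is presumably in metres,
# matching the calls elsewhere in this file):
#   points = get_random_points(100, .25)                    # camera positions inside a 25 cm sphere
#   eye_points = get_random_points(20, .35, surface=True)   # viewpoints on the sphere surface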
def get_random_angles(n, std=np.pi/8.):
"""
:param n: Number of angles needed
:return: Random angles in restricted ranges, meant as deviations in perspective around
looking straight at something.
"""
angles = std*np.random.randn(3, n)
angles[2,:] = 2*np.pi*np.random.rand(1, n)
return angles
def get_rotation_matrix(point, angle):
"""
:param point: Location of camera
:param angle: Not what you expect: this is a list of angles relative to looking
at (0,0,0), about world-z (azimuth), camera-y (elevation), and camera-z (roll).
Random samples are produced by get_random_angles().
:return: just what you expect
"""
z = -point #location of (0,0,0) relative to point
alpha = np.arctan(z[1]/z[0])
if z[0] < 0: alpha = alpha + np.pi
if alpha < 0: alpha = alpha + 2.*np.pi
alpha = alpha + angle[0]
# rotate by alpha about z
Rz = np.array([[np.cos(alpha), -np.sin(alpha), 0], [np.sin(alpha), np.cos(alpha), 0], [0, 0, 1]])
# find elevation in new coordinates
beta = -np.arctan(np.sqrt(z[0]**2+z[1]**2)/z[2])
if z[2] < 0: beta = beta + np.pi
if beta < 0: beta = beta + 2.*np.pi
beta = beta + angle[1]
# rotate by beta about y
Ry = np.array([[np.cos(beta), 0, -np.sin(beta)], [0, 1, 0], [np.sin(beta), 0, np.cos(beta)]])
gamma = angle[2]
Rz2 = np.array([[np.cos(-gamma), -np.sin(-gamma), 0], [np.sin(-gamma), np.cos(-gamma), 0], [0, 0, 1]])
return np.dot(Rz, np.dot(Ry, Rz2))
def check_rotation_matrix(scatter=False):
from mpl_toolkits.mplot3d import axes3d, Axes3D
n = 6
points = get_random_points(n, 2)
angles = get_random_angles(n)
# point = np.array([1,1e-6,1e-6])
# point = np.array([1e-6,1,1e-6])
fig = plt.figure()
ax = fig.add_subplot(1,1,1,projection='3d')
for i in range(points.shape[1]):
point = points[:,i]
angle = angles[:,i]
if not scatter:
angle[0] = 0
angle[1] = 0
R = get_rotation_matrix(point, angle)
ax.scatter(0, 0, 0, color='b')
ax.scatter(point[0], point[1], point[2], color='r')
x = np.dot(R, np.array([1,0,0]))
y = np.dot(R, np.array([0,1,0]))
z = np.dot(R, np.array([0,0,1]))
ax.plot([point[0],point[0]+x[0]], [point[1],point[1]+x[1]], [point[2],point[2]+x[2]], color='r')
ax.plot([point[0],point[0]+y[0]], [point[1],point[1]+y[1]], [point[2],point[2]+y[2]], color='g')
ax.plot([point[0],point[0]+z[0]], [point[1],point[1]+z[1]], [point[2],point[2]+z[2]], color='b')
plt.xlabel('x')
plt.ylabel('y')
ax.set_zlabel('z')
if not scatter:
plt.title('blue axes should point AT blue dot (zero)')
else:
plt.title('blue axes should point NEAR blue dot (zero)')
plt.show()
def check_depth_from_random_perspective():
from depthmap import loadOBJ, Display
filename = '../data/obj_files/24_bowl-02-Mar-2016-07-03-29.obj'
verts, faces = loadOBJ(filename)
# put vertical centre at zero
verts = np.array(verts)
minz = np.min(verts, axis=0)[2]
maxz = np.max(verts, axis=0)[2]
verts[:,2] = verts[:,2] - (minz+maxz)/2
n = 6
points = get_random_points(n, .25)
angles = get_random_angles(n)
point = points[:,0]
angle = angles[:,0]
rot = get_rotation_matrix(point, angle)
im_width = 80
d = Display(imsize=(im_width,im_width))
d.set_camera_position(point, rot, .5)
d.set_mesh(verts, faces)
depth = d.read_depth()
d.close()
X = np.arange(0, im_width)
Y = np.arange(0, im_width)
X, Y = np.meshgrid(X, Y)
from mpl_toolkits.mplot3d import axes3d, Axes3D
fig = plt.figure()
ax = fig.add_subplot(1,1,1,projection='3d')
ax.plot_wireframe(X, Y, depth)
ax.set_xlabel('x')
plt.show()
def find_vertical(point):
"""
Find new angle[2] so that camera-up points up. In terms of rotation matrix,
R[2,0] should be 0 (x-axis horizontal) and R[2,1] should be positive (pointing
up rather than down).
"""
def f(gamma):
return get_rotation_matrix(point, np.array([0, 0, gamma]))[2][0]
gamma = bisect(f, 0, np.pi)
# if get_rotation_matrix(point, np.array([0, 0, gamma]))[2][1] < 0:
if get_rotation_matrix(point, np.array([0, 0, gamma]))[2][1] > 0:
gamma = gamma + np.pi
return gamma
def check_find_vertical():
n = 3
points = get_random_points(n, .35, surface=True)
for i in range(n):
point = points[:,i]
gamma = find_vertical(point)
rot = get_rotation_matrix(point, np.array([0, 0, gamma]))
print(rot)
# if np.abs(rot[2,0] > 1e-6) or rot[2,1] < 0:
if np.abs(rot[2,0] > 1e-6) or rot[2,1] > 0:
print('error with gamma: ' + str(gamma) + ' should be 0: ' + str(rot[2,0]) + ' should be +ve: ' + str(rot[2,1]))
def plot_random_samples():
n = 1000
points = get_random_points(n, .25)
angles = get_random_angles(n)
from mpl_toolkits.mplot3d import axes3d, Axes3D
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(1,2,1,projection='3d')
ax.scatter(points[0,:], points[1,:], points[2,:])
ax = fig.add_subplot(1,2,2,projection='3d')
ax.scatter(angles[0,:], angles[1,:], angles[2,:])
plt.show()
def get_perspectives(obj_filename, points, angles, im_width=80, near_clip=.25, far_clip=0.8, fov=45, camera_offset=.45, target_point=None):
from depthmap import loadOBJ, Display, get_distance
verts, faces = loadOBJ(obj_filename)
# put vertical centre at zero
verts = np.array(verts)
min_bounding_box = np.min(verts, axis=0)
max_bounding_box = np.max(verts, axis=0)
# set bounding box centre to 0,0,0
verts[:,0] = verts[:,0] - (min_bounding_box[0]+max_bounding_box[0])/2.
verts[:,1] = verts[:,1] - (min_bounding_box[1]+max_bounding_box[1])/2.
verts[:,2] = verts[:,2] - (min_bounding_box[2]+max_bounding_box[2])/2.
if target_point is not None:
verts[:,0] = verts[:,0] - target_point[0]
verts[:,1] = verts[:,1] - target_point[1]
verts[:,2] = verts[:,2] - target_point[2]
d = Display(imsize=(im_width,im_width))
d.set_perspective(fov=fov, near_clip=near_clip, far_clip=far_clip)
perspectives = np.zeros((points.shape[1],im_width,im_width), dtype='float32')
for i in range(points.shape[1]):
point = points[:,i]
angle = angles[:,i]
rot = get_rotation_matrix(point, angle)
d.set_camera_position(point, rot, camera_offset)
d.set_mesh(verts, faces)
depth = d.read_depth()
distance = get_distance(depth, near_clip, far_clip)
perspectives[i,:,:] = distance
d.close()
return perspectives
def process_directory(obj_dir, data_dir, n):
from os import listdir
from os.path import isfile, join
import time
for f in listdir(obj_dir):
obj_filename = join(obj_dir, f)
if isfile(obj_filename) and f.endswith('.obj'):
data_filename = join(data_dir, f[:-4] + '.pkl')
if isfile(data_filename):
print('Skipping ' + f)
else:
print('Processing ' + f)
start_time = time.time()
points = get_random_points(n, .15)
angles = get_random_angles(n, std=0)
print(angles)
perspectives = get_perspectives(obj_filename, points, angles)
f = open(data_filename, 'wb')
cPickle.dump((points, angles, perspectives), f)
f.close()
print(' ' + str(time.time()-start_time) + 's')
def process_eye_directory(obj_dir, data_dir, n):
#TODO: save image files here to allow random ordering during training
from os import listdir
from os.path import isfile, join
import time
for f in listdir(obj_dir):
obj_filename = join(obj_dir, f)
if isfile(obj_filename) and f.endswith('.obj'):
data_filename = join(data_dir, f[:-4] + '.pkl')
if isfile(data_filename):
print('Skipping ' + f)
else:
print('Processing ' + f)
start_time = time.time()
points = get_random_points(n, .35, surface=True) #.75m with offset
angles = np.zeros_like(points)
# Set camera-up to vertical via third angle (angle needed is always
# 3pi/4, but we'll find it numerically in case other parts of code
# change while we're not looking).
for i in range(n):
angles[2,i] = find_vertical(points[:,i])
perspectives = get_perspectives(obj_filename, points, angles, near_clip=.4, fov=30)
f = open(data_filename, 'wb')
cPickle.dump((points, angles, perspectives), f)
f.close()
print(' ' + str(time.time()-start_time) + 's')
def check_maps(data_dir):
"""
Checks pkl files in given directory to see if any of the depth maps they contain
are empty.
"""
from os import listdir
from os.path import isfile, join
for f in listdir(data_dir):
data_filename = join(data_dir, f)
if isfile(data_filename) and f.endswith('.pkl'):
print('Checking ' + f)
f = open(data_filename, 'rb')
(points, angles, perspectives) = cPickle.load(f)
f.close()
for i in range(perspectives.shape[0]):
sd = np.std(perspectives[i,:,:].flatten())
if sd < 1e-3:
print(' map ' + str(i) + ' is empty')
def calculate_metrics(perspectives, im_width=80, fov=45.0, camera_offset=.45):
"""
:param perspectives: numpy array of depth images of object from gripper perspective
"""
asymmetry_scale = 13.0 #TODO: calculate from camera params (13 pixels is ~5cm with default params)
from heuristic import finger_path_template, calculate_grip_metrics
finger_path = finger_path_template(fov*np.pi/180., im_width, camera_offset)
collision_template = np.zeros_like(finger_path)
collision_template[finger_path > 0] = camera_offset + 0.033
# print(np.max(collision_template))
# print(np.max(finger_path))
# plt.imshow(collision_template)
# plt.show()
metrics = []
collisions = []
for perspective in perspectives:
intersections, qualities = calculate_grip_metrics(perspective, finger_path)
q1 = qualities[0]
q2 = qualities[1]
q3 = qualities[2]
if intersections[0] is None or intersections[2] is None:
a1 = 1
else:
a1 = ((intersections[0]-intersections[2])/asymmetry_scale)**2
if intersections[1] is None or intersections[2] is None:
a2 = 1
else:
a2 = ((intersections[1]-intersections[2])/asymmetry_scale)**2
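# The grip metric computed below rewards depth-map quality under both fingers
# (q1, q2) and the palm (q3), and penalizes left/right asymmetry through a1 and a2;
# the 1e-6 term only guards against division by zero when both finger qualities are zero.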
m = np.minimum((q1+q2)/1.5, q3) / (1 + (q1*a1+q2*a2) / (q1+q2+1e-6))
collision = np.max(collision_template - perspective) > 0
collisions.append(collision)
# if collision:
# m = 0
metrics.append(m)
# plt.subplot(1,2,1)
# plt.imshow(perspective)
# plt.subplot(1,2,2)
# plt.imshow(np.maximum(0, finger_path-perspective))
# print(collision)
# print((a1,a2))
# print(intersections)
# print(qualities)
# print('metric: ' + str(m))
# plt.show()
# print((intersections, qualities))
return metrics, collisions
def get_quaternion_distance(points, angles):
"""
Get new representation of camera/gripper configurations as rotation quaternions and
distances from origin, rather than 3D points and rotations about axis pointing to origin.
"""
# print(points)
# print(angles)
quaternions = []
distances = []
for point, angle in zip(points.T, angles.T):
distances.append(np.linalg.norm(point))
quaternions.append(to_quaternion(get_rotation_matrix(point, angle)))
return np.array(quaternions), np.array(distances)
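# Usage sketch (inputs are whatever get_random_points/get_random_angles produced):
#   quaternions, distances = get_quaternion_distance(points, angles)
# Each row of quaternions is a rotation quaternion and distances holds the
# corresponding camera distances from the origin.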
def smooth_metrics(quaternions, distances, metrics):
from interpolate import interpolate
smoothed = []
for i in range(len(metrics)):
# print(i)
interpolated = interpolate(quaternions[i], distances[i], quaternions, distances, metrics,
sigma_d=.02, sigma_a=(16*np.pi/180))
smoothed.append(interpolated)
# print(interpolated - metrics[one])
return smoothed
def load_target_points(filename):
objects = []
indices = []
points = []
for line in open(filename, "r"):
vals = line.translate(None, '"\n').split(',')
assert len(vals) == 5
objects.append(vals[0])
indices.append(int(vals[1]))
points.append([float(vals[2]), float(vals[3]), float(vals[4])])
return objects, indices, points
def get_target_points_for_object(objects, indices, points, object):
indices_for_object = []
points_for_object = []
for o, i, p in zip(objects, indices, points):
if o == object:
indices_for_object.append(i)
points_for_object.append(p)
return np.array(indices_for_object), np.array(points_for_object)
def check_target_points():
objects, indices, points = load_target_points('../../grasp-conv/data/obj-points.csv')
print(objects)
print(indices)
print(points)
indices, points = get_target_points_for_object(objects, indices, points, '28_Spatula_final-11-Nov-2015-14-22-01.obj')
print(indices)
print(points)
def check_metrics():
# points, angles, metrics, collisions = calculate_metrics('../../grasp-conv/data/perspectives/28_Spatula_final-11-Nov-2015-14-22-01.pkl')
# with open('spatula-perspectives.pkl', 'wb') as f:
# cPickle.dump((points, angles, metrics, collisions), f)
with open('spatula-perspectives.pkl', 'rb') as f:
(points, angles, metrics, collisions) = cPickle.load(f)
metrics = np.array(metrics)
smoothed = smooth_metrics(points, angles, metrics)
with open('spatula-perspectives-smoothed.pkl', 'wb') as f:
cPickle.dump((points, angles, metrics, collisions, smoothed), f)
plt.hist(metrics, bins=50)
plt.show()
def make_grip_perspective_depths(obj_dir, data_dir, target_points_file, n=1000):
objects, indices, points = load_target_points(target_points_file)
for f in listdir(obj_dir):
obj_filename = join(obj_dir, f)
if isfile(obj_filename) and f.endswith('.obj'):
data_filename = join(data_dir, f[:-4] + '.pkl')
if isfile(data_filename):
print('Skipping ' + f)
else:
print('Processing ' + f)
target_indices, target_points = get_target_points_for_object(objects, indices, points, f)
start_time = time.time()
#TODO: is there any reason to make points & angles the same or different across targets?
gripper_points = get_random_points(n, .15)
gripper_angles = get_random_angles(n, std=0)
perspectives = []
for target_point in target_points:
print(' ' + str(target_point))
p = get_perspectives(obj_filename, gripper_points, gripper_angles, target_point=target_point)
perspectives.append(p)
f = open(data_filename, 'wb')
cPickle.dump((gripper_points, gripper_angles, target_indices, target_points, perspectives), f)
f.close()
print(' ' + str(time.time()-start_time) + 's')
def make_metrics(perspective_dir, metric_dir):
"""
We'll store in separate pkl files per object to allow incremental processing, even though results
won't take much memory.
"""
for f in listdir(perspective_dir):
perspective_filename = join(perspective_dir, f)
if isfile(perspective_filename) and f.endswith('.pkl'):
metric_filename = join(metric_dir, f[:-4] + '-metrics.pkl')
if isfile(metric_filename):
print('Skipping ' + f)
else:
print('Processing ' + f)
start_time = time.time()
with open(perspective_filename) as perspective_file:
gripper_points, gripper_angles, target_indices, target_points, perspectives = cPickle.load(perspective_file)
print('unpickle: ' + str(time.time() - start_time))
quaternions, distances = get_quaternion_distance(gripper_points, gripper_angles)
collisions = []
free_smoothed = []
coll_smoothed = []
for p in perspectives: # one per target point
fm, c = calculate_metrics(p)
fm = np.array(fm)
c = np.array(c)
fs = smooth_metrics(quaternions, distances, fm)
cm = fm * (1-c)
cs = smooth_metrics(quaternions, distances, cm)
collisions.append(c)
free_smoothed.append(fs)
coll_smoothed.append(cs)
f = open(metric_filename, 'wb')
cPickle.dump((gripper_points, gripper_angles, target_indices, target_points,
collisions, free_smoothed, coll_smoothed), f)
f.close()
def make_eye_perspective_depths(obj_dir, data_dir, target_points_file, n=20):
all_objects, all_target_indices, all_target_points = load_target_points(target_points_file)
camera_offset=.45
near_clip=.6
far_clip=1.0
eye_points = []
eye_angles = []
objects = []
target_indices = []
target_points = []
for f in listdir(obj_dir):
obj_filename = join(obj_dir, f)
if isfile(obj_filename) and f.endswith('.obj'): #and int(f[0]) >= 0: #TODO: set f[0] range here
print('Processing ' + f)
ti, tp= get_target_points_for_object(all_objects, all_target_indices, all_target_points, f)
objects.append(f)
target_indices.append(ti)
target_points.append(tp)
start_time = time.time()
points = get_random_points(n, .35, surface=True) #.8m with offset
# points = np.array([[ 0.00001], [0.35], [0.00001]]) # TODO: bug with x=0 and with z=0
print(points)
angles = np.zeros_like(points)
eye_points.append(points)
eye_angles.append(angles)
# Set camera-up to vertical via third angle (angle needed is always
# 3pi/4, but we'll find it numerically in case other parts of code
# change while we're not looking).
for i in range(n):
angles[2,i] = find_vertical(points[:,i])
perspectives = []
for target_index, target_point in zip(ti, tp):
print(' ' + str(target_point))
perspectives = get_perspectives(obj_filename, points, angles,
near_clip=near_clip, far_clip=far_clip, camera_offset=camera_offset,
fov=30, target_point=target_point)
for i in range(len(perspectives)):
distance = perspectives[i]
rescaled_distance = np.maximum(0, (distance-camera_offset)/(far_clip-camera_offset))
imfile = data_dir + f[:-4] + '-' + str(target_index) + '-' + str(i) + '.png'
Image.fromarray((255.0*rescaled_distance).astype('uint8')).save(imfile)
print(' ' + str(time.time()-start_time) + 's')
data_filename = join(data_dir, 'eye-perspectives-murata.pkl')
f = open(data_filename, 'wb')
cPickle.dump((objects, target_indices, target_points, eye_points, eye_angles), f)
f.close()
def merge_eye_perspectives(data_dir):
# make_eye_perspective_depths mysteriously does not run all the way through, so I've
# done it in two parts which are merged here:
files = ['eye-perspectives1.pkl', 'eye-perspectives2.pkl']
objects = []
target_indices = []
target_points = []
eye_points = []
eye_angles = []
for file in files:
with open(join(data_dir, file), 'rb') as f:
o, ti, tp, ep, ea = cPickle.load(f)
objects.extend(o)
target_indices.extend(ti)
target_points.extend(tp)
eye_points.extend(ep)
eye_angles.extend(ea)
with open(join(data_dir, 'eye-perspectives.pkl'),'wb') as f:
cPickle.dump((objects, target_indices, target_points, eye_points, eye_angles), f)
def export_neuron_perspectives():
import csv
with open('../data/neuron-points.pkl', 'rb') as f:
neuron_points, neuron_angles = cPickle.load(f)
with open('neuron-perspectives.csv', 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
for point, angle in zip(neuron_points.T, neuron_angles.T):
R = get_rotation_matrix(point, angle)
row = list(point)
row.extend(R.flatten())
writer.writerow(row)
def export_eye_perspectives(eye_perspectives_file):
import csv
with open(eye_perspectives_file) as f:
objects, target_indices, target_points, eye_points, eye_angles = cPickle.load(f)
with open('eye-perspectives.csv', 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
for object, ep, ea, tp in zip(objects, eye_points, eye_angles, target_points):
print('Processing ' + object)
for target_point in tp:
for eye_point, eye_angle in zip(ep.T, ea.T):
eye_R = get_rotation_matrix(eye_point, eye_angle)
# row = [object]
row = []
row.extend(target_point)
row.extend(eye_point)
row.extend(eye_R.flatten())
writer.writerow(row)
def make_relative_metrics(eye_perspectives_file, metrics_dir, result_dir, n=500, neuron_points=None, neuron_angles=None):
from quaternion import difference_between_quaternions
from interpolate import interpolate
# each of these points/angles will correspond to an output neuron ...
if neuron_points is None or neuron_angles is None:
neuron_points = get_random_points(n, .15)
neuron_angles = get_random_angles(n, std=0)
with open(join(result_dir, 'neuron-points.pkl'), 'wb') as f:
cPickle.dump((neuron_points, neuron_angles), f)
neuron_quaternions, neuron_distances = get_quaternion_distance(neuron_points, neuron_angles)
with open(eye_perspectives_file) as f:
objects, target_indices, target_points, eye_points, eye_angles = cPickle.load(f)
for object, object_eye_points, object_eye_angles in zip(objects, eye_points, eye_angles):
print('Processing ' + object)
start_time = time.time()
eye_quaternions, eye_distances = get_quaternion_distance(object_eye_points, object_eye_angles)
metrics_file = join(metrics_dir, object[:-4] + '-metrics.pkl')
with open(metrics_file) as f:
gripper_points, gripper_angles, target_indices, target_points, collisions, free_smoothed, coll_smoothed = cPickle.load(f)
gripper_quaternions, gripper_distances = get_quaternion_distance(gripper_points, gripper_angles)
# note that for each object, gripper configs are the same relative to each target point
#TODO: do we want coll_smoothed instead / as well?
metrics = free_smoothed
# interpolate relative to each eye point
neuron_metrics_for_object = []
for target_index, target_metrics in zip(target_indices, metrics):
print(' target ' + str(target_index))
neuron_metrics_for_target = []
for eye_quaternion in eye_quaternions:
rel_quaternions = []
for gripper_quaternion in gripper_quaternions:
rel_quaternions.append(difference_between_quaternions(eye_quaternion, gripper_quaternion))
rel_quaternions = np.array(rel_quaternions)
#interpolate ...
neuron_metrics = []
for neuron_quaternion, neuron_distance in zip(neuron_quaternions, neuron_distances):
interpolated = interpolate(neuron_quaternion, neuron_distance, rel_quaternions, gripper_distances, target_metrics,
sigma_d=.02, sigma_a=(16*np.pi/180))
neuron_metrics.append(interpolated)
neuron_metrics_for_target.append(neuron_metrics)
neuron_metrics_for_object.append(neuron_metrics_for_target)
neuron_metrics_for_object = np.array(neuron_metrics_for_object)
result_file = join(result_dir, object[:-4] + '-neuron.pkl')
with open(result_file, 'wb') as f:
cPickle.dump((target_indices, target_points, object_eye_points, object_eye_angles, neuron_metrics_for_object), f)
print(' ' + str(time.time() - start_time) + 's')
def make_XY():
eye_image_files = []
metrics = []
return eye_image_files, metrics
if __name__ == '__main__':
# check_rotation_matrix(scatter=True)
# check_depth_from_random_perspective()
# plot_random_samples()
# check_find_vertical()
# check_target_points()
# check_metrics()
# make_grip_perspective_depths('../../grasp-conv/data/obj_tmp/',
# '../../grasp-conv/data/perspectives/',
# '../../grasp-conv/data/obj-points.csv')
# make_grip_perspective_depths('../../grasp-conv/data/obj_files/',
# '/Volumes/TrainingData/grasp-conv/data/perspectives/',
# '../../grasp-conv/data/obj-points.csv')
# make_eye_perspective_depths('../../grasp-conv/data/obj_tmp/',
# '../../grasp-conv/data/eye-tmp/',
# '../../grasp-conv/data/obj-points.csv')
make_eye_perspective_depths('../../grasp-conv/data/obj_files_murata/',
'../../grasp-conv/data/eye-perspectives-murata/',
'../../grasp-conv/data/obj-points-murata.csv',
n=1)
# make_eye_perspective_depths('../../grasp-conv/data/obj_files/',
# '/Volumes/TrainingData/grasp-conv/data/eye-perspectives/',
# '../../grasp-conv/data/obj-points.csv')
# merge_eye_perspectives('/Volumes/TrainingData/grasp-conv/data/eye-perspectives/')
# with open('/Volumes/TrainingData/grasp-conv/data/eye-perspectives/eye-perspectives.pkl','rb') as f:
# objects, target_indices, target_points, eye_points, eye_angles = cPickle.load(f)
# print(objects)
# print(target_indices)
# print(target_points)
# print(eye_angles)
# make_relative_metrics('/Volumes/TrainingData/grasp-conv/data/eye-perspectives/eye-perspectives.pkl',
# '/Volumes/TrainingData/grasp-conv/data/metrics/',
# '/Volumes/TrainingData/grasp-conv/data/relative/')
# export_neuron_perspectives()
# export_eye_perspectives('/Volumes/TrainingData/grasp-conv/data/eye-perspectives/eye-perspectives.pkl')
# import scipy
# image = scipy.misc.imread('../../grasp-conv/data/eye-tmp/1_Coffeecup_final-03-Mar-2016-18-50-40-0-7.png')
# plt.imshow(image)
# plt.show()
# with open('../../grasp-conv/data/eye-tmp/eye-perspectives.pkl') as f:
# objects, target_indices, target_points, eye_points, eye_angles = cPickle.load(f)
# print(objects)
# print(target_indices)
# print(target_points)
# print(np.array(eye_points))
# print(np.array(eye_angles))
# with open('spatula-perspectives.pkl', 'rb') as f:
# gripper_points, gripper_angles, target_indices, target_points, perspectives = cPickle.load(f)
# make_metrics('../../grasp-conv/data/perspectives/', '../../grasp-conv/data/metrics/')
# make_relative_metrics('../../grasp-conv/data/eye-tmp/eye-perspectives.pkl',
# '../../grasp-conv/data/metrics/',
# '../../grasp-conv/data/relative/')
# checking files look OK ...
# with open('../../grasp-conv/data/relative/neuron-points.pkl', 'rb') as f:
# neuron_points, neuron_angles = cPickle.load(f)
# print(neuron_angles.shape)
# with open('../../grasp-conv/data/relative/1_Coffeecup_final-03-Mar-2016-18-50-40-neuron.pkl', 'rb') as f:
# target_indices, target_points, object_eye_points, object_eye_angles, neuron_metrics_for_object = cPickle.load(f)
# print(neuron_metrics_for_object.shape)
# print(np.min(neuron_metrics_for_object))
# print(np.max(neuron_metrics_for_object))
# print(np.std(neuron_metrics_for_object))
# make_metrics('/Volumes/TrainingData/grasp-conv/data/perspectives/',
# '/Volumes/TrainingData/grasp-conv/data/metrics/')
# process_eye_directory('../../grasp-conv/data/obj_tmp/', '../../grasp-conv/data/eye-perspectives-tmp/', 100)
# process_directory('../data/obj_files/', '../data/perspectives/', 10)
# process_directory('../../grasp-conv/data/obj_tmp/', '../../grasp-conv/data/perspectives/', 5000)
# check_maps('../../grasp-conv/data/perspectives/')
|
bptripp/grasp-convnet
|
py/perspective.py
|
Python
|
mit
| 30,382
|
[
"NEURON"
] |
6ec0c97e0ac0dd0c323f847c8d0601fdb13550231748c4a1d878544bc3b99b63
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from findyour3d.contact.views import contact
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
# url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
url(r'^about/$', contact, name='about'),
url(r'^business/$', TemplateView.as_view(template_name='pages/business.html'), name='business'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
# User management
url(r'^users/', include('findyour3d.users.urls', namespace='users')),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
url(r'^customers/', include('findyour3d.customer.urls', namespace='customers')),
url(r'^company/', include('findyour3d.company.urls', namespace='company')),
url(r'^contact/', include('findyour3d.contact.urls', namespace='contact')),
url(r'^find/', include('findyour3d.dashboard.urls', namespace='dashboard')),
url(r'^quote/', include('findyour3d.quote.urls', namespace='quote')),
url(r'^payments/', include('findyour3d.payment.urls', namespace='payments')),
url(r'^ratings/', include('star_ratings.urls', namespace='ratings', app_name='ratings')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these URLs in a browser to see how the error pages look.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
|
hqpr/findyour3d
|
config/urls.py
|
Python
|
mit
| 2,600
|
[
"VisIt"
] |
13bba454297a5f3daff33d500c9edb7dae7193388898942c8060c7906b375d0f
|
from __future__ import annotations
import math
import pickle
import random
from scitbx import matrix
from dials.model.data import Shoebox
def random_shoeboxes(num, mask=False):
for i in range(num):
x0 = random.randint(0, 100)
y0 = random.randint(0, 100)
z0 = random.randint(0, 100)
x1 = random.randint(x0 + 5, x0 + 20)
y1 = random.randint(y0 + 5, y0 + 20)
z1 = random.randint(z0 + 5, z0 + 20)
bbox = (x0, x1, y0, y1, z0, z1)
xc0 = (x1 + x0) / 2.0
yc0 = (y1 + y0) / 2.0
zc0 = (z1 + z0) / 2.0
xc = random.uniform(xc0 - 1, xc0 + 1)
yc = random.uniform(yc0 - 1, yc0 + 1)
zc = random.uniform(zc0 - 1, zc0 + 1)
centre = (xc, yc, zc)
intensity = random.randint(10, 10000)
shoebox = generate_shoebox(bbox, centre, intensity, mask=mask)
yield (shoebox, (centre, intensity))
def generate_shoebox(bbox, centre, intensity, mask=False):
from dials.algorithms.shoebox import MaskCode
shoebox = Shoebox()
shoebox.bbox = bbox
shoebox.allocate()
for i in range(len(shoebox.mask)):
shoebox.mask[i] = MaskCode.Valid | MaskCode.Foreground
shoebox.data = gaussian(
shoebox.size(),
1.0,
[c - o for c, o in zip(centre[::-1], shoebox.offset())],
[s / 8.0 for s in shoebox.size()],
)
if mask:
shoebox.mask = create_mask(
shoebox.size(),
[c - o for c, o in zip(centre[::-1], shoebox.offset())],
MaskCode.Valid | MaskCode.Foreground,
)
tot = 0
mask_code = MaskCode.Valid | MaskCode.Foreground
for i in range(len(shoebox.data)):
if shoebox.mask[i] & mask_code == mask_code:
tot += shoebox.data[i]
if tot > 0:
shoebox.data *= intensity / tot
return shoebox
def create_mask(size, x0, value):
from scitbx.array_family import flex
mask = flex.int(flex.grid(size), 0)
rad = min(s - c for s, c in zip(size, x0))
for k in range(size[0]):
for j in range(size[1]):
for i in range(size[2]):
d = math.sqrt((j - x0[1]) ** 2 + (i - x0[2]) ** 2)
if d < rad:
mask[k, j, i] = value
return mask
def evaluate_gaussian(x, a, x0, sx):
assert len(x) == len(x0)
assert len(x) == len(sx)
g = 0.0
for xi, x0i, sxi in zip(x, x0, sx):
g += (xi - x0i) ** 2 / (2.0 * sxi**2)
return a * math.exp(-g)
def gaussian(size, a, x0, sx):
from dials.array_family import flex
result = flex.real(flex.grid(size))
index = [0] * len(size)
while True:
result[index] = evaluate_gaussian(index, a, x0, sx)
for j in range(len(size)):
index[j] += 1
if index[j] < size[j]:
break
index[j] = 0
if j == len(size) - 1:
return result
def test_allocate():
for i in range(10):
x0 = random.randint(0, 1000)
y0 = random.randint(0, 1000)
z0 = random.randint(0, 1000)
x1 = random.randint(1, 10) + x0
y1 = random.randint(1, 10) + y0
z1 = random.randint(1, 10) + z0
shoebox = Shoebox((x0, x1, y0, y1, z0, z1))
shoebox.allocate()
assert shoebox.data.all() == (z1 - z0, y1 - y0, x1 - x0)
assert shoebox.mask.all() == (z1 - z0, y1 - y0, x1 - x0)
shoebox.deallocate()
assert shoebox.data.all() == (0, 0, 0)
assert shoebox.mask.all() == (0, 0, 0)
def test_offset():
for i in range(10):
x0 = random.randint(0, 1000)
y0 = random.randint(0, 1000)
z0 = random.randint(0, 1000)
x1 = random.randint(1, 10) + x0
y1 = random.randint(1, 10) + y0
z1 = random.randint(1, 10) + z0
shoebox = Shoebox((x0, x1, y0, y1, z0, z1))
assert shoebox.xoffset() == x0
assert shoebox.yoffset() == y0
assert shoebox.zoffset() == z0
assert shoebox.offset() == (z0, y0, x0)
def test_size():
for i in range(10):
x0 = random.randint(0, 1000)
y0 = random.randint(0, 1000)
z0 = random.randint(0, 1000)
x1 = random.randint(1, 10) + x0
y1 = random.randint(1, 10) + y0
z1 = random.randint(1, 10) + z0
shoebox = Shoebox((x0, x1, y0, y1, z0, z1))
assert shoebox.xsize() == x1 - x0
assert shoebox.ysize() == y1 - y0
assert shoebox.zsize() == z1 - z0
assert shoebox.size() == (z1 - z0, y1 - y0, x1 - x0)
def test_consistent():
from dials.array_family import flex
for i in range(1000):
x0 = random.randint(0, 1000)
y0 = random.randint(0, 1000)
z0 = random.randint(0, 1000)
x1 = random.randint(1, 10) + x0
y1 = random.randint(1, 10) + y0
z1 = random.randint(1, 10) + z0
try:
shoebox = Shoebox((x0, x1, y0, y1, z0, z1))
assert not shoebox.is_consistent()
shoebox.allocate()
assert shoebox.is_consistent()
shoebox.data = flex.real(flex.grid(20, 20, 20))
assert not shoebox.is_consistent()
shoebox.deallocate()
assert not shoebox.is_consistent()
except Exception:
print(x0, y0, z0, x1, y1, z1)
raise
def test_is_bbox_within_image_volume():
isize = (1000, 1000)
srange = (0, 100)
shoebox = Shoebox((10, 20, 10, 20, 10, 20))
assert shoebox.is_bbox_within_image_volume(isize, srange)
shoebox = Shoebox((-10, 20, 10, 20, 10, 20))
assert not shoebox.is_bbox_within_image_volume(isize, srange)
shoebox = Shoebox((10, 20, -10, 20, 10, 20))
assert not shoebox.is_bbox_within_image_volume(isize, srange)
shoebox = Shoebox((10, 20, 10, 20, -10, 20))
assert not shoebox.is_bbox_within_image_volume(isize, srange)
shoebox = Shoebox((10, 1020, 10, 20, 10, 20))
assert not shoebox.is_bbox_within_image_volume(isize, srange)
shoebox = Shoebox((10, 20, 10, 1020, 10, 20))
assert not shoebox.is_bbox_within_image_volume(isize, srange)
shoebox = Shoebox((10, 20, 10, 20, 10, 1020))
assert not shoebox.is_bbox_within_image_volume(isize, srange)
def test_does_bbox_contain_bad_pixels():
from scitbx.array_family import flex
mask = flex.bool(flex.grid(100, 100), True)
for j in range(100):
for i in range(40, 60):
mask[j, i] = False
mask[i, j] = False
for i in range(1000):
x0 = random.randint(0, 90)
y0 = random.randint(0, 90)
z0 = random.randint(0, 90)
x1 = random.randint(1, 10) + x0
y1 = random.randint(1, 10) + y0
z1 = random.randint(1, 10) + z0
shoebox = Shoebox((x0, x1, y0, y1, z0, z1))
res1 = shoebox.does_bbox_contain_bad_pixels(mask)
res2 = False
if x0 >= 40 and x0 < 60:
res2 = True
if x1 > 40 and x1 <= 60:
res2 = True
if y0 >= 40 and y0 < 60:
res2 = True
if y1 > 40 and y1 <= 60:
res2 = True
assert res1 == res2
def test_count_mask_values():
for i in range(10):
x0 = random.randint(0, 90)
y0 = random.randint(0, 90)
z0 = random.randint(0, 90)
x1 = random.randint(1, 10) + x0
y1 = random.randint(1, 10) + y0
z1 = random.randint(1, 10) + z0
shoebox = Shoebox((x0, x1, y0, y1, z0, z1))
shoebox.allocate()
maxnum = len(shoebox.mask)
num = random.randint(1, maxnum)
indices = random.sample(list(range(maxnum)), num)
value = 1 << 2
for i in indices:
shoebox.mask[i] = value
assert shoebox.count_mask_values(value) == num
def test_centroid_all():
for shoebox, (XC, I) in random_shoeboxes(10):
centroid = shoebox.centroid_all()
assert shoebox.is_consistent()
assert abs(matrix.col(centroid.px.position) - matrix.col(XC)) < 1.0
def test_centroid_masked():
for shoebox, (XC, I) in random_shoeboxes(10):
centroid = shoebox.centroid_masked(1 << 0)
assert shoebox.is_consistent()
assert abs(matrix.col(centroid.px.position) - matrix.col(XC)) < 1.0
def test_summed_intensity():
for shoebox, (XC, I) in random_shoeboxes(10):
intensity = shoebox.summed_intensity()
assert shoebox.is_consistent()
assert abs(intensity.observed.value - I) < 1e-1
def test_flatten():
from dials.algorithms.shoebox import MaskCode
from dials.array_family import flex
for shoebox, (XC, I) in random_shoeboxes(10, mask=True):
assert not shoebox.flat
zs = shoebox.zsize()
ys = shoebox.ysize()
xs = shoebox.xsize()
expected_data = flex.real(flex.grid(1, ys, xs), 0)
expected_mask = flex.int(flex.grid(1, ys, xs), 0)
for k in range(zs):
for j in range(ys):
for i in range(xs):
expected_data[0, j, i] += shoebox.data[k, j, i]
expected_mask[0, j, i] |= shoebox.mask[k, j, i]
if not (expected_mask[0, j, i] & MaskCode.Valid) or not (
shoebox.mask[k, j, i] & MaskCode.Valid
):
expected_mask[0, j, i] &= ~MaskCode.Valid
shoebox.flatten()
diff = expected_data.as_double() - shoebox.data.as_double()
max_diff = flex.max(flex.abs(diff))
assert max_diff < 1e-7
assert expected_mask.all_eq(shoebox.mask)
assert shoebox.flat
assert shoebox.is_consistent()
def test_all_foreground_valid():
from dials.tests.model.data.all_foreground_valid_data import data
shoeboxes = pickle.loads(bytes(data, encoding="latin-1"), encoding="bytes")
for i, shoebox in enumerate(shoeboxes):
if i < 4:
assert not shoebox.all_foreground_valid()
else:
assert shoebox.all_foreground_valid()
|
dials/dials
|
tests/model/data/test_shoebox.py
|
Python
|
bsd-3-clause
| 9,971
|
[
"Gaussian"
] |
438b5147b5c86c35ceef42893341f3a0bba87eec90cc2d44f91a3bce93352859
|
from validate_app import validateApp
import os
from distutils import spawn
import sys
from parse_files import parseOut, bringTogether
from bashSub import bashSub
def checkPreprocessApplications():
    applications = ["super_deduper", "sickle", "flash2", "bowtie2", "scythe"]
source = ["https://github.com/dstreett/Super-Deduper", "https://github.com/dstreett/sickle", "https://github.com/dstreett/FLASH2", "http://sourceforge.net/projects/bowtie-bio/files/bowtie2/2.2.6/", "https://github.com/najoshi/scythe"]
i = 0;
for app in applications:
if spawn.find_executable(app) is None:
sys.stderr.write("It doesn't look like you have app - " + app + "\n" )
sys.stderr.write("You can download it here - " + source[i] + "\n");
exit(0)
else:
sys.stderr.write(app + " found\n")
i += 1
def returnReads(dictSampleSeqFiles):
SE = ""
PE1 = ""
PE2 = ""
#data struct
# { (sampleKey, seqKey) : [[SE], [SE], [PE1, PE2], [PE1, PE2]] }
#diving into each of the sub lists in the dictionary value key
for e in dictSampleSeqFiles:
        #if the sublist only has one element then it is an SE read
if len(e) == 1:
if SE == "":
SE = e[0]
else:
SE += "," + e[0]
else:
if PE1 == "":
PE1 = e[0]
PE2 = e[1]
else:
PE1 += "," + e[0]
PE2 += "," + e[1]
return [SE, PE1, PE2]
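# Illustrative call (hypothetical file names, added for clarity): given the value
# [['A_SE.fastq'], ['A_R1.fastq', 'A_R2.fastq']] for one sample, returnReads
# yields ['A_SE.fastq', 'A_R1.fastq', 'A_R2.fastq']; repeated reads are comma-joined.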
def check_dir(Dir):
if not os.path.exists(Dir):
os.mkdir(Dir)
class preprocessCMD:
def __init__(self):
self.metaDataFolder = "MetaData"
def execute(self, args):
logFiles = []
time = 0
checkPreprocessApplications()
validate = validateApp()
validate.setValidation(True)
dictSampleSeqFiles = validate.validateSampleSheet(args.samplesDirectory, args.finalDir, args.samplesFile, args.force, False)
for key in dictSampleSeqFiles:
check_dir(args.finalDir)
check_dir(key)
meta = key
SEandPE = returnReads(dictSampleSeqFiles[key])
            #screening python scripts created in the virtual environment
extract_unmapped = "python " + os.path.join(os.path.dirname(os.path.realpath(__file__)), "extract_unmapped_reads.py")
screen = "python " + os.path.join(os.path.dirname(os.path.realpath(__file__)), "screen.py")
contArgsBaseline = " -t " + args.threads
finalClean = "python " + os.path.join(os.path.dirname(os.path.realpath(__file__)), "cleanupWrapper.py")
if SEandPE[0] != "":
terminalString = []
if args.contaminantsFolder != "":
contArgsBaseline = "-c " + args.contaminantsFolder + contArgsBaseline
terminalString.append(bashSub(screen, [SEandPE[0]], ['-U'], contArgsBaseline , os.path.join(meta, "SE_preproc_mapping.log")))
terminalString.append(bashSub(extract_unmapped, terminalString[-1].processSub(), [''], " -o stdout" , os.path.join(meta, "SE_filter_info.log")))
if args.skipDup == False:
terminalString.append(bashSub("super_deduper", terminalString[-1].processSub(), ['-U'], "-s 5 -l 15 -p stdout", os.path.join(meta, "SE_deduper_info.log")))
sickleArgs = " -o stdout -t sanger -l " + args.minLength + " -T "
if args.polyTrim:
sickleArgs += " -a "
terminalString.append(bashSub("scythe", [args.adapter], ["-a"], terminalString[-1].processSub()[0] + " -q sanger", os.path.join(meta, "SE_scythe_info.log")))
terminalString.append(bashSub("sickle se", terminalString[-1].processSub(), ['-f'], sickleArgs, os.path.join(meta, "SE_sickle_info.log")))
terminalString.append(bashSub(finalClean, terminalString[-1].processSub(), [''], " " + str(int(args.polyTrim)) + " " + str(int(args.forceSplit)) + " " + args.minLength + " " + os.path.join(key, key.split('/')[1]), ""))
print "___ SE COMMANDS ____"
print terminalString[-1].getCommand()
terminalString[-1].runCmd("")
time += terminalString[-1].returnTime()
if SEandPE[1] != "":
terminalString = []
if args.contaminantsFolder != "":
if os.path.exists(args.contaminantsFolder):
contArgsBaseline = "-c " + args.contaminantsFolder + contArgsBaseline
terminalString.append(bashSub(screen, [SEandPE[1], SEandPE[2]], ['-1', '-2'], contArgsBaseline, os.path.join(meta, "PE_preproc_mapping.log")))
terminalString.append(bashSub(extract_unmapped, terminalString[-1].processSub(), [''], " -o stdout" , os.path.join(meta, "PE_filter_info.log")))
if args.skipDup == False:
terminalString.append(bashSub("super_deduper", terminalString[-1].processSub(), ['-i'], "-s 5 -l 15 -p stdout", os.path.join(meta, "PE_deduper_info.log")))
sickleArgs = " -m stdout -s /dev/null -t sanger -T "
if args.polyTrim:
sickleArgs += " -a "
terminalString.append(bashSub("sickle pe", terminalString[-1].processSub(), ['-c'], sickleArgs , os.path.join(meta, "PE_sickle_info.log")))
if args.skipFlash == False:
terminalString.append(bashSub("flash2", terminalString[-1].processSub(), ['-Ti'], " -M " + args.overlapFlash + " --allow-outies -o " + key.split('/')[1] + " -d " + key + " -To -c ", os.path.join(meta, "flash_info.log")))
terminalString.append(bashSub(finalClean, terminalString[-1].processSub(), [''], " " + str(int(args.polyTrim)) + " " + str(int(args.forceSplit)) + " " + args.minLength + " " + os.path.join(key, key.split('/')[1]), ""))
print "___ PE COMMANDS ___"
print terminalString[-1].getCommand()
terminalString[-1].runCmd("")
sys.stderr.flush()
time += terminalString[-1].returnTime()
logFiles.append(parseOut(key, key.split("/")[-1]))
bringTogether(logFiles, os.path.join(args.finalDir, "Preprocessing_Summary.log"))
print "Total amount of seconds to run all samples"
print "Seconds: " + str(time)
#self.clean()
def clean(self):
import glob
for f in glob.glob(".screening_cont*"):
os.remove(f)
|
msettles/expHTS
|
expHTS/preprocessCMD.py
|
Python
|
apache-2.0
| 7,647
|
[
"Bowtie"
] |
c948ca01dd5e567792280beed205525c0515533700d4551305aa0030dbaec87c
|
# Copyright (c) 2017 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2007 The Regents of The University of Michigan
# Copyright (c) 2010 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Andreas Sandberg
import m5
import _m5.stats
from m5.objects import Root
from m5.util import attrdict, fatal
# Stat exports
from _m5.stats import schedStatEvent as schedEvent
from _m5.stats import periodicStatDump
outputList = []
def _url_factory(func):
"""Wrap a plain Python function with URL parsing helpers
Wrap a plain Python function f(fn, **kwargs) to expect a URL that
has been split using urlparse.urlsplit. First positional argument
is assumed to be a filename, this is created as the concatenation
of the netloc (~hostname) and path in the parsed URL. Keyword
arguments are derived from the query values in the URL.
For example:
wrapped_f(urlparse.urlsplit("text://stats.txt?desc=False")) ->
f("stats.txt", desc=False)
"""
from functools import wraps
@wraps(func)
def wrapper(url):
from urlparse import parse_qs
from ast import literal_eval
qs = parse_qs(url.query, keep_blank_values=True)
        # parse_qs returns a list of values for each parameter. Only a
        # single value is accepted since kwargs don't allow multiple values
# per parameter. Use literal_eval to transform string param
# values into proper Python types.
def parse_value(key, values):
if len(values) == 0 or (len(values) == 1 and not values[0]):
fatal("%s: '%s' doesn't have a value." % (url.geturl(), key))
elif len(values) > 1:
fatal("%s: '%s' has multiple values." % (url.geturl(), key))
else:
try:
return key, literal_eval(values[0])
except ValueError:
fatal("%s: %s isn't a valid Python literal" \
% (url.geturl(), values[0]))
kwargs = dict([ parse_value(k, v) for k, v in qs.items() ])
try:
return func("%s%s" % (url.netloc, url.path), **kwargs)
except TypeError:
fatal("Illegal stat visitor parameter specified")
return wrapper
@_url_factory
def _textFactory(fn, desc=True):
"""Output stats in text format.
Text stat files contain one stat per line with an optional
description. The description is enabled by default, but can be
disabled by setting the desc parameter to False.
Example: text://stats.txt?desc=False
"""
return _m5.stats.initText(fn, desc)
factories = {
# Default to the text factory if we're given a naked path
"" : _textFactory,
"file" : _textFactory,
"text" : _textFactory,
}
def addStatVisitor(url):
"""Add a stat visitor specified using a URL string
Stat visitors are specified using URLs on the following format:
format://path[?param=value[;param=value]]
The available formats are listed in the factories list. Factories
are called with the path as the first positional parameter and the
parameters are keyword arguments. Parameter values must be valid
Python literals.
"""
from urlparse import urlsplit
parsed = urlsplit(url)
try:
factory = factories[parsed.scheme]
except KeyError:
fatal("Illegal stat file type specified.")
outputList.append(factory(parsed))
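# Illustrative usage (added note, not part of the original module): a naked path
# falls back to the text factory, and query parameters become keyword arguments:
#   addStatVisitor("stats.txt")                    # -> _textFactory("stats.txt")
#   addStatVisitor("text://stats.txt?desc=False")  # -> _textFactory("stats.txt", desc=False)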
def initSimStats():
_m5.stats.initSimStats()
_m5.stats.registerPythonStatsHandlers()
names = []
stats_dict = {}
stats_list = []
def enable():
'''Enable the statistics package. Before the statistics package is
enabled, all statistics must be created and initialized and once
the package is enabled, no more statistics can be created.'''
global stats_list
stats_list = list(_m5.stats.statsList())
for stat in stats_list:
if not stat.check() or not stat.baseCheck():
fatal("statistic '%s' (%d) was not properly initialized " \
"by a regStats() function\n", stat.name, stat.id)
if not (stat.flags & flags.display):
stat.name = "__Stat%06d" % stat.id
def less(stat1, stat2):
v1 = stat1.name.split('.')
v2 = stat2.name.split('.')
return v1 < v2
stats_list.sort(less)
for stat in stats_list:
stats_dict[stat.name] = stat
stat.enable()
_m5.stats.enable();
def prepare():
'''Prepare all stats for data access. This must be done before
dumping and serialization.'''
for stat in stats_list:
stat.prepare()
lastDump = 0
def dump():
'''Dump all statistics data to the registered outputs'''
curTick = m5.curTick()
global lastDump
assert lastDump <= curTick
if lastDump == curTick:
return
lastDump = curTick
_m5.stats.processDumpQueue()
prepare()
for output in outputList:
if output.valid():
output.begin()
for stat in stats_list:
stat.visit(output)
output.end()
def reset():
'''Reset all statistics to the base state'''
# call reset stats on all SimObjects
root = Root.getInstance()
if root:
for obj in root.descendants(): obj.resetStats()
# call any other registered stats reset callbacks
for stat in stats_list:
stat.reset()
_m5.stats.processResetQueue()
flags = attrdict({
'none' : 0x0000,
'init' : 0x0001,
'display' : 0x0002,
'total' : 0x0010,
'pdf' : 0x0020,
'cdf' : 0x0040,
'dist' : 0x0080,
'nozero' : 0x0100,
'nonan' : 0x0200,
})
|
HwisooSo/gemV-update
|
src/python/m5/stats/__init__.py
|
Python
|
bsd-3-clause
| 7,711
|
[
"VisIt"
] |
da34aa352b1383cb9dae36af5c8ac2cdf08c1192f48bd45aa5990a51243a6a15
|
#!/usr/bin/env python
import sys
import os
import vtk
import numpy
from siconos.io.mechanics_hdf5 import MechanicsHdf5
## the best way to dump all data
# $ h5dump toto.hdf5 > toto.txt
import h5py
import getopt
def usage():
"""
{0} <hdf5 file>
""".format(sys.argv[0])
print '{0}: Usage'.format(sys.argv[0])
print """
{0} [--help] [--output_frequency=n] [--output_filename=] <hdf5 file>
"""
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], '',
['output_frequency=',
'output_filename=',
'cf-scale='])
except getopt.GetoptError, err:
sys.stderr.write('{0}\n'.format(str(err)))
usage()
exit(2)
min_time = None
max_time = None
scale_factor = 1
output_frequency=10
output_filename=None
for o, a in opts:
if o == '--help':
usage()
exit(0)
elif o == '--output_frequency':
output_frequency = float(a)
elif o == '--output_filename':
        output_filename = a
elif o == '--cf-scale':
scale_factor = float(a)
if len(args) > 0:
io_filename = args[0]
    if output_filename is None:
        out_filename = ''.join(io_filename.rsplit('.')[:-1]) + '-filtered.hdf5'
    else:
        out_filename = output_filename
else:
usage()
exit(1)
with MechanicsHdf5(io_filename=io_filename, mode='r') as io:
with MechanicsHdf5(io_filename=out_filename, mode='w') as out:
hdf1 = io._out
hdf2 = out._out
# copy /data/input
hdf2.__delitem__('data/input')
h5py.h5o.copy(hdf1.id, "data/input", hdf2.id, "data/input")
# copy /data/nslaws
hdf2.__delitem__('data/nslaws')
h5py.h5o.copy(hdf1.id, "data/nslaws", hdf2.id, "data/nslaws")
# copy /data/ref
hdf2.__delitem__('data/ref')
h5py.h5o.copy(hdf1.id, "data/ref", hdf2.id, "data/ref")
print('***************************************************** ')
print('************ Parsing simulation data ****************')
print('***************************************************** ')
def load():
ispos_data = io.static_data()
idpos_data = io.dynamic_data()
icf_data = io.contact_forces_data()[:]
isolv_data = io.solver_data()
return ispos_data, idpos_data, icf_data, isolv_data
spos_data, dpos_data, cf_data, solv_data = load()
#print('io._data',io._data)
#print('static position data : spos_data',spos_data)
#print('spos_data.value',spos_data.value)
#print('dynamic position data : dpos_data',dpos_data)
print('dpos_data.value',dpos_data.value)
print('cf_data',cf_data)
#print('solv_data',solv_data)
times = list(set(dpos_data[:, 0]))
times.sort()
print('len(times)',len(times))
if (len(times) ==0 ):
print('no results in the hdf5 file')
else:
print('Results for ',len(times),' steps in the hdf5 file')
#ndyna = len(numpy.where(dpos_data[:, 0] == times[0])) does not work
ndyna = len(dpos_data[:, 0])/len(times)
print('ndyna =', ndyna)
if len(spos_data) > 0:
nstatic = len(numpy.where(spos_data[:, 0] == times[0]))
nstatic = spos_data.shape[0]
else:
nstatic = 0
print('nstatic =', nstatic)
# instances = set(dpos_data[:, 1])
# filtering
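            # (added note) only every output_frequency-th time step is copied
            # into the filtered output file in the loop below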
p=0
current_line=0
for k in range(len(times)):
#print(times[k])
if (k+1 < len(times) ):
time_step=times[k+1]-times[k]
#print('time_step',time_step)
if (k%output_frequency==0):
if k==0 :
print('filter for k',k,'at times', times[k], 'p', p)
out._dynamic_data.resize((p+1)*ndyna,0)
out._dynamic_data[p*ndyna:(p+1)*ndyna,:] = numpy.array(dpos_data[k*ndyna:(k+1)*ndyna,:])
#print('times',dpos_data[k*ndyna:(k+1)*ndyna,0])
out._static_data.resize((p+1)*nstatic,0)
out._static_data[p*nstatic:(p+1)*nstatic,:] = numpy.array(spos_data[0:nstatic,:])
out._solv_data.resize((p+1),0)
out._solv_data[p:(p+1),:] = numpy.array(solv_data[0:1,:])
id_f = numpy.where(abs(cf_data[:, 0] - times[k]) < time_step*1e-5)[0]
if len(id_f) == 0:
print('no contact data at time',times[k])
else:
#print('index of contact :', min(id_f), max(id_f))
out._cf_data.resize(max(id_f)+1,0)
out._cf_data[min(id_f):max(id_f),:] = numpy.array(cf_data[min(id_f):max(id_f),:])
current_line = max(id_f)
else:
print('filter for k',k,'at times', times[k], 'p', p)
out._dynamic_data.resize((p+1)*ndyna,0)
#print( dpos_data[k*ndyna:(k+1)*ndyna,:])
#print('times',dpos_data[(k+1)*ndyna:(k+2)*ndyna,0])
out._dynamic_data[p*ndyna:(p+1)*ndyna,:] = dpos_data[(k+1)*ndyna:(k+2)*ndyna,:]
# out._static_data.resize((p+1)*nstatic,0)
# #print( dpos_data[k*nstatic:(k+1)*nstatic,:])
# out._static_data[p*nstatic:(p+1)*nstatic,:] = spos_data[k*nstatic:(k+1)*nstatic,:]
out._solv_data.resize((p+1),0)
out._solv_data[p:(p+1),:] = numpy.array(solv_data[k:k+1,:])
id_f = numpy.where(abs(cf_data[:, 0] - times[k]) < time_step*1e-5)[0]
if len(id_f) == 0:
print('no contact data at time',times[k])
else:
#print('index of contact :', min(id_f), max(id_f))
new_line = current_line+max(id_f)-min(id_f)+1
#print('new_line',new_line)
#print('current_line',current_line)
#print('size of contact data', max(id_f)-min(id_f)+1)
out._cf_data.resize(new_line,0)
#print('fill out._cf_data indices', current_line, new_line-1)
out._cf_data[current_line:new_line,:] = numpy.array(cf_data[min(id_f):max(id_f)+1,:])
current_line=new_line
#print('current_line',current_line)
p = p+1
#print(dpos_data)
print(out._dynamic_data.shape)
print(out._static_data.shape)
print(out.static_data().value)
print(out._cf_data.shape)
print(out._solv_data.shape)
|
radarsat1/siconos
|
io/swig/io/filter_output_frequency.py
|
Python
|
apache-2.0
| 6,832
|
[
"VTK"
] |
b2f51bfc03c1b421ece1a35c12479a438c53d51e6df745735a08b4836eec0ba5
|
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import config, hooks
from lib import *
import time, os, pprint, shutil, traceback
from valuespec import *
import cmk.paths
# Datastructures and functions needed before plugins can be loaded
loaded_with_language = False
# Custom user attributes
user_attributes = {}
builtin_user_attribute_names = []
# Connection configuration
connection_dict = {}
# Connection object dictionary
g_connections = {}
# Load all userdb plugins
def load_plugins(force):
global user_attributes
global multisite_user_connectors
global builtin_user_attribute_names
# Do not cache the custom user attributes. They can be created by the user
    # during runtime, which means they need to be loaded during each page request.
    # But delete the old definitions first, to also apply removals of attributes.
if user_attributes:
for attr_name in user_attributes.keys():
if attr_name not in builtin_user_attribute_names:
del user_attributes[attr_name]
declare_custom_user_attrs()
connection_dict.clear()
for connection in config.user_connections:
connection_dict[connection['id']] = connection
    # Clean up any still-open connections
if g_connections:
g_connections.clear()
global loaded_with_language
if loaded_with_language == current_language and not force:
return
# declare & initialize global vars
user_attributes = {}
multisite_user_connectors = {}
load_web_plugins("userdb", globals())
builtin_user_attribute_names = user_attributes.keys()
declare_custom_user_attrs()
# Connectors have the option to perform migration of configuration options
# while the initial loading is performed
for connector_class in multisite_user_connectors.values():
connector_class.migrate_config()
# This must be set after plugin loading to make broken plugins raise
# exceptions all the time and not only the first time (when the plugins
# are loaded).
loaded_with_language = current_language
# Cleans up at the end of a request: close any connections that are still open
def finalize():
if g_connections:
g_connections.clear()
# Returns a list of two-element tuples where the first element is the unique
# connection id and the second element is the instantiated connection object
def get_connections(only_enabled=False):
connections = []
for connector_id, connector_class in multisite_user_connectors.items():
if connector_id == 'htpasswd':
# htpasswd connector is enabled by default and always executed first
connections.insert(0, ('htpasswd', connector_class({})))
else:
for connection_config in config.user_connections:
if only_enabled and connection_config.get('disabled'):
continue
connection = connector_class(connection_config)
if only_enabled and not connection.is_enabled():
continue
connections.append((connection_config['id'], connection))
return connections
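# Illustrative return value (hypothetical ids and objects, added for clarity):
#   [('htpasswd', <htpasswd connection>), ('my_ldap', <ldap connection>)]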
def active_connections():
return get_connections(only_enabled=True)
def connection_choices():
return sorted([ (cid, "%s (%s)" % (cid, c.type())) for cid, c in get_connections(only_enabled=False)
if c.type() == "ldap" ],
key=lambda (x, y): y)
# When at least one LDAP connection is defined and active a sync is possible
def sync_possible():
for connection_id, connection in active_connections():
if connection.type() == "ldap":
return True
return False
def cleanup_connection_id(connection_id):
if connection_id is None:
connection_id = 'htpasswd'
# Old Check_MK used a static "ldap" connector id for all LDAP users.
# Since Check_MK now supports multiple LDAP connections, the ID has
# been changed to "default". But only transform this when there is
# no connection existing with the id LDAP.
if connection_id == 'ldap' and not get_connection('ldap'):
connection_id = 'default'
return connection_id
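# Illustrative behaviour (added comment): cleanup_connection_id(None) -> 'htpasswd',
# and cleanup_connection_id('ldap') -> 'default' unless a connection with the id
# 'ldap' actually exists.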
# Returns the connection object of the requested connection id. This function
# maintains a cache so that for a single connection_id only one object is
# created per request.
def get_connection(connection_id):
if connection_id in g_connections:
return g_connections[connection_id]
connection = dict(get_connections()).get(connection_id)
if connection:
g_connections[connection_id] = connection
return connection
# Returns a list of connection specific locked attributes
def locked_attributes(connection_id):
return get_attributes(connection_id, "locked_attributes")
# Returns a list of connection specific multisite attributes
def multisite_attributes(connection_id):
return get_attributes(connection_id, "multisite_attributes")
# Returns a list of connection specific non contact attributes
def non_contact_attributes(connection_id):
return get_attributes(connection_id, "non_contact_attributes")
def get_attributes(connection_id, what):
connection = get_connection(connection_id)
if connection:
return getattr(connection, what)()
else:
return []
def new_user_template(connection_id):
new_user = {
'serial': 0,
'connector': connection_id,
}
# Apply the default user profile
new_user.update(config.default_user_profile)
return new_user
def create_non_existing_user(connection_id, username):
users = load_users(lock = True)
if username in users:
return # User exists. Nothing to do...
users[username] = new_user_template(connection_id)
save_users(users)
# Call the sync function for this new user
hook_sync(connection_id = connection_id, only_username = username)
# This function is called very often during regular page loads, so it has to be efficient
# even when there are a lot of users.
#
# When using the multisite authentication with only users created by WATO it would be
# easy, but we also need to deal with users which exist only in the htpasswd
# file and don't have a profile directory yet.
def user_exists(username):
if _user_exists_according_to_profile(username):
return True
return _user_exists_htpasswd(username)
def _user_exists_according_to_profile(username):
base_path = config.config_dir + "/" + username.encode("utf-8") + "/"
return os.path.exists(base_path + "transids.mk") \
or os.path.exists(base_path + "serial.mk")
def _user_exists_htpasswd(username):
for line in open(cmk.paths.htpasswd_file):
l = line.decode("utf-8")
if l.startswith("%s:" % username):
return True
return False
def user_locked(username):
users = load_users()
return users[username].get('locked', False)
def login_timed_out(username, last_activity):
idle_timeout = load_custom_attr(username, "idle_timeout", convert_idle_timeout, None)
if idle_timeout == None:
idle_timeout = config.user_idle_timeout
if idle_timeout in [ None, False ]:
return False # no timeout activated at all
timed_out = (time.time() - last_activity) > idle_timeout
# TODO: uncomment this once log level can be configured
#if timed_out:
# logger(LOG_DEBUG, "%s login timed out (Inactive for %d seconds)" %
# (username, time.time() - last_activity))
return timed_out
def update_user_access_time(username):
if not config.save_user_access_times:
return
save_custom_attr(username, 'last_seen', repr(time.time()))
def on_succeeded_login(username):
num_failed_logins = load_custom_attr(username, 'num_failed_logins', saveint)
if num_failed_logins != None and num_failed_logins != 0:
save_custom_attr(username, 'num_failed_logins', '0')
update_user_access_time(username)
# userdb.need_to_change_pw returns either False or a description of the reason why
# the password needs to be changed
def need_to_change_pw(username):
if load_custom_attr(username, 'enforce_pw_change', saveint) == 1:
return 'enforced'
last_pw_change = load_custom_attr(username, 'last_pw_change', saveint)
max_pw_age = config.password_policy.get('max_age')
if max_pw_age:
if not last_pw_change:
# The age of the password is unknown. Assume the user has just set
# the password to have the first access after enabling password aging
            # as the starting point for the password period. This spares all users
            # from needing to set a new password after enabling aging.
save_custom_attr(username, 'last_pw_change', str(int(time.time())))
return False
elif time.time() - last_pw_change > max_pw_age:
return 'expired'
return False
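# Illustrative outcomes (added comment): 'enforced' when a password change was
# explicitly enforced for the user, 'expired' when the password is older than the
# configured max_age, otherwise False.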
def on_failed_login(username):
users = load_users(lock = True)
if username in users:
if "num_failed_logins" in users[username]:
users[username]["num_failed_logins"] += 1
else:
users[username]["num_failed_logins"] = 1
if config.lock_on_logon_failures:
if users[username]["num_failed_logins"] >= config.lock_on_logon_failures:
users[username]["locked"] = True
save_users(users)
root_dir = cmk.paths.check_mk_config_dir + "/wato/"
multisite_dir = cmk.paths.default_config_dir + "/multisite.d/wato/"
# Old vs:
#ListChoice(
# title = _('Automatic User Synchronization'),
# help = _('By default the users are synchronized automatically in several situations. '
# 'The sync is started when opening the "Users" page in configuration and '
# 'during each page rendering. Each connector can then specify if it wants to perform '
# 'any actions. For example the LDAP connector will start the sync once the cached user '
# 'information are too old.'),
# default_value = [ 'wato_users', 'page', 'wato_pre_activate_changes', 'wato_snapshot_pushed' ],
# choices = [
# ('page', _('During regular page processing')),
# ('wato_users', _('When opening the users\' configuration page')),
# ('wato_pre_activate_changes', _('Before activating the changed configuration')),
# ('wato_snapshot_pushed', _('On a remote site, when it receives a new configuration')),
# ],
# allow_empty = True,
#),
def transform_userdb_automatic_sync(val):
if val == []:
# legacy compat - disabled
return None
elif type(val) == list and val:
# legacy compat - all connections
return "all"
else:
return val
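# Illustrative behaviour of the transform above (added for clarity):
#   transform_userdb_automatic_sync([])        -> None   (legacy "disabled")
#   transform_userdb_automatic_sync(['page'])  -> "all"  (legacy list of contexts)
#   transform_userdb_automatic_sync("all")     -> "all"  (already in the new format)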
#.
# .--User Session--------------------------------------------------------.
# | _ _ ____ _ |
# | | | | |___ ___ _ __ / ___| ___ ___ ___(_) ___ _ __ |
# | | | | / __|/ _ \ '__| \___ \ / _ \/ __/ __| |/ _ \| '_ \ |
# | | |_| \__ \ __/ | ___) | __/\__ \__ \ | (_) | | | | |
# | \___/|___/\___|_| |____/ \___||___/___/_|\___/|_| |_| |
# | |
# +----------------------------------------------------------------------+
# | When single user sessions are activated, a user can only login once  |
# | at a time. In case a user tries to login a second time, an error is  |
# | shown to the later login. |
# | |
# | To make this feature possible a session ID is computed during login, |
# | saved in the users cookie and stored in the user profile together |
# | with the current time as "last activity" timestamp. This timestamp |
# | is updated during each user activity in the GUI. |
# | |
# | Once a user logs out or the "last activity" is older than the |
# | configured session timeout, the session is invalidated. The user |
# | can then login again from the same client or another one. |
# '----------------------------------------------------------------------'
def is_valid_user_session(username, session_id):
if config.single_user_session == None:
return True # No login session limitation enabled, no validation
session_info = load_session_info(username)
if session_info == None:
return False # no session active
else:
active_session_id, last_activity = session_info
if session_id == active_session_id:
return True # Current session. Fine.
# TODO: uncomment this once log level can be configured
#logger(LOG_DEBUG, "%s session_id not valid (timed out?) (Inactive for %d seconds)" %
# (username, time.time() - last_activity))
return False
def ensure_user_can_init_session(username):
if config.single_user_session == None:
return True # No login session limitation enabled, no validation
session_timeout = config.single_user_session
session_info = load_session_info(username)
if session_info == None:
return True # No session active
last_activity = session_info[1]
if (time.time() - last_activity) > session_timeout:
return True # Former active session timed out
# TODO: uncomment this once log level can be configured
#logger(LOG_DEBUG, "%s another session is active (inactive for: %d seconds)" %
# (username, time.time() - last_activity))
raise MKUserError(None, _("Another session is active"))
# Creates a new user login session (if single user session mode is enabled) and
# returns the session_id of the new session.
def initialize_session(username):
if not config.single_user_session:
return ""
session_id = create_session_id()
save_session_info(username, session_id)
return session_id
# Creates a random session id for the user and returns it.
def create_session_id():
return gen_id()
# Updates the current session of the user and returns the session_id or only
# returns an empty string when single user session mode is not enabled.
def refresh_session(username):
if not config.single_user_session:
return ""
session_info = load_session_info(username)
if session_info == None:
return # Don't refresh. Session is not valid anymore
session_id = session_info[0]
save_session_info(username, session_id)
def invalidate_session(username):
remove_custom_attr(username, "session_info")
# Saves the current session_id and the current time (last activity)
def save_session_info(username, session_id):
save_custom_attr(username, "session_info", "%s|%s" % (session_id, int(time.time())))
# Returns either None (when no session_id available) or a two element
# tuple where the first element is the session_id and the second the
# timestamp of the last activity.
def load_session_info(username):
return load_custom_attr(username, "session_info", convert_session_info)
def convert_session_info(value):
if value == "":
return None
else:
session_id, last_activity = value.split("|", 1)
return session_id, int(last_activity)
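# Illustrative round trip (hypothetical session id, added for clarity):
# save_session_info() stores the string "0a1b2c3d|1500000000", which
# convert_session_info() turns back into the tuple ('0a1b2c3d', 1500000000).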
#.
# .-Users----------------------------------------------------------------.
# | _ _ |
# | | | | |___ ___ _ __ ___ |
# | | | | / __|/ _ \ '__/ __| |
# | | |_| \__ \ __/ | \__ \ |
# | \___/|___/\___|_| |___/ |
# | |
# +----------------------------------------------------------------------+
def declare_user_attribute(name, vs, user_editable = True, permission = None,
show_in_table = False, topic = None, add_custom_macro = False,
domain = "multisite"):
user_attributes[name] = {
'valuespec' : vs,
'user_editable' : user_editable,
'show_in_table' : show_in_table,
'topic' : topic and topic or 'personal',
'add_custom_macro' : add_custom_macro,
'domain' : domain,
}
# Permission needed for editing this attribute
if permission:
user_attributes[name]["permission"] = permission
def get_user_attributes():
return user_attributes.items()
def load_users(lock = False):
filename = root_dir + "contacts.mk"
if lock:
        # Make sure that the file exists (without modifying it, *if* it already
        # exists) to be able to lock and release the file properly.
# Note: the lock will be released on next save_users() call or at
# end of page request automatically.
file(filename, "a")
aquire_lock(filename)
if html.is_cached('users'):
return html.get_cached('users')
# First load monitoring contacts from Check_MK's world. If this is
# the first time, then the file will be empty, which is no problem.
    # execfile will then simply leave contacts = {} unchanged.
try:
vars = { "contacts" : {} }
execfile(filename, vars, vars)
contacts = vars["contacts"]
except IOError:
contacts = {} # a not existing file is ok, start with empty data
except Exception, e:
if config.debug:
raise MKGeneralException(_("Cannot read configuration file %s: %s") %
(filename, e))
else:
logger(LOG_ERR, 'load_users: Problem while loading contacts (%s - %s). '
'Initializing structure...' % (filename, e))
contacts = {}
# Now add information about users from the Web world
filename = multisite_dir + "users.mk"
try:
vars = { "multisite_users" : {} }
execfile(filename, vars, vars)
users = vars["multisite_users"]
except IOError:
users = {} # not existing is ok -> empty structure
except Exception, e:
if config.debug:
raise MKGeneralException(_("Cannot read configuration file %s: %s") %
(filename, e))
else:
logger(LOG_ERR, 'load_users: Problem while loading users (%s - %s). '
'Initializing structure...' % (filename, e))
users = {}
# Merge them together. Monitoring users not known to Multisite
# will be added later as normal users.
result = {}
for id, user in users.items():
profile = contacts.get(id, {})
profile.update(user)
result[id] = profile
# Convert non unicode mail addresses
if type(profile.get("email")) == str:
profile["email"] = profile["email"].decode("utf-8")
    # This loop is only necessary if someone has edited
# contacts.mk manually. But we want to support that as
# far as possible.
for id, contact in contacts.items():
if id not in result:
result[id] = contact
result[id]["roles"] = [ "user" ]
result[id]["locked"] = True
result[id]["password"] = ""
# Passwords are read directly from the apache htpasswd-file.
# That way heroes of the command line will still be able to
# change passwords with htpasswd. Users *only* appearing
    # in htpasswd will also be loaded and assigned the role they
    # get according to the multisite old-style configuration variables.
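    # Illustrative htpasswd line (hypothetical hash): "hero:$apr1$salt$hash" -- a
    # leading "!" in front of the hash marks the account as locked (parsed below).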
def readlines(f):
try:
return file(f)
except IOError:
return []
# FIXME TODO: Consolidate with htpasswd user connector
filename = cmk.paths.htpasswd_file
for line in readlines(filename):
line = line.strip()
if ':' in line:
id, password = line.strip().split(":")[:2]
id = id.decode("utf-8")
if password.startswith("!"):
locked = True
password = password[1:]
else:
locked = False
if id in result:
result[id]["password"] = password
result[id]["locked"] = locked
else:
# Create entry if this is an admin user
new_user = {
"roles" : config.roles_of_user(id),
"password" : password,
"locked" : False,
}
result[id] = new_user
# Make sure that the user has an alias
result[id].setdefault("alias", id)
# Other unknown entries will silently be dropped. Sorry...
# Now read the serials, only process for existing users
serials_file = '%s/auth.serials' % os.path.dirname(cmk.paths.htpasswd_file)
for line in readlines(serials_file):
line = line.strip()
if ':' in line:
user_id, serial = line.split(':')[:2]
user_id = user_id.decode("utf-8")
if user_id in result:
result[user_id]['serial'] = saveint(serial)
# Now read the user specific files
dir = cmk.paths.var_dir + "/web/"
for d in os.listdir(dir):
if d[0] != '.':
id = d.decode("utf-8")
# read special values from own files
if id in result:
for attr, conv_func in [
('num_failed_logins', saveint),
('last_pw_change', saveint),
('last_seen', savefloat),
('enforce_pw_change', lambda x: bool(saveint(x))),
('idle_timeout', convert_idle_timeout),
('session_id', convert_session_info),
]:
val = load_custom_attr(id, attr, conv_func)
if val != None:
result[id][attr] = val
# read automation secrets and add them to existing
# users or create new users automatically
try:
secret = file(dir + d + "/automation.secret").read().strip()
except IOError:
secret = None
if secret:
if id in result:
result[id]["automation_secret"] = secret
else:
result[id] = {
"roles" : ["guest"],
"automation_secret" : secret,
}
# populate the users cache
html.set_cache('users', result)
return result
def custom_attr_path(userid, key):
return cmk.paths.var_dir + "/web/" + make_utf8(userid) + "/" + key + ".mk"
def load_custom_attr(userid, key, conv_func, default = None):
path = custom_attr_path(userid, key)
try:
return conv_func(file(path).read().strip())
except IOError:
return default
def save_custom_attr(userid, key, val):
path = custom_attr_path(userid, key)
make_nagios_directory(os.path.dirname(path))
create_user_file(path, 'w').write('%s\n' % val)
def remove_custom_attr(userid, key):
try:
os.unlink(custom_attr_path(userid, key))
except OSError:
pass # Ignore non existing files
def get_online_user_ids():
online_threshold = time.time() - config.user_online_maxage
users = []
for user_id, user in load_users(lock = False).items():
if user.get('last_seen', 0) >= online_threshold:
users.append(user_id)
return users
def split_dict(d, keylist, positive):
return dict([(k,v) for (k,v) in d.items() if (k in keylist) == positive])
def save_users(profiles):
# Add custom macros
core_custom_macros = [ k for k,o in user_attributes.items() if o.get('add_custom_macro') ]
for user in profiles.keys():
for macro in core_custom_macros:
if macro in profiles[user]:
profiles[user]['_'+macro] = profiles[user][macro]
multisite_custom_values = [ k for k,v in user_attributes.items() if v["domain"] == "multisite" ]
# Keys not to put into contact definitions for Check_MK
non_contact_keys = [
"roles",
"password",
"locked",
"automation_secret",
"language",
"serial",
"connector",
"num_failed_logins",
"enforce_pw_change",
"last_pw_change",
"last_seen",
"idle_timeout",
] + multisite_custom_values
# Keys to put into multisite configuration
multisite_keys = [
"roles",
"locked",
"automation_secret",
"alias",
"language",
"connector",
] + multisite_custom_values
# Remove multisite keys in contacts.
contacts = dict(
e for e in
[ (id, split_dict(user, non_contact_keys + non_contact_attributes(user.get('connector')), False))
for (id, user)
in profiles.items() ])
    # Only allow explicitly defined attributes to be written to multisite config
users = {}
for uid, profile in profiles.items():
users[uid] = dict([ (p, val)
for p, val in profile.items()
if p in multisite_keys + multisite_attributes(profile.get('connector'))])
# Check_MK's monitoring contacts
filename = root_dir + "contacts.mk.new"
out = create_user_file(filename, "w")
out.write("# Written by Multisite UserDB\n# encoding: utf-8\n\n")
out.write("contacts.update(\n%s\n)\n" % pprint.pformat(contacts))
out.close()
os.rename(filename, filename[:-4])
# Users with passwords for Multisite
filename = multisite_dir + "users.mk.new"
make_nagios_directory(multisite_dir)
out = create_user_file(filename, "w")
out.write("# Written by Multisite UserDB\n# encoding: utf-8\n\n")
out.write("multisite_users = \\\n%s\n" % pprint.pformat(users))
out.close()
os.rename(filename, filename[:-4])
# Execute user connector save hooks
hook_save(profiles)
# Write out the users serials
serials_file = '%s/auth.serials.new' % os.path.dirname(cmk.paths.htpasswd_file)
rename_file = True
try:
out = create_user_file(serials_file, "w")
except:
rename_file = False
out = create_user_file(serials_file[:-4], "w")
for user_id, user in profiles.items():
out.write('%s:%d\n' % (make_utf8(user_id), user.get('serial', 0)))
out.close()
if rename_file:
os.rename(serials_file, serials_file[:-4])
# Write user specific files
for user_id, user in profiles.items():
user_dir = cmk.paths.var_dir + "/web/" + user_id
make_nagios_directory(user_dir)
# authentication secret for local processes
auth_file = user_dir + "/automation.secret"
if "automation_secret" in user:
create_user_file(auth_file, "w").write("%s\n" % user["automation_secret"])
else:
remove_user_file(auth_file)
# Write out user attributes which are written to dedicated files in the user
# profile directory. The primary reason to have separate files, is to reduce
# the amount of data to be loaded during regular page processing
save_custom_attr(user_id, 'serial', str(user.get('serial', 0)))
save_custom_attr(user_id, 'num_failed_logins', str(user.get('num_failed_logins', 0)))
save_custom_attr(user_id, 'enforce_pw_change', str(int(user.get('enforce_pw_change', False))))
save_custom_attr(user_id, 'last_pw_change', str(user.get('last_pw_change', int(time.time()))))
if "idle_timeout" in user:
save_custom_attr(user_id, "idle_timeout", user["idle_timeout"])
else:
remove_custom_attr(user_id, "idle_timeout")
        # Write out the last seen time
if 'last_seen' in user:
save_custom_attr(user_id, 'last_seen', repr(user['last_seen']))
save_cached_profile(user_id, user, multisite_keys, non_contact_keys)
# During deletion of users we don't delete files which might contain user settings
# and e.g. customized views which are not easy to reproduce. We want to keep the
# files which are the result of a lot of work even when e.g. the LDAP sync deletes
# a user by accident. But for some internal files it is ok to delete them.
#
# Be aware: The user_exists() function relies on these files to be deleted.
profile_files_to_delete = [
"automation.secret",
"transids.mk",
"serial.mk",
]
dir = cmk.paths.var_dir + "/web"
for user_dir in os.listdir(cmk.paths.var_dir + "/web"):
if user_dir not in ['.', '..'] and user_dir.decode("utf-8") not in profiles:
entry = dir + "/" + user_dir
if not os.path.isdir(entry):
continue
for to_delete in profile_files_to_delete:
if os.path.exists(entry + '/' + to_delete):
os.unlink(entry + '/' + to_delete)
# Release the lock to make other threads access possible again asap
# This lock is set by load_users() only in the case something is expected
# to be written (like during user syncs, wato, ...)
release_lock(root_dir + "contacts.mk")
# populate the users cache
html.set_cache('users', profiles)
# Call the users_saved hook
hooks.call("users-saved", profiles)
def rewrite_users():
users = load_users(lock=True)
save_users(users)
def create_cmk_automation_user():
secret = gen_id()
users = load_users(lock=True)
users["automation"] = {
'alias' : u"Check_MK Automation - used for calling web services",
'contactgroups' : [],
'automation_secret' : secret,
'password' : encrypt_password(secret),
'roles' : ['admin'],
'locked' : False,
'serial' : 0,
'email' : '',
'pager' : '',
'notifications_enabled' : False,
}
save_users(users)
def save_cached_profile(user_id, user, multisite_keys, non_contact_keys):
# Only save contact AND multisite attributes to the profile. Not the
# infos that are stored in the custom attribute files.
cache = {}
for key in user.keys():
if key in multisite_keys or key not in non_contact_keys:
cache[key] = user[key]
config.save_user_file("cached_profile", cache, user=user_id)
def load_cached_profile():
return config.user.load_file("cached_profile", None)
def contactgroups_of_user(user_id):
user = load_cached_profile()
if user == None:
# No cached profile present. Load all users to get the users data
user = load_users(lock=False)[user_id]
return user.get("contactgroups", [])
def convert_idle_timeout(value):
return value != "False" and int(value) or False
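# Illustrative behaviour of the conversion above (values assumed):
# convert_idle_timeout("False") -> False   (idle timeout disabled)
# convert_idle_timeout("3600")  -> 3600    (timeout in seconds)
# Note that a stored "0" also yields False because of the and/or idiom.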
#.
# .-Roles----------------------------------------------------------------.
# | ____ _ |
# | | _ \ ___ | | ___ ___ |
# | | |_) / _ \| |/ _ \/ __| |
# | | _ < (_) | | __/\__ \ |
# | |_| \_\___/|_|\___||___/ |
# | |
# +----------------------------------------------------------------------+
def load_roles():
# Fake builtin roles into user roles.
builtin_role_names = { # Default names for builtin roles
"admin" : _("Administrator"),
"user" : _("Normal monitoring user"),
"guest" : _("Guest user"),
}
roles = dict([(id, {
"alias" : builtin_role_names.get(id, id),
"permissions" : {}, # use default everywhere
"builtin": True})
for id in config.builtin_role_ids ])
filename = multisite_dir + "roles.mk"
try:
vars = { "roles" : roles }
execfile(filename, vars, vars)
# Make sure that "general." is prefixed to the general permissions
# (due to a code change that converted "use" into "general.use", etc.
for role in roles.values():
for pname, pvalue in role["permissions"].items():
if "." not in pname:
del role["permissions"][pname]
role["permissions"]["general." + pname] = pvalue
# Reflect the data in the roles dict kept in the config module needed
# for instant changes in current page while saving modified roles.
# Otherwise the hooks would work with old data when using helper
# functions from the config module
config.roles.update(vars['roles'])
return vars["roles"]
except IOError:
return roles # Use empty structure, not existing file is ok!
except Exception, e:
if config.debug:
raise MKGeneralException(_("Cannot read configuration file %s: %s") %
(filename, e))
else:
logger(LOG_ERR, 'load_roles: Problem while loading roles (%s - %s). '
'Initializing structure...' % (filename, e))
return roles
#.
# .-Groups---------------------------------------------------------------.
# | ____ |
# | / ___|_ __ ___ _ _ _ __ ___ |
# | | | _| '__/ _ \| | | | '_ \/ __| |
# | | |_| | | | (_) | |_| | |_) \__ \ |
# | \____|_| \___/ \__,_| .__/|___/ |
# | |_| |
# +----------------------------------------------------------------------+
# TODO: Contact groups are fine here, but service / host groups?
def load_group_information():
try:
# Load group information from Check_MK world
vars = {}
for what in ["host", "service", "contact" ]:
vars["define_%sgroups" % what] = {}
filename = root_dir + "groups.mk"
try:
execfile(filename, vars, vars)
except IOError:
return {} # skip on not existing file
# Now load information from the Web world
multisite_vars = {}
for what in ["host", "service", "contact" ]:
multisite_vars["multisite_%sgroups" % what] = {}
filename = multisite_dir + "groups.mk"
try:
execfile(filename, multisite_vars, multisite_vars)
except IOError:
pass
# Merge information from Check_MK and Multisite worlds together
groups = {}
for what in ["host", "service", "contact" ]:
groups[what] = {}
for id, alias in vars['define_%sgroups' % what].items():
groups[what][id] = {
'alias': alias
}
if id in multisite_vars['multisite_%sgroups' % what]:
groups[what][id].update(multisite_vars['multisite_%sgroups' % what][id])
return groups
except Exception, e:
if config.debug:
raise MKGeneralException(_("Cannot read configuration file %s: %s") %
(filename, e))
else:
logger(LOG_ERR, 'load_group_information: Problem while loading groups (%s - %s). '
'Initializing structure...' % (filename, e))
return {}
class GroupChoice(DualListChoice):
def __init__(self, what, **kwargs):
DualListChoice.__init__(self, **kwargs)
self.what = what
self._choices = lambda: self.load_groups()
def load_groups(self):
all_groups = load_group_information()
this_group = all_groups.get(self.what, {})
return [ (k, t['alias'] and t['alias'] or k) for (k, t) in this_group.items() ]
#.
# .-Custom-Attrs.--------------------------------------------------------.
# | ____ _ _ _ _ |
# | / ___| _ ___| |_ ___ _ __ ___ / \ | |_| |_ _ __ ___ |
# | | | | | | / __| __/ _ \| '_ ` _ \ _____ / _ \| __| __| '__/ __| |
# | | |__| |_| \__ \ || (_) | | | | | |_____/ ___ \ |_| |_| | \__ \_ |
# | \____\__,_|___/\__\___/|_| |_| |_| /_/ \_\__|\__|_| |___(_) |
# | |
# +----------------------------------------------------------------------+
# | Manage custom attributes of users (in future hosts etc.)             |
# '----------------------------------------------------------------------'
# TODO: userdb is a bad place for this when it manages user and host attributes!
# Maybe move to own module?
def load_custom_attrs():
try:
filename = multisite_dir + "custom_attrs.mk"
if not os.path.exists(filename):
return {}
vars = {
'wato_user_attrs': [],
'wato_host_attrs': [],
}
execfile(filename, vars, vars)
attrs = {}
for what in [ "user", "host" ]:
attrs[what] = vars.get("wato_%s_attrs" % what, [])
return attrs
except Exception, e:
if config.debug:
raise
raise MKGeneralException(_("Cannot read configuration file %s: %s") %
(filename, e))
def declare_custom_user_attrs():
all_attrs = load_custom_attrs()
attrs = all_attrs.setdefault('user', [])
for attr in attrs:
vs = globals()[attr['type']](title = attr['title'], help = attr['help'])
declare_user_attribute(attr['name'], vs,
user_editable = attr['user_editable'],
show_in_table = attr.get('show_in_table', False),
topic = attr.get('topic', 'personal'),
add_custom_macro = attr.get('add_custom_macro', False )
)
#.
# .--ConnectorCfg--------------------------------------------------------.
# | ____ _ ____ __ |
# | / ___|___ _ __ _ __ ___ ___| |_ ___ _ __ / ___|/ _| __ _ |
# | | | / _ \| '_ \| '_ \ / _ \/ __| __/ _ \| '__| | | |_ / _` | |
# | | |__| (_) | | | | | | | __/ (__| || (_) | | | |___| _| (_| | |
# | \____\___/|_| |_|_| |_|\___|\___|\__\___/|_| \____|_| \__, | |
# | |___/ |
# +----------------------------------------------------------------------+
# | The user can enable and configure a list of user connectors which |
# | are then used by the userdb to fetch user / group information from |
# | external sources like LDAP servers. |
# '----------------------------------------------------------------------'
def load_connection_config():
user_connections = []
filename = multisite_dir + "user_connections.mk"
if not os.path.exists(filename):
return user_connections
try:
context = {
"user_connections": user_connections,
}
execfile(filename, context, context)
return context["user_connections"]
except Exception, e:
if config.debug:
raise MKGeneralException(_("Cannot read configuration file %s: %s") %
(filename, e))
return user_connections
def save_connection_config(connections):
make_nagios_directory(multisite_dir)
out = create_user_file(multisite_dir + "user_connections.mk", "w")
out.write("# Written by Multisite UserDB\n# encoding: utf-8\n\n")
out.write("user_connections = \\\n%s\n\n" % pprint.pformat(connections))
#.
# .--ConnectorAPI--------------------------------------------------------.
# | ____ _ _ ____ ___ |
# | / ___|___ _ __ _ __ ___ ___| |_ ___ _ __ / \ | _ \_ _| |
# | | | / _ \| '_ \| '_ \ / _ \/ __| __/ _ \| '__/ _ \ | |_) | | |
# | | |__| (_) | | | | | | | __/ (__| || (_) | | / ___ \| __/| | |
# | \____\___/|_| |_|_| |_|\___|\___|\__\___/|_|/_/ \_\_| |___| |
# | |
# +----------------------------------------------------------------------+
# | Implements the base class for User Connector classes. It implements |
# | basic mechanisms and default methods which might/should be |
# | overridden by the specific connector classes. |
# '----------------------------------------------------------------------'
# FIXME: How to declare methods/attributes forced to be overridden?
class UserConnector(object):
def __init__(self, config):
super(UserConnector, self).__init__()
self._config = config
@classmethod
def type(self):
return None
# The string representing this connector to humans
@classmethod
def title(self):
return None
@classmethod
def short_title(self):
return _('htpasswd')
#
# USERDB API METHODS
#
@classmethod
def migrate_config(self):
pass
# Optional: Hook function can be registered here to be executed
# to validate a login issued by a user.
# Gets parameters: username, password
# Has to return either:
# '<user_id>' -> Login succeeded
# False -> Login failed
# None -> Unknown user
def check_credentials(self, user_id, password):
return None
# Optional: Hook function can be registered here to be executed
# to synchronize all users.
def do_sync(self, add_to_changelog, only_username):
pass
# Optional: Tells whether or not the given user is currently
# locked which would mean that he is not allowed to login.
def is_locked(self, user_id):
return False
# Optional: Hook function can be registered here to be executed
# on each call to the multisite cron job page which is normally
# executed once a minute.
def on_cron_job(self):
pass
    # Optional: Hook function can be registered here to be executed
# to save all users.
def save_users(self, users):
pass
# List of user attributes locked for all users attached to this
# connection. Those locked attributes are read-only in WATO.
def locked_attributes(self):
return []
def multisite_attributes(self):
return []
def non_contact_attributes(self):
return []
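# A hedged sketch (not part of the original module) of how a connector subclass
# could satisfy the check_credentials() contract documented above. The class
# name and the structure of self._config are illustrative assumptions only.
class ExampleStaticConnector(UserConnector):
    @classmethod
    def type(cls):
        return 'example_static'
    @classmethod
    def title(cls):
        return 'Example static connector'
    def check_credentials(self, user_id, password):
        # Assume the connector config carries a plain {user_id: password} dict.
        users = self._config.get('users', {})
        if user_id not in users:
            return None   # unknown user -> let the next connector try
        if users[user_id] != password:
            return False  # known user, wrong password -> login failed
        return user_id    # success -> return the (possibly rewritten) user id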
#.
# .-Hooks----------------------------------------------------------------.
# | _ _ _ |
# | | | | | ___ ___ | | _____ |
# | | |_| |/ _ \ / _ \| |/ / __| |
# | | _ | (_) | (_) | <\__ \ |
# | |_| |_|\___/ \___/|_|\_\___/ |
# | |
# +----------------------------------------------------------------------+
# This hook is called to validate the login credentials provided by a user
def hook_login(username, password):
for connection_id, connection in active_connections():
result = connection.check_credentials(username, password)
# None -> User unknown, means continue with other connectors
# '<user_id>' -> success
# False -> failed
if result not in [ False, None ]:
username = result
if type(username) not in [ str, unicode ]:
raise MKInternalError(_("The username returned by the %s "
"connector is not of type string (%r).") % (connection_id, username))
            # Check whether or not the user exists (and maybe create it)
create_non_existing_user(connection_id, username)
            # Now, after successful login (and optional user account
            # creation), check whether or not the user is locked.
            # In e.g. the htpasswd connector this is checked by validating the
            # password against the hash in the htpasswd file prefixed with
            # a "!". But when using other connectors it might be necessary
            # to validate the user "locked" attribute.
if connection.is_locked(username):
return False # The account is locked
return result
elif result == False:
return result
def show_exception(connection_id, title, e, debug=True):
html.show_error(
"<b>" + connection_id + ' - ' + title + "</b>"
"<pre>%s</pre>" % (debug and traceback.format_exc() or e)
)
# Hook function can be registered here to be executed to synchronize all users.
# Is called on:
# a) before rendering the user management page in WATO
# b) a user is created during login (only for this user)
# c) Before activating the changes in WATO
def hook_sync(connection_id = None, add_to_changelog = False, only_username = None, raise_exc = False):
if connection_id:
connections = [ (connection_id, get_connection(connection_id)) ]
else:
connections = active_connections()
no_errors = True
for connection_id, connection in connections:
try:
connection.do_sync(add_to_changelog, only_username)
except MKLDAPException, e:
if raise_exc:
raise
show_exception(connection_id, _("Error during sync"), e, debug=config.debug)
no_errors = False
except Exception, e:
if raise_exc:
raise
show_exception(connection_id, _("Error during sync"), e)
no_errors = False
return no_errors
# Hook function can be registered here to be executed during saving of the
# new user construct
def hook_save(users):
for connection_id, connection in active_connections():
try:
connection.save_users(users)
except Exception, e:
if config.debug:
raise
else:
show_exception(connection_id, _("Error during saving"), e)
# This function registers general stuff which is independent of the single
# connectors on each page load. It is executed AFTER all other connection jobs.
def general_userdb_job():
# Working around the problem that the auth.php file needed for multisite based
# authorization of external addons might not exist when setting up a new installation
# We assume: Each user must visit this login page before using the multisite based
# authorization. So we can easily create the file here if it is missing.
# This is a good place to replace old api based files in the future.
auth_php = cmk.paths.var_dir + '/wato/auth/auth.php'
if not os.path.exists(auth_php) or os.path.getsize(auth_php) == 0:
create_auth_file("page_hook", load_users())
# Create initial auth.serials file, same issue as auth.php above
serials_file = '%s/auth.serials' % os.path.dirname(cmk.paths.htpasswd_file)
if not os.path.exists(serials_file) or os.path.getsize(serials_file) == 0:
save_users(load_users(lock = True))
# Hook function can be registered here to execute actions on a "regular" basis without
# user triggered action. This hook is called on each page load.
# Catch all exceptions and log them to the apache error log. Let exceptions raise through
# when debug mode is enabled.
def execute_userdb_job():
if not userdb_sync_job_enabled():
return
for connection_id, connection in active_connections():
try:
connection.on_cron_job()
except:
if config.debug:
raise
else:
logger(LOG_ERR, 'Exception (%s, userdb_job): %s' %
(connection_id, traceback.format_exc()))
general_userdb_job()
# Legacy option config.userdb_automatic_sync defaulted to "master".
# Can be: None: (no sync), "all": all sites sync, "master": only master site sync
# Take that option into account for compatibility reasons.
# For remote sites in distributed setups, the default is to do no sync.
def user_sync_default_config(site_name):
global_user_sync = transform_userdb_automatic_sync(config.userdb_automatic_sync)
if global_user_sync == "master":
import wato # FIXME: Cleanup!
if config.site_is_local(site_name) and not wato.is_wato_slave_site():
user_sync_default = "all"
else:
user_sync_default = None
else:
user_sync_default = global_user_sync
return user_sync_default
def user_sync_config():
# use global option as default for reading legacy options and on remote site
# for reading the value set by the WATO master site
default_cfg = user_sync_default_config(config.omd_site())
return config.site(config.omd_site()).get("user_sync", default_cfg)
def userdb_sync_job_enabled():
cfg = user_sync_config()
if cfg == None:
return False # not enabled at all
import wato # FIXME: Cleanup!
if cfg == "master" and wato.is_wato_slave_site():
return False
return True
def ajax_sync():
try:
hook_sync(add_to_changelog = False, raise_exc = True)
html.write('OK\n')
except Exception, e:
if config.debug:
raise
else:
html.write('ERROR %s\n' % e)
|
ypid-bot/check_mk
|
web/htdocs/userdb.py
|
Python
|
gpl-2.0
| 51,514
|
[
"VisIt"
] |
213eeffa017a6fe59fc929750d95511ae609e43bc47b1d1ae21a9fa4d269a268
|
"""
Seismic wavelets.
:copyright: 2021 Agile Geoscience
:license: Apache 2.0
"""
import warnings
from collections import namedtuple
import numpy as np
import scipy.signal
import matplotlib.pyplot as plt
from bruges.util import deprecated
def _get_time(duration, dt, sym=True):
"""
Make a time vector.
If `sym` is `True`, the time vector will have an odd number of samples,
and will be symmetric about 0. If it's False, and the number of samples
    is even (e.g. duration = 0.016, dt = 0.004), then 0 will not be the center.
"""
# This business is to avoid some of the issues with `np.arange`:
# (1) unpredictable length and (2) floating point weirdness, like
# 1.234e-17 instead of 0. Not using `linspace` because figuring out
# the length and offset gave me even more of a headache than this.
n = int(duration / dt)
odd = n % 2
k = int(10**-np.floor(np.log10(dt)))
dti = int(k * dt) # integer dt
if (odd and sym):
t = np.arange(n)
elif (not odd and sym):
t = np.arange(n + 1)
elif (odd and not sym):
t = np.arange(n)
elif (not odd and not sym):
t = np.arange(n) - 1
t -= t[-1] // 2
return dti * t / k
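# Worked example for the helper above (values assumed, matching the docstring):
# _get_time(0.016, 0.004, sym=True) gives n = 4 (even), so t = arange(5) - 2 =
# [-2, -1, 0, 1, 2], which is scaled back by dt to
# array([-0.008, -0.004, 0., 0.004, 0.008])  # 5 samples, symmetric about 0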
def _generic(func, duration, dt, f, t=None, return_t=True, taper='blackman', sym=True):
"""
Generic wavelet generator: applies a window to a continuous function.
Args:
func (function): The continuous function, taking t, f as arguments.
duration (float): The length in seconds of the wavelet.
dt (float): The sample interval in seconds (often one of 0.001, 0.002,
or 0.004).
f (array-like): Dominant frequency of the wavelet in Hz. If a sequence is
passed, you will get a 2D array in return, one row per frequency.
t (array-like): The time series to evaluate at, if you don't want one
to be computed. If you pass `t` then `duration` and `dt` will be
ignored, so we recommend passing `None` for those arguments.
return_t (bool): If True, then the function returns a tuple of
wavelet, time-basis.
taper (str or function): The window or tapering function to apply.
To use one of NumPy's functions, pass 'bartlett', 'blackman' (the
default), 'hamming', or 'hanning'; to apply no tapering, pass
'none'. To apply your own function, pass a function taking only
the length of the window and returning the window function.
sym (bool): If True (default behaviour before v0.5) then the wavelet
is forced to have an odd number of samples and the central sample
is at 0 time.
Returns:
ndarray. wavelet(s) with centre frequency f sampled on t. If you
passed `return_t=True` then a tuple of (wavelet, t) is returned.
"""
if not return_t:
m = "return_t is deprecated. In future releases, return_t will always be True."
warnings.warn(m, DeprecationWarning, stacklevel=2)
f = np.asanyarray(f).reshape(-1, 1)
# Compute time domain response.
if t is None:
t = _get_time(duration, dt, sym=sym)
else:
if (duration is not None) or (dt is not None):
m = "`duration` and `dt` are ignored when `t` is passed."
warnings.warn(m, UserWarning, stacklevel=2)
t = np.array(t)
t[t == 0] = 1e-12 # Avoid division by zero.
f[f == 0] = 1e-12 # Avoid division by zero.
w = np.squeeze(func(t, f))
if taper is not None:
tapers = {
'bartlett': np.bartlett,
'blackman': np.blackman,
'hamming': np.hamming,
'hanning': np.hanning,
'none': lambda _: 1,
}
taper = tapers.get(taper, taper)
w *= taper(t.size)
if return_t:
Wavelet = namedtuple('Wavelet', ['amplitude', 'time'])
return Wavelet(w, t)
else:
return w
def sinc(duration, dt, f, t=None, return_t=True, taper='blackman', sym=True):
"""
sinc function centered on t=0, with a dominant frequency of f Hz.
If you pass a 1D array of frequencies, you get a wavelet bank in return.
.. plot::
import matplotlib.pyplot as plt
import bruges
w, t = bruges.filters.sinc(0.256, 0.002, 40)
plt.plot(t, w)
Args:
duration (float): The length in seconds of the wavelet.
dt (float): The sample interval in seconds (often one of 0.001, 0.002,
or 0.004).
f (array-like): Dominant frequency of the wavelet in Hz. If a sequence is
passed, you will get a 2D array in return, one row per frequency.
t (array-like): The time series to evaluate at, if you don't want one
to be computed. If you pass `t` then `duration` and `dt` will be
ignored, so we recommend passing `None` for those arguments.
return_t (bool): If True, then the function returns a tuple of
wavelet, time-basis.
taper (str or function): The window or tapering function to apply.
To use one of NumPy's functions, pass 'bartlett', 'blackman' (the
default), 'hamming', or 'hanning'; to apply no tapering, pass
'none'. To apply your own function, pass a function taking only
the length of the window and returning the window function.
Returns:
ndarray. sinc wavelet(s) with centre frequency f sampled on t. If
you passed `return_t = True` then a tuple of (wavelet, t) is returned.
"""
def func(t_, f_):
return np.sin(2*np.pi*f_*t_) / (2*np.pi*f_*t_)
return _generic(func, duration, dt, f, t, return_t, taper, sym=sym)
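# Illustrative use of a custom taper (an assumed call, not from the docs above):
# any callable taking only the window length can be passed, e.g.
#     w, t = sinc(0.256, 0.002, 40, taper=lambda n: np.kaiser(n, 14))
# which works the same way as the named-window strings such as taper='hamming'.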
def cosine(duration, dt, f, t=None, return_t=True, taper='gaussian', sigma=None, sym=True):
"""
With the default Gaussian window, equivalent to a 'modified Morlet'
also sometimes called a 'Gabor' wavelet. The `bruges.filters.gabor`
    function returns a similar shape, but with a higher mean frequency,
somewhere between a Ricker and a cosine (pure tone).
If you pass a 1D array of frequencies, you get a wavelet bank in return.
.. plot::
import matplotlib.pyplot as plt
import bruges
w, t = bruges.filters.cosine(0.256, 0.002, 40)
plt.plot(t, w)
Args:
duration (float): The length in seconds of the wavelet.
dt (float): The sample interval in seconds (often one of 0.001, 0.002,
or 0.004).
f (array-like): Dominant frequency of the wavelet in Hz. If a sequence is
passed, you will get a 2D array in return, one row per frequency.
t (array-like): The time series to evaluate at, if you don't want one
to be computed. If you pass `t` then `duration` and `dt` will be
ignored, so we recommend passing `None` for those arguments.
return_t (bool): If True, then the function returns a tuple of
wavelet, time-basis.
taper (str or function): The window or tapering function to apply.
To use one of NumPy's functions, pass 'bartlett', 'blackman' (the
default), 'hamming', or 'hanning'; to apply no tapering, pass
'none'. To apply your own function, pass a function taking only
the length of the window and returning the window function.
sigma (float): Width of the default Gaussian window, in seconds.
Defaults to 1/8 of the duration.
Returns:
        ndarray. cosine wavelet(s) with centre frequency f sampled on t. If
you passed `return_t=True` then a tuple of (wavelet, t) is returned.
"""
if sigma is None:
sigma = duration / 8
def func(t_, f_):
return np.cos(2 * np.pi * f_ * t_)
def taper(length):
return scipy.signal.gaussian(length, sigma/dt)
return _generic(func, duration, dt, f, t, return_t, taper, sym=sym)
def gabor(duration, dt, f, t=None, return_t=True, sym=True):
"""
Generates a Gabor wavelet with a peak frequency f0 at time t.
https://en.wikipedia.org/wiki/Gabor_wavelet
If you pass a 1D array of frequencies, you get a wavelet bank in return.
.. plot::
import matplotlib.pyplot as plt
import bruges
w, t = bruges.filters.gabor(0.256, 0.002, 40)
plt.plot(t, w)
Args:
duration (float): The length in seconds of the wavelet.
dt (float): The sample interval in seconds (often one of 0.001, 0.002,
or 0.004).
f (array-like): Centre frequency of the wavelet in Hz. If a sequence is
passed, you will get a 2D array in return, one row per frequency.
t (array-like): The time series to evaluate at, if you don't want one
to be computed. If you pass `t` then `duration` and `dt` will be
ignored, so we recommend passing `None` for those arguments.
return_t (bool): If True, then the function returns a tuple of
wavelet, time-basis.
Returns:
ndarray. Gabor wavelet(s) with centre frequency f sampled on t. If
you passed `return_t=True` then a tuple of (wavelet, t) is returned.
"""
def func(t_, f_):
return np.exp(-2 * f_**2 * t_**2) * np.cos(2 * np.pi * f_ * t_)
return _generic(func, duration, dt, f, t, return_t, sym=sym)
def ricker(duration, dt, f, t=None, return_t=True, sym=True):
"""
Also known as the mexican hat wavelet, models the function:
.. math::
A = (1 - 2 \pi^2 f^2 t^2) e^{-\pi^2 f^2 t^2}
If you pass a 1D array of frequencies, you get a wavelet bank in return.
.. plot::
import matplotlib.pyplot as plt
import bruges
w, t = bruges.filters.ricker(0.256, 0.002, 40)
plt.plot(t, w)
Args:
duration (float): The length in seconds of the wavelet.
dt (float): The sample interval in seconds (often one of 0.001, 0.002,
or 0.004).
f (array-like): Centre frequency of the wavelet in Hz. If a sequence is
passed, you will get a 2D array in return, one row per frequency.
t (array-like): The time series to evaluate at, if you don't want one
to be computed. If you pass `t` then `duration` and `dt` will be
ignored, so we recommend passing `None` for those arguments.
return_t (bool): If True, then the function returns a tuple of
wavelet, time-basis.
sym (bool): If True (default behaviour before v0.5) then the wavelet
is forced to have an odd number of samples and the central sample
is at 0 time.
Returns:
ndarray. Ricker wavelet(s) with centre frequency f sampled on t. If
you passed `return_t=True` then a tuple of (wavelet, t) is returned.
"""
if not return_t:
m = "return_t is deprecated. In future releases, return_t will always be True."
warnings.warn(m, DeprecationWarning, stacklevel=2)
f = np.asanyarray(f).reshape(-1, 1)
if t is None:
t = _get_time(duration, dt, sym=sym)
else:
if (duration is not None) or (dt is not None):
m = "`duration` and `dt` are ignored when `t` is passed."
warnings.warn(m, UserWarning, stacklevel=2)
pft2 = (np.pi * f * t)**2
w = np.squeeze((1 - (2 * pft2)) * np.exp(-pft2))
if return_t:
RickerWavelet = namedtuple('RickerWavelet', ['amplitude', 'time'])
return RickerWavelet(w, t)
else:
return w
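# Quick sanity check for the expression above (illustrative, not part of the
# library): at t = 0 both (1 - 2*(pi*f*t)**2) and exp(-(pi*f*t)**2) equal 1,
# so a symmetric Ricker wavelet peaks with amplitude exactly 1.0 at its
# central sample, e.g. float(ricker(0.128, 0.004, 25).amplitude[16]) == 1.0.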
def klauder(duration, dt, f,
autocorrelate=True,
t=None,
return_t=True,
taper='blackman',
sym=True,
**kwargs):
"""
By default, gives the autocorrelation of a linear frequency modulated
wavelet (sweep). Uses scipy.signal.chirp, adding dimensions as necessary.
.. plot::
import matplotlib.pyplot as plt
import bruges
w, t = bruges.filters.klauder(0.256, 0.002, [12, 48])
plt.plot(t, w)
Args:
duration (float): The length in seconds of the wavelet.
        dt (float): The sample interval in seconds (usually 0.001, 0.002,
or 0.004)
f (array-like): Upper and lower frequencies. Any sequence like (f1, f2).
A list of lists will create a wavelet bank.
autocorrelate (bool): Whether to autocorrelate the sweep(s) to create
a wavelet. Default is `True`.
t (array-like): The time series to evaluate at, if you don't want one
to be computed. If you pass `t` then `duration` and `dt` will be
ignored, so we recommend passing `None` for those arguments.
return_t (bool): If True, then the function returns a tuple of
wavelet, time-basis.
taper (str or function): The window or tapering function to apply.
To use one of NumPy's functions, pass 'bartlett', 'blackman' (the
default), 'hamming', or 'hanning'; to apply no tapering, pass
'none'. To apply your own function, pass a function taking only
the length of the window and returning the window function.
sym (bool): If True (default behaviour before v0.5) then the wavelet
is forced to have an odd number of samples and the central sample
is at 0 time.
**kwargs: Further arguments are passed to scipy.signal.chirp. They are
`method` ('linear','quadratic','logarithmic'), `phi` (phase offset
in degrees), and `vertex_zero`.
Returns:
ndarray: The waveform. If you passed `return_t=True` then a tuple of
(wavelet, t) is returned.
"""
if not return_t:
m = "return_t is deprecated. In future releases, return_t will always be True."
warnings.warn(m, DeprecationWarning, stacklevel=2)
if t is None:
t = _get_time(duration, dt, sym=sym)
else:
if (duration is not None) or (dt is not None):
m = "`duration` and `dt` are ignored when `t` is passed. "
m += "Pass None to suppress this warning."
warnings.warn(m, UserWarning, stacklevel=2)
t0, t1 = -duration/2, duration/2
f = np.asanyarray(f).reshape(2, -1)
f1, f2 = f
c = [scipy.signal.chirp(t, f1_+(f2_-f1_)/2., t1, f2_, **kwargs)
for f1_, f2_
in zip(f1, f2)]
if autocorrelate:
w = [np.correlate(c_, c_, mode='same') for c_ in c]
w = np.squeeze(w) / np.amax(w)
if taper:
funcs = {
'bartlett': np.bartlett,
'blackman': np.blackman,
'hamming': np.hamming,
'hanning': np.hanning,
'none': lambda x: x,
}
func = funcs.get(taper, taper)
w *= func(t.size)
if return_t:
Sweep = namedtuple('Sweep', ['amplitude', 'time'])
return Sweep(w, t)
else:
return w
sweep = klauder
def _ormsby(t, f1, f2, f3, f4):
"""
Compute a single Ormsby wavelet. Private function.
"""
def numerator(f, t):
"""The numerator in the Ormsby equation."""
return (np.sinc(f * t)**2) * ((np.pi * f) ** 2)
pf43 = (np.pi * f4) - (np.pi * f3)
pf21 = (np.pi * f2) - (np.pi * f1)
w = ((numerator(f4, t)/pf43) - (numerator(f3, t)/pf43) -
(numerator(f2, t)/pf21) + (numerator(f1, t)/pf21))
return np.squeeze(w) / np.amax(w)
def ormsby(duration, dt, f, t=None, return_t=True, sym=True):
"""
The Ormsby wavelet requires four frequencies which together define a
trapezoid shape in the spectrum. The Ormsby wavelet has several sidelobes,
unlike Ricker wavelets.
.. plot::
import matplotlib.pyplot as plt
import bruges
w, t = bruges.filters.ormsby(0.256, 0.002, [5, 10, 40, 80])
plt.plot(t, w)
Args:
duration (float): The length in seconds of the wavelet.
dt (float): The sample interval in seconds (usually 0.001, 0.002,
or 0.004).
f (array-like): Sequence of form (f1, f2, f3, f4), or list of lists of
frequencies, which will return a 2D wavelet bank.
t (array-like): The time series to evaluate at, if you don't want one
to be computed. If you pass `t` then `duration` and `dt` will be
ignored, so we recommend passing `None` for those arguments.
return_t (bool): If True, then the function returns a tuple of
wavelet, time-basis.
sym (bool): If True (default behaviour before v0.5) then the wavelet
is forced to have an odd number of samples and the central sample
is at 0 time.
Returns:
ndarray: A vector containing the Ormsby wavelet, or a bank of them. If
you passed `return_t=True` then a tuple of (wavelet, t) is returned.
"""
if not return_t:
m = "return_t is deprecated. In future releases, return_t will always be True."
warnings.warn(m, DeprecationWarning, stacklevel=2)
try:
f = np.atleast_2d(f).reshape(-1, 4)
except ValueError as e:
raise ValueError("The last dimension of the frequency array must be of size 4.")
if t is None:
t = _get_time(duration, dt, sym=sym)
else:
if (duration is not None) or (dt is not None):
m = "`duration` and `dt` are ignored when `t` is passed."
warnings.warn(m, UserWarning, stacklevel=2)
w = np.squeeze([_ormsby(t, *fs) for fs in f])
if return_t:
OrmsbyWavelet = namedtuple('OrmsbyWavelet', ['amplitude', 'time'])
return OrmsbyWavelet(w, t)
else:
return w
def _ormsby_fft(duration, dt, f, P, sym):
fs = 1 / dt
fN = fs // 2
n = int(duration / dt)
a = map(lambda p: 10**(p/20), P)
# Linear interpolation of points.
x = np.linspace(0, int(fN), int(10*n))
xp = [ 0.] + list(f) + [fN]
fp = [0., 0.] + list(a) + [0., 0.]
W = np.interp(x, xp, fp)
# Compute inverse FFT.
w_ = np.fft.fftshift(np.fft.irfft(W))
L = int(w_.size // 2)
normalize = lambda d: d / np.max(abs(d))
return normalize(w_[L-n//2:L+n//2+sym])
def ormsby_fft(duration, dt, f, P=(0, 0), return_t=True, sym=True):
"""
    Non-white Ormsby, with arbitrary amplitudes.
Can use as many points as you like. The power of f1 and f4 is assumed to be 0,
so you only need to provide p2 and p3 (the corners). (You can actually provide
as many f points as you like, as long as there are n - 2 matching p points.)
.. plot::
import matplotlib.pyplot as plt
import bruges
        w, t = bruges.filters.ormsby_fft(0.256, 0.002, [5, 10, 40, 80])
plt.plot(t, w)
Args:
duration (float): The length in seconds of the wavelet.
dt (float): The sample interval in seconds (usually 0.001, 0.002,
or 0.004).
f (array-like): Sequence of form (f1, f2, f3, f4), or list of lists of
frequencies, which will return a 2D wavelet bank.
P (tuple): The power of the f2 and f3 frequencies, in relative dB.
(The magnitudes of f1 and f4 are assumed to be -∞ dB, i.e. a
magnitude of 0.) The default power values of (0, 0) results in a
trapezoidal spectrum and a conventional Ormsby wavelet. Pass, e.g.
(0, -15) for a 'pink' wavelet, with more energy in the lower
frequencies.
return_t (bool): If True, then the function returns a tuple of
wavelet, time-basis.
sym (bool): If True (default behaviour before v0.5) then the wavelet
is forced to have an odd number of samples and the central sample
is at 0 time.
Returns:
ndarray: A vector containing the Ormsby wavelet, or a bank of them. If
you passed `return_t=True` then a tuple of (wavelet, t) is returned.
"""
if not return_t:
m = "return_t is deprecated. In future releases, return_t will always be True."
warnings.warn(m, DeprecationWarning, stacklevel=2)
try:
f = np.atleast_2d(f).reshape(-1, 4)
except ValueError as e:
raise ValueError("The last dimension of the frequency array must be of size 4.")
w = np.squeeze([_ormsby_fft(duration, dt, fs, P, sym) for fs in f])
t = _get_time(duration, dt, sym=sym)
if return_t:
OrmsbyWavelet = namedtuple('OrmsbyWavelet', ['amplitude', 'time'])
return OrmsbyWavelet(w, t)
else:
return w
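# Illustrative note on P (assumed numbers, not from the docstring): the powers
# are relative dB converted with a = 10**(p/20), so P=(0, -15) keeps the f2
# corner at amplitude 1.0 and lowers the f3 corner to about 0.178, tilting the
# trapezoid towards the low frequencies, e.g.
#     w, t = ormsby_fft(0.256, 0.002, [5, 10, 40, 80], P=(0, -15))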
def berlage(duration, dt, f, n=2, alpha=180, phi=-np.pi/2, t=None, return_t=True, sym=True):
r"""
Generates a Berlage wavelet with a peak frequency f. Implements
.. math::
w(t) = AH(t) t^n \mathrm{e}^{- \alpha t} \cos(2 \pi f_0 t + \phi_0)
as described in Aldridge, DF (1990), The Berlage wavelet, GEOPHYSICS
55 (11), p 1508-1511. Berlage wavelets are causal, minimum phase and
useful for modeling marine airgun sources.
If you pass a 1D array of frequencies, you get a wavelet bank in return.
.. plot::
import matplotlib.pyplot as plt
import bruges
w, t = bruges.filters.berlage(0.256, 0.002, 40)
plt.plot(t, w)
Args:
duration (float): The length in seconds of the wavelet.
dt (float): The sample interval in seconds (often one of 0.001, 0.002,
or 0.004).
f (array-like): Centre frequency of the wavelet in Hz. If a sequence is
passed, you will get a 2D array in return, one row per frequency.
n (float): The time exponent; non-negative and real.
        alpha (float): The exponential decay factor; non-negative and real.
phi (float): The phase.
t (array-like): The time series to evaluate at, if you don't want one
to be computed. If you pass `t` then `duration` and `dt` will be
ignored, so we recommend passing `None` for those arguments.
return_t (bool): If True, then the function returns a tuple of
wavelet, time-basis.
sym (bool): If True (default behaviour before v0.5) then the wavelet
is forced to have an odd number of samples and the central sample
is at 0 time.
Returns:
ndarray. Berlage wavelet(s) with centre frequency f sampled on t. If
you passed `return_t=True` then a tuple of (wavelet, t) is returned.
"""
if not return_t:
m = "return_t is deprecated. In future releases, return_t will always be True."
warnings.warn(m, DeprecationWarning, stacklevel=2)
f = np.asanyarray(f).reshape(-1, 1)
if t is None:
t = _get_time(duration, dt, sym=sym)
else:
if (duration is not None) or (dt is not None):
m = "`duration` and `dt` are ignored when `t` is passed."
warnings.warn(m, UserWarning, stacklevel=2)
H = np.heaviside(t, 0)
w = H * t**n * np.exp(-alpha * t) * np.cos(2 * np.pi * f * t + phi)
w = np.squeeze(w) / np.max(np.abs(w))
if return_t:
BerlageWavelet = namedtuple('BerlageWavelet', ['amplitude', 'time'])
return BerlageWavelet(w, t)
else:
return w
def generalized(duration, dt, f, u=2, t=None, return_t=True, imag=False, sym=True):
"""
Wang's generalized wavelet, of which the Ricker is a special case where
u = 2. The parameter u is the order of the time-domain derivative, which
can be a fractional derivative.
As given by Wang (2015), Generalized seismic wavelets. GJI 203, p 1172-78.
DOI: https://doi.org/10.1093/gji/ggv346. I am using the (more accurate)
frequency domain method (eq 4 in that paper).
.. plot::
import matplotlib.pyplot as plt
import bruges
w, t = bruges.filters.generalized(0.256, 0.002, 40, u=1.0)
plt.plot(t, w)
Args:
duration (float): The length of the wavelet, in s.
dt (float): The time sample interval in s.
f (float or array-like): The frequency or frequencies, in Hertz.
u (float or array-like): The fractional derivative parameter u.
t (array-like): The time series to evaluate at, if you don't want one
to be computed. If you pass `t` then `duration` and `dt` will be
ignored, so we recommend passing `None` for those arguments.
return_t (bool): Whether to return the time basis array.
imag (bool): Whether to return the imaginary component as well.
sym (bool): If True (default behaviour before v0.5) then the wavelet
is forced to have an odd number of samples and the central sample
is at 0 time.
Returns:
ndarray. If f and u are floats, the resulting wavelet has duration/dt
= A samples. If you give f as an array of length M and u as an
array of length N, then the resulting wavelet bank will have shape
(M, N, A). If f or u are floats, their size will be 1, and they
will be squeezed out: the bank is always squeezed to its minimum
number of dimensions. If you passed `return_t=True` then a tuple
of (wavelet, t) is returned.
"""
if not return_t:
m = "return_t is deprecated. In future releases, return_t will always be True."
warnings.warn(m, DeprecationWarning, stacklevel=2)
# Make sure we can do banks.
f = np.asanyarray(f).reshape(-1, 1)
u = np.asanyarray(u).reshape(-1, 1, 1)
# Compute time domain response.
if t is None:
t = _get_time(duration, dt, sym=sym)
else:
if (duration is not None) or (dt is not None):
m = "`duration` and `dt` are ignored when `t` is passed."
warnings.warn(m, UserWarning, stacklevel=2)
dt = t[1] - t[0]
duration = len(t) * dt
# Basics.
om0 = f * 2 * np.pi
u2 = u / 2
df = 1 / duration
nyquist = (1 / dt) / 2
nf = 1 + nyquist / df
t0 = duration / 2
om = 2 * np.pi * np.arange(0, nyquist, df)
# Compute the spectrum from Wang's eq 4.
exp1 = np.exp((-om**2 / om0**2) + u2)
exp2 = np.exp(-1j*om*t0 + 1j*np.pi * (1 + u2))
W = (u2**(-u2)) * (om**u / om0**u) * exp1 * exp2
w = np.fft.ifft(W, t.size)
if not imag:
w = w.real
# At this point the wavelet bank has the shape (u, f, a),
# where u is the size of u, f is the size of f, and a is
# the number of amplitude samples we generated.
w_max = np.max(np.abs(w), axis=-1)[:, :, None]
w = np.squeeze(w / w_max)
if return_t:
GeneralizedWavelet = namedtuple('GeneralizedWavelet', ['amplitude', 'time'])
return GeneralizedWavelet(w, t)
else:
return w
|
agile-geoscience/bruges
|
bruges/filters/wavelets.py
|
Python
|
apache-2.0
| 26,725
|
[
"Gaussian"
] |
86c684bd23d4d0ca0e6d4c7a48d93575dbe8818d694bbdd05899a31ba32718be
|
#!/usr/bin/env python
__author__ = 'waroquiers'
import unittest
from pymatgen.util.testing import PymatgenTest
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import DistanceCutoffFloat
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import AngleCutoffFloat
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import CSMFloat
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import AdditionalConditionInt
from pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies import SimplestChemenvStrategy
class StrategyOptionsTest(PymatgenTest):
def test_options(self):
# DistanceCutoffFloat
with self.assertRaises(ValueError) as cm:
DistanceCutoffFloat(0.5)
self.assertEqual(str(cm.exception), 'Distance cutoff should be between 1.0 and +infinity')
dc1 = DistanceCutoffFloat(1.2)
dc1_dict = dc1.as_dict()
dc2 = DistanceCutoffFloat.from_dict(dc1_dict)
self.assertEqual(dc1, dc2)
# AngleCutoffFloat
with self.assertRaises(ValueError) as cm:
AngleCutoffFloat(1.2)
self.assertEqual(str(cm.exception), 'Angle cutoff should be between 0.0 and 1.0')
ac1 = AngleCutoffFloat(0.3)
ac1_dict = ac1.as_dict()
ac2 = AngleCutoffFloat.from_dict(ac1_dict)
self.assertEqual(ac1, ac2)
# CSMFloat
with self.assertRaises(ValueError) as cm:
CSMFloat(100.1)
self.assertEqual(str(cm.exception), 'Continuous symmetry measure limits should be between 0.0 and 100.0')
csm1 = CSMFloat(0.458)
csm1_dict = csm1.as_dict()
csm2 = CSMFloat.from_dict(csm1_dict)
self.assertEqual(csm1, csm2)
# AdditionalConditions
with self.assertRaises(ValueError) as cm:
AdditionalConditionInt(5)
self.assertEqual(str(cm.exception), 'Additional condition 5 is not allowed')
with self.assertRaises(ValueError) as cm:
AdditionalConditionInt(0.458)
self.assertEqual(str(cm.exception), 'Additional condition 0.458 is not an integer')
acd1 = AdditionalConditionInt(3)
acd1_dict = acd1.as_dict()
acd2 = AdditionalConditionInt.from_dict(acd1_dict)
self.assertEqual(acd1, acd2)
def test_strategies(self):
simplest_strategy = SimplestChemenvStrategy()
self.assertTrue(simplest_strategy.uniquely_determines_coordination_environments)
self.assertAlmostEqual(simplest_strategy.continuous_symmetry_measure_cutoff, 10.0)
self.assertAlmostEqual(simplest_strategy.distance_cutoff, 1.4)
self.assertAlmostEqual(simplest_strategy.angle_cutoff, 0.3)
simplest_strategy = SimplestChemenvStrategy(distance_cutoff=1.3, angle_cutoff=0.45,
continuous_symmetry_measure_cutoff=8.26)
self.assertAlmostEqual(simplest_strategy.continuous_symmetry_measure_cutoff, 8.26)
self.assertAlmostEqual(simplest_strategy.distance_cutoff, 1.3)
self.assertAlmostEqual(simplest_strategy.angle_cutoff, 0.45)
simplest_strategy.set_option('distance_cutoff', 1.5)
self.assertAlmostEqual(simplest_strategy.distance_cutoff, 1.5)
with self.assertRaises(ValueError) as cm:
simplest_strategy.set_option('distance_cutoff', 0.5)
self.assertEqual(str(cm.exception), 'Distance cutoff should be between 1.0 and +infinity')
simplest_strategy.set_option('angle_cutoff', 0.2)
self.assertAlmostEqual(simplest_strategy.angle_cutoff, 0.2)
with self.assertRaises(ValueError) as cm:
simplest_strategy.set_option('angle_cutoff', 1.5)
self.assertEqual(str(cm.exception), 'Angle cutoff should be between 0.0 and 1.0')
simplest_strategy.setup_options({'distance_cutoff': 1.4,
'additional_condition': 3,
'continuous_symmetry_measure_cutoff': 8.5})
self.assertAlmostEqual(simplest_strategy.distance_cutoff, 1.4)
self.assertAlmostEqual(simplest_strategy.continuous_symmetry_measure_cutoff, 8.5)
self.assertEqual(simplest_strategy.additional_condition, 3)
with self.assertRaises(ValueError) as cm:
simplest_strategy.setup_options({'continuous_symmetry_measure_cutoff': -0.1})
self.assertEqual(str(cm.exception), 'Continuous symmetry measure limits should be between 0.0 and 100.0')
with self.assertRaises(ValueError) as cm:
simplest_strategy.setup_options({'continuous_symmetry_measure_cutoff': 100.1})
self.assertEqual(str(cm.exception), 'Continuous symmetry measure limits should be between 0.0 and 100.0')
if __name__ == "__main__":
unittest.main()
|
Bismarrck/pymatgen
|
pymatgen/analysis/chemenv/coordination_environments/tests/test_chemenv_strategies.py
|
Python
|
mit
| 4,858
|
[
"pymatgen"
] |
0dcc599097c3873eafcb1143aaf6c9c4819346d2942991d1d66669f7682c7c8e
|
# Copyright 2014 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import urllib2
import h5py
import tempfile
import random
import numpy as np
from PIL import Image
from StringIO import StringIO
import makeunitdb
from ndlib.ndtype import IMAGE, UINT8, UINT16
from params import Params
from postmethods import postNPZ, getNPZ, getHDF5, postHDF5, getURL, postBlosc, getBlosc
from postmethods import putAnnotation, getAnnotation, getURL, postURL
from ramonmethods import H5AnnotationFile, getH5id, makeAnno, getId, getField, setField
from test_settings import *
# Test Image
# Test_Image_Slice
# 1 - test_xy
# 2 - test_yz
# 3 - test_xz
# 4 - test_xy_incorrect
# Test_Image_Post
# 1 - test_npz
# 2 - test_npz_incorrect_region
# 3 - test_npz_incorrect_datatype
# 4 - test_hdf5
# 5 - test_hdf5_incorrect_region
# 6 - test_hdf5_incorrect_datatype
# 7 - test_npz_incorrect_channel
# 8 - test_hdf5_incorrect_channel
p = Params()
p.token = 'unittest'
p.resolution = 0
p.channels = ['unit_anno']
@pytest.mark.skipif(KV_ENGINE == REDIS, reason='Annotation not supported in Redis')
class Test_Neuron:
def setup_class(self):
"""Create the unittest database"""
makeunitdb.createTestDB(p.token, readonly=0)
def teardown_class (self):
"""Destroy the unittest database"""
makeunitdb.deleteTestDB(p.token)
def test_neuron (self):
"""Make a multiple segments that overlap and then query them as a neuron"""
# create neuron
makeAnno(p,5)
neuronid = p.annoid
# create annotations
for i in range(0,3):
# create annotations
makeAnno(p,4)
f = setField(p,'neuron',neuronid)
# add data
p.args = (3000,3100,4000,4100,100+2*i,100+2*i+3)
image_data = np.ones( [1,3,100,100], dtype=np.uint32 ) * p.annoid
response = postNPZ(p, image_data)
# get the neuron annotation
p.annoid = neuronid
p.field = 'tight_cutout'
h5ret = getAnnotation(p)
idgrp = h5ret.get(str(p.annoid))
# count the voxels to make sure they remapped correctly
assert ( np.unique(np.array(idgrp['CUTOUT'][:,:,:])) == [0,neuronid] ).all()
assert ( len(np.nonzero(np.array(idgrp['CUTOUT'][:,:,:]))[0]) == 70000 )
|
neurodata/ndstore
|
test/test_neuron.py
|
Python
|
apache-2.0
| 2,722
|
[
"NEURON"
] |
2d7832d68eb7f87fe2f7dff2db7aa025b048d0d17c46e6c3fa9cb2689cc11ab0
|
# -*- coding: utf-8 -*-
import os
import tempfile
import types
import json
from mock import patch
from nose.tools import eq_
from helper import TestCase
import appvalidator.constants
from appvalidator.errorbundle import ErrorBundle
from appvalidator.specs.webapps import WebappSpec
import appvalidator.webapp
class TestWebappAccessories(TestCase):
"""
Test that helper functions for webapp manifests work as they are intended
to.
"""
def test_path(self):
"""Test that paths are tested properly for allowances."""
s = WebappSpec("{}", ErrorBundle())
eq_(s._path_valid("*"), False)
eq_(s._path_valid("*", can_be_asterisk=True), True)
eq_(s._path_valid("/foo/bar"), False)
eq_(s._path_valid("/foo/bar", can_be_absolute=True), True)
eq_(s._path_valid("//foo/bar"), False)
eq_(s._path_valid("//foo/bar", can_be_absolute=True), False)
eq_(s._path_valid("//foo/bar", can_be_relative=True), False)
eq_(s._path_valid("http://asdf/"), False)
eq_(s._path_valid("https://asdf/"), False)
eq_(s._path_valid("ftp://asdf/"), False)
eq_(s._path_valid("http://asdf/", can_have_protocol=True), True)
eq_(s._path_valid("https://asdf/", can_have_protocol=True), True)
# No FTP for you!
eq_(s._path_valid("ftp://asdf/", can_have_protocol=True), False)
eq_(s._path_valid("data:asdf"), False)
eq_(s._path_valid("data:asdf", can_be_data=True), True)
class WebappBaseTestCase(TestCase):
def setUp(self):
super(WebappBaseTestCase, self).setUp()
self.listed = False
descr = "Exciting Open Web development action!"
descr += (1024 - len(descr)) * "_"
self.data = {
"version": "1.0",
"name": "MozBall",
"description": descr,
"icons": {
"32": "/img/icon-32.png",
"128": "/img/icon-128.png",
},
"developer": {
"name": "Mozilla Labs",
"url": "http://mozillalabs.com"
},
"installs_allowed_from": [
"https://appstore.mozillalabs.com",
"HTTP://mozilla.com/AppStore"
],
"launch_path": "/index.html",
"locales": {
"es": {
"name": "Foo Bar",
"description": "¡Acción abierta emocionante del desarrollo",
"developer": {
"url": "http://es.mozillalabs.com/"
}
},
"it": {
"description": "Azione aperta emozionante di sviluppo di!",
"developer": {
"url": "http://it.mozillalabs.com/"
}
}
},
"default_locale": "en",
"screen_size": {
"min_width": "600",
"min_height": "300"
},
"required_features": [
"touch", "geolocation", "webgl"
],
"orientation": "landscape",
"fullscreen": "true",
"type": "web",
"precompile": [
"game.js",
"database.js"
],
}
self.resources = [("app_type", "web")]
def make_privileged(self):
self.resources = [("app_type", "privileged"),
("packaged", True)]
self.data["type"] = "privileged"
def analyze(self):
"""Run the webapp tests on the file."""
self.detected_type = appvalidator.constants.PACKAGE_WEBAPP
self.setup_err()
for resource, value in self.resources:
self.err.save_resource(resource, value)
with tempfile.NamedTemporaryFile(delete=False) as t:
if isinstance(self.data, types.StringTypes):
t.write(self.data)
else:
t.write(json.dumps(self.data))
name = t.name
appvalidator.webapp.detect_webapp(self.err, name)
os.unlink(name)
class TestWebapps(WebappBaseTestCase):
def test_pass(self):
"""Test that a bland webapp file throws no errors."""
self.analyze()
self.assert_silent()
output = json.loads(self.err.render_json())
assert "manifest" in output and output["manifest"]
def test_bom(self):
"""Test that a plain webapp with a BOM won't throw errors."""
self.setup_err()
appvalidator.webapp.detect_webapp(
self.err, "tests/resources/unicodehelper/utf8_webapp.json")
self.assert_silent()
def test_fail_parse(self):
"""Test that invalid JSON is reported."""
self.data = "}{"
self.analyze()
self.assert_failed(with_errors=True)
def test_missing_required(self):
"""Test that missing the name element is a bad thing."""
del self.data["name"]
self.analyze()
self.assert_failed(with_errors=True)
def test_invalid_name(self):
"""Test that the name element is a string."""
self.data["name"] = ["foo", "bar"]
self.analyze()
self.assert_failed(with_errors=True)
def test_long_name(self):
"""Test that long names are flagged for truncation in Gaia."""
self.data["name"] = "This is a long name."
self.analyze()
self.assert_failed(with_warnings=True)
def test_long_name_with_locale(self):
"""Test that long localized names are flagged for truncation in
Gaia."""
self.data["locales"]["es"]["name"] = "This is a long name."
self.analyze()
self.assert_failed(with_warnings=True)
def test_role(self):
"""Test that app may contain role element."""
self.data["role"] = "input"
self.analyze()
self.assert_silent()
def test_langpack_role_need_languages_target(self):
"""Test that a language-target is needed for langpacks."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "missing_req_cond", ))
def test_langpack_invalid_languages_target_type(self):
"""Test language-target type."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
"languages-target": ["2.2"], # Wrong type.
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "bad_type", ))
def test_langpack_invalid_languages_target_wrong_version_type(self):
"""Test that language-target version number has the correct type."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": 2.2 # Wrong type.
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "bad_type", ))
def test_langpack_invalid_languages_target_wrong_version_value(self):
"""Test that language-target version number has the correct value."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.3" # Wrong value.
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "bad_value", ))
def test_langpack_invalid_languages_target(self):
"""Test that language-target manifest has the correct value."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://my.manifest.webapp": "2.2" # Manifest is incorrect.
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "not_allowed", ))
def test_langpack_role_need_languages_provided(self):
"""Test that a language-provided is needed for langpacks."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.2"
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "missing_req_cond", ))
def test_langpack_invalid_languages_provided_should_not_be_empty(self):
"""Test that language-provided is not empty."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.2"
},
"languages-provided": {}
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "empty", ))
def test_langpack_invalid_languages_provided_language_should_be_dict(self):
"""Test that language-provided children are dicts."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.2"
},
"languages-provided": {
"de": []
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "bad_type", ))
def test_langpack_invalid_languages_provided_need_revision(self):
"""Test that language-provided revision is present."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.2"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "missing_req", ))
def test_langpack_invalid_languages_provided_need_apps(self):
"""Test that language-provided apps is present."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.2"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "missing_req", ))
def test_langpack_invalid_languages_provided_apps(self):
"""Test that language-provided apps should be a dict."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.2"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": ["app://blah.gaiamobile.org/manifest.webapp"]
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "bad_type", ))
def test_langpack_invalid_languages_provided_apps_empty(self):
"""Test that language-provided apps should be non-empty dict."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.2"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
}
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "empty", ))
def test_langpack_invalid_languages_provided_revision(self):
"""Test that language-provided revision should be an int."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.2"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": "201411051234", # Wrong type, should be a int.
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "iterate", "bad_type", ))
def test_valid_langpack(self):
"""Test that a valid langpack passes validation."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.2"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
})
self.analyze()
self.assert_silent()
def test_valid_langpack_30(self):
"""Test that a valid langpack for FxOS 3.0 passes validation."""
self.resources.append(('packaged', True))
self.data.update({
"role": "langpack",
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "3.0"
},
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
})
self.analyze()
self.assert_silent()
def test_languages_target_invalid_for_webapps(self):
"""Test that language-target is invalid for non-langpack webapps."""
self.data.update({
"languages-target": {
"app://*.gaiamobile.org/manifest.webapp": "2.2"
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(
("spec", "webapp", "languages_target_langpacks", ))
def test_languages_provided_invalid_for_webapps(self):
"""Test that language-provided is invalid for non-langpack webapps."""
self.data.update({
"languages-provided": {
"de": {
"name": "Deutsch",
"revision": 201411051234,
"apps": {
"app://blah.gaiamobile.org/manifest.webapp": "/de/blah",
"app://email.gaiamobile.org/manifest.webapp": "/de/email"
}
}
},
})
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(
("spec", "webapp", "languages_provided_langpacks", ))
def test_invalid_role(self):
"""Test that app may not contain invalid role element."""
self.data["role"] = "hello"
self.analyze()
self.assert_failed(with_errors=True)
def test_empty_name(self):
"""Test that empty names are not allowed"""
self.data["name"] = None
self.analyze()
self.assert_failed(with_errors=True)
def test_maxlengths(self):
"""Test that certain elements are capped in length."""
self.data["name"] = "%" * 129
self.analyze()
self.assert_failed(with_errors=True)
def test_invalid_keys(self):
"""Test that unknown elements are flagged"""
self.data["foobar"] = "hello"
self.analyze()
self.assert_failed(with_warnings=True)
def test_warn_extra_keys(self):
"""Test that extra keys are flagged."""
self.data["locales"]["es"]["foo"] = "hello"
self.analyze()
self.assert_failed(with_warnings=True)
def test_icons_not_dict(self):
"""Test that the icons property is a dictionary."""
self.data["icons"] = ["data:foo/bar.png"]
self.analyze()
self.assert_failed(with_errors=True)
def test_icons_empty(self):
"""Test that no icons doesn't cause a traceback."""
self.data["icons"] = {}
self.analyze()
def test_icons_size(self):
"""Test that webapp icon sizes must be integers."""
self.data["icons"]["foo"] = "/foo.png"
self.analyze()
self.assert_failed(with_errors=True)
def test_icons_data_url(self):
"""Test that webapp icons can be data URLs."""
self.data["icons"]["128"] = "data:foo/bar.png"
self.analyze()
self.assert_silent()
def test_icons_relative_url(self):
"""Test that webapp icons cannot be relative URLs."""
self.data["icons"]["128"] = "foo/bar"
self.analyze()
self.assert_silent()
def test_icons_absolute_url(self):
"""Test that webapp icons can be absolute URLs."""
def test_icon(self, icon):
self.setUp()
self.data["icons"]["128"] = icon
self.analyze()
self.assert_silent()
for icon in ['/foo/bar', 'http://foo.com/bar', 'https://foo.com/bar']:
yield test_icon, self, icon
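# The loop above uses nose's generator-test convention: a test method that
# yields (callable, *args) tuples is expanded into one test case per tuple.
# A minimal sketch of the pattern (the field name and values below are
# illustrative only):
#
#     def test_many_values(self):
#         def check(self, value):
#             self.setUp()              # reset the fixture for each case
#             self.data["icons"]["128"] = value
#             self.analyze()
#             self.assert_silent()
#         for value in ("/a.png", "https://x/a.png"):
#             yield check, self, value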
def test_icons_has_min_selfhosted(self):
"""Test that unlisted (self-hosted) apps don't require a 128px icon."""
del self.data["icons"]["128"]
self.analyze()
self.assert_silent()
def test_icons_has_min_listed(self):
"""Test that Marketplace-listed apps must provide a 128px icon."""
self.listed = True
self.data["installs_allowed_from"] = \
appvalidator.constants.DEFAULT_WEBAPP_MRKT_URLS
del self.data["icons"]["128"]
self.analyze()
self.assert_failed(with_errors=True)
def test_no_locales(self):
"""Test that locales are not required."""
del self.data["locales"]
self.analyze()
self.assert_silent()
def test_no_default_locale_no_locales(self):
"""Test that locales are not required if no default_locale."""
del self.data["default_locale"]
del self.data["locales"]
self.analyze()
self.assert_silent()
def test_no_default_locale(self):
"""Test that locales require default_locale."""
del self.data["default_locale"]
self.analyze()
self.assert_failed(with_errors=True)
def test_invalid_locale_keys(self):
"""Test that locales only contain valid keys."""
# Banned locale element.
self.data["locales"]["es"]["default_locale"] = "foo"
self.analyze()
self.assert_failed(with_warnings=True)
def test_invalid_locale_keys_missing(self):
"""Test that locales aren't missing any required elements."""
del self.data["locales"]["es"]["name"]
self.analyze()
self.assert_silent()
def test_installs_allowed_from_not_list(self):
"""Test that the installs_allowed_from path is a list."""
self.data["installs_allowed_from"] = "foobar"
self.analyze()
self.assert_failed(with_errors=True)
def test_bad_installs_allowed_from_path(self):
"""Test that the installs_allowed_from path is valid."""
self.data["installs_allowed_from"].append("foo/bar")
self.analyze()
self.assert_failed(with_errors=True)
def test_no_amo_installs_allowed_from(self):
"""Test that installs_allowed_from should include Marketplace."""
# self.data does not include a marketplace URL by default.
self.listed = True
self.analyze()
self.assert_failed(with_errors=True)
def test_amo_iaf(self):
"""Test that the various Marketplace URLs work."""
# Test that the Marketplace production URL is acceptable.
self.setUp()
orig_iaf = self.data["installs_allowed_from"]
def test_iaf(self, iaf, url):
self.setUp()
self.data["installs_allowed_from"] = iaf + [url]
self.analyze()
self.assert_silent()
for url in appvalidator.constants.DEFAULT_WEBAPP_MRKT_URLS:
yield test_iaf, self, orig_iaf, url
def test_iaf_wildcard(self):
"""Test that installs_allowed_from can contain a wildcard."""
self.listed = True
self.data["installs_allowed_from"].append("*")
self.analyze()
self.assert_silent()
def test_installs_allowed_from_protocol(self):
"""
Test that if the developer includes a URL in the `installs_allowed_from`
parameter that is a valid Marketplace URL but uses HTTP instead of
HTTPS, we flag it as using the wrong protocol and not as an invalid URL.
"""
self.listed = True
bad_url = appvalidator.constants.DEFAULT_WEBAPP_MRKT_URLS[0].replace(
"https", "http")
self.data["installs_allowed_from"] = (bad_url, )
self.analyze()
self.assert_failed(with_errors=True)
self.assert_got_errid(("spec", "webapp", "iaf_bad_mrkt_protocol", ))
def test_launch_path_packaged(self):
"""Test that the launch path is present in a packaged app."""
del self.data["launch_path"]
self.resources.append(('packaged', True))
self.analyze()
self.assert_failed(with_errors=True)
def test_launch_path_not_string(self):
"""Test that the launch path is a string."""
self.data["launch_path"] = [123]
self.analyze()
self.assert_failed(with_errors=True)
def test_bad_launch_path(self):
"""Test that the launch path is valid."""
self.data["launch_path"] = "data:asdf"
self.analyze()
self.assert_failed(with_errors=True)
def test_bad_launch_path_protocol(self):
"""Test that the launch path cannot have a protocol."""
self.data["launch_path"] = "http://foo.com/bar"
self.analyze()
self.assert_failed(with_errors=True)
def test_bad_launch_path_absolute(self):
"""Test that the launch path is absolute."""
self.data["launch_path"] = "/foo/bar"
self.analyze()
self.assert_silent()
def test_widget_deprecated(self):
"""Test that the widget property is deprecated."""
self.data["widget"] = {
"path": "/butts.html",
"width": 100,
"height": 200
}
self.analyze()
self.assert_failed(with_errors=True)
def test_dev_missing(self):
"""Test that the developer property cannot be absent."""
del self.data["developer"]
self.analyze()
self.assert_failed(with_errors=True)
def test_dev_not_dict(self):
"""Test that the developer property must be a dict."""
self.data["developer"] = "foo"
self.analyze()
self.assert_failed(with_errors=True)
def test_bad_dev_keys(self):
"""Test that the developer keys are present."""
del self.data["developer"]["name"]
self.analyze()
self.assert_failed(with_errors=True)
def test_bad_dev_url(self):
"""Test that the developer keys are correct."""
self.data["developer"]["url"] = "foo"
self.analyze()
self.assert_failed(with_errors=True)
def test_screen_size_missing(self):
"""Test that the 'screen_size' property can be absent."""
del self.data["screen_size"]
self.analyze()
self.assert_silent()
def test_screen_size_is_dict(self):
"""Test that the 'screen_size' property must be a dict."""
self.data["screen_size"] = "foo"
self.analyze()
self.assert_failed(with_errors=True)
def test_screen_size_contains_pair(self):
"""Test that 'screen_size' must contain at least one key/value pair."""
self.data["screen_size"] = {}
self.analyze()
self.assert_failed(with_errors=True)
def test_bad_screen_size_key(self):
"""Test that the 'screen_size' keys are correct."""
self.data["screen_size"]["max_width"] = "500"
self.analyze()
self.assert_failed(with_warnings=True)
def test_bad_screen_size_value(self):
"""Test that the 'screen_size' keys are correct."""
self.data["screen_size"]["min_width"] = "500px"
self.analyze()
self.assert_failed(with_errors=True)
def test_required_screen_size_missing(self):
"""Test that the 'screen_size' property can be absent."""
del self.data["screen_size"]
self.analyze()
self.assert_silent()
def test_required_features_is_list(self):
"""Test that the 'required_features' property must be a list."""
self.data["required_features"] = "fart"
self.analyze()
self.assert_failed(with_errors=True)
def test_required_features_missing(self):
"""Test that 'required_features' can be absent."""
del self.data["required_features"]
self.analyze()
self.assert_silent()
def test_required_features_empty(self):
"""Test that 'required_features' can be an empty list."""
self.data["required_features"] = []
self.analyze()
self.assert_silent()
def test_orientation_missing(self):
"""Test that the 'orientation' property can be absent."""
del self.data["orientation"]
self.analyze()
self.assert_silent()
def test_orientation_list(self):
"""Test that the 'orientation' property can be absent."""
self.data["orientation"] = ["portrait", "portrait-secondary"]
self.analyze()
self.assert_silent()
def test_orientation_is_string(self):
"""Test that the 'orientation' property must be a string."""
self.data["orientation"] = {}
self.analyze()
self.assert_failed(with_errors=True)
def test_orientation_cannot_be_empty(self):
"""Test that 'orientation' cannot be an empty string."""
self.data["orientation"] = ""
self.analyze()
self.assert_failed(with_errors=True)
def test_orientation_valid_value(self):
"""Test that 'orientation' must have a valid value."""
def test_orientation(self, orientation):
self.setUp()
self.data["orientation"] = orientation
self.analyze()
self.assert_silent()
for key in ("portrait", "landscape", "portrait-secondary",
"landscape-secondary", "portrait-primary",
"landscape-primary"):
yield test_orientation, self, key
def test_orientation_bad_value(self):
"""Test that 'orientation' cannot have an invalid value."""
self.data["orientation"] = "fart"
self.analyze()
self.assert_failed(with_errors=True)
def test_orientation_empty_list(self):
"""Test that 'orientation' cannot be an empty list."""
self.data["orientation"] = []
self.analyze()
self.assert_failed(with_errors=True)
def test_orientation_list_invalid(self):
"""Test that 'orientation' cannot be a list with invalid values."""
self.data["orientation"] = ["fart"]
self.analyze()
self.assert_failed(with_errors=True)
def test_orientation_list_mixed(self):
"""Test that 'orientation' cannot be a list with mixed values."""
self.data["orientation"] = ["portrait", "fart", "landscape"]
self.analyze()
self.assert_failed(with_errors=True)
def test_orientation_list_type(self):
"""Test that 'orientation' cannot be a list with non-strings."""
self.data["orientation"] = ["portrait", 4]
self.analyze()
self.assert_failed(with_errors=True)
def test_inputs_dict_valid(self):
"""Test with 'inputs' entries throw no errors."""
self.data['inputs'] = {
'input1': {
'name': 'Symbols',
'description': 'Symbols Virtual Keyboard',
'launch_path': '/input1.html',
'types': ['text']
},
'siri': {
'name': 'Voice Control',
'description': 'Voice Control Input',
'launch_path': '/vc.html',
'types': ['text', 'url']
}
}
self.analyze()
self.assert_silent()
def test_inputs_dict_empty(self):
"""Test that 'inputs' may not be empty dict."""
self.data['inputs'] = {}
self.analyze()
self.assert_failed(with_errors=True)
def test_inputs_dict_entry_missing_name(self):
"""Test that 'inputs' with an entry missing 'name'."""
self.data['inputs'] = {
'input1': {
'description': 'Symbols Virtual Keyboard',
'launch_path': '/input1.html',
'types': ['text']
}
}
self.analyze()
self.assert_failed(with_errors=True)
def test_inputs_dict_entry_missing_description(self):
"""Test that 'inputs' with an entry missing 'description'."""
self.data['inputs'] = {
'input1': {
'name': 'Symbols',
'launch_path': '/input1.html',
'types': ['text']
}
}
self.analyze()
self.assert_failed(with_errors=True)
def test_inputs_dict_entry_missing_launch_path(self):
"""Test that 'inputs' with an entry missing 'launch_path'."""
self.data['inputs'] = {
'input1': {
'name': 'Symbols',
'description': 'Symbols Virtual Keyboard',
'types': ['text']
}
}
self.analyze()
self.assert_failed(with_errors=True)
def test_inputs_dict_entry_missing_types(self):
"""Test that 'inputs' with an entry missing 'types'."""
self.data['inputs'] = {
'input1': {
'name': 'Symbols',
'description': 'Symbols Virtual Keyboard',
'launch_path': '/input1.html'
}
}
self.analyze()
self.assert_failed(with_errors=True)
def test_inputs_dict_entry_empty_types(self):
"""Test that 'inputs' with an entry with empty 'types'."""
self.data['inputs'] = {
'input1': {
'name': 'Symbols',
'description': 'Symbols Virtual Keyboard',
'launch_path': '/input1.html',
'types': []
}
}
self.analyze()
self.assert_failed(with_errors=True)
def test_inputs_dict_entry_invalid_types(self):
"""Test that 'inputs' with an entry with invalid 'types'."""
self.data['inputs'] = {
'input1': {
'name': 'Symbols',
'description': 'Symbols Virtual Keyboard',
'launch_path': '/input1.html',
'types': ['foo']
}
}
self.analyze()
self.assert_failed(with_errors=True)
def test_inputs_dict_entry_locales(self):
"""Test that 'inputs' with an localized entry."""
self.data['inputs'] = {
'input1': {
'name': 'Symbols',
'description': 'Symbols Virtual Keyboard',
'launch_path': '/input1.html',
'types': ['text'],
'locales': {
'es': {
'name': 'foo',
'description': 'bar'
}
}
}
}
self.analyze()
self.assert_silent()
def test_inputs_dict_entry_invalid_locales(self):
"""Test that 'inputs' with an localized entry but contain invalid element."""
self.data['inputs'] = {
'input1': {
'name': 'Symbols',
'description': 'Symbols Virtual Keyboard',
'launch_path': '/input1.html',
'types': ['text'],
'locales': {
'es': {
'name': 'foo',
'description': 'bar',
'foo': 'bar2'
}
}
}
}
self.analyze()
self.assert_failed(with_warnings=True)
def test_fullscreen_missing(self):
"""Test that the 'fullscreen' property can be absent."""
del self.data["fullscreen"]
self.analyze()
self.assert_silent()
def test_fullscreen_is_string(self):
"""Test that the 'fullscreen' property must be a string."""
self.data["fullscreen"] = {}
self.analyze()
self.assert_failed(with_errors=True)
def test_fullscreen_cannot_be_empty(self):
"""Test that 'fullscreen' cannot be an empty string."""
self.data["fullscreen"] = ""
self.analyze()
self.assert_failed(with_errors=True)
def test_fullscreen_valid_value(self):
"""Test that 'fullscreen' must have a valid value."""
def test_fullscreen(self, value):
self.setUp()
self.data["fullscreen"] = key
self.analyze()
self.assert_silent()
for key in ("true", "false", ):
yield test_fullscreen, self, key
def test_fullscreen_bad_value(self):
"""Test that 'fullscreen' cannot have an invalid value."""
self.data["fullscreen"] = "fart"
self.analyze()
self.assert_failed(with_errors=True)
def test_type_failed(self):
"""Test that the `type` element must be a recognized value."""
self.data["type"] = "foo"
self.analyze()
self.assert_failed(with_errors=True)
def test_type_valid(self):
"""Test that the `type` element doesn't fail with valid values."""
def wrap(self, value):
self.setUp()
self.resources.append(("packaged", value != "web"))
self.data["type"] = value
self.analyze()
self.assert_silent()
for key in ("web", "privileged", "certified", ):
yield wrap, self, key
def test_type_not_certified(self):
"""Test that certified apps cannot be listed in the marketplace."""
self.listed = True
self.data["type"] = "certified"
self.analyze()
self.assert_failed(with_errors=True)
def test_type_web_priv_fail(self):
"""Test that web apps cannot be privileged or certified."""
self.data["type"] = "web"
self.resources.append(("packaged", False))
self.analyze()
self.assert_silent()
def test_type_packaged_priv_fail(self):
"""Test that web apps cannot be privileged or certified."""
self.data["type"] = "privileged"
self.resources.append(("packaged", True))
self.analyze()
self.assert_silent()
###########
# Web activities are tested in tests/test_webapp_activity.py
###########
def test_act_root_type(self):
"""Test that the most basic web activity passes."""
self.data["activities"] = "wrong type"
self.analyze()
self.assert_failed(with_errors=True)
def test_version(self):
"""Test that the version matches the format that we require."""
def wrap(version, passes):
self.setUp()
self.data["version"] = version
self.analyze()
if passes:
self.assert_silent()
else:
self.assert_failed(with_errors=True)
yield wrap, "1.0", True
yield wrap, "1.0.1", True
yield wrap, "Poop", True
yield wrap, "1.0b", True
yield wrap, "*.*", True
yield wrap, "1.5-alpha", True
yield wrap, "1.5_windows", True
yield wrap, "1.5_windows,x64", True
yield wrap, "Mountain Lion", False
yield wrap, "", False
for char in "`~!@#$%^&()+=/|\\<>":
yield wrap, char * 3, False
def set_permissions(self):
"""Fill out the permissions node with every possible permission."""
self.data["permissions"] = {}
for perm in appvalidator.constants.ALL_PERMISSIONS:
self.data["permissions"][perm] = {
"description": "Required to make things good."
}
if perm in WebappSpec.PERMISSIONS_ACCESS:
self.data["permissions"][perm]["access"] = (
WebappSpec.PERMISSIONS_ACCESS[perm][0])
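# For reference, set_permissions() above fills self.data["permissions"] with
# entries shaped roughly as sketched below; the permission names and access
# values come from appvalidator.constants.ALL_PERMISSIONS and
# WebappSpec.PERMISSIONS_ACCESS, so "contacts"/"readonly" here are only
# illustrative:
#
#     {
#         "alarm": {"description": "Required to make things good."},
#         "contacts": {"description": "Required to make things good.",
#                      "access": "readonly"},
#     }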
def test_permissions_full(self):
self.set_permissions()
self.analyze()
self.assert_silent()
for perm in appvalidator.constants.ALL_PERMISSIONS:
self.assert_has_permission(perm)
def test_permissions_extra_invalid(self):
self.set_permissions()
self.data["permissions"]["foo"] = {"description": "lol"}
self.analyze()
self.assert_failed(with_errors=True)
assert 'foo' not in self.err.get_resource("permissions")
for perm in appvalidator.constants.ALL_PERMISSIONS:
self.assert_has_permission(perm)
def test_permissions_missing_desc(self):
self.set_permissions()
self.data["permissions"]["alarm"] = {}
self.analyze()
self.assert_failed(with_errors=True)
def test_permissions_missing_access(self):
self.set_permissions()
del self.data["permissions"]["contacts"]["access"]
self.analyze()
self.assert_failed(with_errors=True)
def test_permissions_invalid_access(self):
self.set_permissions()
self.data["permissions"]["contacts"]["access"] = "asdf"
self.analyze()
self.assert_failed(with_errors=True)
def test_permissions_wrong_access(self):
self.set_permissions()
# This access type isn't available for the `settings` permission.
self.data["permissions"]["settings"]["access"] = "createonly"
self.analyze()
self.assert_failed(with_errors=True)
def test_permissions_mobileid(self):
self.set_permissions()
self.data["permissions"]["mobileid"] = {"description": "cause"}
self.analyze()
self.assert_silent()
def test_csp(self):
self.data['csp'] = 'this is the csp policy. it can be a string.'
self.analyze()
self.assert_silent()
def test_description_long(self):
self.data['description'] = 'x' * 1025
self.analyze()
self.assert_failed(with_errors=True)
def test_locale_description_long(self):
self.data['locales']['es']['description'] = u'×' * 1025
self.analyze()
self.assert_failed(with_errors=True)
assert 'locales > es > description' in (
self.err.errors[0]['description'][-1])
def test_appcache_path_packaged(self):
self.data["appcache_path"] = '/foo.bar'
self.analyze()
self.assert_silent()
self.resources.append(("packaged", True))
self.analyze()
self.assert_failed(with_errors=True)
def test_messages_not_list(self):
self.data['messages'] = "foo"
self.analyze()
self.assert_failed(with_errors=True)
def test_messages_obj_not_obj(self):
self.data['messages'] = ["foo"]
self.analyze()
self.assert_failed(with_errors=True)
def test_messages_multiple_keys(self):
self.data['messages'] = [{"a": "1", "b": "2"}]
self.analyze()
self.assert_failed(with_errors=True)
def test_messages_pass(self):
self.data['messages'] = [{"key": "val"}, {"key": "val"}]
self.analyze()
self.assert_silent()
def test_redirects_pass(self):
self.data['redirects'] = [
{"to": "asdf", "from": "qwer"},
{"to": "asdf", "from": "qwer"},
]
self.analyze()
self.assert_silent()
def test_redirects_type(self):
self.data['redirects'] = 'asdf'
self.analyze()
self.assert_failed(with_errors=True)
def test_redirects_subtype(self):
self.data['redirects'] = [
'asdf',
{"to": "asdf", "from": "qwer"},
]
self.analyze()
self.assert_failed(with_errors=True)
def test_redirects_required_nodes(self):
self.data['redirects'] = [
{"bar": "asdf", "foo": "qwer"},
{"to": "asdf", "from": "qwer"},
]
self.analyze()
self.assert_failed(with_errors=True)
def test_redirects_missing_nodes(self):
self.data['redirects'] = [
{"to": "asdf"},
{"to": "asdf", "from": "qwer"},
]
self.analyze()
self.assert_failed(with_errors=True)
def test_origin_unprivileged(self):
self.data['origin'] = 'app://domain.com'
self.analyze()
self.assert_failed(with_errors=True)
def test_origin_must_be_lowercase(self):
self.make_privileged()
self.data['origin'] = 'app://DOMAIN.com'
self.analyze()
self.assert_failed(with_errors=True)
def test_arbitrary_origin(self):
self.make_privileged()
self.data['origin'] = 'app://just-some-identifier-string'
self.analyze()
self.assert_silent()
def test_uuid_origin(self):
self.make_privileged()
self.data['origin'] = 'app://878a1076-130e-46fc-a73f-634394166d14'
self.analyze()
self.assert_silent()
def test_origin_pass(self):
self.make_privileged()
self.data['origin'] = 'app://domain.com'
self.analyze()
self.assert_silent()
def test_origin_dashes(self):
self.make_privileged()
self.data["origin"] = "app://my-domain.com"
self.analyze()
self.assert_silent()
def test_origin_subdomains(self):
self.make_privileged()
self.data["origin"] = "app://sub.domain.com"
self.analyze()
self.assert_silent()
def test_origin_non_fqdn(self):
self.make_privileged()
self.data["origin"] = "app://hello"
self.analyze()
self.assert_silent()
def test_origin_type(self):
self.make_privileged()
self.data["origin"] = 123
self.analyze()
self.assert_failed(with_errors=True)
def test_origin_cannot_have_spaces(self):
self.make_privileged()
self.data["origin"] = "app://origin with spaces"
self.analyze()
self.assert_failed(with_errors=True)
def test_origin_must_be_web_safe(self):
self.make_privileged()
self.data["origin"] = "app://control\tchars\ndisallowed"
self.analyze()
self.assert_failed(with_errors=True)
def test_origin_must_have_app_protocol(self):
self.make_privileged()
self.data["origin"] = "random-identifier-without-protocol"
self.analyze()
self.assert_failed(with_errors=True)
def test_origin_cannot_contain_path(self):
self.make_privileged()
self.data["origin"] = "app://domain.com/path"
self.analyze()
self.assert_failed(with_errors=True)
def test_origin_cannot_end_in_trailing_slash(self):
self.make_privileged()
self.data["origin"] = "app://domain.com/"
self.analyze()
self.assert_failed(with_errors=True)
def test_origin_allowed(self):
for origin in ("app://marketplace.firefox.com",
"app://gamescentre.mozilla.com",
"app://mozilla.org",
"app://system.gaiamobile.org"):
self.make_privileged()
self.data["origin"] = origin
self.analyze()
self.assert_silent()
@patch("appvalidator.specs.webapps.BANNED_ORIGINS", [
"gamescentre.mozilla.com",
"firefox.com",
])
def test_origin_banned(self):
self.make_privileged()
for origin in ("app://gamescentre.mozilla.com",
"app://theconsoleisdead.firefox.com"):
self.data["origin"] = origin
self.analyze()
self.assert_failed(with_errors=True)
def test_chrome(self):
self.data["chrome"] = {"navigation": True}
self.analyze()
self.assert_silent()
def test_chrome_alt(self):
self.data["chrome"] = {"navigation": False}
self.analyze()
self.assert_silent()
def test_chrome_bad_navigation(self):
self.data["chrome"] = {"navigation": 123}
self.analyze()
self.assert_failed(with_errors=True)
def test_chrome_bad_keys(self):
self.data["chrome"] = {"haldo": 123}
self.analyze()
self.assert_failed(with_errors=True)
def test_chrome_bad_type(self):
self.data["chrome"] = []
self.analyze()
self.assert_failed(with_errors=True)
def test_precompile_wrong_format(self):
# "precompile" should be list of files not this weird dict.
self.data["precompile"] = {"foo.js": True}
self.analyze()
self.assert_failed(with_errors=True)
def test_precompile_feature(self):
self.analyze()
self.assert_silent()
self.assert_has_feature('PRECOMPILE_ASMJS')
|
stasm/app-validator
|
tests/test_webapp.py
|
Python
|
bsd-3-clause
| 48,765
|
[
"exciting"
] |
3652827949f41bc33a16855c4d3bc2f5fe3c7c4b394dfafbdf72e6b775143070
|
"""
Functions to spot hemi-methylated (hemy) regions
"""
import os.path as op
import datetime
from collections import defaultdict, Counter
from tabulate import tabulate
from scipy import stats
import numpy as np
import pybedtools
import pysam
import vcf
from bcbio.distributed.transaction import file_transaction
from bcbio.provenance import do
from bcbio.utils import append_stem, file_exists, splitext_plus, safe_makedir
from bcbio.variation.vcfutils import bgzip_and_index
def is_good_cpg(frmt, record):
alt_depth = sum(map(int, frmt['DP4'].split(','))[2:])
ref_depth = sum(map(int, frmt['DP4'].split(','))[:2])
if record[6] != "PASS":
return False
if int(ref_depth) > 3 and int(alt_depth) > 3:
return True
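# DP4 is the standard samtools/bcftools field holding four high-quality depth
# counts: ref-forward, ref-reverse, alt-forward, alt-reverse. A quick
# illustration of the split used above (the numbers are made up):
#
#     frmt = {"DP4": "5,4,6,2"}
#     ref_depth = 5 + 4   # first two counts
#     alt_depth = 6 + 2   # last two counts
#     # is_good_cpg() then requires FILTER == "PASS" and both sums > 3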
def _genotype(alleles):
if alleles[0] == alleles[1]:
return "homoz"
else:
return "heteroz"
def is_good_het(frmt, record):
depth = sum(map(int, frmt['DP4'].split(','))[2:])
# if _genotype(frmt['GT'].split("/")) == "heteroz" and int(frmt['DP']) > 3 and depth > 3 and record[6] == "PASS":
if _genotype(frmt['GT'].split("/")) == "heteroz" and int(frmt['DP']) > 3:
return True
def _get_strand(record):
return record[7].split(";")[0].split("=")[1]
def _snp_veracity_both_strand(sense, anti):
"""
Return True only if the SNP is detected on both strands with the same number of alleles
"""
gen_plus = sense.keys()
gen_minus = anti.keys()
allels1 = [g.split(":")[0].split("/")[0] for g in gen_plus]
allels2 = [g.split(":")[0] for g in gen_minus]
if len(allels1) == len(allels2):
return True
def _read_pairs(gt):
# print "read_pairs %s" % gt
gt1 = gt.split(":")[0].split("/")[0]
if gt.find("/") > -1:
gt2 = gt.split(":")[0].split("/")[1]
return (gt1, gt2)
def _get_total(gts, total):
return [total[_read_pairs(gts[0][1])[0]], total[_read_pairs(gts[1][1])[0]]]
def _top_gt(gts):
total = Counter()
first = _read_pairs(gts[0][1])
top = None
for gt in gts:
pair = _read_pairs(gt[1])
if pair:
if pair[0] != first[0] and pair[1] != first[1]:
top = [gts[0], gt]
total[pair[0]] += gt[0]
if top:
total = _get_total(top, total)
return top, total
return False, False
def _above_prop(x, s, p=0.8):
pvals = []
for p in [0.8, 0.9, 1.0]:
pvals.append(stats.binom_test(x, s, p))
return max(pvals) > 0.70
def _prop(gt):
sense_sorted = sorted(zip(gt.values(), gt.keys()), reverse=True)
top_2, total = _top_gt(sense_sorted)
# print "top_2 %s totla %s" % (top_2, total)
if top_2:
gt2_prop = float(top_2[1][0]) / total[1]
gt1_prop = float(top_2[0][0]) / total[0]
table = np.array([[top_2[1][0], total[1] - top_2[1][0]], [total[0] - top_2[0][0], top_2[0][0]]])
# print "table\n%s\ntotals %s %s" % (table, gt1_prop, gt2_prop)
# print stats.fisher_exact(table)
if stats.fisher_exact(table)[1] < 0.05 and _above_prop(top_2[0][0], total[0]) and _above_prop(top_2[1][0], total[1]):
return True
return False
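# _prop() asks whether the SNP allele and the CpG state are linked: it takes
# the two most frequent allele/CpG pairings, builds a 2x2 contingency table,
# and combines a Fisher exact test with the _above_prop() proportion checks.
# A rough sketch with made-up counts (keys follow the "v<SNP>/c<CpG>:<strand>"
# format produced by _make_linkage):
#
#     top_2 = [(30, "vA/cC:+"), (25, "vG/cT:+")]   # (reads, pairing)
#     total = [35, 28]                             # reads per SNP allele
#     table = [[25, 28 - 25],
#              [35 - 30, 30]]
#     # stats.fisher_exact(table) -> (odds_ratio, p_value); p < 0.05 plus
#     # both proportion checks passing marks the pairing as significant.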
def _valid_test(link, link_as):
"""
Only True if the top two associated nucleotides are both well represented
"""
# print "link %s %s" % (link, link_as)
if len(link) > 1:
sense_pval = _prop(link)
else:
sense_pval = False
# if len(link_as) > 1:
# anti_pval = _prop(link_as)
# else:
# anti_pval = True
if sense_pval:
return True
return False
def _valid(link, link_as):
"""
Only if one snp allele is associated with the Cu/Cm
"""
if len(link) == 2:
gen = link.keys()
allels1 = gen[0].split(":")[0].split("/")
allels2 = gen[1].split(":")[0].split("/")
if allels1[0] != allels2[0] and allels1[1] != allels2[1] and _snp_veracity_both_strand(link, link_as):
return True
def _format(link):
"""
Give nice format to dict with alleles and reads supporting
"""
cell = ''
for allele in link:
cell += "%s=%s;" % (allele, link[allele])
return cell
def _change_to_cpg(line, tag):
return line.replace(tag, "CpG%s" % tag).strip()
def _change_to_snp(line, tag):
return line.replace(tag, "SNP%s" % tag).strip()
def _create_vcf_header(vcf_file, out_handle):
"""
Create header for final vcf
"""
print >>out_handle, "##fileformat=VCFv4.1"
print >>out_handle, "##fileData=%s" % datetime.date.today().strftime('%y%m%d')
with open(vcf_file) as in_handle:
for line in in_handle:
if line.startswith("##reference"):
print >>out_handle, line.strip()
if line.startswith("##contig"):
print >>out_handle, line.strip()
if line.startswith("#CHROM"):
print >>out_handle, line.strip()
if line.startswith("##BisSNP"):
print >>out_handle, line.strip()
if line.startswith("##FILTER"):
print >>out_handle, line.strip()
if line.startswith("##FORMAT=<ID=GT"):
print >>out_handle, line.strip()
if line.startswith("##INFO=<ID=DP"):
print >>out_handle, line.strip()
if line.startswith("##FORMAT=<ID=BRC6"):
print >>out_handle, _change_to_cpg(line, 'BRC6')
print >>out_handle, _change_to_snp(line, 'BRC6')
if line.startswith("##FORMAT=<ID=CM"):
print >>out_handle, _change_to_cpg(line, 'CM')
print >>out_handle, _change_to_snp(line, 'CM')
if line.startswith("##FORMAT=<ID=CU"):
print >>out_handle, _change_to_cpg(line, 'CU')
print >>out_handle, _change_to_snp(line, 'CU')
if line.startswith("##FORMAT=<ID=CP"):
print >>out_handle, _change_to_cpg(line, 'CP')
print >>out_handle, _change_to_snp(line, 'CP')
if line.startswith("##FORMAT=<ID=DP"):
print >>out_handle, _change_to_cpg(line, 'DP')
print >>out_handle, _change_to_snp(line, 'DP')
if line.startswith("##INFO=<ID=CS"):
print >>out_handle, line.strip()
def _get_info(info, tag):
"""
get value from info vcf field
"""
return next((value.split("=")[1] for value in info.split(";") if value.startswith(tag)), None)
def _get_format(header, frmt):
"""
get format field in dict instance
"""
frmt = dict(zip(header.split(":"), frmt.split(':')))
return frmt
def _format_vcf_value(frmt1, frmt2, tag):
return {_change_to_cpg(tag, tag): frmt1[tag],
_change_to_snp(tag, tag): frmt2[tag]}
def _get_vcf_line(record):
"""
create new vcf file with CpG and SNP information
"""
frmt = {}
cs = _get_info(record[7], "CS")
ref = "%s%s" % ("C", record[13])
alt = "%s%s" % ("C", record[14])
qual = (float(record[5]) + float(record[15])) / 2
filter = "LowQual"
dp = int(_get_info(record[7], "DP")) + int(_get_info(record[17], "DP"))
info = ";".join(["DP=%s" % dp, "CS=%s" % cs])
cpg = _get_format(record[8], record[9])
snp = _get_format(record[18], record[19])
for value in ["BRC6", "CM", "CU", "CP", "DP"]:
frmt.update(_format_vcf_value(cpg, snp, value))
format = "GT:" + ":".join(frmt.keys())
sample = snp["GT"] + ":" + ":".join(frmt.values())
return record[0], record[11], ref, alt, qual, filter, info, format, sample
def _correct_vcf(vcf_file):
"""
sort by genome/position, bgzip and index
"""
vcf_sort = append_stem(vcf_file, "_sort") + ".gz"
if not file_exists(vcf_sort):
with file_transaction(vcf_sort) as tx_out:
cmd = "cat {vcf_file} |vcf-sort | bgzip > {tx_out}"
do.run(cmd.format(**locals()), "sort %s" % vcf_file)
do.run("tabix -f {0}".format(tx_out), "")
return vcf_sort
def cpg_het_pairs(cpgvcf, snpvcf, bam_file, out_file, workdir):
"""
Detect het close to hemi-met sites
"""
out_vcf = splitext_plus(out_file)[0] + ".vcf"
cpg_filter = op.join(workdir, op.basename(append_stem(cpgvcf, "_filtered")))
snp_filter = op.join(workdir, op.basename(append_stem(snpvcf, "_filtered")))
if not file_exists(cpg_filter):
with open(cpg_filter, 'w') as out_handle:
with open(cpgvcf) as in_handle:
for line in in_handle:
if line.startswith("#"):
continue
record = line.strip().split("\t")
# print record
header, frmt = record[8], record[9]
frmt = dict(zip(header.split(":"), frmt.split(':')))
if is_good_cpg(frmt, record):
print >>out_handle, line
if not file_exists(snp_filter):
with open(snp_filter, 'w') as out_handle:
with open(snpvcf) as in_handle:
for line in in_handle:
if line.startswith("#"):
continue
record = line.strip().split("\t")
header, frmt = record[8], record[9]
frmt = dict(zip(header.split(":"), frmt.split(':')))
if is_good_het(frmt, record):
print >>out_handle, line
if not file_exists(out_vcf):
res = pybedtools.BedTool(cpg_filter).window(snp_filter, w=75)
with open(out_file, 'w') as out_handle, open(out_vcf, 'w') as vcf_handle:
_create_vcf_header(cpgvcf, vcf_handle)
print >>out_handle, "chrom\tCpG_pos\tCpG_nt\tSNP_pos\tAlleles\tassociation_plus\tSNP_reads_minus"
for record in res:
if record[1] != record[11]:
# if record[1] == "19889634":
link, link_as, align = _make_linkage(bam_file, record[0], int(record[1]), int(record[11]), _get_strand(record))
res = "%s\t%s\t%s\t%s\t%s/%s\t%s\t%s" % (record[0], record[1], record[3], record[11], record[13], record[14], _format(link), _format(link_as))
chrom, pos, ref, alt, qual, filt, info, frmt, sample = _get_vcf_line(record)
# print res
if _valid_test(link, link_as):
filt = "PASS"
print >>out_handle, res
# print res
# print >>out_handle, '\n'.join(align)
vcf_res = "{chrom}\t{pos}\t.\t{ref}\t{alt}\t{qual}\t{filt}\t{info}\t{frmt}\t{sample}".format(**locals())
print >>vcf_handle, vcf_res
return _correct_vcf(out_vcf)
def _complement(nt):
if nt == 'a':
return 't'
elif nt == 't':
return 'a'
elif nt == 'c':
return 'g'
elif nt == 'g':
return 'c'
def _model(pileup, snp, cpg_st):
c_pos, v_pos = [], []  # separate lists; chained assignment would alias them
for read in pileup:
if len(pileup[read].keys()) == 1:
continue
info_snp = pileup[read]['snp'].split(":")
info_cpg = pileup[read]['cpg'].split(":")
if info_cpg[1] == cpg_st:
if cpg_st == "+":
c_pos.append(info_cpg[0].lower())
v_pos.append(info_snp[0].lower())
else:
c_pos.append(_complement(info_cpg[0].lower()))
v_pos.append(_complement(info_snp[0].lower()))
else:
if info_snp[1] == "+":
v_pos.append(info_snp[0].lower())
else:
v_pos.append(_complement(info_snp[0].lower()))
def _make_linkage(bam_file, chrom, cpg, snp, cpg_st):
start, end = [cpg-1, snp-1] if cpg-1 < snp-1 else [snp-1, cpg-1]
pairs = _pairs_matrix(bam_file, [chrom, start, end], cpg-1, snp-1)
link = Counter()
link_as = Counter()
align = []
for pair in pairs:
if len(pairs[pair].keys()) == 1:
continue
nts = [pairs[pair]['cpg'].split(":")[0], pairs[pair]['snp'].split(":")[0]]
align.append("-".join(nts) if cpg < snp else "-".join(nts[::-1]))
info_snp = pairs[pair]['snp'].split(":")
# if info_snp[1] == cpg_st:
# print pairs[pair]
if pairs[pair]['cpg']:
info_cpg = pairs[pair]['cpg'].split(":")
if info_cpg[1] == info_snp[1] and info_cpg[1] == cpg_st:
link["v%s/c%s:%s" % (info_snp[0], info_cpg[0], cpg_st)] += 1
# else:
# link_as["v%s:%s" % (info_snp[0], info_snp[1])] += 1
# print "LINK\n%s\n" % link
return link, link_as, align
def _pairs_matrix(bam_file, region, cpg, snp):
"""
Get reads from the cpg region and pairs
cpg nt with snp nt
"""
pileup = defaultdict(dict)
c, s, e = region
samfile = pysam.AlignmentFile(bam_file, "rb")
for pileupcolumn in samfile.pileup(c, s, e):
if pileupcolumn.pos == cpg or pileupcolumn.pos == snp:
# print ("\ncoverage at base %s = %s" % (pileupcolumn.pos, pileupcolumn.n))
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip: # query position is None if is_del or is_refskip is set.
strand = "-" if pileupread.alignment.is_reverse else "+"
tag = "cpg" if pileupcolumn.pos == cpg else "snp"
nt = pileupread.alignment.query_sequence[pileupread.query_position]
nt = nt.lower() if strand == "-" else nt
pileup[pileupread.alignment.query_name].update({tag: nt + ":%s" % strand})
return pileup
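# _pairs_matrix() returns a dict keyed by read name; every read covering the
# CpG and/or the SNP column gets a 'cpg'/'snp' entry of the form
# "<base>:<strand>", with the base lower-cased on the minus strand.
# Illustrative shape (read names and bases are made up):
#
#     {"read_001": {"cpg": "C:+", "snp": "A:+"},
#      "read_002": {"cpg": "g:-", "snp": "t:-"},
#      "read_003": {"snp": "A:+"}}   # read did not cover the CpG column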
def get_het(in_vcf, region, sample, out_file):
res = pybedtools.BedTool(in_vcf).intersect(b=region, wo=True)
with file_transaction(out_file) as tx_out:
with open(tx_out, 'w') as out_handle:
# print >> out_handle, "chrom\tstart\tend\tgen\dp4\tstrand\tgene\tsample"
for record in res:
gene = record[-2]
chrom, pos, info, header, frmt = record[0], int(record[1]), record[7], record[8], record[9]
# cs = info.split(';')[0].split('=')[1]
frmt = dict(zip(header.split(":"), frmt.split(':')))
# if _genotype(frmt['GT'].split("/")) == "heteroz" and int(frmt['DP']) > 10 and int(frmt['DP4']) > 10 and record[6] == "PASS":
if is_good_het(frmt, record):
tag = "%s-%s-%s-%s" % (frmt['GT'], frmt['DP'], gene, sample)
print >> out_handle, "%s\t%s\t%s\t%s\t.\t+" % (chrom, pos, pos + 1, tag )
def post_processing(vcf_res, vcf_merged, out):
"""
merge list of vcf files and get stats
"""
if len(vcf_res) == 1:
return vcf_res
if not file_exists(vcf_merged):
cmd = "bcftools merge {0} > {1}".format(" ".join(vcf_res), vcf_merged)
do.run(cmd, "merge files")
vcf_reader = vcf.Reader(open(vcf_merged, 'r'))
samples = vcf_reader.samples
num_call = Counter()
num_call_sample = Counter()
for record in vcf_reader:
if not record.FILTER:
num_call[record.num_called] += 1
# print record.num_called
for sample in samples:
if record.genotype(sample)['GT'] != "./.":
# print record.genotype(sample)['GT']
num_call_sample[sample] += 1
with open(out + "_shared_stat.tsv", 'w') as stat_handle:
print >>stat_handle, tabulate([[k, v] for k, v in num_call.iteritems()], headers=["# samples", "# of SNPs"])
with open(out + "_stat.tsv", 'w') as stat_handle:
print >>stat_handle, tabulate([[k, v] for k, v in num_call_sample.iteritems()], headers=["samples", "# of SNPs"])
def detect_asm(data, args):
vcf_res = []
in_vcf = data['fastq']
bam_file = data['bam']
sample = splitext_plus(op.basename(in_vcf))[0].split(".raw")[0].replace(".rawcpg", "")
workdir = op.join(args.out, sample)
safe_makedir(workdir)
snp_file = in_vcf.replace("rawcpg", "rawsnp")
assert bam_file, "No bam file associated to vcf %s" % in_vcf
out_file = op.join(workdir, sample + "_pairs.tsv")
vcf_res = cpg_het_pairs(in_vcf, snp_file, bam_file, out_file, workdir)
data['asm'] = vcf_res
return data
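# Sketch of the inputs detect_asm() expects, inferred from the code above
# rather than from project docs; the paths are illustrative:
#
#     data = {"fastq": "/path/sample.rawcpg.vcf",   # CpG calls
#             "bam": "/path/sample.bam"}
#     # The matching SNP VCF is derived by replacing "rawcpg" with "rawsnp",
#     # and detect_asm(data, args) adds data["asm"] pointing at the sorted,
#     # bgzipped pairs VCF produced by cpg_het_pairs().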
|
lpantano/ASMfinder
|
asm/select.py
|
Python
|
mit
| 16,220
|
[
"pysam"
] |
459cb20dc4d3f339c7c46798b41e4438678d43cda5de3f3be063d27dd7f890dc
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements input and output processing from QChem.
"""
from __future__ import unicode_literals
import textwrap
import copy
import re
import os
import numpy as np
from string import Template
import six
from monty.io import zopen
from pymatgen.core.operations import SymmOp
from pymatgen.core.structure import Molecule
from pymatgen.core.units import Energy, FloatWithUnit
from monty.json import MSONable
from pymatgen.util.coord import get_angle
__author__ = "Xiaohui Qu"
__copyright__ = "Copyright 2013, The Electrolyte Genome Project"
__version__ = "0.1"
__maintainer__ = "Xiaohui Qu"
__email__ = "[email protected]"
__date__ = "11/4/13"
class QcTask(MSONable):
"""
An object representing a QChem input file.
Args:
molecule: The input molecule. If it is None of string "read",
QChem will read geometry from checkpoint file. If it is a
Molecule object, QcInput will convert it into Cartesian
coordinates. Valid values: pymatgen Molecule object, "read", None
Defaults to None.
charge (int): Charge of the molecule. If None, charge on molecule is
used. Defaults to None.
spin_multiplicity (int): Spin multiplicity of molecule. Defaults to
None, which means that the spin multiplicity is set to 1 if the
molecule has no unpaired electrons and to 2 if there are
unpaired electrons.
jobtype (str): The type of the QChem job. "SP" for Single Point Energy,
"opt" for geometry optimization, "freq" for
vibrational frequency.
title (str): Comments for the job. Defaults to None, which means the
$comment section will be discarded.
exchange (str): The exchange method of the theory. Examples include:
"B" (in pure BLYP), "PW91", "PBE", "TPSS".
Defaults to "HF".
This parameter can also be common names of hybrid
functionals, such as B3LYP, TPSSh, XYGJOS. In such cases,
the correlation parameter should be left as None.
correlation (str): The correlation level of the theory. Examples
include: "MP2", "RI-MP2", "CCSD(T)", "LYP", "PBE", "TPSS"
Defaults to None.
basis_set (str/dict): The basis set.
If it is a dict, each element can use different basis set.
aux_basis_set (str/dict): Auxiliary basis set. For methods,
like RI-MP2, XYG3, OXYJ-OS, auxiliary basis set is required.
If it is a dict, each element can use different auxiliary
basis set.
ecp: Effective core potential (ECP) to be used.
If it is a dict, each element can use different ECP.
rem_params (dict): The parameters supposed to write in the $rem
section. Dict of key/value pairs.
Example: {"scf_algorithm": "diis_gdm", "scf_max_cycles": 100}
optional_params (dict): The parameter for keywords other than $rem
section. Dict of key/value pairs.
Example: {"basis": {"Li": "cc-PVTZ", "B": "aug-cc-PVTZ",
"F": "aug-cc-PVTZ"} "ecp": {"Cd": "srsc", "Br": "srlc"}}
ghost_atoms (list): List of ghost atom indices. Indices start from 0.
The ghost atoms will be represented in the form of @element_symbol
"""
optional_keywords_list = {"basis", "basis2", "ecp", "empirical_dispersion",
"external_charges", "force_field_params",
"intracule", "isotopes", "aux_basis",
"localized_diabatization", "multipole_field",
"nbo", "occupied", "swap_occupied_virtual", "opt",
"pcm", "pcm_solvent", "solvent", "plots", "qm_atoms", "svp",
"svpirf", "van_der_waals", "xc_functional",
"cdft", "efp_fragments", "efp_params", "alist", "velocity"}
alternative_keys = {"job_type": "jobtype",
"symmetry_ignore": "sym_ignore",
"scf_max_cycles": "max_scf_cycles"}
alternative_values = {"optimization": "opt",
"frequency": "freq"}
zmat_patt = re.compile(r'^(\w+)*([\s,]+(\w+)[\s,]+(\w+))*[\-\.\s,\w]*$')
xyz_patt = re.compile(r'^(\w+)[\s,]+([\d\.eE\-]+)[\s,]+([\d\.eE\-]+)[\s,]+'
r'([\d\.eE\-]+)[\-\.\s,\w.]*$')
def __init__(self, molecule=None, charge=None, spin_multiplicity=None,
jobtype='SP', title=None, exchange="HF", correlation=None,
basis_set="6-31+G*", aux_basis_set=None, ecp=None,
rem_params=None, optional_params=None, ghost_atoms=None,
method=None):
self.mol = copy.deepcopy(molecule) if molecule else "read"
self.charge = charge
self.spin_multiplicity = spin_multiplicity
if isinstance(self.mol, six.string_types):
self.mol = self.mol.lower()
if self.mol != "read":
raise ValueError('The only accepted text value for mol is "read"')
elif isinstance(self.mol, list):
for m in self.mol:
if not isinstance(m, Molecule):
raise ValueError("In case of type list, every element of mol must be a pymatgen Molecule")
if self.charge is None or self.spin_multiplicity is None:
raise ValueError("For fragments molecule section input, charge and spin_multiplicity "
"must be specificed")
total_charge = sum([m.charge for m in self.mol])
total_unpaired_electron = sum([m.spin_multiplicity-1 for m in self.mol])
if total_charge != self.charge:
raise ValueError("The charge of the molecule doesn't equal to the sum of the fragment charges")
if total_unpaired_electron % 2 != (self.spin_multiplicity - 1) % 2:
raise ValueError("Spin multiplicity of molecule and fragments doesn't match")
elif isinstance(self.mol, Molecule):
self.charge = charge if charge is not None else self.mol.charge
ghost_nelectrons = 0
if ghost_atoms:
for i in ghost_atoms:
site = self.mol.sites[i]
for sp, amt in site.species_and_occu.items():
ghost_nelectrons += sp.Z * amt
nelectrons = self.mol.charge + self.mol.nelectrons - ghost_nelectrons - self.charge
if spin_multiplicity is not None:
self.spin_multiplicity = spin_multiplicity
if (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError("Charge of {} and spin multiplicity of {} "
"is not possible for this molecule"
.format(self.charge, spin_multiplicity))
else:
self.spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
else:
raise ValueError("The molecule must be a pymatgen Molecule "
"object or read/None or list of pymatgen Molecule")
if (self.charge is None) != (self.spin_multiplicity is None):
raise ValueError("spin multiplicity must be set together")
if self.charge is not None and isinstance(self.mol, Molecule) and not ghost_atoms:
self.mol.set_charge_and_spin(self.charge, self.spin_multiplicity)
self.params = dict()
if title is not None:
self.params["comment"] = self._wrap_comment(title)
if "rem" not in self.params:
self.params["rem"] = dict()
if method is None or exchange.lower() != "hf":
self.params["rem"]["exchange"] = exchange.lower()
if method is not None:
self.params["rem"]["method"] = method.lower()
available_jobtypes = {"sp", "opt", "ts", "freq", "force", "rpath",
"nmr", "bsse", "eda", "pes_scan", "fsm", "aimd",
"pimc", "makeefp"}
jt = jobtype.lower()
if jt in self.alternative_values:
jt = self.alternative_values[jt]
if jt not in available_jobtypes:
raise ValueError("Job type " + jobtype + " is not supported yet")
self.params["rem"]["jobtype"] = jobtype.lower()
if correlation is not None:
self.params["rem"]["correlation"] = correlation.lower()
if rem_params is not None:
for k, v in rem_params.items():
k = k.lower()
if k in self.alternative_keys:
k = self.alternative_keys[k]
if isinstance(v, six.string_types):
v = str(v).lower()
if v in self.alternative_values:
# noinspection PyTypeChecker
v = self.alternative_values[v]
self.params["rem"][k] = v
elif isinstance(v, int) or isinstance(v, float):
self.params["rem"][k] = v
else:
raise ValueError("The value in $rem can only be Integer "
"or string")
if optional_params:
op_key = set([k.lower() for k in optional_params.keys()])
if len(op_key - self.optional_keywords_list) > 0:
invalid_keys = op_key - self.optional_keywords_list
raise ValueError(','.join(['$' + k for k in invalid_keys]) +
' is not a valid optional section')
self.params.update(optional_params)
self.set_basis_set(basis_set)
if aux_basis_set is None:
if self._aux_basis_required():
if isinstance(self.params["rem"]["basis"], six.string_types):
if self.params["rem"]["basis"].startswith("6-31+g"):
self.set_auxiliary_basis_set("rimp2-aug-cc-pvdz")
elif self.params["rem"]["basis"].startswith("6-311+g"):
self.set_auxiliary_basis_set("rimp2-aug-cc-pvtz")
if "aux_basis" not in self.params["rem"]:
raise ValueError("Auxiliary basis set is missing")
else:
self.set_auxiliary_basis_set(aux_basis_set)
if ecp:
self.set_ecp(ecp)
self.ghost_atoms = ghost_atoms
if self.ghost_atoms:
if not isinstance(self.ghost_atoms, list):
raise ValueError("ghost_atoms must be a list of integers")
for atom in self.ghost_atoms:
if not isinstance(atom, int):
raise ValueError("Each element of ghost atom list must an integer")
def _aux_basis_required(self):
if "method" in self.params["rem"]:
method = self.params["rem"]["method"]
else:
method = self.params["rem"]["exchange"]
if method in ['xygjos', 'xyg3', 'lxygjos']:
return True
if 'correlation' in self.params["rem"]:
if self.params["rem"]["correlation"].startswith("ri"):
return True
def set_velocities(self, velocities):
"""
:param velocities (au): list of list of atom velocities
:return:
"""
assert len(velocities) == len(self.mol)
self.params["velocity"] = velocities
def set_basis_set(self, basis_set):
if isinstance(basis_set, six.string_types):
self.params["rem"]["basis"] = str(basis_set).lower()
if basis_set.lower() not in ["gen", "mixed"]:
self.params.pop("basis", None)
elif isinstance(basis_set, dict):
self.params["rem"]["basis"] = "gen"
bs = dict()
for element, basis in basis_set.items():
bs[element.strip().capitalize()] = basis.lower()
self.params["basis"] = bs
if self.mol:
mol_elements = set([site.species_string for site
in self.mol.sites])
basis_elements = set(self.params["basis"].keys())
if len(mol_elements - basis_elements) > 0:
raise ValueError("The basis set for elements " +
", ".join(
list(mol_elements - basis_elements)) +
" is missing")
if len(basis_elements - mol_elements) > 0:
raise ValueError("Basis set error: the molecule "
"doesn't contain element " +
", ".join(basis_elements - mol_elements))
elif isinstance(basis_set, list):
self.params["rem"]["basis"] = "mixed"
bs = [(a[0].capitalize(), a[1].lower()) for a in basis_set]
self.params["basis"] = bs
if len(self.mol) != len(basis_set):
raise ValueError("Must specific a basis set for every atom")
mol_elements = [site.species_string for site in self.mol.sites]
basis_elements = [a[0] for a in bs]
if mol_elements != basis_elements:
raise ValueError("Elements in molecule and mixed basis set don't match")
else:
raise Exception('Can\'t handle type "{}"'.format(type(basis_set)))
def set_partial_hessian_atoms(self, alist, phess=1):
for a in alist:
if not isinstance(a, int):
raise ValueError("the parament alist must a list of atom indices")
self.params["rem"]["n_sol"] = len(alist)
if phess == 1:
self.params["rem"]["phess"] = True
else:
self.params["rem"]["phess"] = phess
self.params["rem"]["jobtype"] = "freq"
self.params["alist"] = alist
def set_basis2(self, basis2_basis_set):
if isinstance(basis2_basis_set, six.string_types):
self.params["rem"]["basis2"] = basis2_basis_set.lower()
if basis2_basis_set.lower() not in ["basis2_gen", "basis2_mixed"]:
self.params.pop("basis2", None)
elif isinstance(basis2_basis_set, dict):
self.params["rem"]["basis2"] = "basis2_gen"
bs = dict()
for element, basis in basis2_basis_set.items():
bs[element.strip().capitalize()] = basis.lower()
self.params["basis2"] = bs
if self.mol:
mol_elements = set([site.species_string for site
in self.mol.sites])
basis_elements = set(self.params["basis2"].keys())
if len(mol_elements - basis_elements) > 0:
raise ValueError("The BASIS2 basis set for "
"elements " +
", ".join(
list(mol_elements - basis_elements)) +
" is missing")
if len(basis_elements - mol_elements) > 0:
raise ValueError("BASIS2 basis set error: the "
"molecule doesn't contain element " +
", ".join(basis_elements - mol_elements))
elif isinstance(basis2_basis_set, list):
self.params["rem"]["basis2"] = "basis2_mixed"
bs = [(a[0].capitalize(), a[1].lower()) for a in basis2_basis_set]
self.params["basis2"] = bs
if len(self.mol) != len(basis2_basis_set):
raise ValueError("Must specific a BASIS2 basis set for every atom")
mol_elements = [site.species_string for site in self.mol.sites]
basis_elements = [a[0] for a in bs]
if mol_elements != basis_elements:
raise ValueError("Elements in molecule and mixed basis set don't match")
else:
raise Exception('Can\'t handle type "{}"'.format(type(basis2_basis_set)))
def set_auxiliary_basis_set(self, aux_basis_set):
if isinstance(aux_basis_set, six.string_types):
self.params["rem"]["aux_basis"] = aux_basis_set.lower()
if aux_basis_set.lower() not in ["gen", "mixed"]:
self.params.pop("aux_basis", None)
elif isinstance(aux_basis_set, dict):
self.params["rem"]["aux_basis"] = "gen"
bs = dict()
for element, basis in aux_basis_set.items():
bs[element.strip().capitalize()] = basis.lower()
self.params["aux_basis"] = bs
if self.mol:
mol_elements = set([site.species_string for site
in self.mol.sites])
basis_elements = set(self.params["aux_basis"].keys())
if len(mol_elements - basis_elements) > 0:
raise ValueError("The auxiliary basis set for "
"elements " +
", ".join(
list(mol_elements - basis_elements)) +
" is missing")
if len(basis_elements - mol_elements) > 0:
raise ValueError("Auxiliary asis set error: the "
"molecule doesn't contain element " +
", ".join(basis_elements - mol_elements))
elif isinstance(aux_basis_set, list):
self.params["rem"]["aux_basis"] = "mixed"
bs = [(a[0].capitalize(), a[1].lower()) for a in aux_basis_set]
self.params["aux_basis"] = bs
if len(self.mol) != len(aux_basis_set):
raise ValueError("Must specific a auxiliary basis set for every atom")
mol_elements = [site.species_string for site in self.mol.sites]
basis_elements = [a[0] for a in bs]
if mol_elements != basis_elements:
raise ValueError("Elements in molecule and mixed basis set don't match")
else:
raise Exception('Can\'t handle type "{}"'.format(type(aux_basis_set)))
def set_ecp(self, ecp):
if isinstance(ecp, six.string_types):
self.params["rem"]["ecp"] = ecp.lower()
elif isinstance(ecp, dict):
self.params["rem"]["ecp"] = "gen"
potentials = dict()
for element, p in ecp.items():
potentials[element.strip().capitalize()] = p.lower()
self.params["ecp"] = potentials
if self.mol:
mol_elements = set([site.species_string for site
in self.mol.sites])
ecp_elements = set(self.params["ecp"].keys())
if len(ecp_elements - mol_elements) > 0:
raise ValueError("ECP error: the molecule "
"doesn't contain element " +
", ".join(ecp_elements - mol_elements))
@property
def molecule(self):
return self.mol
def set_memory(self, total=None, static=None):
"""
        Set the maximum allowed memory.
Args:
total: The total memory. Integer. Unit: MBytes. If set to None,
this parameter will be neglected.
            static: The static memory. Integer. Unit: MBytes. If set to None,
                this parameter will be neglected.
"""
if total:
self.params["rem"]["mem_total"] = total
if static:
self.params["rem"]["mem_static"] = static
def set_max_num_of_scratch_files(self, num=16):
"""
        In QChem, the size of a single scratch file is limited to 2GB. By
        default, the maximum number of scratch files is 16, corresponding to
        32GB of scratch space. If you want to use more scratch disk space,
        you need to increase the number of scratch files:
Args:
num: The max number of the scratch files. (Integer)
"""
self.params["rem"]["max_sub_file_num"] = num
def set_scf_algorithm_and_iterations(self, algorithm="diis",
iterations=50):
"""
Set algorithm used for converging SCF and max number of SCF iterations.
Args:
algorithm: The algorithm used for converging SCF. (str)
iterations: The max number of SCF iterations. (Integer)
"""
available_algorithms = {"diis", "dm", "diis_dm", "diis_gdm", "gdm",
"rca", "rca_diis", "roothaan"}
if algorithm.lower() not in available_algorithms:
raise ValueError("Algorithm " + algorithm +
" is not available in QChem")
self.params["rem"]["scf_algorithm"] = algorithm.lower()
self.params["rem"]["max_scf_cycles"] = iterations
def set_scf_convergence_threshold(self, exponent=8):
"""
SCF is considered converged when the wavefunction error is less than
10**(-exponent).
In QChem, the default values are:
5 For single point energy calculations.
7 For geometry optimizations and vibrational analysis.
8 For SSG calculations
Args:
exponent: The exponent of the threshold. (Integer)
"""
self.params["rem"]["scf_convergence"] = exponent
def set_integral_threshold(self, thresh=12):
"""
        Cutoff for neglect of two electron integrals, 10**(-THRESH) (THRESH <= 14).
In QChem, the default values are:
8 For single point energies.
10 For optimizations and frequency calculations.
14 For coupled-cluster calculations.
Args:
thresh: The exponent of the threshold. (Integer)
"""
self.params["rem"]["thresh"] = thresh
def set_dft_grid(self, radical_points=128, angular_points=302,
grid_type="Lebedev"):
"""
Set the grid for DFT numerical integrations.
Args:
            radical_points: Radial points. (Integer)
angular_points: Angular points. (Integer)
            grid_type: The type of the grid. There are two standard grids:
SG-1 and SG-0. The other two supported grids are "Lebedev" and
"Gauss-Legendre"
"""
available_lebedev_angular_points = {6, 18, 26, 38, 50, 74, 86, 110, 146,
170, 194, 230, 266, 302, 350, 434,
590, 770, 974, 1202, 1454, 1730,
2030, 2354, 2702, 3074, 3470, 3890,
4334, 4802, 5294}
if grid_type.lower() == "sg-0":
self.params["rem"]["xc_grid"] = 0
elif grid_type.lower() == "sg-1":
self.params["rem"]["xc_grid"] = 1
elif grid_type.lower() == "lebedev":
if angular_points not in available_lebedev_angular_points:
raise ValueError(str(angular_points) + " is not a valid "
"Lebedev angular points number")
self.params["rem"]["xc_grid"] = "{rp:06d}{ap:06d}".format(
rp=radical_points, ap=angular_points)
elif grid_type.lower() == "gauss-legendre":
self.params["rem"]["xc_grid"] = "-{rp:06d}{ap:06d}".format(
rp=radical_points, ap=angular_points)
else:
raise ValueError("Grid type " + grid_type + " is not supported "
"currently")
def set_scf_initial_guess(self, guess="SAD"):
"""
Set initial guess method to be used for SCF
Args:
guess: The initial guess method. (str)
"""
        available_guesses = {"core", "sad", "gwh", "read", "fragmo"}
        if guess.lower() not in available_guesses:
raise ValueError("The guess method " + guess + " is not supported "
"yet")
self.params["rem"]["scf_guess"] = guess.lower()
def set_geom_max_iterations(self, iterations):
"""
Set the max iterations of geometry optimization.
Args:
iterations: the maximum iterations of geometry optimization.
(Integer)
"""
self.params["rem"]["geom_opt_max_cycles"] = iterations
    def set_geom_opt_coords_type(self, coords_type="internal-switch"):
"""
Set the coordinates system used in geometry optimization.
"cartesian" --- always cartesian coordinates.
"internal" --- always internal coordinates.
"internal-switch" --- try internal coordinates first, if fails, switch
to cartesian coordinates.
"z-matrix" --- always z-matrix coordinates.
"z-matrix-switch" --- try z-matrix first, if fails, switch to
cartesian coordinates.
Args:
coords_type: The type of the coordinates. (str)
"""
coords_map = {"cartesian": 0, "internal": 1, "internal-switch": -1,
"z-matrix": 2, "z-matrix-switch": -2}
if coords_type.lower() not in set(coords_map.keys()):
raise ValueError("Coodinate system " + coords_type + " is not "
"supported yet")
else:
self.params["rem"]["geom_opt_coords"] = \
coords_map[coords_type.lower()]
def scale_geom_opt_threshold(self, gradient=0.1, displacement=0.1,
energy=0.1):
"""
Adjust the convergence criteria of geometry optimization.
Args:
gradient: the scale factor for gradient criteria. If less than
1.0, you are tightening the threshold. The base value is
300 × 10E−6
displacement: the scale factor for atomic displacement. If less
                than 1.0, you are tightening the threshold. The base value is
1200 × 10E−6
energy: the scale factor for energy change between successive
iterations. If less than 1.0, you are tightening the
threshold. The base value is 100 × 10E−8.
"""
if gradient < 1.0/(300-1) or displacement < 1.0/(1200-1) or \
energy < 1.0/(100-1):
raise ValueError("The geometry optimization convergence criteria "
"is too tight")
self.params["rem"]["geom_opt_tol_gradient"] = int(gradient * 300)
self.params["rem"]["geom_opt_tol_displacement"] = int(displacement *
1200)
self.params["rem"]["geom_opt_tol_energy"] = int(energy * 100)
def set_geom_opt_use_gdiis(self, subspace_size=None):
"""
Use GDIIS algorithm in geometry optimization.
Args:
            subspace_size: The size of the DIIS subspace. None for default
value. The default value is min(NDEG, NATOMS, 4) NDEG = number
                of molecular degrees of freedom.
"""
subspace_size = subspace_size if subspace_size is not None else -1
self.params["rem"]["geom_opt_max_diis"] = subspace_size
def disable_symmetry(self):
"""
Turn the symmetry off.
"""
self.params["rem"]["sym_ignore"] = True
self.params["rem"]["symmetry"] = False
def use_cosmo(self, dielectric_constant=78.4):
"""
Set the solvent model to COSMO.
Args:
dielectric_constant: the dielectric constant for the solvent.
"""
self.params["rem"]["solvent_method"] = "cosmo"
self.params["rem"]["solvent_dielectric"] = dielectric_constant
def use_pcm(self, pcm_params=None, solvent_key="solvent", solvent_params=None,
radii_force_field=None):
"""
        Set the solvent model to PCM. Default parameters attempt to comply
        with the Gaussian default values.
Args:
pcm_params (dict): The parameters of "$pcm" section.
solvent_key (str): for versions < 4.2 the section name is "pcm_solvent"
solvent_params (dict): The parameters of solvent_key section
            radii_force_field (str): The force field used to set the solute
                radii. Defaults to UFF.
"""
self.params["pcm"] = dict()
self.params[solvent_key] = dict()
default_pcm_params = {"Theory": "SSVPE",
"vdwScale": 1.1,
"Radii": "UFF"}
if not solvent_params:
solvent_params = {"Dielectric": 78.3553}
if pcm_params:
for k, v in pcm_params.items():
self.params["pcm"][k.lower()] = v.lower() \
if isinstance(v, six.string_types) else v
for k, v in default_pcm_params.items():
if k.lower() not in self.params["pcm"].keys():
self.params["pcm"][k.lower()] = v.lower() \
if isinstance(v, six.string_types) else v
for k, v in solvent_params.items():
self.params[solvent_key][k.lower()] = v.lower() \
if isinstance(v, six.string_types) else copy.deepcopy(v)
self.params["rem"]["solvent_method"] = "pcm"
if radii_force_field:
self.params["pcm"]["radii"] = "bondi"
self.params["rem"]["force_fied"] = radii_force_field.lower()
def __str__(self):
sections = ["comment", "molecule", "rem"] + \
sorted(list(self.optional_keywords_list))
lines = []
for sec in sections:
if sec in self.params or sec == "molecule":
                format_sec = self.__getattribute__("_format_" + sec)
lines.append("$" + sec)
                lines.extend(format_sec())
lines.append("$end")
lines.append('\n')
return '\n'.join(lines)
@classmethod
def _wrap_comment(cls, comment):
ml_section_start = comment.find('<')
if ml_section_start >= 0:
title_section = comment[0:ml_section_start]
ml_section = comment[ml_section_start:]
else:
title_section = comment
ml_section = ''
wrapped_title_lines = textwrap.wrap(title_section.strip(), width=70, initial_indent=' ')
wrapped_ml_lines = []
for l in ml_section.splitlines():
if len(l) > 70:
wrapped_ml_lines.extend(textwrap.wrap(l.strip(), width=70, initial_indent=' '))
else:
wrapped_ml_lines.append(l)
return '\n'.join(wrapped_title_lines + wrapped_ml_lines)
def _format_comment(self):
return self._wrap_comment(self.params["comment"]).splitlines()
def _format_alist(self):
return [" {}".format(x) for x in self.params["alist"]]
def _format_velocity(self):
return [' ' + ' '.join(['{:12.5E}'.format(v) for v in atom])
for atom in self.params["velocity"]]
def _format_molecule(self):
lines = []
def inner_format_mol(m2, index_base):
mol_lines = []
for i, site in enumerate(m2.sites):
ghost = "@" if self.ghost_atoms \
and i + index_base in self.ghost_atoms else ""
atom = "{ghost:s}{element:s}".format(ghost=ghost,
element=site.species_string)
mol_lines.append(" {atom:<4} {x:>17.8f} {y:>17.8f} "
"{z:>17.8f}".format(atom=atom, x=site.x,
y=site.y, z=site.z))
return mol_lines
if self.charge is not None:
lines.append(" {charge:d} {multi:d}".format(charge=self
.charge, multi=self.spin_multiplicity))
if isinstance(self.mol, six.string_types) and self.mol == "read":
lines.append(" read")
elif isinstance(self.mol, list):
starting_index = 0
for m in self.mol:
lines.append("--")
lines.append(" {charge:d} {multi:d}".format(
charge=m.charge, multi=m.spin_multiplicity))
lines.extend(inner_format_mol(m, starting_index))
starting_index += len(m)
else:
lines.extend(inner_format_mol(self.mol, 0))
return lines
def _format_rem(self):
rem_format_template = Template(" {name:>$name_width} = "
"{value}")
name_width = 0
for name, value in self.params["rem"].items():
if len(name) > name_width:
name_width = len(name)
rem = rem_format_template.substitute(name_width=name_width)
lines = []
all_keys = set(self.params["rem"].keys())
priority_keys = ["jobtype"]
if "exchange" in self.params["rem"]:
priority_keys.append("exchange")
if "method" in self.params["rem"]:
priority_keys.append("method")
priority_keys.append("basis")
additional_keys = all_keys - set(priority_keys)
ordered_keys = priority_keys + sorted(list(additional_keys))
for name in ordered_keys:
value = self.params["rem"][name]
lines.append(rem.format(name=name, value=value))
return lines
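    # Illustrative $rem layout from _format_rem (values made up): keys are
    # right-aligned to a common width and jobtype, exchange/method and basis are
    # printed first, e.g.
    #    jobtype = sp
    #   exchange = b3lyp
    #      basis = 6-31+g*
    # with the remaining keywords following in alphabetical order.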
def _format_basis(self):
lines = []
if isinstance(self.params["basis"], dict):
for element in sorted(self.params["basis"].keys()):
basis = self.params["basis"][element]
lines.append(" " + element)
lines.append(" " + basis)
lines.append(" ****")
elif isinstance(self.params["basis"], list):
for i, (element, bs) in enumerate(self.params["basis"]):
lines.append(" {element:2s} {number:3d}".format(element=element, number=i+1))
lines.append(" {}".format(bs))
lines.append(" ****")
return lines
def _format_aux_basis(self):
lines = []
if isinstance(self.params["aux_basis"], dict):
for element in sorted(self.params["aux_basis"].keys()):
basis = self.params["aux_basis"][element]
lines.append(" " + element)
lines.append(" " + basis)
lines.append(" ****")
else:
for i, (element, bs) in enumerate(self.params["aux_basis"]):
lines.append(" {element:2s} {number:3d}".format(element=element, number=i+1))
lines.append(" {}".format(bs))
lines.append(" ****")
return lines
def _format_basis2(self):
lines = []
if isinstance(self.params["basis2"], dict):
for element in sorted(self.params["basis2"].keys()):
basis = self.params["basis2"][element]
lines.append(" " + element)
lines.append(" " + basis)
lines.append(" ****")
else:
for i, (element, bs) in enumerate(self.params["basis2"]):
lines.append(" {element:2s} {number:3d}".format(element=element, number=i+1))
lines.append(" {}".format(bs))
lines.append(" ****")
return lines
def _format_ecp(self):
lines = []
for element in sorted(self.params["ecp"].keys()):
ecp = self.params["ecp"][element]
lines.append(" " + element)
lines.append(" " + ecp)
lines.append(" ****")
return lines
def _format_pcm(self):
pcm_format_template = Template(" {name:>$name_width} "
"{value}")
name_width = 0
for name in self.params["pcm"].keys():
if len(name) > name_width:
name_width = len(name)
rem = pcm_format_template.substitute(name_width=name_width)
lines = []
for name in sorted(self.params["pcm"].keys()):
value = self.params["pcm"][name]
lines.append(rem.format(name=name, value=value))
return lines
def _format_pcm_solvent(self, key="pcm_solvent"):
pp_format_template = Template(" {name:>$name_width} "
"{value}")
name_width = 0
for name in self.params[key].keys():
if len(name) > name_width:
name_width = len(name)
rem = pp_format_template.substitute(name_width=name_width)
lines = []
all_keys = set(self.params[key].keys())
priority_keys = []
for k in ["dielectric", "nonels", "nsolventatoms", "solventatom"]:
if k in all_keys:
priority_keys.append(k)
additional_keys = all_keys - set(priority_keys)
ordered_keys = priority_keys + sorted(list(additional_keys))
for name in ordered_keys:
value = self.params[key][name]
if name == "solventatom":
for v in copy.deepcopy(value):
value = "{:<4d} {:<4d} {:<4d} {:4.2f}".format(*v)
lines.append(rem.format(name=name, value=value))
continue
lines.append(rem.format(name=name, value=value))
return lines
def _format_solvent(self):
return self._format_pcm_solvent(key="solvent")
def _format_opt(self):
# lines is a list of all opt keywords
lines = []
opt_sub_sections = [sec for sec in sorted(self.params['opt'])]
valid_sub_sections = {"CONSTRAINT", "FIXED", "CONNECT", "DUMMY"}
valid_fix_spec = {"X", "Y", "Z", "XY", "XZ", "YZ", "XYZ"}
if len(set(opt_sub_sections) - valid_sub_sections) > 0:
invalid_keys = set(opt_sub_sections) - valid_sub_sections
raise ValueError(','.join(['$' + k for k in invalid_keys]) +
' is not a valid geometry optimization constraint')
for opt_sub_sec in opt_sub_sections:
if len(lines) > 0:
lines.append("")
if opt_sub_sec == "CONSTRAINT":
# constraints
constraint_lines = ['CONSTRAINT']
for index in range(len(self.params['opt']['CONSTRAINT'])):
vals = self.params['opt']['CONSTRAINT'][index]
if vals[0] in ['outp', 'tors', 'linc', 'linp']:
constraint_lines.append("{vals[0]} {vals[1]} {vals[2]} {vals[3]} {vals[4]} {vals[5]}".format(vals=vals))
elif vals[0] == 'stre':
constraint_lines.append("{vals[0]} {vals[1]} {vals[2]} {vals[3]}".format(vals=vals))
elif vals[0] == 'bend':
constraint_lines.append("{vals[0]} {vals[1]} {vals[2]} {vals[3]} {vals[4]}".format(vals=vals))
constraint_lines.append('ENDCONSTRAINT')
lines.extend(constraint_lines)
elif opt_sub_sec == "FIXED":
fixed_lines = ["FIXED"]
for atom in sorted(self.params['opt']['FIXED']):
fix_spec = self.params['opt']['FIXED'][atom]
if fix_spec not in valid_fix_spec:
raise ValueError("{} is a wrong keyword to fix atoms".format(fix_spec))
fixed_lines.append(" {} {}".format(atom, fix_spec))
fixed_lines.append("ENDFIXED")
lines.extend(fixed_lines)
else:
raise ValueError("$opt - {} is not supported yet".format(opt_sub_sec))
return lines
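    # Illustrative $opt body produced by _format_opt, assuming one stretch
    # constraint and one fixed atom (numbers are made up):
    #   CONSTRAINT
    #   stre 1 2 1.5
    #   ENDCONSTRAINT
    #
    #   FIXED
    #    3 XYZ
    #   ENDFIXED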
def as_dict(self):
if isinstance(self.mol, six.string_types):
mol_dict = self.mol
elif isinstance(self.mol, Molecule):
mol_dict = self.mol.as_dict()
elif isinstance(self.mol, list):
mol_dict = [m.as_dict() for m in self.mol]
else:
            raise ValueError('Unknown molecule type "{}"'.format(type(self.mol)))
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"molecule": mol_dict,
"charge": self.charge,
"spin_multiplicity": self.spin_multiplicity,
"params": self.params}
if self.ghost_atoms:
d["ghost_atoms"] = self.ghost_atoms
return d
@classmethod
def from_dict(cls, d):
if d["molecule"] == "read":
mol = "read"
elif isinstance(d["molecule"], dict):
mol = Molecule.from_dict(d["molecule"])
elif isinstance(d["molecule"], list):
mol = [Molecule.from_dict(m) for m in d["molecule"]]
else:
            raise ValueError('Unknown molecule type "{}"'.format(type(d["molecule"])))
jobtype = d["params"]["rem"]["jobtype"]
title = d["params"].get("comment", None)
exchange = d["params"]["rem"].get("exchange", "hf")
method = d["params"]["rem"].get("method", None)
correlation = d["params"]["rem"].get("correlation", None)
basis_set = d["params"]["rem"]["basis"]
aux_basis_set = d["params"]["rem"].get("aux_basis", None)
ecp = d["params"]["rem"].get("ecp", None)
ghost_atoms = d.get("ghost_atoms", None)
optional_params = None
op_keys = set(d["params"].keys()) - {"comment", "rem"}
if len(op_keys) > 0:
optional_params = dict()
for k in op_keys:
optional_params[k] = d["params"][k]
return QcTask(molecule=mol, charge=d["charge"],
spin_multiplicity=d["spin_multiplicity"],
jobtype=jobtype, title=title,
exchange=exchange, correlation=correlation,
basis_set=basis_set, aux_basis_set=aux_basis_set,
ecp=ecp, rem_params=d["params"]["rem"],
optional_params=optional_params,
ghost_atoms=ghost_atoms,
method=method)
def write_file(self, filename):
with zopen(filename, "wt") as f:
f.write(self.__str__())
@classmethod
def from_file(cls, filename):
with zopen(filename, "rt") as f:
return cls.from_string(f.read())
@classmethod
def from_string(cls, contents):
"""
        Creates QcTask from a string.
Args:
contents: String representing a QChem input file.
Returns:
            QcTask object
"""
mol = None
charge = None
spin_multiplicity = None
params = dict()
lines = contents.split('\n')
parse_section = False
section_name = None
section_text = []
ghost_atoms = None
for line_num, line in enumerate(lines):
l = line.strip().lower()
if len(l) == 0:
continue
if (not parse_section) and (l == "$end" or not l.startswith("$")):
raise ValueError("Format error, parsing failed")
if parse_section and l != "$end":
section_text.append(line)
if l.startswith("$") and not parse_section:
parse_section = True
section_name = l[1:]
available_sections = ["comment", "molecule", "rem"] + \
sorted(list(cls.optional_keywords_list))
if section_name not in available_sections:
raise ValueError("Unrecognized keyword " + line.strip() +
" at line " + str(line_num))
if section_name in params:
raise ValueError("duplicated keyword " + line.strip() +
"at line " + str(line_num))
if parse_section and l == "$end":
func_name = "_parse_" + section_name
if func_name not in QcTask.__dict__:
raise Exception(func_name + " is not implemented yet, "
"please implement it")
parse_func = QcTask.__dict__[func_name].__get__(None, QcTask)
if section_name == "molecule":
mol, charge, spin_multiplicity, ghost_atoms = parse_func(section_text)
else:
d = parse_func(section_text)
params[section_name] = d
parse_section = False
section_name = None
section_text = []
if parse_section:
raise ValueError("Format error. " + section_name + " is not "
"terminated")
jobtype = params["rem"]["jobtype"]
title = params.get("comment", None)
exchange = params["rem"].get("exchange", "hf")
method = params["rem"].get("method", None)
correlation = params["rem"].get("correlation", None)
basis_set = params["rem"]["basis"]
aux_basis_set = params["rem"].get("aux_basis", None)
ecp = params["rem"].get("ecp", None)
optional_params = None
op_keys = set(params.keys()) - {"comment", "rem"}
if len(op_keys) > 0:
optional_params = dict()
for k in op_keys:
optional_params[k] = params[k]
return QcTask(molecule=mol, charge=charge,
spin_multiplicity=spin_multiplicity,
jobtype=jobtype, title=title,
exchange=exchange, correlation=correlation,
basis_set=basis_set, aux_basis_set=aux_basis_set,
ecp=ecp, rem_params=params["rem"],
optional_params=optional_params,
ghost_atoms=ghost_atoms,
method=method)
@classmethod
def _parse_comment(cls, contents):
return '\n'.join(contents).strip()
@classmethod
def _parse_coords(cls, coord_lines):
"""
Helper method to parse coordinates. Copied from GaussianInput class.
"""
paras = {}
var_pattern = re.compile(r'^([A-Za-z]+\S*)[\s=,]+([\d\-\.]+)$')
for l in coord_lines:
m = var_pattern.match(l.strip())
if m:
paras[m.group(1)] = float(m.group(2))
species = []
coords = []
# Stores whether a Zmatrix format is detected. Once a zmatrix format
# is detected, it is assumed for the remaining of the parsing.
zmode = False
for l in coord_lines:
l = l.strip()
if not l:
break
if (not zmode) and cls.xyz_patt.match(l):
m = cls.xyz_patt.match(l)
species.append(m.group(1))
toks = re.split(r'[,\s]+', l.strip())
if len(toks) > 4:
coords.append(list(map(float, toks[2:5])))
else:
coords.append(list(map(float, toks[1:4])))
elif cls.zmat_patt.match(l):
zmode = True
toks = re.split(r'[,\s]+', l.strip())
species.append(toks[0])
toks.pop(0)
if len(toks) == 0:
coords.append(np.array([0.0, 0.0, 0.0]))
else:
nn = []
parameters = []
while len(toks) > 1:
ind = toks.pop(0)
data = toks.pop(0)
try:
nn.append(int(ind))
except ValueError:
nn.append(species.index(ind) + 1)
try:
val = float(data)
parameters.append(val)
except ValueError:
if data.startswith("-"):
parameters.append(-paras[data[1:]])
else:
parameters.append(paras[data])
if len(nn) == 1:
coords.append(np.array(
[0.0, 0.0, float(parameters[0])]))
elif len(nn) == 2:
coords1 = coords[nn[0] - 1]
coords2 = coords[nn[1] - 1]
bl = parameters[0]
angle = parameters[1]
axis = [0, 1, 0]
op = SymmOp.from_origin_axis_angle(coords1, axis,
angle, False)
coord = op.operate(coords2)
vec = coord - coords1
coord = vec * bl / np.linalg.norm(vec) + coords1
coords.append(coord)
elif len(nn) == 3:
coords1 = coords[nn[0] - 1]
coords2 = coords[nn[1] - 1]
coords3 = coords[nn[2] - 1]
bl = parameters[0]
angle = parameters[1]
dih = parameters[2]
v1 = coords3 - coords2
v2 = coords1 - coords2
axis = np.cross(v1, v2)
op = SymmOp.from_origin_axis_angle(
coords1, axis, angle, False)
coord = op.operate(coords2)
v1 = coord - coords1
v2 = coords1 - coords2
v3 = np.cross(v1, v2)
adj = get_angle(v3, axis)
axis = coords1 - coords2
op = SymmOp.from_origin_axis_angle(
coords1, axis, dih - adj, False)
coord = op.operate(coord)
vec = coord - coords1
coord = vec * bl / np.linalg.norm(vec) + coords1
coords.append(coord)
def parse_species(sp_str):
"""
The species specification can take many forms. E.g.,
simple integers representing atomic numbers ("8"),
actual species string ("C") or a labelled species ("C1").
Sometimes, the species string is also not properly capitalized,
e.g, ("c1"). This method should take care of these known formats.
"""
try:
return int(sp_str)
except ValueError:
sp = re.sub(r"\d", "", sp_str)
return sp.capitalize()
species = list(map(parse_species, species))
return Molecule(species, coords)
@classmethod
def _parse_molecule(cls, contents):
def parse_ghost_indices(coord_text_lines):
no_ghost_text = [l.replace("@", "") for l in coord_text_lines]
ghosts = []
for index, l in enumerate(coord_text_lines):
l = l.strip()
if not l:
break
if "@" in l:
ghosts.append(index)
return ghosts, no_ghost_text
text = copy.deepcopy(contents[:2])
charge_multi_pattern = re.compile(r'\s*(?P<charge>'
r'[-+]?\d+)\s+(?P<multi>\d+)')
line = text.pop(0)
m = charge_multi_pattern.match(line)
if m:
charge = int(m.group("charge"))
spin_multiplicity = int(m.group("multi"))
line = text.pop(0)
else:
charge = None
spin_multiplicity = None
if line.strip().lower() == "read":
return "read", charge, spin_multiplicity, None
elif charge is None or spin_multiplicity is None:
raise ValueError("Charge or spin multiplicity is not found")
else:
if contents[1].strip()[0:2] == "--":
chunks = "\n".join(contents[2:]).split("--\n")
mol = []
ghost_atoms = []
starting_index = 0
for chunk in chunks:
frag_contents = chunk.split("\n")
m = charge_multi_pattern.match(frag_contents[0])
if m:
fragment_charge = int(m.group("charge"))
fragment_spin_multiplicity = int(m.group("multi"))
else:
raise Exception("charge and spin multiplicity must be specified for each fragment")
gh, coord_lines = parse_ghost_indices(frag_contents[1:])
fragment = cls._parse_coords(coord_lines)
fragment.set_charge_and_spin(fragment_charge, fragment_spin_multiplicity)
mol.append(fragment)
ghost_atoms.extend([i+starting_index for i in gh])
starting_index += len(fragment)
else:
ghost_atoms, coord_lines = parse_ghost_indices(contents[1:])
mol = cls._parse_coords(coord_lines)
if len(ghost_atoms) == 0:
mol.set_charge_and_spin(charge, spin_multiplicity)
ghost_atoms = ghost_atoms if len(ghost_atoms) > 0 else None
return mol, charge, spin_multiplicity, ghost_atoms
@classmethod
def _parse_rem(cls, contents):
d = dict()
int_pattern = re.compile(r'^[-+]?\d+$')
float_pattern = re.compile(r'^[-+]?\d+\.\d+([eE][-+]?\d+)?$')
for line in contents:
tokens = line.strip().replace("=", ' ').split()
if len(tokens) < 2:
raise ValueError("Can't parse $rem section, there should be "
"at least two field: key and value!")
k1, v = tokens[:2]
k2 = k1.lower()
if k2 in cls.alternative_keys:
k2 = cls.alternative_keys[k2]
if v in cls.alternative_values:
                v = cls.alternative_values[v]
if k2 == "xc_grid":
d[k2] = v
elif v == "True":
d[k2] = True
elif v == "False":
d[k2] = False
elif int_pattern.match(v):
d[k2] = int(v)
elif float_pattern.match(v):
d[k2] = float(v)
else:
d[k2] = v.lower()
return d
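    # Sketch of _parse_rem on a hypothetical $rem body: the lines
    #   ["jobtype = sp", "basis 6-31g*", "max_scf_cycles 100"]
    # become {"jobtype": "sp", "basis": "6-31g*", "max_scf_cycles": 100};
    # integers and floats are converted, other values are lower-cased strings.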
@classmethod
def _parse_aux_basis(cls, contents):
if len(contents) % 3 != 0:
raise ValueError("Auxiliary basis set section format error")
chunks = zip(*[iter(contents)]*3)
t = contents[0].split()
if len(t) == 2 and int(t[1]) > 0:
bs = []
for i, ch in enumerate(chunks):
element, number = ch[0].split()
basis = ch[1]
if int(number) != i+1:
raise ValueError("Atom order number doesn't match in $aux_basis section")
bs.append((element.strip().capitalize(), basis.strip().lower()))
else:
bs = dict()
for ch in chunks:
element, basis = ch[:2]
bs[element.strip().capitalize()] = basis.strip().lower()
return bs
@classmethod
def _parse_basis2(cls, contents):
if len(contents) % 3 != 0:
raise ValueError("Auxiliary basis set section format error")
chunks = zip(*[iter(contents)]*3)
t = contents[0].split()
if len(t) == 2 and int(t[1]) > 0:
bs = []
for i, ch in enumerate(chunks):
element, number = ch[0].split()
basis = ch[1]
if int(number) != i+1:
raise ValueError("Atom order number doesn't match in $aux_basis section")
bs.append((element.strip().capitalize(), basis.strip().lower()))
else:
bs = dict()
for ch in chunks:
element, basis = ch[:2]
bs[element.strip().capitalize()] = basis.strip().lower()
return bs
@classmethod
def _parse_basis(cls, contents):
if len(contents) % 3 != 0:
raise ValueError("Basis set section format error")
chunks = zip(*[iter(contents)]*3)
t = contents[0].split()
if len(t) == 2 and int(t[1]) > 0:
bs = []
for i, ch in enumerate(chunks):
element, number = ch[0].split()
basis = ch[1]
if int(number) != i+1:
raise ValueError("Atom order number doesn't match in $basis section")
bs.append((element.strip().capitalize(), basis.strip().lower()))
else:
bs = dict()
for ch in chunks:
element, basis = ch[:2]
bs[element.strip().capitalize()] = basis.strip().lower()
return bs
@classmethod
def _parse_ecp(cls, contents):
if len(contents) % 3 != 0:
raise ValueError("ECP section format error")
chunks = zip(*[iter(contents)]*3)
d = dict()
for ch in chunks:
element, ecp = ch[:2]
d[element.strip().capitalize()] = ecp.strip().lower()
return d
@classmethod
def _parse_alist(cls, contents):
atom_list = []
for line in contents:
atom_list.extend([int(x) for x in line.split()])
return atom_list
@classmethod
def _parse_velocity(cls, contents):
velocities = []
for line in contents:
velocities.append([float(v) for v in line.split()])
return velocities
@classmethod
def _parse_pcm(cls, contents):
d = dict()
int_pattern = re.compile(r'^[-+]?\d+$')
float_pattern = re.compile(r'^[-+]?\d+\.\d+([eE][-+]?\d+)?$')
for line in contents:
tokens = line.strip().replace("=", ' ').split()
if len(tokens) < 2:
raise ValueError("Can't parse $pcm section, there should be "
"at least two field: key and value!")
k1, v = tokens[:2]
k2 = k1.lower()
if k2 in cls.alternative_keys:
k2 = cls.alternative_keys[k2]
if v in cls.alternative_values:
                v = cls.alternative_values[v]
if v == "True":
d[k2] = True
elif v == "False":
d[k2] = False
elif int_pattern.match(v):
d[k2] = int(v)
elif float_pattern.match(v):
d[k2] = float(v)
else:
d[k2] = v.lower()
return d
@classmethod
def _parse_pcm_solvent(cls, contents):
d = dict()
int_pattern = re.compile(r'^[-+]?\d+$')
float_pattern = re.compile(r'^[-+]?\d+\.\d+([eE][-+]?\d+)?$')
for line in contents:
tokens = line.strip().replace("=", ' ').split()
if len(tokens) < 2:
raise ValueError("Can't parse $pcm_solvent section, "
"there should be at least two field: "
"key and value!")
k1, v = tokens[:2]
k2 = k1.lower()
if k2 in cls.alternative_keys:
k2 = cls.alternative_keys[k2]
if v in cls.alternative_values:
                v = cls.alternative_values[v]
if k2 == "solventatom":
v = [int(i) for i in tokens[1:4]]
# noinspection PyTypeChecker
v.append(float(tokens[4]))
if k2 not in d:
d[k2] = [v]
else:
d[k2].append(v)
elif v == "True":
d[k2] = True
elif v == "False":
d[k2] = False
elif int_pattern.match(v):
d[k2] = int(v)
elif float_pattern.match(v):
d[k2] = float(v)
else:
d[k2] = v.lower()
return d
@classmethod
def _parse_solvent(cls, contents):
return cls._parse_pcm_solvent(contents)
@classmethod
def _parse_opt(cls, contents):
#only parses opt constraints
opt_dict = {}
const_list = list()
fixed_dict = dict()
constraints = False
fixed_sec = False
valid_fix_spec = {"X", "Y", "Z", "XY", "XZ", "YZ", "XYZ"}
int_pattern = re.compile(r'^[-+]?\d+$')
float_pattern = re.compile(r'^[-+]?\d+\.\d+([eE][-+]?\d+)?$')
for line in contents:
tokens = line.strip().split()
if re.match(r'ENDCONSTRAINT', line, re.IGNORECASE):
constraints = False
opt_dict["CONSTRAINT"] = const_list
elif re.match(r'ENDFIXED', line, re.IGNORECASE):
fixed_sec = False
opt_dict["FIXED"] = fixed_dict
elif constraints:
vals = []
for val in tokens:
if int_pattern.match(val):
vals.append(int(val))
elif float_pattern.match(val):
vals.append(float(val))
else:
vals.append(val)
const_list.append(vals)
elif fixed_sec:
atom = int(tokens[0])
fix_spec = tokens[1].upper()
if fix_spec not in valid_fix_spec:
raise ValueError("{} is not a correct keyword to fix"
"atoms".format(fix_spec))
fixed_dict[atom] = fix_spec
elif re.match(r'CONSTRAINT', line, re.IGNORECASE):
constraints = True
const_list = []
elif re.match(r'FIXED', line, re.IGNORECASE):
fixed_sec = True
fixed_dict = dict()
elif len(line.strip()) == 0:
continue
else:
raise ValueError("Keyword {} in $opt section is not supported yet".
format(line.strip()))
return opt_dict
class QcInput(MSONable):
"""
An object representing a multiple step QChem input file.
Args:
jobs: The QChem jobs (List of QcInput object)
"""
def __init__(self, jobs):
jobs = jobs if isinstance(jobs, list) else [jobs]
for j in jobs:
if not isinstance(j, QcTask):
raise ValueError("jobs must be a list QcInput object")
self.jobs = jobs
def __str__(self):
return "\n@@@\n\n\n".join([str(j) for j in self.jobs])
def write_file(self, filename):
with zopen(filename, "wt") as f:
f.write(self.__str__())
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"jobs": [j.as_dict() for j in self.jobs]}
@classmethod
def from_dict(cls, d):
jobs = [QcTask.from_dict(j) for j in d["jobs"]]
return QcInput(jobs)
@classmethod
def from_string(cls, contents):
qc_contents = contents.split("@@@")
jobs = [QcTask.from_string(cont) for cont in qc_contents]
return QcInput(jobs)
@classmethod
def from_file(cls, filename):
with zopen(filename, "rt") as f:
return cls.from_string(f.read())
class QcOutput(object):
kcal_per_mol_2_eV = 4.3363E-2
def __init__(self, filename):
self.filename = filename
split_pattern = r"\n\nRunning Job \d+ of \d+ \S+|" \
r"[*]{61}\nJob \d+ of \d+ \n[*]{61}|" \
r"\n.*time.*\nRunning Job \d+ of \d+ \S+"
try:
with zopen(filename, "rt") as f:
data = f.read()
except UnicodeDecodeError:
with zopen(filename, "rb") as f:
data = f.read().decode("latin-1")
try:
chunks = re.split(split_pattern, data)
# noinspection PyTypeChecker
self.data = list(map(self._parse_job, chunks))
except UnicodeDecodeError:
data = data.decode("latin-1")
chunks = re.split(split_pattern, data)
# noinspection PyTypeChecker
self.data = list(map(self._parse_job, chunks))
@property
def final_energy(self):
return self.data[-1]["energies"][-1][-1]
@property
def final_structure(self):
return self.data[-1]["molecules"][-1]
@classmethod
def _expected_successful_pattern(cls, qctask):
text = ["Convergence criterion met"]
if "correlation" in qctask.params["rem"]:
if "ccsd" in qctask.params["rem"]["correlation"]\
or "qcisd" in qctask.params["rem"]["correlation"]:
text.append('CC.*converged')
if qctask.params["rem"]["jobtype"] == "opt"\
or qctask.params["rem"]["jobtype"] == "ts":
text.append("OPTIMIZATION CONVERGED")
if qctask.params["rem"]["jobtype"] == "freq":
text.append("VIBRATIONAL ANALYSIS")
if qctask.params["rem"]["jobtype"] == "gradient":
text.append("Gradient of SCF Energy")
return text
@classmethod
def _parse_job(cls, output):
scf_energy_pattern = re.compile(r'Total energy in the final basis set ='
r'\s+(?P<energy>-\d+\.\d+)')
corr_energy_pattern = re.compile(r'(?P<name>[A-Z\-\(\)0-9]+)\s+'
r'([tT]otal\s+)?[eE]nergy\s+=\s+'
r'(?P<energy>-\d+\.\d+)')
coord_pattern = re.compile(
r'\s*\d+\s+(?P<element>[A-Z][a-zH]*)\s+(?P<x>\-?\d+\.\d+)\s+'
r'(?P<y>\-?\d+\.\d+)\s+(?P<z>\-?\d+\.\d+)')
num_ele_pattern = re.compile(r'There are\s+(?P<alpha>\d+)\s+alpha '
r'and\s+(?P<beta>\d+)\s+beta electrons')
total_charge_pattern = re.compile(r'Sum of atomic charges ='
r'\s+(?P<charge>\-?\d+\.\d+)')
scf_iter_pattern = re.compile(
r'\d+\s*(?P<energy>\-\d+\.\d+)\s+(?P<diis_error>\d+\.\d+E[-+]\d+)')
zpe_pattern = re.compile(
r'Zero point vibrational energy:\s+(?P<zpe>\d+\.\d+)\s+kcal/mol')
thermal_corr_pattern = re.compile(
r'(?P<name>\S.*\S):\s+(?P<correction>\d+\.\d+)\s+k?cal/mol')
detailed_charge_pattern = re.compile(
r'(Ground-State )?(?P<method>\w+)( Net)? Atomic Charges')
nbo_charge_pattern = re.compile(
r'(?P<element>[A-Z][a-z]{0,2})\s*(?P<no>\d+)\s+(?P<charge>\-?\d\.\d+)'
r'\s+(?P<core>\-?\d+\.\d+)\s+(?P<valence>\-?\d+\.\d+)'
r'\s+(?P<rydberg>\-?\d+\.\d+)\s+(?P<total>\-?\d+\.\d+)'
r'(\s+(?P<spin>\-?\d\.\d+))?')
nbo_wavefunction_type_pattern = re.compile(
r'This is an? (?P<type>\w+\-\w+) NBO calculation')
scr_dir_pattern = re.compile(r"Scratch files written to\s+(?P<scr_dir>[^\n]+)")
bsse_pattern = re.compile(
r'DE, kJ/mol\s+(?P<raw_be>\-?\d+\.?\d+([eE]\d+)?)\s+'
r'(?P<corrected_be>\-?\d+\.?\d+([eE]\d+)?)')
float_pattern = re.compile(r'\-?\d+\.?\d+([eE]\d+)?$')
error_defs = (
(re.compile(r'Convergence failure'), "Bad SCF convergence"),
(re.compile(
r'Coordinates do not transform within specified threshold'),
"autoz error"),
(re.compile(r'MAXIMUM OPTIMIZATION CYCLES REACHED'),
"Geometry optimization failed"),
(re.compile(r'\s+[Nn][Aa][Nn]\s+'), "NAN values"),
(re.compile(r'energy\s+=\s*(\*)+'), "Numerical disaster"),
(re.compile(r'NewFileMan::OpenFile\(\):\s+nopenfiles=\d+\s+'
                        r'maxopenfiles=\d+\s+errno=\d+'), "Open file error"),
(re.compile(r'Application \d+ exit codes: 1[34]\d+'), "Exit Code 134"),
(re.compile(r'Negative overlap matrix eigenvalue. Tighten integral '
r'threshold \(REM_THRESH\)!'), "Negative Eigen"),
(re.compile(r'Unable to allocate requested memory in mega_alloc'),
"Insufficient static memory"),
(re.compile(r'Application \d+ exit signals: Killed'),
"Killed"),
(re.compile(r'UNABLE TO DETERMINE Lamda IN FormD'),
"Lamda Determination Failed"),
(re.compile(r'Job too small. Please specify .*CPSCF_NSEG'),
"Freq Job Too Small"),
(re.compile(r'Not enough total memory'),
"Not Enough Total Memory"),
(re.compile(r'Use of \$pcm_solvent section has been deprecated starting in Q-Chem'),
"pcm_solvent deprecated")
)
energies = []
scf_iters = []
coords = []
species = []
molecules = []
gradients = []
freqs = []
vib_freqs = []
vib_modes = []
grad_comp = None
errors = []
parse_input = False
parse_coords = False
parse_scf_iter = False
parse_gradient = False
parse_freq = False
parse_modes = False
qctask_lines = []
qctask = None
jobtype = None
charge = None
scr_dir = None
spin_multiplicity = None
thermal_corr = dict()
properly_terminated = False
pop_method = None
parse_charge = False
nbo_available = False
nbo_charge_header = None
parse_nbo_charge = False
charges = dict()
scf_successful = False
opt_successful = False
parse_alpha_homo = False
parse_alpha_lumo = False
parse_beta_homo = False
parse_beta_lumo = False
current_alpha_homo = None
current_alpha_lumo = None
current_beta_homo = None
homo_lumo = []
bsse = None
hiershfiled_pop = False
gen_scfman = False
for line in output.split("\n"):
for ep, message in error_defs:
if ep.search(line):
if message == "NAN values":
if "time" in line:
continue
errors.append(message)
if parse_input:
if "-" * 50 in line:
if len(qctask_lines) == 0:
continue
else:
qctask = QcTask.from_string('\n'.join(qctask_lines))
jobtype = qctask.params["rem"]["jobtype"]
parse_input = False
continue
qctask_lines.append(line)
elif parse_coords:
if "-" * 50 in line:
if len(coords) == 0:
continue
else:
if qctask and qctask.ghost_atoms:
if isinstance(qctask.mol, Molecule):
for i in qctask.ghost_atoms:
species[i] = qctask.mol.sites[i].specie.symbol
molecules.append(Molecule(species, coords))
coords = []
species = []
parse_coords = False
continue
if "Atom" in line:
continue
m = coord_pattern.match(line)
coords.append([float(m.group("x")), float(m.group("y")),
float(m.group("z"))])
species.append(m.group("element"))
elif parse_scf_iter:
if "SCF time: CPU" in line:
parse_scf_iter = False
continue
if 'Convergence criterion met' in line and gen_scfman:
scf_successful = True
name = "GEN_SCFMAN"
energy = Energy(float(line.split()[1]), "Ha").to("eV")
energies.append(tuple([name, energy]))
if 'Convergence criterion met' in line:
scf_successful = True
m = scf_iter_pattern.search(line)
if m:
scf_iters[-1].append((float(m.group("energy")),
float(m.group("diis_error"))))
elif parse_gradient:
if "Max gradient component" in line:
gradients[-1]["max_gradient"] = \
float(line.split("=")[1])
if grad_comp:
if len(grad_comp) == 3:
gradients[-1]["gradients"].extend(zip(*grad_comp))
else:
raise Exception("Gradient section parsing failed")
continue
elif "RMS gradient" in line:
gradients[-1]["rms_gradient"] = \
float(line.split("=")[1])
parse_gradient = False
grad_comp = None
continue
elif "." not in line:
if grad_comp:
if len(grad_comp) == 3:
gradients[-1]["gradients"].extend(zip(*grad_comp))
else:
raise Exception("Gradient section parsing failed")
grad_comp = []
else:
grad_line_token = list(line)
grad_crowd = False
grad_line_final = line
for i in range(5, len(line), 12):
c = grad_line_token[i]
if not c.isspace():
grad_crowd = True
if ' ' in grad_line_token[i+1: i+6+1] or \
len(grad_line_token[i+1: i+6+1]) < 6:
continue
grad_line_token[i-1] = ' '
if grad_crowd:
grad_line_final = ''.join(grad_line_token)
grad_comp.append([float(x) for x
in grad_line_final.strip().split()[1:]])
elif parse_freq:
if parse_modes:
if "TransDip" in line:
parse_modes = False
for freq, mode in zip(vib_freqs, zip(*vib_modes)):
freqs.append({"frequency": freq,
"vib_mode": mode})
vib_modes = []
continue
dis_flat = [float(x) for x in line.strip().split()[1:]]
dis_atom = zip(*([iter(dis_flat)]*3))
vib_modes.append(dis_atom)
if "STANDARD THERMODYNAMIC QUANTITIES" in line\
or "Imaginary Frequencies" in line:
parse_freq = False
continue
if "Frequency:" in line:
vib_freqs = [float(vib) for vib
in line.strip().strip().split()[1:]]
elif "X Y Z" in line:
parse_modes = True
continue
elif parse_charge:
if '-'*20 in line:
if len(charges[pop_method]) == 0:
continue
else:
pop_method = None
parse_charge = False
else:
if len(line.strip()) == 0 or\
'Atom' in line:
continue
else:
charges[pop_method].append(float(line.split()[2]))
elif parse_nbo_charge:
if '-'*20 in line:
if len(charges[pop_method]) == 0:
continue
elif "="*20 in line:
pop_method = None
parse_nbo_charge = False
else:
m = nbo_charge_pattern.search(line)
if m:
charges[pop_method].append(float(m.group("charge")))
else:
raise Exception("Can't find NBO charges")
elif parse_alpha_homo:
if "-- Occupied --" in line:
continue
elif "-- Virtual --" in line:
parse_alpha_homo = False
parse_alpha_lumo = True
continue
else:
tokens = line.split()
m = float_pattern.search(tokens[-1])
if m:
current_alpha_homo = float(m.group(0))
continue
elif parse_alpha_lumo:
current_alpha_lumo = float(line.split()[0])
parse_alpha_lumo = False
continue
elif parse_beta_homo:
if "-- Occupied --" in line:
continue
elif "-- Virtual --" in line:
parse_beta_homo = False
parse_beta_lumo = True
continue
else:
tokens = line.split()
m = float_pattern.search(tokens[-1])
if m:
current_beta_homo = float(m.group(0))
continue
elif parse_beta_lumo:
current_beta_lumo = float(line.split()[0])
parse_beta_lumo = False
if isinstance(current_alpha_homo, float) and isinstance(current_beta_homo, float):
current_homo = max([current_alpha_homo, current_beta_homo])
else:
current_homo = 0.0
if isinstance(current_alpha_lumo, float) and isinstance(current_beta_lumo, float):
current_lumo = min([current_alpha_lumo, current_beta_lumo])
else:
current_lumo = 0.0
homo_lumo.append([Energy(current_homo, "Ha").to("eV"),
Energy(current_lumo, "Ha").to("eV")])
current_alpha_homo = None
current_alpha_lumo = None
current_beta_homo = None
continue
elif "-" * 50 in line and not (current_alpha_lumo is None):
homo_lumo.append([Energy(current_alpha_homo, "Ha").to("eV"),
Energy(current_alpha_lumo, "Ha").to("eV")])
current_alpha_homo = None
current_alpha_lumo = None
current_beta_homo = None
continue
else:
if spin_multiplicity is None:
m = num_ele_pattern.search(line)
if m:
spin_multiplicity = int(m.group("alpha")) - \
int(m.group("beta")) + 1
if charge is None:
m = total_charge_pattern.search(line)
if m:
charge = int(float(m.group("charge")))
if scr_dir is None:
m = scr_dir_pattern.search(line)
if m:
scr_dir = os.path.abspath(m.group("scr_dir"))
if jobtype and jobtype == "freq":
m = zpe_pattern.search(line)
if m:
zpe = float(m.group("zpe"))
thermal_corr["ZPE"] = zpe
m = thermal_corr_pattern.search(line)
if m:
thermal_corr[m.group("name")] = \
float(m.group("correction"))
m = bsse_pattern.search(line)
if m:
raw_be = float(m.group("raw_be"))
corrected_be = float(m.group("corrected_be"))
bsse_fwu = FloatWithUnit(raw_be - corrected_be, "kJ mol^-1")
bsse = bsse_fwu.to('eV atom^-1').real
name = None
energy = None
m = scf_energy_pattern.search(line)
if m and not gen_scfman:
name = "SCF"
energy = Energy(m.group("energy"), "Ha").to("eV")
m = corr_energy_pattern.search(line)
if m and m.group("name") != "SCF" and not gen_scfman:
name = m.group("name")
energy = Energy(m.group("energy"), "Ha").to("eV")
m = detailed_charge_pattern.search(line)
if m:
pop_method = m.group("method").lower()
parse_charge = True
charges[pop_method] = []
if nbo_available:
if nbo_charge_header is None:
m = nbo_wavefunction_type_pattern.search(line)
if m:
nbo_wavefunction_type = m.group("type")
nbo_charge_header_dict = {
"closed-shell": "Atom No Charge Core "
"Valence Rydberg Total",
"open-shell": "Atom No Charge Core "
"Valence Rydberg Total Density"}
nbo_charge_header = nbo_charge_header_dict[nbo_wavefunction_type]
continue
if nbo_charge_header in line:
pop_method = "nbo"
parse_nbo_charge = True
charges[pop_method] = []
if "GEN_SCFMAN: A general SCF calculation manager " in line:
gen_scfman = True
if "N A T U R A L B O N D O R B I T A L A N A L Y S I S" in line:
nbo_available = True
if name and energy:
energies.append(tuple([name, energy]))
if "User input:" in line:
parse_input = True
elif "Standard Nuclear Orientation (Angstroms)" in line:
parse_coords = True
elif "Performing Hirshfeld population analysis" in line:
hiershfiled_pop = True
elif "Hirshfeld: atomic densities completed" in line:
hiershfiled_pop = False
elif ("Cycle Energy DIIS Error" in line
or "Cycle Energy RMS Gradient" in line)\
and not hiershfiled_pop:
parse_scf_iter = True
scf_iters.append([])
scf_successful = False
elif "Gradient of SCF Energy" in line:
parse_gradient = True
gradients.append({"gradients": []})
elif "VIBRATIONAL ANALYSIS" in line:
parse_freq = True
elif "Alpha MOs" in line:
parse_alpha_homo = True
parse_alpha_lumo = False
elif "Beta MOs" in line:
parse_beta_homo = True
parse_beta_lumo = False
elif "Thank you very much for using Q-Chem." in line:
properly_terminated = True
elif "OPTIMIZATION CONVERGED" in line:
opt_successful = True
if charge is None:
errors.append("Molecular charge is not found")
elif spin_multiplicity is None:
errors.append("Molecular spin multipilicity is not found")
else:
for mol in molecules:
if qctask is None or qctask.ghost_atoms is None:
mol.set_charge_and_spin(charge, spin_multiplicity)
for k in thermal_corr.keys():
v = thermal_corr[k]
if "Entropy" in k:
v *= cls.kcal_per_mol_2_eV * 1.0E-3
else:
v *= cls.kcal_per_mol_2_eV
thermal_corr[k] = v
solvent_method = "NA"
if qctask:
if "solvent_method" in qctask.params["rem"]:
solvent_method = qctask.params["rem"]["solvent_method"]
else:
errors.append("No input text")
if not scf_successful:
if 'Bad SCF convergence' not in errors:
errors.append('Bad SCF convergence')
if jobtype == 'opt':
if not opt_successful:
if 'Geometry optimization failed' not in errors:
errors.append('Geometry optimization failed')
if len(errors) == 0:
for text in cls._expected_successful_pattern(qctask):
success_pattern = re.compile(text)
if not success_pattern.search(output):
errors.append("Can't find text to indicate success")
data = {
"jobtype": jobtype,
"energies": energies,
"HOMO/LUMOs": homo_lumo,
"bsse": bsse,
'charges': charges,
"corrections": thermal_corr,
"molecules": molecules,
"errors": errors,
"has_error": len(errors) > 0,
"frequencies": freqs,
"gradients": gradients,
"input": qctask,
"gracefully_terminated": properly_terminated,
"scf_iteration_energies": scf_iters,
"solvent_method": solvent_method,
"scratch_dir": scr_dir
}
return data
class QcNucVeloc(object):
"""
    Class to parse a QChem AIMD NucVeloc file.
Args:
filename (str): Filename to parse
    .. attribute:: step_times (fs)
The AIMD time stamp for each frame
.. attribute:: velocities (a.u.)
The atom velocities for each frame. Format:
[[[x, y, z]
[x, y, z]
... ] ## frame 1
...
[[x, y, z]
[x, y, z]
... ] ## frame N
]
"""
def __init__(self, filename):
self.filename = filename
try:
with zopen(filename, "rt") as f:
data = f.read()
except UnicodeDecodeError:
with zopen(filename, "rb") as f:
data = f.read().decode("latin-1")
self.step_times = []
self.velocities = []
for line in data.split("\n")[1:]:
tokens = line.split()
if len(tokens) < 4:
break
step_time = float(tokens[0])
nuc_veloc_tokens = [float(v) for v in tokens[1:]]
# unit in au
veloc = list(zip(*([iter(nuc_veloc_tokens)] * 3)))
self.step_times.append(step_time)
self.velocities.append(veloc)
|
matk86/pymatgen
|
pymatgen/io/qchem.py
|
Python
|
mit
| 87,768
|
[
"Gaussian",
"Q-Chem",
"pymatgen"
] |
045274848fee1c3edc603bfc0f3a673ebbe1aabd6b9863ae44cb283649808fb7
|
#! /usr/bin/env python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import print_function
import argparse
import os
import re
import subprocess
import sys
def collect_version_input_from_fallback(meta_file='metadata.py'):
"""From *meta_file*, collect lines matching ``_version_{key} = {value}``
and return as dictionary.
"""
cwd = os.path.dirname(os.path.abspath(__file__))
    res = dict(re.findall(r"__version_([a-z_]+)\s*=\s*'([^']+)'", open(cwd + '/' + meta_file).read()))
res.pop('_')
return res
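# Hedged example of the fallback path: metadata.py lines such as
#   __version_long = '1.3.2+a68d223'
#   __version_is_clean = 'True'
# are collected as {'long': '1.3.2+a68d223', 'is_clean': 'True', ...}; the
# values shown here are illustrative, not taken from any real metadata.py.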
def is_git_repo(cwd='./', dot_git_qualifies=False, no_git_cmd_result=False):
"""Returns boolean as to whether *cwd* is under git control. When no ``git``
command available in environment, *no_git_cmd_result* returned. If within
the .git directory of a git repository, *dot_git_qualifies* returned.
"""
command = 'git rev-parse --is-inside-work-tree'
try:
process = subprocess.Popen(command.split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=cwd,
universal_newlines=True)
except EnvironmentError as e:
# most likely, git command not available
return no_git_cmd_result
(out, err) = process.communicate()
if process.returncode != 0:
# fatal: Not a git repository (or any of the parent directories): .git
return False
if out.strip() == 'true':
# in a git repo and not within .git dir
return True
if out.strip() == 'false':
# in a git repo in .git dir
return dot_git_qualifies
def collect_version_input_from_git():
"""Returns a dictionary filled with ``git describe`` results, clean/dirty
flag, and branch status. *cwd* should already be confirmed as a git
repository; this doesn't catch returncodes or EnvironmentErrors because the
raised errors are preferred to incomplete return dictionary.
"""
cwd = os.path.dirname(os.path.abspath(__file__))
res = {}
# * only want annotated tags, so not --all
# * in case *no* tags (impossible in Psi4), --always gets at least hash
# * get commits & hash info even if on tag using --long
command = 'git describe --abbrev=7 --long --always HEAD'
process = subprocess.Popen(command.split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=cwd,
universal_newlines=True)
(out, err) = process.communicate()
fields = str(out).rstrip().split('-')
if len(fields) == 3:
# normal: 0.1-62-ga68d223
res['latest_annotated_v_tag'] = fields[0][1:] # drop the "v"; tag mismatch caught later
res['commits_since_tag'] = fields[1]
res['seven_char_hash'] = fields[2][1:] # drop the "g" git identifier
else:
# no tag present: a68d223
res['latest_annotated_v_tag'] = ''
res['commits_since_tag'] = ''
res['seven_char_hash'] = fields[0] # no prepended "g"
command = 'git diff-index --name-only HEAD'
process = subprocess.Popen(command.split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=cwd,
universal_newlines=True)
(out, err) = process.communicate()
res['is_clean'] = False if str(out).rstrip() else True
command = 'git rev-parse --abbrev-ref HEAD' # returns HEAD when detached
process = subprocess.Popen(command.split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=cwd,
universal_newlines=True)
(out, err) = process.communicate()
res['branch_name'] = str(out).rstrip()
return res
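# Illustrative result (made-up values): when `git describe` prints
# "v1.3-62-ga68d223", the returned dict holds latest_annotated_v_tag='1.3',
# commits_since_tag='62' and seven_char_hash='a68d223', plus is_clean and
# branch_name from the two follow-up git commands.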
def reconcile_and_compute_version_output(quiet=False):
res = collect_version_input_from_fallback(meta_file='metadata.py')
meta_latest_annotated_v_tag, _, meta_seven_char_hash = res['long'].partition('+')
# this is the tag format (PEP440 compliant) that our machinery is expecting.
# let's catch any deviations with Travis before it can corrupt versioning.
    sane_tag = re.compile(r"""^(?P<tag>(?P<forwardseries>\d+\.\d+(?P<patch>\.[1-9]+)?)(?(patch)|(?P<prere>((a)|(b)|(rc))\d+)?))$""")
mobj = sane_tag.match(meta_latest_annotated_v_tag)
if mobj:
# some versioning machinery (looking at you, CMake) does strictly
# numerical comparisons such as M.m.p.t and thus can't handle
        # prereleases and dev snapshots. we compute a Most Recent Ancestral
# Release tag (e.g., 1.0 or 1.12.1) for a backward release series.
backwardseries = mobj.group('forwardseries')
if mobj.group('prere'):
tmp = backwardseries.split('.')
bumpdown = str(int(tmp[-1]) - 1)
if bumpdown == '-1':
print("""Unavoidable snag. Probably "2.0". Can't predict backward series from present prerelease.""")
sys.exit()
else:
tmp[-1] = bumpdown
backwardseries = '.'.join(tmp)
else:
print("""Tag in {} is malformed: {}""".format(
'metadata.py', meta_latest_annotated_v_tag))
sys.exit()
cwd = os.path.dirname(os.path.abspath(__file__))
if is_git_repo(cwd=cwd):
res.update(collect_version_input_from_git())
# establish the default response
project_release = False
project_prerelease = False
project_version = 'undefined'
project_version_long = 'undefined+' + res['seven_char_hash']
if res['latest_annotated_v_tag'] == meta_latest_annotated_v_tag:
trial_version_long_release = res['latest_annotated_v_tag'] + '+' + res['seven_char_hash']
trial_version_devel = res['upcoming_annotated_v_tag'] + '.dev' + res['commits_since_tag']
trial_version_long_devel = trial_version_devel + '+' + res['seven_char_hash']
if int(res['commits_since_tag']) == 0:
if trial_version_long_release == res['long']:
print("""Amazing, this can't actually happen that git hash stored at git commit.""")
sys.exit()
else:
if meta_seven_char_hash == 'zzzzzzz':
if not quiet:
print("""Defining {} version: {} (recorded and computed)""".format(
'prerelease' if mobj.group('prere') else 'release', trial_version_long_release))
project_release = res['is_clean'] and not mobj.group('prere')
project_prerelease = res['is_clean'] and mobj.group('prere')
project_version = meta_latest_annotated_v_tag
project_version_long = trial_version_long_release
else:
print("""Undefining version for irreconcilable hashes: {} (computed) vs {} (recorded)""".format(
trial_version_long_release, res['long']))
else:
if res['branch_name'].endswith('.x'):
print("""Undefining version as development snapshots not allowed on maintenance branch: {} (rejected computed)""".format(
trial_version_long_devel))
# TODO prob should be undef unless on master
else:
if not quiet:
print("""Defining development snapshot version: {} (computed)""".format(
trial_version_long_devel))
project_version = trial_version_devel
project_version_long = trial_version_long_devel
else:
print("""Undefining version for irreconcilable tags: {} (computed) vs {} (recorded)""".format(
res['latest_annotated_v_tag'], meta_latest_annotated_v_tag))
else:
print("""Blindly (no git) accepting release version: {} (recorded)""".format(
res['long']))
# assumes that zip only comes from [pre]release. GitHub hides others, but they're there.
project_release = not bool(mobj.group('prere'))
project_prerelease = bool(mobj.group('prere'))
project_version = meta_latest_annotated_v_tag
project_version_long = res['long']
res['is_clean'] = True
res['branch_name'] = ''
def mapped_cmake_version(last_release, is_release):
"""CMake expects MAJOR.MINOR.PATCH.TWEAK. The ancestral *last_release*
is padded into the first three roles. If not *is_release*, the tweak role
collects all postrelease states (prereleases and devel snapshots) into
dummy 999 that at least gets them sorted correctly between releases and
allows EXACT CMake version comparisons. Returns, for example, 1.1.0.0 for
release 1.1, 1.3.4.0 for maintenance release 1.3.4, and 1.0.0.999 for
prerelease 1.1a1 or snapshot 1.1.dev600
"""
cm = last_release.split('.')
cm += ['0'] * (4 - len(cm))
if not is_release:
cm[-1] = '999'
cm = '.'.join(cm)
return cm
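    # Worked examples for mapped_cmake_version, following its docstring:
    #   mapped_cmake_version('1.1', True)   -> '1.1.0.0'
    #   mapped_cmake_version('1.3.4', True) -> '1.3.4.0'
    #   mapped_cmake_version('1.0', False)  -> '1.0.0.999'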
return {'__version__': project_version,
'__version_long': project_version_long,
'__version_is_clean': res['is_clean'],
'__version_branch_name': res['branch_name'],
'__version_last_release': backwardseries,
'__version_cmake': mapped_cmake_version(backwardseries, project_release),
'__version_release': project_release,
'__version_prerelease': project_prerelease}
def write_new_metafile(versdata, outfile='metadata.out.py'):
formatter_fn = """
def version_formatter(formatstring='{version}'):
if formatstring == 'all':
formatstring = '{version} {{{branch}}} {githash} {cmake} {clean} {release} {lastrel} <-- {versionlong}'
release = 'release' if (__version_release == 'True') else ('prerelease' if (__version_prerelease == 'True') else '')
ans = formatstring.format(version=__version__,
versionlong=__version_long,
githash=__version_long[len(__version__)+1:],
clean='' if __version_is_clean == 'True' else 'dirty',
branch=__version_branch_name,
lastrel=__version_last_release,
cmake=__version_cmake,
release=release)
return ans
"""
main_fn = """
if __name__ == '__main__':
print(version_formatter(formatstring='all'))
"""
with open(os.path.abspath(outfile), 'w') as handle:
for k in sorted(versdata):
handle.write("""{} = '{}'\n""".format(k, versdata[k]))
handle.write(formatter_fn)
handle.write(main_fn)
def write_new_cmake_metafile(versdata, outfile='metadata.out.cmake'):
main_fn = """
include(CMakePackageConfigHelpers)
write_basic_package_version_file(
${{WTO}}/${{PN}}ConfigVersion.cmake
VERSION {ver}
COMPATIBILITY AnyNewerVersion)
"""
with open(os.path.abspath(outfile), 'w') as handle:
handle.write(main_fn.format(ver=versdata['__version_cmake']))
def version_formatter(versdata, formatstring="""{version}"""):
"""Return version information string with data from *versdata* when
supplied with *formatstring* suitable for ``formatstring.format()``.
Use plaintext and any placeholders among: version, versionlong, githash,
branch, clean, release, lastrel, cmake. For example, '{branch}@{githash}'
returns something like 'fix200@1234567'.
"""
if formatstring == 'all':
formatstring = '{version} {{{branch}}} {githash} {cmake} {clean} {release} {lastrel} <-- {versionlong}'
release = 'release' if versdata['__version_release'] else ('prerelease' if versdata['__version_prerelease'] else '')
ans = formatstring.format(version=versdata['__version__'],
versionlong=versdata['__version_long'],
githash=versdata['__version_long'][len(versdata['__version__']) + 1:],
clean='' if versdata['__version_is_clean'] else 'dirty',
branch=versdata['__version_branch_name'],
lastrel=versdata['__version_last_release'],
cmake=versdata['__version_cmake'],
release=release)
return ans
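# Illustrative sketch (hypothetical values, not produced by this script):
#     versdata = {'__version__': '1.1', '__version_long': '1.1+abc1234',
#                 '__version_is_clean': True, '__version_branch_name': 'master',
#                 '__version_last_release': '1.1', '__version_cmake': '1.1.0.0',
#                 '__version_release': True, '__version_prerelease': False}
#     version_formatter(versdata, '{version} {{{branch}}} {githash}')
#     # -> '1.1 {master} abc1234'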
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Script to extract Psi4 version from source. Use psi4.version_formatter(fmt_string) after build.')
    parser.add_argument('--metaout', default='metadata.out.py', help='file to which the computed version info is written')
    parser.add_argument('--cmakeout', default='metadata.out.cmake', help='file to which the CMake ConfigVersion generator is written')
parser.add_argument('--format', default='all', help='string like "{version} {githash}" to be filled in and returned')
parser.add_argument('--formatonly', action='store_true', help='print only the format string, not the detection info')
args = parser.parse_args()
ans = reconcile_and_compute_version_output(quiet=args.formatonly)
write_new_metafile(ans, args.metaout)
write_new_cmake_metafile(ans, args.cmakeout)
ans2 = version_formatter(ans, formatstring=args.format)
print(ans2)
| psi-rking/psi4 | psi4/versioner.py | Python | lgpl-3.0 | 14,574 | ["Psi4"] | 7a7a630cf8f6e74392a4afc0b09f571449ffdad32ba146695dc65321159f04a0 |
from __future__ import absolute_import, division, print_function
import sys
import os.path
import warnings
import datetime, time
import numpy as np
import astropy.table
# See pixsim.py
import astropy.time
from astropy.io import fits
import fitsio
import desitarget
import desitarget.targetmask
from desitarget.targets import main_cmx_or_sv
import desispec.io
import desispec.io.util
import desimodel.io
from desimodel.focalplane import fiber_area_arcsec2
import desiutil.depend
from desiutil.iers import freeze_iers
import desispec.interpolation
import desisim.io
import desisim.specsim
#- Reference observing conditions for each of dark, gray, bright
reference_conditions = dict(DARK=dict(), GRAY=dict(), BRIGHT=dict())
reference_conditions['DARK']['SEEING'] = 1.1
reference_conditions['DARK']['EXPTIME'] = 1000
reference_conditions['DARK']['AIRMASS'] = 1.0
reference_conditions['DARK']['MOONFRAC'] = 0.0
reference_conditions['DARK']['MOONALT'] = -60
reference_conditions['DARK']['MOONSEP'] = 180
reference_conditions['GRAY']['SEEING'] = 1.1
reference_conditions['GRAY']['EXPTIME'] = 1000
reference_conditions['GRAY']['AIRMASS'] = 1.0
reference_conditions['GRAY']['MOONFRAC'] = 0.1
reference_conditions['GRAY']['MOONALT'] = 10
reference_conditions['GRAY']['MOONSEP'] = 60
reference_conditions['BRIGHT']['SEEING'] = 1.1
reference_conditions['BRIGHT']['EXPTIME'] = 300
reference_conditions['BRIGHT']['AIRMASS'] = 1.0
reference_conditions['BRIGHT']['MOONFRAC'] = 0.7
reference_conditions['BRIGHT']['MOONALT'] = 60
reference_conditions['BRIGHT']['MOONSEP'] = 50
for objtype in ('LRG', 'QSO', 'ELG'):
reference_conditions[objtype] = reference_conditions['DARK']
for objtype in ('MWS', 'BGS'):
reference_conditions[objtype] = reference_conditions['BRIGHT']
def simarc(arcdata, nspec=5000, nonuniform=False, testslit=False):
'''
Simulates an arc lamp exposure
Args:
arcdata (Table): Table with columns VACUUM_WAVE and ELECTRONS
        nspec (int, optional): number of spectra to simulate
        nonuniform (bool, optional): include calibration screen non-uniformity
        testslit (bool, optional): if True, use the test slit fibermap (one fiber per bundle) instead of the full science slit fibermap
Returns: (wave, phot, fibermap)
wave: 1D[nwave] wavelengths in Angstroms
phot: 2D[nspec,nwave] photons observed by CCD (i.e. electrons)
fibermap: fibermap Table
Note: this bypasses specsim since we don't have an arclamp model in
surface brightness units; we only have electrons on the CCD. But it
does include the effect of varying fiber sizes.
TODO:
* add exptime support
* update inputs to surface brightness and DESI lamp lines (DESI-2674)
* add psfconvolve option
'''
wave = arcdata['VACUUM_WAVE']
phot = arcdata['ELECTRONS']
if testslit:
fibermap = astropy.table.Table(testslit_fibermap()[0:nspec])
else:
fibermap = astropy.table.Table(desispec.io.empty_fibermap(nspec))
fibermap.meta['FLAVOR'] = 'arc'
fibermap['OBJTYPE'] = 'ARC'
x = fibermap['FIBERASSIGN_X']
y = fibermap['FIBERASSIGN_Y']
r = np.sqrt(x**2 + y**2)
#-----
    #- Determine ratio of fiber sizes relative to largest fiber
fiber_area = fiber_area_arcsec2(x, y)
size_ratio = fiber_area / np.max(fiber_area)
#- Correct photons for fiber size
phot = np.tile(phot, nspec).reshape(nspec, len(wave))
phot = (phot.T * size_ratio).T
#- Apply calibration screen non-uniformity
if nonuniform:
ratio = _calib_screen_uniformity(radius=r)
assert np.all(ratio <= 1) and np.all(ratio > 0.99)
phot = (phot.T * ratio).T
return wave, phot, fibermap
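#- Illustrative usage sketch (the arc-line filename is hypothetical); the input
#- table only needs the VACUUM_WAVE and ELECTRONS columns described above.
#     arcdata = astropy.table.Table.read('arc-lines.fits')
#     wave, phot, fibermap = simarc(arcdata, nspec=500, nonuniform=True)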
def simflat(flatfile, nspec=5000, nonuniform=False, exptime=10, testslit=False,
psfconvolve=True, specsim_config_file="desi"):
'''
Simulates a flat lamp calibration exposure
Args:
flatfile (str): filename with flat lamp spectrum data
nspec (int, optional): number of spectra to simulate
nonuniform (bool, optional): include calibration screen non-uniformity
exptime (float, optional): exposure time in seconds
psfconvolve (bool, optional): passed to simspec.simulator.Simulator camera_output.
if True, convolve with PSF and include per-camera outputs
specsim_config_file (str, optional): path to DESI instrument config file.
default is desi config in specsim package.
Returns: (sim, fibermap)
sim: specsim Simulator object
fibermap: fibermap Table
'''
import astropy.units as u
import specsim.simulator
from desiutil.log import get_logger
log = get_logger()
freeze_iers()
log.info('Reading flat lamp spectrum from {}'.format(flatfile))
sbflux, hdr = fits.getdata(flatfile, header=True)
wave = desispec.io.util.header2wave(hdr)
assert len(wave) == len(sbflux)
#- Trim to DESI wavelength ranges
#- TODO: is there an easier way to get these parameters?
try:
params = desimodel.io.load_desiparams()
wavemin = params['ccd']['b']['wavemin']
wavemax = params['ccd']['z']['wavemax']
except KeyError:
wavemin = desimodel.io.load_throughput('b').wavemin
wavemax = desimodel.io.load_throughput('z').wavemax
ii = (wavemin <= wave) & (wave <= wavemax)
wave = wave[ii]
sbflux = sbflux[ii]
#- Downsample to 0.2A grid to not blow up memory
ww = np.arange(wave[0], wave[-1]+0.1, 0.2)
sbflux = desispec.interpolation.resample_flux(ww, wave, sbflux)
wave = ww
if testslit:
fibermap = astropy.table.Table(testslit_fibermap()[0:nspec])
else:
fibermap = astropy.table.Table(desispec.io.empty_fibermap(nspec))
fibermap.meta['FLAVOR'] = 'flat'
fibermap['OBJTYPE'] = 'FLT'
x = fibermap['FIBERASSIGN_X']
y = fibermap['FIBERASSIGN_Y']
r = np.sqrt(x**2 + y**2)
xy = np.vstack([x, y]).T * u.mm
#- Convert to unit-ful 2D
sbunit = 1e-17 * u.erg / (u.Angstrom * u.s * u.cm ** 2 * u.arcsec ** 2)
sbflux = np.tile(sbflux, nspec).reshape(nspec, len(wave)) * sbunit
if nonuniform:
ratio = _calib_screen_uniformity(radius=r)
assert np.all(ratio <= 1) and np.all(ratio > 0.99)
sbflux = (sbflux.T * ratio).T
tmp = np.min(sbflux) / np.max(sbflux)
log.info('Adjusting for calibration screen non-uniformity {:.4f}'.format(tmp))
log.debug('Creating specsim configuration')
config = _specsim_config_for_wave(wave,specsim_config_file=specsim_config_file)
log.debug('Creating specsim simulator for {} spectra'.format(nspec))
# sim = specsim.simulator.Simulator(config, num_fibers=nspec)
sim = desisim.specsim.get_simulator(config, num_fibers=nspec,
camera_output=psfconvolve)
sim.observation.exposure_time = exptime * u.s
log.debug('Simulating')
sim.simulate(calibration_surface_brightness=sbflux, focal_positions=xy)
return sim, fibermap
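#- Illustrative usage sketch (hypothetical filename); returns the specsim
#- Simulator and the fibermap Table described above.
#     sim, fibermap = simflat('flat-spectrum.fits', nspec=500, exptime=10)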
def _calib_screen_uniformity(theta=None, radius=None):
'''
Returns calibration screen relative non-uniformity as a function
of theta (degrees) or focal plane radius (mm)
'''
if theta is not None:
assert radius is None
#- Julien Guy fit to DESI-2761v1 figure 5
#- ratio lamp/sky = 1 - 9.4e-04*theta - 2.1e-03 * theta**2
return 1 - 9.4e-04*theta - 2.1e-03 * theta**2
elif radius is not None:
import desimodel.io
ps = desimodel.io.load_platescale()
theta = np.interp(radius, ps['radius'], ps['theta'])
return _calib_screen_uniformity(theta=theta)
else:
raise ValueError('must provide theta or radius')
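#- Illustrative sketch of the quadratic above: the ratio is 1.0 on axis and
#- falls off slowly with field angle (in degrees).
#     _calib_screen_uniformity(theta=0.0)   # -> 1.0
#     _calib_screen_uniformity(theta=1.5)   # -> 1 - 9.4e-04*1.5 - 2.1e-03*1.5**2 ~= 0.9939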
def simscience(targets, fiberassign, obsconditions='DARK', expid=None,
nspec=None, psfconvolve=True):
'''
Simulates a new DESI exposure from surveysim+fiberassign+mock spectra
Args:
targets (tuple): tuple of (flux[nspec,nwave], wave[nwave], meta[nspec])
fiberassign (Table): fiber assignments table
obsconditions (object, optional): observation metadata as
str: DARK (default) or GRAY or BRIGHT
dict or row of Table with keys::
SEEING (arcsec), EXPTIME (sec), AIRMASS,
MOONFRAC (0-1), MOONALT (deg), MOONSEP (deg)
Table including EXPID for subselection of which row to use
filename with obsconditions Table; expid must also be set
expid (int, optional): exposure ID
nspec (int, optional): number of spectra to simulate
psfconvolve (bool, optional): passed to simspec.simulator.Simulator camera_output.
if True, convolve with PSF and include per-camera outputs
Returns: (sim, fibermap, meta)
sim: specsim.simulate.Simulator object
fibermap: Table
meta: target metadata truth table
See obs.new_exposure() for function to generate new random exposure,
independent from surveysim, fiberassignment, and pre-generated mocks.
'''
from desiutil.log import get_logger
log = get_logger()
freeze_iers()
flux, wave, meta = targets
if nspec is not None:
fiberassign = fiberassign[0:nspec]
flux = flux[0:nspec]
meta = meta[0:nspec]
assert np.all(fiberassign['TARGETID'] == meta['TARGETID'])
fibermap = fibermeta2fibermap(fiberassign, meta)
#- Parse multiple options for obsconditions
if isinstance(obsconditions, str):
#- DARK GRAY BRIGHT
if obsconditions.upper() in reference_conditions:
log.info('Using reference {} obsconditions'.format(obsconditions.upper()))
obsconditions = reference_conditions[obsconditions.upper()]
#- filename
elif os.path.exists(obsconditions):
            log.info('Loading obsconditions from {}'.format(obsconditions))
if obsconditions.endswith('.ecsv'):
allobs = astropy.table.Table.read(obsconditions, format='ascii.ecsv')
else:
allobs = astropy.table.Table.read(obsconditions)
#- trim down to just this exposure
if (expid is not None) and 'EXPID' in allobs.colnames:
obsconditions = allobs[allobs['EXPID'] == expid]
else:
raise ValueError('unable to select which exposure from obsconditions file')
else:
raise ValueError('bad obsconditions {}'.format(obsconditions))
elif isinstance(obsconditions, (astropy.table.Table, np.ndarray)):
#- trim down to just this exposure
if (expid is not None) and ('EXPID' in obsconditions):
            obsconditions = obsconditions[obsconditions['EXPID'] == expid]
else:
raise ValueError('must provide expid when providing obsconditions as a Table')
#- Validate obsconditions keys
try:
obskeys = set(obsconditions.dtype.names)
except AttributeError:
obskeys = set(obsconditions.keys())
missing_keys = set(reference_conditions['DARK'].keys()) - obskeys
if len(missing_keys) > 0:
raise ValueError('obsconditions missing keys {}'.format(missing_keys))
sim = simulate_spectra(wave, flux, fibermap=fibermap,
obsconditions=obsconditions, psfconvolve=psfconvolve)
    return sim, fibermap, meta
def fibermeta2fibermap(fiberassign, meta):
'''
Convert a fiberassign + targeting metadata table into a fibermap Table
A future refactor will standardize the column names of fiber assignment,
target catalogs, and fibermaps, but in the meantime this is needed.
'''
#- Handle DESI_TARGET vs. SV1_DESI_TARGET etc.
target_colnames, target_masks, survey = main_cmx_or_sv(fiberassign)
targetcol = target_colnames[0] #- DESI_TARGET or SV1_DESI_TARGET
desi_mask = target_masks[0] #- desi_mask or sv1_desi_mask
#- Copy column names in common
fibermap = desispec.io.empty_fibermap(len(fiberassign))
for c in ['FIBER', 'TARGETID', 'BRICKNAME']:
fibermap[c] = fiberassign[c]
for c in target_colnames:
fibermap[c] = fiberassign[c]
for band in ['G', 'R', 'Z', 'W1', 'W2']:
key = 'FLUX_'+band
fibermap[key] = meta[key]
#- TODO: FLUX_IVAR_*
#- set OBJTYPE
#- TODO: what about MWS science targets that are also standard stars?
#- Loop over STD options for backwards/forwards compatibility
stdmask = 0
    for name in ['STD', 'STD_FSTAR', 'STD_WD',
'STD_FAINT', 'STD_FAINT_BEST',
'STD_BRIGHT', 'STD_BRIGHT_BEST']:
if name in desi_mask.names():
stdmask |= desi_mask[name]
isSTD = (fiberassign[targetcol] & stdmask) != 0
isSKY = (fiberassign[targetcol] & desi_mask.SKY) != 0
isSCI = (~isSTD & ~isSKY)
fibermap['OBJTYPE'][isSKY] = 'SKY'
fibermap['OBJTYPE'][isSCI | isSTD] = 'TGT'
fibermap['LAMBDAREF'] = 5400.0
fibermap['TARGET_RA'] = fiberassign['TARGET_RA']
fibermap['TARGET_DEC'] = fiberassign['TARGET_DEC']
fibermap['FIBER_RA'] = fiberassign['TARGET_RA']
fibermap['FIBER_DEC'] = fiberassign['TARGET_DEC']
fibermap['FIBERASSIGN_X'] = fiberassign['FIBERASSIGN_X']
fibermap['FIBERASSIGN_Y'] = fiberassign['FIBERASSIGN_Y']
fibermap['DELTA_X'] = 0.0
fibermap['DELTA_Y'] = 0.0
#- TODO: POSITIONER -> LOCATION
#- TODO: TARGETCAT (how should we propagate this info into here?)
#- TODO: NaNs in fibermap for unassigned positioners targets
return fibermap
#-------------------------------------------------------------------------
#- specsim related routines
def simulate_spectra(wave, flux, fibermap=None, obsconditions=None, redshift=None,
dwave_out=None, seed=None, psfconvolve=True,
specsim_config_file = "desi"):
'''
Simulates an exposure without reading/writing data files
Args:
wave (array): 1D wavelengths in Angstroms
flux (array): 2D[nspec,nwave] flux in 1e-17 erg/s/cm2/Angstrom
or astropy Quantity with flux units
fibermap (Table, optional): table from fiberassign or fibermap; uses
X/YFOCAL_DESIGN, TARGETID, DESI_TARGET
obsconditions(dict-like, optional): observation metadata including
SEEING (arcsec), EXPTIME (sec), AIRMASS,
MOONFRAC (0-1), MOONALT (deg), MOONSEP (deg)
        redshift (array-like, optional): list/array giving the redshift of each target
seed (int, optional): random seed
psfconvolve (bool, optional): passed to simspec.simulator.Simulator camera_output.
if True, convolve with PSF and include per-camera outputs
specsim_config_file (str, optional): path to DESI instrument config file.
default is desi config in specsim package.
Returns:
A specsim.simulator.Simulator object
TODO: galsim support
'''
import specsim.simulator
import specsim.config
import astropy.units as u
from astropy.coordinates import SkyCoord
from desiutil.log import get_logger
log = get_logger('DEBUG')
freeze_iers()
# Input cosmology to calculate the angular diameter distance of the galaxy's redshift
from astropy.cosmology import FlatLambdaCDM
LCDM = FlatLambdaCDM(H0=70, Om0=0.3)
ang_diam_dist = LCDM.angular_diameter_distance
random_state = np.random.RandomState(seed)
nspec, nwave = flux.shape
#- Convert to unit-ful quantities for specsim
if not isinstance(flux, u.Quantity):
fluxunits = 1e-17 * u.erg / (u.Angstrom * u.s * u.cm**2)
flux = flux * fluxunits
if not isinstance(wave, u.Quantity):
wave = wave * u.Angstrom
log.debug('loading specsim desi config {}'.format(specsim_config_file))
config = _specsim_config_for_wave(wave.to('Angstrom').value, dwave_out=dwave_out, specsim_config_file=specsim_config_file)
#- Create simulator
log.debug('creating specsim desi simulator')
# desi = specsim.simulator.Simulator(config, num_fibers=nspec)
desi = desisim.specsim.get_simulator(config, num_fibers=nspec,
camera_output=psfconvolve)
if obsconditions is None:
log.warning('Assuming DARK conditions')
obsconditions = reference_conditions['DARK']
elif isinstance(obsconditions, str):
obsconditions = reference_conditions[obsconditions.upper()]
desi.atmosphere.seeing_fwhm_ref = obsconditions['SEEING'] * u.arcsec
desi.observation.exposure_time = obsconditions['EXPTIME'] * u.s
desi.atmosphere.airmass = obsconditions['AIRMASS']
desi.atmosphere.moon.moon_phase = np.arccos(2*obsconditions['MOONFRAC']-1)/np.pi
desi.atmosphere.moon.moon_zenith = (90 - obsconditions['MOONALT']) * u.deg
desi.atmosphere.moon.separation_angle = obsconditions['MOONSEP'] * u.deg
try:
desi.observation.exposure_start = astropy.time.Time(obsconditions['MJD'], format='mjd')
log.info('exposure_start {}'.format(desi.observation.exposure_start.utc.isot))
except KeyError:
log.info('MJD not in obsconditions, using DATE-OBS {}'.format(desi.observation.exposure_start.utc.isot))
for obskey in reference_conditions['DARK'].keys():
obsval = obsconditions[obskey]
log.debug('obsconditions {} = {}'.format(obskey, obsval))
#- Set fiber locations from meta Table or default fiberpos
fiberpos = desimodel.io.load_fiberpos()
if fibermap is not None and len(fiberpos) != len(fibermap):
ii = np.in1d(fiberpos['FIBER'], fibermap['FIBER'])
fiberpos = fiberpos[ii]
if fibermap is None:
fibermap = astropy.table.Table()
fibermap['X'] = fiberpos['X'][0:nspec]
fibermap['Y'] = fiberpos['Y'][0:nspec]
fibermap['FIBER'] = fiberpos['FIBER'][0:nspec]
fibermap['LOCATION'] = fiberpos['LOCATION'][0:nspec]
#- Extract fiber locations from meta Table -> xy[nspec,2]
assert np.all(fibermap['FIBER'] == fiberpos['FIBER'][0:nspec])
if 'XFOCAL_DESIGN' in fibermap.dtype.names:
xy = np.vstack([fibermap['XFOCAL_DESIGN'], fibermap['YFOCAL_DESIGN']]).T * u.mm
elif 'X' in fibermap.dtype.names:
xy = np.vstack([fibermap['X'], fibermap['Y']]).T * u.mm
else:
xy = np.vstack([fiberpos['X'], fiberpos['Y']]).T * u.mm
if 'TARGETID' in fibermap.dtype.names:
unassigned = (fibermap['TARGETID'] == -1)
if np.any(unassigned):
#- see https://github.com/astropy/astropy/issues/5961
#- for the units -> array -> units trick
xy[unassigned,0] = np.asarray(fiberpos['X'][unassigned], dtype=xy.dtype) * u.mm
xy[unassigned,1] = np.asarray(fiberpos['Y'][unassigned], dtype=xy.dtype) * u.mm
#- Determine source types
#- TODO: source shapes + galsim instead of fixed types + fiberloss table
source_types = get_source_types(fibermap)
    # source types are sky elg lrg qso bgs star; they are only used in
    # specsim.fiberloss for the desi.instrument.fiberloss_method="table" method
if specsim_config_file == "desi":
desi.instrument.fiberloss_method = 'fastsim'
log.debug('running simulation with {} fiber loss method'.format(desi.instrument.fiberloss_method))
unique_source_types = set(source_types)
comment_line="source types:"
    for stype in unique_source_types:
        comment_line += " {} {}".format(np.sum(source_types == stype), stype)
log.debug(comment_line)
source_fraction=None
source_half_light_radius=None
source_minor_major_axis_ratio=None
source_position_angle=None
if desi.instrument.fiberloss_method == 'fastsim' or desi.instrument.fiberloss_method == 'galsim' :
# the following parameters are used only with fastsim and galsim methods
elgs=(source_types=="elg")
lrgs=(source_types=="lrg")
bgss=(source_types=="bgs")
if np.sum(lrgs)>0 or np.sum(elgs)>0:
log.warning("the half light radii are fixed here for LRGs and ELGs (and not magnitude or redshift dependent)")
if np.sum(bgss)>0 and redshift is None:
log.warning("the half light radii are fixed here for BGS (as redshifts weren't supplied)")
# BGS parameters based on SDSS main sample, in g-band
# see analysis from J. Moustakas in
# https://github.com/desihub/desitarget/blob/master/doc/nb/bgs-morphology-properties.ipynb
# B/T (bulge-to-total ratio): 0.48 (0.36 - 0.59).
# Bulge Sersic n: 2.27 (1.12 - 3.60).
# log10 (Bulge Half-light radius): 0.11 (-0.077 - 0.307) arcsec
# log10 (Disk Half-light radius): 0.67 (0.54 - 0.82) arcsec
# This gives
# bulge_fraction = 0.48
# disk_fraction = 0.52
# bulge_half_light_radius = 1.3 arcsec
# disk_half_light_radius = 4.7 arcsec
        # note we use De Vaucouleurs' law, which corresponds to a Sersic index n=4
# source_fraction[:,0] is DISK profile (exponential) fraction
# source_fraction[:,1] is BULGE profile (devaucouleurs) fraction
# 1 - np.sum(source_fraction,axis=1) is POINT source profile fraction
# see specsim.GalsimFiberlossCalculator.create_source routine
source_fraction=np.zeros((nspec,2))
source_fraction[elgs,0]=1. # ELG are disk only
source_fraction[lrgs,1]=1. # LRG are bulge only
source_fraction[bgss,0]=0.52 # disk comp in BGS
source_fraction[bgss,1]=0.48 # bulge comp in BGS
# source_half_light_radius[:,0] is the half light radius in arcsec for the DISK profile
# source_half_light_radius[:,1] is the half light radius in arcsec for the BULGE profile
# see specsim.GalsimFiberlossCalculator.create_source routine
source_half_light_radius=np.zeros((nspec,2))
source_half_light_radius[elgs,0]=0.45 # ELG are disk only, arcsec
source_half_light_radius[lrgs,1]=1. # LRG are bulge only, arcsec
# 4.7 is angular size of z=0.1 disk, and 1.3 is angular size of z=0.1 bulge
bgs_disk_z01 = 4.7 # in arcsec
bgs_bulge_z01 = 1.3 # in arcsec
# Convert to angular size of the objects in this sample with given redshifts
if redshift is None:
angscales = np.ones(np.sum(bgss))
else:
bgs_redshifts = redshift[bgss]
# Avoid infinities
if np.any(bgs_redshifts <= 0.):
bgs_redshifts[bgs_redshifts <= 0.] = 0.0001
angscales = ( ang_diam_dist(0.1) / ang_diam_dist(bgs_redshifts) ).value
source_half_light_radius[bgss,0]= bgs_disk_z01 * angscales # disk comp in BGS, arcsec
source_half_light_radius[bgss,1]= bgs_bulge_z01 * angscales # bulge comp in BGS, arcsec
if desi.instrument.fiberloss_method == 'galsim' :
# the following parameters are used only with galsim method
# source_minor_major_axis_ratio[:,0] is the axis ratio for the DISK profile
# source_minor_major_axis_ratio[:,1] is the axis ratio for the BULGE profile
# see specsim.GalsimFiberlossCalculator.create_source routine
source_minor_major_axis_ratio=np.zeros((nspec,2))
source_minor_major_axis_ratio[elgs,0]=0.7
source_minor_major_axis_ratio[lrgs,1]=0.7
source_minor_major_axis_ratio[bgss,1]=0.7
# the source position angle is in degrees
# see specsim.GalsimFiberlossCalculator.create_source routine
source_position_angle = np.zeros((nspec,2))
random_angles = 360.*random_state.uniform(size=nspec)
source_position_angle[elgs,0]=random_angles[elgs]
source_position_angle[lrgs,1]=random_angles[lrgs]
source_position_angle[bgss,1]=random_angles[bgss]
#- Work around randomness in specsim quickfiberloss calculations
#- while not impacting global random state.
#- See https://github.com/desihub/specsim/issues/83
randstate = np.random.get_state()
np.random.seed(seed)
desi.simulate(source_fluxes=flux, focal_positions=xy, source_types=source_types,
source_fraction=source_fraction,
source_half_light_radius=source_half_light_radius,
source_minor_major_axis_ratio=source_minor_major_axis_ratio,
source_position_angle=source_position_angle)
np.random.set_state(randstate)
return desi
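#- Illustrative usage sketch: a minimal call with zero flux on a 1 A grid,
#- using the reference DARK conditions defined at the top of this module.
#     wave = np.arange(3600., 9800.5, 1.0)              # Angstroms
#     flux = np.zeros((100, wave.size))                  # 1e-17 erg/s/cm2/Angstrom
#     sim = simulate_spectra(wave, flux, obsconditions='DARK', seed=1)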
def _specsim_config_for_wave(wave, dwave_out=None, specsim_config_file = "desi"):
'''
Generate specsim config object for a given wavelength grid
Args:
wave: array of linearly spaced wavelengths in Angstroms
Options:
specsim_config_file: (str) path to DESI instrument config file.
default is desi config in specsim package.
Returns:
specsim Configuration object with wavelength parameters set to match
this input wavelength grid
'''
import specsim.config
dwave = round(np.mean(np.diff(wave)), 3)
assert np.allclose(dwave, np.diff(wave), rtol=1e-6, atol=1e-3)
config = specsim.config.load_config(specsim_config_file)
config.wavelength_grid.min = wave[0]
config.wavelength_grid.max = wave[-1] + dwave/2.0
config.wavelength_grid.step = dwave
if dwave_out is None:
dwave_out = 1.0
config.instrument.cameras.b.constants.output_pixel_size = "{:.3f} Angstrom".format(dwave_out)
config.instrument.cameras.r.constants.output_pixel_size = "{:.3f} Angstrom".format(dwave_out)
if specsim_config_file == "desi":
config.instrument.cameras.z.constants.output_pixel_size = "{:.3f} Angstrom".format(dwave_out)
config.update()
return config
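#- Illustrative sketch: for a linear grid wave = np.arange(3600., 9800.1, 0.8)
#- the inferred dwave is 0.8, so wavelength_grid gets min=3600.0, max=9800.4,
#- step=0.8, and (with dwave_out left as None) the cameras' output_pixel_size
#- becomes "1.000 Angstrom".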
def get_source_types(fibermap):
'''
Return a list of specsim source types based upon fibermap['DESI_TARGET']
Args:
fibermap: fibermap Table including DESI_TARGET column
    Returns array of source_types 'sky', 'elg', 'lrg', 'qso', 'bgs', 'star'
Unassigned fibers fibermap['TARGETID'] == -1 will be treated as 'sky'
If fibermap.meta['FLAVOR'] = 'arc' or 'flat', returned source types will
match that flavor, though specsim doesn't use those as source_types
TODO: specsim/desimodel doesn't have a fiber input loss model for BGS yet,
so BGS targets get source_type = 'lrg' (!)
'''
from desiutil.log import get_logger
log = get_logger()
if ('DESI_TARGET' not in fibermap.dtype.names) and \
('SV1_DESI_TARGET' not in fibermap.dtype.names):
log.warning("(SV1_)DESI_TARGET not in fibermap table; using source_type='star' for everything")
return np.array(['star',] * len(fibermap))
target_colnames, target_masks, survey = main_cmx_or_sv(fibermap)
targetcol = target_colnames[0] #- DESI_TARGET or SV1_DESI_TARGET
tm = target_masks[0] #- desi_mask or sv1_desi_mask
source_type = np.zeros(len(fibermap), dtype='U4')
assert np.all(source_type == '')
if 'TARGETID' in fibermap.dtype.names:
unassigned = fibermap['TARGETID'] == -1
source_type[unassigned] = 'sky'
source_type[(fibermap['OBJTYPE'] == 'FLT')] = 'FLAT'
source_type[(fibermap['OBJTYPE'] == 'ARC')] = 'ARC'
source_type[(fibermap[targetcol] & tm.SKY) != 0] = 'sky'
source_type[(fibermap[targetcol] & tm.ELG) != 0] = 'elg'
source_type[(fibermap[targetcol] & tm.LRG) != 0] = 'lrg'
source_type[(fibermap[targetcol] & tm.QSO) != 0] = 'qso'
source_type[(fibermap[targetcol] & tm.BGS_ANY) != 0] = 'bgs'
starmask = 0
for name in ['STD', 'STD_FSTAR', 'STD_WD', 'MWS_ANY',
'STD_FAINT', 'STD_FAINT_BEST',
'STD_BRIGHT', 'STD_BRIGHT_BEST']:
if name in desitarget.targetmask.desi_mask.names():
starmask |= desitarget.targetmask.desi_mask[name]
source_type[(fibermap[targetcol] & starmask) != 0] = 'star'
#- Simulate unassigned fibers as sky
## TODO: when fiberassign and desitarget are updated, use
## desitarget.targetmask.desi_mask.NO_TARGET to identify these
source_type[fibermap['TARGETID'] < 0] = 'sky'
assert not np.any(source_type == '')
for name in sorted(np.unique(source_type)):
n = np.count_nonzero(source_type == name)
log.debug('{} {} targets'.format(name, n))
return source_type
#-------------------------------------------------------------------------
#- I/O related routines
def read_fiberassign(tilefile_or_id, indir=None):
'''
Returns fiberassignment table for tileid
Args:
        tilefile_or_id (int or str): tileid (int) or full path to tile file (str)
        indir (str, optional): directory containing tile files (default $DESI_TARGETS/fiberassign)
    Returns:
        fiberassignment Table read from the tile file
'''
#- tileid is full path to file instead of int ID or just filename
if isinstance(tilefile_or_id, str) and os.path.exists(tilefile_or_id):
return astropy.table.Table.read(tilefile_or_id)
if indir is None:
indir = os.path.join(os.environ['DESI_TARGETS'], 'fiberassign')
if isinstance(tilefile_or_id, (int, np.int32, np.int64)):
tilefile = os.path.join(indir, 'tile_{:05d}.fits'.format(tilefile_or_id))
else:
tilefile = os.path.join(indir, tilefile_or_id)
return astropy.table.Table.read(tilefile, 'FIBER_ASSIGNMENTS')
#-------------------------------------------------------------------------
#- Move this to desispec.io?
def testslit_fibermap():
# from WBS 1.6 PDR Fiber Slit document
# science slit has 20 bundles of 25 fibers
# test slit has 1 fiber per bundle except in the middle where it is fully populated
nspectro=10
testslit_nspec_per_spectro=20
testslit_nspec = nspectro*testslit_nspec_per_spectro
# fibermap = np.zeros(testslit_nspec, dtype=desispec.io.fibermap.fibermap_columns)
fibermap = desispec.io.empty_fibermap(testslit_nspec)
fibermap['FIBER'] = np.zeros((testslit_nspec)).astype(int)
fibermap['SPECTROID'] = np.zeros((testslit_nspec)).astype(int)
for spectro in range(nspectro) :
fiber_index=testslit_nspec_per_spectro*spectro
first_fiber_id=500*spectro
for b in range(20) :
# Fibers at Top of top block or Bottom of bottom block
if b <= 10:
fibermap['FIBER'][fiber_index] = 25*b + first_fiber_id
else:
fibermap['FIBER'][fiber_index] = 25*b + 24 + first_fiber_id
fibermap['SPECTROID'][fiber_index] = spectro
fiber_index+=1
return fibermap
#-------------------------------------------------------------------------
#- MOVE THESE TO desitarget.mocks.io (?)
#-------------------------------------------------------------------------
def get_mock_spectra(fiberassign, mockdir=None, nside=64, obscon=None):
'''
Args:
fiberassign: table loaded from fiberassign tile file
Options:
mockdir (str): base directory under which files are found
nside (int): healpix nside for file directory grouping
obscon (str): (observing conditions) None/dark/bright extra dir level
Returns (flux, wave, meta) tuple
'''
nspec = len(fiberassign)
flux = None
meta = None
wave = None
objmeta = None
target_colnames, target_masks, survey = main_cmx_or_sv(fiberassign)
targetcol = target_colnames[0] #- DESI_TARGET or SV1_DESI_TARGET
desi_mask = target_masks[0] #- desi_mask or sv1_desi_mask
issky = (fiberassign[targetcol] & desi_mask.SKY) != 0
skyids = fiberassign['TARGETID'][issky]
#- check several ways in which an unassigned fiber might appear
unassigned = np.isnan(fiberassign['TARGET_RA'])
unassigned |= np.isnan(fiberassign['TARGET_DEC'])
unassigned |= (fiberassign['TARGETID'] < 0)
## TODO: check desi_mask.NO_TARGET once that bit exists
for truthfile, targetids in zip(*targets2truthfiles(
fiberassign[~unassigned], basedir=mockdir, nside=nside,
obscon=obscon)):
#- Sky fibers aren't in the truth files
ok = ~np.in1d(targetids, skyids)
tmpflux, tmpwave, tmpmeta, tmpobjmeta = read_mock_spectra(truthfile, targetids[ok])
if flux is None:
nwave = tmpflux.shape[1]
flux = np.zeros((nspec, nwave))
meta = np.zeros(nspec, dtype=tmpmeta.dtype)
meta['TARGETID'] = -1
wave = tmpwave.astype('f8')
objmeta = dict()
for key in tmpobjmeta.keys():
objmeta[key] = list()
ii = np.in1d(fiberassign['TARGETID'], tmpmeta['TARGETID'])
flux[ii] = tmpflux
meta[ii] = tmpmeta
assert np.all(wave == tmpwave)
for key in tmpobjmeta.keys():
if key not in objmeta:
objmeta[key] = list()
objmeta[key].append(tmpobjmeta[key])
#- Stack the per-objtype meta tables
for key in objmeta.keys():
objmeta[key] = astropy.table.Table(np.hstack(objmeta[key]))
#- Set meta['TARGETID'] for sky fibers
#- TODO: other things to set?
meta['TARGETID'][issky] = skyids
meta['TARGETID'][unassigned] = fiberassign['TARGETID'][unassigned]
assert np.all(fiberassign['TARGETID'] == meta['TARGETID'])
return flux, wave, astropy.table.Table(meta), objmeta
def read_mock_spectra(truthfile, targetids, mockdir=None):
r'''
Reads mock spectra from a truth file
Args:
truthfile (str): full path to a mocks truth-\*.fits file
targetids (array-like): targetids to load from that file
mockdir: ???
    Returns (flux, wave, truth, objtruth) tuple:
flux[nspec, nwave]: flux in 1e-17 erg/s/cm2/Angstrom
wave[nwave]: wavelengths in Angstroms
truth[nspec]: metadata truth table
objtruth: dictionary keyed by objtype type with type-specific truth
'''
if len(targetids) != len(np.unique(targetids)):
from desiutil.log import get_logger
log = get_logger()
log.error("Requested TARGETIDs for {} are not unique".format(
os.path.basename(truthfile)))
#- astropy.io.fits doesn't return a real ndarray, causing problems
#- with the reordering downstream so use fitsio instead
# with fits.open(truthfile, memmap=False) as fx:
# truth = fx['TRUTH'].data
# wave = fx['WAVE'].data
# flux = fx['FLUX'].data
objtruth = dict()
with fitsio.FITS(truthfile) as fx:
truth = fx['TRUTH'].read()
wave = fx['WAVE'].read()
flux = fx['FLUX'].read()
if 'OBJTYPE' in truth.dtype.names:
# output of desisim.obs.new_exposure
if isinstance(truth['OBJTYPE'][0], bytes):
objtype = [oo.decode('ascii').strip().upper() \
for oo in truth['OBJTYPE']]
else:
objtype = [oo.strip().upper() for oo in truth['OBJTYPE']]
else:
# output of desitarget.mock.build.write_targets_truth
if isinstance(truth['TEMPLATETYPE'][0], bytes):
objtype = [oo.decode('ascii').strip().upper() \
for oo in truth['TEMPLATETYPE']]
else:
objtype = [oo.strip().upper() for oo in truth['TEMPLATETYPE']]
for obj in set(objtype):
extname = 'TRUTH_{}'.format(obj)
if extname in fx:
objtruth[obj] = fx[extname].read()
missing = np.in1d(targetids, truth['TARGETID'], invert=True)
if np.any(missing):
missingids = targetids[missing]
raise ValueError('Targets missing from {}: {}'.format(truthfile, missingids))
#- Trim to just the spectra for these targetids
ii = np.in1d(truth['TARGETID'], targetids)
flux = flux[ii]
truth = truth[ii]
if bool(objtruth):
for obj in objtruth.keys():
ii = np.in1d(objtruth[obj]['TARGETID'], targetids)
objtruth[obj] = objtruth[obj][ii]
assert set(targetids) == set(truth['TARGETID'])
#- sort truth to match order of input targetids
# it doesn't matter if objtruth is sorted
if len(targetids) == len(truth['TARGETID']):
i = np.argsort(targetids)
j = np.argsort(truth['TARGETID'])
k = np.argsort(i)
reordered_truth = truth[j[k]]
reordered_flux = flux[j[k]]
else:
#- Slower, but works even with repeated TARGETIDs
ii = np.argsort(truth['TARGETID'])
sorted_truthids = truth['TARGETID'][ii]
reordered_flux = np.empty(shape=(len(targetids), flux.shape[1]), dtype=flux.dtype)
reordered_truth = np.empty(shape=(len(targetids),), dtype=truth.dtype)
for j, tx in enumerate(targetids):
k = np.searchsorted(sorted_truthids, tx)
reordered_flux[j] = flux[ii[k]]
reordered_truth[j] = truth[ii[k]]
assert np.all(reordered_truth['TARGETID'] == targetids)
wave = desispec.io.util.native_endian(wave).astype(np.float64)
reordered_flux = desispec.io.util.native_endian(reordered_flux).astype(np.float64)
return reordered_flux, wave, reordered_truth, objtruth
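#- Illustrative sketch of the double-argsort reordering used above: when the
#- two ID arrays are permutations of each other, truth[j[k]] returns the truth
#- rows in the order of the requested targetids.
#     tid = np.array([30, 10, 20])
#     tru = np.array([10, 20, 30])       # stand-in for truth['TARGETID']
#     i = np.argsort(tid); j = np.argsort(tru); k = np.argsort(i)
#     tru[j[k]]                          # -> array([30, 10, 20])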
def targets2truthfiles(targets, basedir, nside=64, obscon=None):
'''
Return list of mock truth files that contain these targets
Args:
targets: table with TARGETID column, e.g. from fiber assignment
basedir: base directory under which files are found
Options:
nside (int): healpix nside for file directory grouping
obscon (str): (observing conditions) None/dark/bright extra dir level
Returns (truthfiles, targetids):
truthfiles: list of truth filenames
targetids: list of lists of targetids in each truthfile
i.e. targetids[i] is the list of targetids from targets['TARGETID'] that
are in truthfiles[i]
'''
import healpy
#import desitarget.mock.io as mockio
from desitarget.io import find_target_files
assert nside >= 2
#- TODO: what should be done with assignments without targets?
targets = targets[targets['TARGETID'] != -1]
theta = np.radians(90-targets['TARGET_DEC'])
phi = np.radians(targets['TARGET_RA'])
pixels = healpy.ang2pix(nside, theta, phi, nest=True)
truthfiles = list()
targetids = list()
for ipix in sorted(np.unique(pixels)):
filename = find_target_files(basedir, flavor='truth', obscon=obscon,
hp=ipix, nside=nside, mock=True)
truthfiles.append(filename)
ii = (pixels == ipix)
targetids.append(np.asarray(targets['TARGETID'][ii]))
return truthfiles, targetids
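#- Illustrative usage sketch (hypothetical tile file and mock directory):
#     fa = read_fiberassign('tile_01234.fits')
#     truthfiles, targetids = targets2truthfiles(fa, basedir='/path/to/mocks', nside=64)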
| desihub/desisim | py/desisim/simexp.py | Python | bsd-3-clause | 38,406 | ["Galaxy"] | 4b409860f9c115021d249f74441ba05dbda68773cb8924b68ad3ecbf843a741c |
""" Models for the shopping cart and assorted purchase types """
from collections import namedtuple
from datetime import datetime
from decimal import Decimal
import pytz
import logging
import smtplib
from boto.exception import BotoServerError # this is a super-class of SESError and catches connection errors
from django.dispatch import receiver
from django.db import models
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from django.db import transaction
from django.db.models import Sum
from django.core.urlresolvers import reverse
from model_utils.managers import InheritanceManager
from xmodule.modulestore.django import modulestore
from config_models.models import ConfigurationModel
from course_modes.models import CourseMode
from edxmako.shortcuts import render_to_string
from student.models import CourseEnrollment, UNENROLL_DONE
from util.query import use_read_replica_if_available
from xmodule_django.models import CourseKeyField
from verify_student.models import SoftwareSecurePhotoVerification
from .exceptions import (
InvalidCartItem, PurchasedCallbackException, ItemAlreadyInCartException,
AlreadyEnrolledInCourseException, CourseDoesNotExistException,
MultipleCouponsNotAllowedException, RegCodeAlreadyExistException,
ItemDoesNotExistAgainstRegCodeException
)
from microsite_configuration import microsite
log = logging.getLogger("shoppingcart")
ORDER_STATUSES = (
# The user is selecting what he/she wants to purchase.
('cart', 'cart'),
# The user has been sent to the external payment processor.
# At this point, the order should NOT be modified.
# If the user returns to the payment flow, he/she will start a new order.
('paying', 'paying'),
# The user has successfully purchased the items in the order.
('purchased', 'purchased'),
# The user's order has been refunded.
('refunded', 'refunded'),
)
# we need a tuple to represent the primary key of various OrderItem subclasses
OrderItemSubclassPK = namedtuple('OrderItemSubclassPK', ['cls', 'pk']) # pylint: disable=C0103
class Order(models.Model):
"""
This is the model for an order. Before purchase, an Order and its related OrderItems are used
as the shopping cart.
FOR ANY USER, THERE SHOULD ONLY EVER BE ZERO OR ONE ORDER WITH STATUS='cart'.
"""
user = models.ForeignKey(User, db_index=True)
currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES)
purchase_time = models.DateTimeField(null=True, blank=True)
refunded_time = models.DateTimeField(null=True, blank=True)
# Now we store data needed to generate a reasonable receipt
# These fields only make sense after the purchase
bill_to_first = models.CharField(max_length=64, blank=True)
bill_to_last = models.CharField(max_length=64, blank=True)
bill_to_street1 = models.CharField(max_length=128, blank=True)
bill_to_street2 = models.CharField(max_length=128, blank=True)
bill_to_city = models.CharField(max_length=64, blank=True)
bill_to_state = models.CharField(max_length=8, blank=True)
bill_to_postalcode = models.CharField(max_length=16, blank=True)
bill_to_country = models.CharField(max_length=64, blank=True)
bill_to_ccnum = models.CharField(max_length=8, blank=True) # last 4 digits
bill_to_cardtype = models.CharField(max_length=32, blank=True)
# a JSON dump of the CC processor response, for completeness
processor_reply_dump = models.TextField(blank=True)
@classmethod
def get_cart_for_user(cls, user):
"""
Always use this to preserve the property that at most 1 order per user has status = 'cart'
"""
# find the newest element in the db
try:
cart_order = cls.objects.filter(user=user, status='cart').order_by('-id')[:1].get()
except ObjectDoesNotExist:
# if nothing exists in the database, create a new cart
cart_order, _created = cls.objects.get_or_create(user=user, status='cart')
return cart_order
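    # Illustrative usage sketch (hypothetical request object): the cart is an
    # Order with status='cart', created on demand for the user.
    #     cart = Order.get_cart_for_user(request.user)
    #     cart.has_items()        # -> True if any OrderItem rows exist
    #     cart.total_cost         # -> sum of line_cost over items in the cart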
@classmethod
def user_cart_has_items(cls, user, item_type=None):
"""
        Returns true if the user (anonymous user ok) has
        a cart with items in it (which means it should be displayed).
        If an item_type is passed in, then we check to see if the cart has at least one
        OrderItem of that type.
"""
if not user.is_authenticated():
return False
cart = cls.get_cart_for_user(user)
return cart.has_items(item_type)
@property
def total_cost(self):
"""
Return the total cost of the cart. If the order has been purchased, returns total of
all purchased and not refunded items.
"""
return sum(i.line_cost for i in self.orderitem_set.filter(status=self.status)) # pylint: disable=E1101
def has_items(self, item_type=None):
"""
Does the cart have any items in it?
If an item_type is passed in then we check to see if there are any items of that class type
"""
if not item_type:
return self.orderitem_set.exists() # pylint: disable=E1101
else:
items = self.orderitem_set.all().select_subclasses()
for item in items:
if isinstance(item, item_type):
return True
return False
def clear(self):
"""
Clear out all the items in the cart
"""
self.orderitem_set.all().delete()
@transaction.commit_on_success
def start_purchase(self):
"""
Start the purchase process. This will set the order status to "paying",
at which point it should no longer be modified.
Future calls to `Order.get_cart_for_user()` will filter out orders with
status "paying", effectively creating a new (empty) cart.
"""
if self.status == 'cart':
self.status = 'paying'
self.save()
for item in OrderItem.objects.filter(order=self).select_subclasses():
item.start_purchase()
def purchase(self, first='', last='', street1='', street2='', city='', state='', postalcode='',
country='', ccnum='', cardtype='', processor_reply_dump=''):
"""
Call to mark this order as purchased. Iterates through its OrderItems and calls
their purchased_callback
`first` - first name of person billed (e.g. John)
`last` - last name of person billed (e.g. Smith)
`street1` - first line of a street address of the billing address (e.g. 11 Cambridge Center)
`street2` - second line of a street address of the billing address (e.g. Suite 101)
`city` - city of the billing address (e.g. Cambridge)
`state` - code of the state, province, or territory of the billing address (e.g. MA)
`postalcode` - postal code of the billing address (e.g. 02142)
`country` - country code of the billing address (e.g. US)
`ccnum` - last 4 digits of the credit card number of the credit card billed (e.g. 1111)
`cardtype` - 3-digit code representing the card type used (e.g. 001)
`processor_reply_dump` - all the parameters returned by the processor
"""
if self.status == 'purchased':
return
self.status = 'purchased'
self.purchase_time = datetime.now(pytz.utc)
self.bill_to_first = first
self.bill_to_last = last
self.bill_to_city = city
self.bill_to_state = state
self.bill_to_country = country
self.bill_to_postalcode = postalcode
if settings.FEATURES['STORE_BILLING_INFO']:
self.bill_to_street1 = street1
self.bill_to_street2 = street2
self.bill_to_ccnum = ccnum
self.bill_to_cardtype = cardtype
self.processor_reply_dump = processor_reply_dump
# save these changes on the order, then we can tell when we are in an
# inconsistent state
self.save()
# this should return all of the objects with the correct types of the
# subclasses
orderitems = OrderItem.objects.filter(order=self).select_subclasses()
for item in orderitems:
item.purchase_item()
# send confirmation e-mail
subject = _("Order Payment Confirmation")
message = render_to_string(
'emails/order_confirmation_email.txt',
{
'order': self,
'order_items': orderitems,
'has_billing_info': settings.FEATURES['STORE_BILLING_INFO']
}
)
try:
from_address = microsite.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
send_mail(subject, message,
from_address, [self.user.email]) # pylint: disable=E1101
except (smtplib.SMTPException, BotoServerError): # sadly need to handle diff. mail backends individually
log.error('Failed sending confirmation e-mail for order %d', self.id) # pylint: disable=E1101
def generate_receipt_instructions(self):
"""
Call to generate specific instructions for each item in the order. This gets displayed on the receipt
page, typically. Instructions are something like "visit your dashboard to see your new courses".
This will return two things in a pair. The first will be a dict with keys=OrderItemSubclassPK corresponding
to an OrderItem and values=a set of html instructions they generate. The second will be a set of de-duped
html instructions
"""
instruction_set = set([]) # heh. not ia32 or alpha or sparc
instruction_dict = {}
order_items = OrderItem.objects.filter(order=self).select_subclasses()
for item in order_items:
item_pk_with_subclass, set_of_html = item.generate_receipt_instructions()
instruction_dict[item_pk_with_subclass] = set_of_html
instruction_set.update(set_of_html)
return instruction_dict, instruction_set
class OrderItem(models.Model):
"""
This is the basic interface for order items.
Order items are line items that fill up the shopping carts and orders.
Each implementation of OrderItem should provide its own purchased_callback as
a method.
"""
objects = InheritanceManager()
order = models.ForeignKey(Order, db_index=True)
# this is denormalized, but convenient for SQL queries for reports, etc. user should always be = order.user
user = models.ForeignKey(User, db_index=True)
# this is denormalized, but convenient for SQL queries for reports, etc. status should always be = order.status
status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES, db_index=True)
qty = models.IntegerField(default=1)
unit_cost = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
list_price = models.DecimalField(decimal_places=2, max_digits=30, null=True)
line_desc = models.CharField(default="Misc. Item", max_length=1024)
currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
fulfilled_time = models.DateTimeField(null=True, db_index=True)
refund_requested_time = models.DateTimeField(null=True, db_index=True)
service_fee = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
# general purpose field, not user-visible. Used for reporting
report_comments = models.TextField(default="")
@property
def line_cost(self):
""" Return the total cost of this OrderItem """
return self.qty * self.unit_cost
@classmethod
def add_to_order(cls, order, *args, **kwargs):
"""
A suggested convenience function for subclasses.
NOTE: This does not add anything to the cart. That is left up to the
subclasses to implement for themselves
"""
# this is a validation step to verify that the currency of the item we
# are adding is the same as the currency of the order we are adding it
# to
currency = kwargs.get('currency', 'usd')
if order.currency != currency and order.orderitem_set.exists():
raise InvalidCartItem(_("Trying to add a different currency into the cart"))
@transaction.commit_on_success
def purchase_item(self):
"""
This is basically a wrapper around purchased_callback that handles
modifying the OrderItem itself
"""
self.purchased_callback()
self.status = 'purchased'
self.fulfilled_time = datetime.now(pytz.utc)
self.save()
def start_purchase(self):
"""
Start the purchase process. This will set the order item status to "paying",
at which point it should no longer be modified.
"""
self.status = 'paying'
self.save()
def purchased_callback(self):
"""
This is called on each inventory item in the shopping cart when the
purchase goes through.
"""
raise NotImplementedError
def generate_receipt_instructions(self):
"""
This is called on each item in a purchased order to generate receipt instructions.
This should return a list of `ReceiptInstruction`s in HTML string
Default implementation is to return an empty set
"""
return self.pk_with_subclass, set([])
@property
def pk_with_subclass(self):
"""
Returns a named tuple that annotates the pk of this instance with its class, to fully represent
a pk of a subclass (inclusive) of OrderItem
"""
return OrderItemSubclassPK(type(self), self.pk)
@property
def single_item_receipt_template(self):
"""
The template that should be used when there's only one item in the order
"""
return 'shoppingcart/receipt.html'
@property
def single_item_receipt_context(self):
"""
Extra variables needed to render the template specified in
`single_item_receipt_template`
"""
return {}
@property
def additional_instruction_text(self):
"""
Individual instructions for this order item.
Currently, only used for e-mails.
"""
return ''
class Invoice(models.Model):
"""
This table capture all the information needed to support "invoicing"
which is when a user wants to purchase Registration Codes,
but will not do so via a Credit Card transaction.
"""
company_name = models.CharField(max_length=255, db_index=True)
company_contact_name = models.CharField(max_length=255)
company_contact_email = models.CharField(max_length=255)
recipient_name = models.CharField(max_length=255)
recipient_email = models.CharField(max_length=255)
address_line_1 = models.CharField(max_length=255)
address_line_2 = models.CharField(max_length=255, null=True)
address_line_3 = models.CharField(max_length=255, null=True)
city = models.CharField(max_length=255, null=True)
state = models.CharField(max_length=255, null=True)
zip = models.CharField(max_length=15, null=True)
country = models.CharField(max_length=64, null=True)
course_id = CourseKeyField(max_length=255, db_index=True)
total_amount = models.FloatField()
internal_reference = models.CharField(max_length=255, null=True)
customer_reference_number = models.CharField(max_length=63, null=True)
is_valid = models.BooleanField(default=True)
class CourseRegistrationCode(models.Model):
"""
This table contains registration codes
    With a registration code, a user can register for a course for free
"""
code = models.CharField(max_length=32, db_index=True, unique=True)
course_id = CourseKeyField(max_length=255, db_index=True)
created_by = models.ForeignKey(User, related_name='created_by_user')
created_at = models.DateTimeField(default=datetime.now(pytz.utc))
order = models.ForeignKey(Order, db_index=True, null=True, related_name="purchase_order")
invoice = models.ForeignKey(Invoice, null=True)
@classmethod
@transaction.commit_on_success
def free_user_enrollment(cls, cart):
"""
Here we enroll the user free for all courses available in shopping cart
"""
cart_items = cart.orderitem_set.all().select_subclasses()
if cart_items:
for item in cart_items:
CourseEnrollment.enroll(cart.user, item.course_id)
log.info("Enrolled '{0}' in free course '{1}'"
.format(cart.user.email, item.course_id)) # pylint: disable=E1101
item.status = 'purchased'
item.save()
cart.status = 'purchased'
cart.purchase_time = datetime.now(pytz.utc)
cart.save()
class RegistrationCodeRedemption(models.Model):
"""
This model contains the registration-code redemption info
"""
order = models.ForeignKey(Order, db_index=True, null=True)
registration_code = models.ForeignKey(CourseRegistrationCode, db_index=True)
redeemed_by = models.ForeignKey(User, db_index=True)
redeemed_at = models.DateTimeField(default=datetime.now(pytz.utc), null=True)
@classmethod
def add_reg_code_redemption(cls, course_reg_code, order):
"""
add course registration code info into RegistrationCodeRedemption model
"""
cart_items = order.orderitem_set.all().select_subclasses()
for item in cart_items:
if getattr(item, 'course_id'):
if item.course_id == course_reg_code.course_id:
                    # If another account tries to use an existing registration code before the student
                    # checks out, an error message will appear. The reg code is not reusable.
code_redemption = cls.objects.filter(registration_code=course_reg_code)
if code_redemption:
log.exception("Registration code '{0}' already used".format(course_reg_code.code))
raise RegCodeAlreadyExistException
code_redemption = RegistrationCodeRedemption(registration_code=course_reg_code, order=order, redeemed_by=order.user)
code_redemption.save()
item.list_price = item.unit_cost
item.unit_cost = 0
item.save()
log.info("Code '{0}' is used by user {1} against order id '{2}' "
.format(course_reg_code.code, order.user.username, order.id))
return course_reg_code
log.warning("Course item does not exist against registration code '{0}'".format(course_reg_code.code))
raise ItemDoesNotExistAgainstRegCodeException
@classmethod
def create_invoice_generated_registration_redemption(cls, course_reg_code, user):
"""
This function creates a RegistrationCodeRedemption entry in case the registration codes were invoice generated
and thus the order_id is missing.
"""
code_redemption = RegistrationCodeRedemption(registration_code=course_reg_code, redeemed_by=user)
code_redemption.save()
class SoftDeleteCouponManager(models.Manager):
""" Use this manager to get objects that have a is_active=True """
def get_active_coupons_query_set(self):
"""
filter the is_active = True Coupons only
"""
return super(SoftDeleteCouponManager, self).get_query_set().filter(is_active=True)
def get_query_set(self):
"""
get all the coupon objects
"""
return super(SoftDeleteCouponManager, self).get_query_set()
class Coupon(models.Model):
"""
This table contains coupon codes
    A user can get a discount on a course by providing a coupon code
"""
code = models.CharField(max_length=32, db_index=True)
description = models.CharField(max_length=255, null=True, blank=True)
course_id = CourseKeyField(max_length=255)
percentage_discount = models.IntegerField(default=0)
created_by = models.ForeignKey(User)
created_at = models.DateTimeField(default=datetime.now(pytz.utc))
is_active = models.BooleanField(default=True)
def __unicode__(self):
return "[Coupon] code: {} course: {}".format(self.code, self.course_id)
objects = SoftDeleteCouponManager()
class CouponRedemption(models.Model):
"""
    This table contains coupon redemption info
"""
order = models.ForeignKey(Order, db_index=True)
user = models.ForeignKey(User, db_index=True)
coupon = models.ForeignKey(Coupon, db_index=True)
@classmethod
def get_discount_price(cls, percentage_discount, value):
"""
return discounted price against coupon
"""
discount = Decimal("{0:.2f}".format(Decimal(percentage_discount / 100.00) * value))
return value - discount
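        # Illustrative sketch, assuming `value` is a Decimal as stored on
        # OrderItem.unit_cost: a 10% coupon against a $100.00 item.
        #     CouponRedemption.get_discount_price(10, Decimal('100.00'))   # -> Decimal('90.00')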
@classmethod
def add_coupon_redemption(cls, coupon, order, cart_items):
"""
add coupon info into coupon_redemption model
"""
is_redemption_applied = False
coupon_redemptions = cls.objects.filter(order=order, user=order.user)
for coupon_redemption in coupon_redemptions:
if coupon_redemption.coupon.code != coupon.code or coupon_redemption.coupon.id == coupon.id:
log.exception("Coupon redemption already exist for user '{0}' against order id '{1}'"
.format(order.user.username, order.id))
raise MultipleCouponsNotAllowedException
for item in cart_items:
if getattr(item, 'course_id'):
if item.course_id == coupon.course_id:
coupon_redemption = cls(order=order, user=order.user, coupon=coupon)
coupon_redemption.save()
discount_price = cls.get_discount_price(coupon.percentage_discount, item.unit_cost)
item.list_price = item.unit_cost
item.unit_cost = discount_price
item.save()
log.info("Discount generated for user {0} against order id '{1}' "
.format(order.user.username, order.id))
is_redemption_applied = True
return is_redemption_applied
return is_redemption_applied
class PaidCourseRegistration(OrderItem):
"""
This is an inventory item for paying for a course registration
"""
course_id = CourseKeyField(max_length=128, db_index=True)
mode = models.SlugField(default=CourseMode.DEFAULT_MODE_SLUG)
@classmethod
def contained_in_order(cls, order, course_id):
"""
Is the course defined by course_id contained in the order?
"""
return course_id in [
item.course_id
for item in order.orderitem_set.all().select_subclasses("paidcourseregistration")
if isinstance(item, cls)
]
@classmethod
def get_total_amount_of_purchased_item(cls, course_key):
"""
This will return the total amount of money that a purchased course generated
"""
total_cost = 0
result = cls.objects.filter(course_id=course_key, status='purchased').aggregate(total=Sum('unit_cost', field='qty * unit_cost')) # pylint: disable=E1101
if result['total'] is not None:
total_cost = result['total']
return total_cost
@classmethod
@transaction.commit_on_success
def add_to_order(cls, order, course_id, mode_slug=CourseMode.DEFAULT_MODE_SLUG, cost=None, currency=None):
"""
A standardized way to create these objects, with sensible defaults filled in.
Will update the cost if called on an order that already carries the course.
Returns the order item
"""
# First a bunch of sanity checks
course = modulestore().get_course(course_id) # actually fetch the course to make sure it exists, use this to
# throw errors if it doesn't
if not course:
log.error("User {} tried to add non-existent course {} to cart id {}"
.format(order.user.email, course_id, order.id))
raise CourseDoesNotExistException
if cls.contained_in_order(order, course_id):
log.warning("User {} tried to add PaidCourseRegistration for course {}, already in cart id {}"
.format(order.user.email, course_id, order.id))
raise ItemAlreadyInCartException
if CourseEnrollment.is_enrolled(user=order.user, course_key=course_id):
log.warning("User {} trying to add course {} to cart id {}, already registered"
.format(order.user.email, course_id, order.id))
raise AlreadyEnrolledInCourseException
### Validations done, now proceed
### handle default arguments for mode_slug, cost, currency
course_mode = CourseMode.mode_for_course(course_id, mode_slug)
if not course_mode:
# user could have specified a mode that's not set, in that case return the DEFAULT_MODE
course_mode = CourseMode.DEFAULT_MODE
if not cost:
cost = course_mode.min_price
if not currency:
currency = course_mode.currency
super(PaidCourseRegistration, cls).add_to_order(order, course_id, cost, currency=currency)
item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id)
item.status = order.status
item.mode = course_mode.slug
item.qty = 1
item.unit_cost = cost
item.line_desc = _(u'Registration for Course: {course_name}').format(
course_name=course.display_name_with_default)
item.currency = currency
order.currency = currency
item.report_comments = item.csv_report_comments
order.save()
item.save()
log.info("User {} added course registration {} to cart: order {}"
.format(order.user.email, course_id, order.id))
return item
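# Illustrative usage sketch (hedged, mirroring the CertificateItem example further down):
#   cart = Order.get_cart_for_user(user)
#   PaidCourseRegistration.add_to_order(cart, course_key)
# Raises CourseDoesNotExistException, ItemAlreadyInCartException or
# AlreadyEnrolledInCourseException when the corresponding sanity check fails.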
def purchased_callback(self):
"""
When purchased, this should enroll the user in the course. We are assuming that
course settings for enrollment date are configured such that only if the (user.email, course_id) pair is found
in CourseEnrollmentAllowed will the user be allowed to enroll. Otherwise requiring payment
would in fact be quite silly since there's a clear back door.
"""
if not modulestore().has_course(self.course_id):
raise PurchasedCallbackException(
"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id))
CourseEnrollment.enroll(user=self.user, course_key=self.course_id, mode=self.mode)
log.info("Enrolled {0} in paid course {1}, paid ${2}"
.format(self.user.email, self.course_id, self.line_cost)) # pylint: disable=E1101
def generate_receipt_instructions(self):
"""
Generates instructions when the user has purchased a PaidCourseRegistration.
Basically tells the user to visit the dashboard to see their new classes
"""
notification = (_('Please visit your <a href="{dashboard_link}">dashboard</a> to see your new course.')
.format(dashboard_link=reverse('dashboard')))
return self.pk_with_subclass, set([notification])
@property
def csv_report_comments(self):
"""
Tries to fetch an annotation associated with the course_id from the database. If not found, returns u"".
Otherwise returns the annotation
"""
try:
return PaidCourseRegistrationAnnotation.objects.get(course_id=self.course_id).annotation
except PaidCourseRegistrationAnnotation.DoesNotExist:
return u""
class PaidCourseRegistrationAnnotation(models.Model):
"""
A model that maps course_id to an additional annotation. This is specifically needed because when Stanford
generates a report for the paid courses, each report item must contain the payment account associated with a course.
And unfortunately we didn't have the concept of a "SKU" or stock item where we could keep this association,
so this is to retrofit it.
"""
course_id = CourseKeyField(unique=True, max_length=128, db_index=True)
annotation = models.TextField(null=True)
def __unicode__(self):
# pylint: disable=no-member
return u"{} : {}".format(self.course_id.to_deprecated_string(), self.annotation)
class CertificateItem(OrderItem):
"""
This is an inventory item for purchasing certificates
"""
course_id = CourseKeyField(max_length=128, db_index=True)
course_enrollment = models.ForeignKey(CourseEnrollment)
mode = models.SlugField()
@receiver(UNENROLL_DONE)
def refund_cert_callback(sender, course_enrollment=None, **kwargs):
"""
When a CourseEnrollment object calls its unenroll method, this function checks to see if that unenrollment
occurred for a verified certificate purchase that is still within the refund deadline. If so, it actually performs the
refund.
Returns the refunded certificate on a successful refund; else, it returns nothing.
"""
# Only refund verified cert unenrollments that are within bounds of the expiration date
if not course_enrollment.refundable():
return
target_certs = CertificateItem.objects.filter(course_id=course_enrollment.course_id, user_id=course_enrollment.user, status='purchased', mode='verified')
try:
target_cert = target_certs[0]
except IndexError:
log.error("Matching CertificateItem not found while trying to refund. User %s, Course %s", course_enrollment.user, course_enrollment.course_id)
return
target_cert.status = 'refunded'
target_cert.refund_requested_time = datetime.now(pytz.utc)
target_cert.save()
target_cert.order.status = 'refunded'
target_cert.order.save()
order_number = target_cert.order_id
# send billing an email so they can handle refunding
subject = _("[Refund] User-Requested Refund")
message = "User {user} ({user_email}) has requested a refund on Order #{order_number}.".format(user=course_enrollment.user,
user_email=course_enrollment.user.email,
order_number=order_number)
to_email = [settings.PAYMENT_SUPPORT_EMAIL]
from_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
try:
send_mail(subject, message, from_email, to_email, fail_silently=False)
except Exception as exception: # pylint: disable=broad-except
err_str = ('Failed sending email to billing to request a refund for verified certificate'
' (User {user}, Course {course}, CourseEnrollmentID {ce_id}, Order #{order})\n{exception}')
log.error(err_str.format(
user=course_enrollment.user,
course=course_enrollment.course_id,
ce_id=course_enrollment.id,
order=order_number,
exception=exception,
))
return target_cert
@classmethod
@transaction.commit_on_success
def add_to_order(cls, order, course_id, cost, mode, currency='usd'):
"""
Add a CertificateItem to an order
Returns the CertificateItem object after saving
`order` - an order that this item should be added to, generally the cart order
`course_id` - the course that we would like to purchase as a CertificateItem
`cost` - the amount the user will be paying for this CertificateItem
`mode` - the course mode that this certificate is going to be issued for
This item also creates a new enrollment if none exists for this user and this course.
Example Usage:
cart = Order.get_cart_for_user(user)
CertificateItem.add_to_order(cart, 'edX/Test101/2013_Fall', 30, 'verified')
"""
super(CertificateItem, cls).add_to_order(order, course_id, cost, currency=currency)
course_enrollment = CourseEnrollment.get_or_create_enrollment(order.user, course_id)
# do some validation on the enrollment mode
valid_modes = CourseMode.modes_for_course_dict(course_id)
if mode in valid_modes:
mode_info = valid_modes[mode]
else:
raise InvalidCartItem(_("Mode {mode} does not exist for {course_id}").format(mode=mode, course_id=course_id))
item, _created = cls.objects.get_or_create(
order=order,
user=order.user,
course_id=course_id,
course_enrollment=course_enrollment,
mode=mode,
)
item.status = order.status
item.qty = 1
item.unit_cost = cost
course_name = modulestore().get_course(course_id).display_name
# Translators: In this particular case, mode_name refers to a
# particular mode (i.e. Honor Code Certificate, Verified Certificate, etc)
# by which a user could enroll in the given course.
item.line_desc = _("{mode_name} for course {course}").format(
mode_name=mode_info.name,
course=course_name
)
item.currency = currency
order.currency = currency
order.save()
item.save()
return item
def purchased_callback(self):
"""
When purchase goes through, activate and update the course enrollment for the correct mode
"""
try:
verification_attempt = SoftwareSecurePhotoVerification.active_for_user(self.course_enrollment.user)
verification_attempt.submit()
except Exception:
log.exception(
"Could not submit verification attempt for enrollment {}".format(self.course_enrollment)
)
self.course_enrollment.change_mode(self.mode)
self.course_enrollment.activate()
@property
def single_item_receipt_template(self):
if self.mode in ('verified', 'professional'):
return 'shoppingcart/verified_cert_receipt.html'
else:
return super(CertificateItem, self).single_item_receipt_template
@property
def single_item_receipt_context(self):
course = modulestore().get_course(self.course_id)
return {
"course_id": self.course_id,
"course_name": course.display_name_with_default,
"course_org": course.display_org_with_default,
"course_num": course.display_number_with_default,
"course_start_date_text": course.start_date_text,
"course_has_started": course.start > datetime.today().replace(tzinfo=pytz.utc),
"course_root_url": reverse(
'course_root',
kwargs={'course_id': self.course_id.to_deprecated_string()} # pylint: disable=no-member
),
"dashboard_url": reverse('dashboard'),
}
@property
def additional_instruction_text(self):
return _("Note - you have up to 2 weeks into the course to unenroll from the Verified Certificate option "
"and receive a full refund. To receive your refund, contact {billing_email}. "
"Please include your order number in your e-mail. "
"Please do NOT include your credit card information.").format(
billing_email=settings.PAYMENT_SUPPORT_EMAIL)
@classmethod
def verified_certificates_count(cls, course_id, status):
"""Return a queryset of CertificateItem for every verified enrollment in course_id with the given status."""
return use_read_replica_if_available(
CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status).count())
# TODO combine these three methods into one
@classmethod
def verified_certificates_monetary_field_sum(cls, course_id, status, field_to_aggregate):
"""
Returns a Decimal indicating the total sum of field_to_aggregate for all verified certificates with a particular status.
Sample usages:
- status 'refunded' and field_to_aggregate 'unit_cost' will give the total amount of money refunded for course_id
- status 'purchased' and field_to_aggregate 'service_fees' gives the sum of all service fees for purchased certificates
etc
"""
query = use_read_replica_if_available(
CertificateItem.objects.filter(course_id=course_id, mode='verified', status=status)).aggregate(Sum(field_to_aggregate))[field_to_aggregate + '__sum']
if query is None:
return Decimal(0.00)
else:
return query
@classmethod
def verified_certificates_contributing_more_than_minimum(cls, course_id):
return use_read_replica_if_available(
CertificateItem.objects.filter(
course_id=course_id,
mode='verified',
status='purchased',
unit_cost__gt=(CourseMode.min_course_price_for_verified_for_currency(course_id, 'usd')))).count()
class DonationConfiguration(ConfigurationModel):
"""Configure whether donations are enabled on the site."""
pass
class Donation(OrderItem):
"""A donation made by a user.
Donations can be made for a specific course or to the organization as a whole.
Users can choose the donation amount.
"""
# Types of donations
DONATION_TYPES = (
("general", "A general donation"),
("course", "A donation to a particular course")
)
# The type of donation
donation_type = models.CharField(max_length=32, default="general", choices=DONATION_TYPES)
# If a donation is made for a specific course, then store the course ID here.
# If the donation is made to the organization as a whole,
# set this field to CourseKeyField.Empty
course_id = CourseKeyField(max_length=255, db_index=True)
@classmethod
@transaction.commit_on_success
def add_to_order(cls, order, donation_amount, course_id=None, currency='usd'):
"""Add a donation to an order.
Args:
order (Order): The order to add this donation to.
donation_amount (Decimal): The amount the user is donating.
Keyword Args:
course_id (CourseKey): If provided, associate this donation with a particular course.
currency (str): The currency used for the donation.
Raises:
InvalidCartItem: The provided course ID is not valid.
Returns:
Donation
"""
# This will validate the currency but won't actually add the item to the order.
super(Donation, cls).add_to_order(order, currency=currency)
# Create a line item description, including the name of the course
# if this is a per-course donation.
# This will raise an exception if the course can't be found.
description = cls._line_item_description(course_id=course_id)
params = {
"order": order,
"user": order.user,
"status": order.status,
"qty": 1,
"unit_cost": donation_amount,
"currency": currency,
"line_desc": description
}
if course_id is not None:
params["course_id"] = course_id
params["donation_type"] = "course"
else:
params["donation_type"] = "general"
return cls.objects.create(**params)
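# Illustrative usage sketch (hedged, not part of the original module):
#   cart = Order.get_cart_for_user(user)
#   Donation.add_to_order(cart, Decimal('25.00'))                         # general donation
#   Donation.add_to_order(cart, Decimal('25.00'), course_id=course_key)   # per-course donation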
def purchased_callback(self):
"""Donations do not need to be fulfilled, so this method does nothing."""
pass
def generate_receipt_instructions(self):
"""Provide information about tax-deductible donations in the receipt.
Returns:
tuple of (Donation, unicode)
"""
return self.pk_with_subclass, set([self._tax_deduction_msg()])
@property
def additional_instruction_text(self):
"""Provide information about tax-deductible donations in the confirmation email.
Returns:
unicode
"""
return self._tax_deduction_msg()
def _tax_deduction_msg(self):
"""Return the translated version of the tax deduction message.
Returns:
unicode
"""
return _(
u"We greatly appreciate this generous contribution and your support of the {platform_name} mission. "
u"This receipt was prepared to support charitable contributions for tax purposes. "
u"We confirm that neither goods nor services were provided in exchange for this gift."
).format(platform_name=settings.PLATFORM_NAME)
@classmethod
def _line_item_description(cls, course_id=None):
"""Create a line-item description for the donation.
Includes the course display name if provided.
Keyword Arguments:
course_id (CourseKey)
Raises:
CourseDoesNotExistException: The course ID is not valid.
Returns:
unicode
"""
# If a course ID is provided, include the display name of the course
# in the line item description.
if course_id is not None:
course = modulestore().get_course(course_id)
if course is None:
err = _(
u"Could not find a course with the ID '{course_id}'"
).format(course_id=course_id)
raise CourseDoesNotExistException(err)
return _(u"Donation for {course}").format(course=course.display_name)
# The donation is for the organization as a whole, not a specific course
else:
return _(u"Donation for {platform_name}").format(platform_name=settings.PLATFORM_NAME)
|
adlnet-archive/edx-platform
|
lms/djangoapps/shoppingcart/models.py
|
Python
|
agpl-3.0
| 42,752
|
[
"VisIt"
] |
2f2a6866c8129f1b381404b34d2c287591282896db7c70ccf27e49ea645b9093
|
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _
from zeroinstall.support import tasks
from zeroinstall.injector import handler, download
gobject = tasks.get_loop().gobject
glib = tasks.get_loop().glib
version = '2.5-post'
class GUIHandler(handler.Handler):
pulse = None
mainwindow = None
def _reset_counters(self):
if not self.monitored_downloads:
self.n_completed_downloads = 0
self.total_bytes_downloaded = 0
return False
def abort_all_downloads(self):
for dl in self.monitored_downloads:
dl.abort()
def downloads_changed(self):
if self.monitored_downloads and self.pulse is None:
def pulse():
self.mainwindow.update_download_status(only_update_visible = True)
return True
pulse()
self.pulse = glib.timeout_add(200, pulse)
elif len(self.monitored_downloads) == 0:
# Delay before resetting, in case we start a new download quickly
glib.timeout_add(500, self._reset_counters)
# Stop animation
if self.pulse:
glib.source_remove(self.pulse)
self.pulse = None
self.mainwindow.update_download_status()
def impl_added_to_store(self, impl):
self.mainwindow.update_download_status(only_update_visible = True)
@tasks.async
def _switch_to_main_window(self, reason):
if self.mainwindow.systray_icon:
self.mainwindow.systray_icon.set_tooltip(reason)
self.mainwindow.systray_icon.set_blinking(True)
# Wait for the user to click the icon, then continue
yield self.mainwindow.systray_icon_blocker
yield tasks.TimeoutBlocker(0.5, 'Delay')
@tasks.async
def confirm_import_feed(self, pending, valid_sigs, retval):
yield self._switch_to_main_window(_('Need to confirm a new GPG key'))
from zeroinstall.gtkui import trust_box
box = trust_box.TrustBox(pending, valid_sigs, retval, parent = self.mainwindow.window)
box.show()
yield box.closed
@tasks.async
def confirm_install(self, message):
yield self._switch_to_main_window(_('Need to confirm installation of distribution packages'))
from zeroinstall.injector.download import DownloadAborted
from zeroinstall.gui import dialog
import gtk
box = gtk.MessageDialog(self.mainwindow.window,
gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION, gtk.BUTTONS_CANCEL,
str(message))
box.set_position(gtk.WIN_POS_CENTER)
install = dialog.MixedButton(_('Install'), gtk.STOCK_OK)
if gtk.pygtk_version >= (2,22,0):
install.set_can_default(True)
else:
install.set_flags(gtk.CAN_DEFAULT)
box.add_action_widget(install, gtk.RESPONSE_OK)
install.show_all()
box.set_default_response(gtk.RESPONSE_OK)
box.show()
response = dialog.DialogResponse(box)
yield response
box.destroy()
if response.response != gtk.RESPONSE_OK:
raise DownloadAborted()
def report_error(self, ex, tb = None):
if isinstance(ex, download.DownloadAborted):
return # No need to tell the user about this, since they caused it
self.mainwindow.report_exception(ex, tb = tb)
|
afb/0install
|
zeroinstall/gui/gui.py
|
Python
|
lgpl-2.1
| 3,020
|
[
"VisIt"
] |
668a475b78054b4eb18377dfc6b6dea57004784aa6c23e664a99f14388f00c62
|
# Copyright (C) 2004 CCLRC & NERC( Natural Environment Research Council ).
# This software may be distributed under the terms of the
# Q Public License, version 1.0 or later. http://ndg.nerc.ac.uk/public_docs/QPublic_license.txt
"""
na_to_cdms.py
=============
Container module for class NADictToCdmsObjects that is sub-classed
by NAToNC classes.
"""
# Imports from python standard library
import sys
import re
import time
import logging
# Import from nappy package
from nappy.na_error import na_error
import nappy.utils
import nappy.utils.common_utils
config_dict = nappy.utils.getConfigDict()
na_to_nc_map = config_dict["na_to_nc_map"]
header_partitions = config_dict["header_partitions"]
hp = header_partitions
# Import external packages (if available)
if sys.platform.find("win") > -1:
raise na_error.NAPlatformError("Windows does not support CDMS. CDMS is required to convert to CDMS objects and NetCDF.")
try:
import cdms2 as cdms
import numpy as N
except:
try:
import cdms
import Numeric as N
except:
raise Exception("Could not import third-party software. Nappy requires the CDMS and Numeric packages to be installed to convert to CDMS and NetCDF.")
# Define global variables
safe_nc_id = re.compile("[\/\s\[\(\)\]\=\+\-\?\#\~\@\&\$\%\!\*\{\}\^]+")
time_units_pattn = re.compile("\w+\s+since\s+\d{4}-\d{1,2}-\d{1,2}\s+\d+:\d+:\d+")
max_id_length = 40
special_comment_known_strings = (hp["sc_start"], hp["sc_end"], hp["addl_vatts"],
hp["addl_globals"], "\n")
normal_comment_known_strings = (hp["nc_start"], hp["nc_end"], hp["data_next"],
hp["addl_vatts"], hp["addl_globals"], "\n")
time_units_warning_message = """\nWARNING: Could not recognise time units. For true NetCDF compatibility
please insert the correct time unit string below in the format:
<units> since <YYYY>-<MM>-<DD> <hh>:<mm>:<ss>
Where:
<units> is a known time interval such as years, months, days, etc.
<YYYY> is the year, <MM> is the month, <DD> is the day,
<hh> is the hour, <mm> is minutes, <ss> is seconds.
"""
DEBUG = nappy.utils.getDebug()
logging.basicConfig()
log = logging.getLogger(__name__)
class NADictToCdmsObjects:
"""
Converts a NA File instance to a tuple of CDMS objects.
"""
def __init__(self, na_file_obj, variables="all", aux_variables="all",
global_attributes=[("Conventions", "CF-1.0")],
time_units=None, time_warning=True,
rename_variables={}):
"""
Sets up instance variables.
"""
self.na_file_obj = na_file_obj
self.variables = variables
self.aux_variables = aux_variables
self.global_attributes = global_attributes
self.time_units = time_units
self.time_warning = time_warning
self.rename_variables = rename_variables
# Check if we have capability to convert this FFI
if self.na_file_obj.FFI in (2110, 2160, 2310):
raise Exception("Cannot convert NASA Ames File Format Index (FFI) " + `self.na_file_obj.FFI` + " to CDMS objects. No mapping implemented yet.")
self.output_message = [] # for output displaying message
self.converted = False
def convert(self):
"""
Reads the NASA Ames file object and converts to CDMS objects.
Returns (variable_list, aux_variable_list, global_attributes_list).
All these can be readily written to a CDMS File object.
"""
if self.converted == True:
log.info("Already converted to CDMS objects so not re-doing.")
return (self.cdms_variables, self.cdms_aux_variables, self.global_attributes)
self.na_file_obj.readData()
# Convert global attribute
self._mapNACommentsToGlobalAttributes()
# Convert axes
if not hasattr(self, 'cdms_axes'): self._convertCdmsAxes()
# Convert main variables
if not hasattr(self, 'cdms_variables'): self._convertCdmsVariables()
# Then do auxiliary variables
if hasattr(self.na_file_obj, "NAUXV") and (type(self.na_file_obj.NAUXV) == type(1) and self.na_file_obj.NAUXV > 0): # Are there any auxiliary variables?
if not hasattr(self, 'cdms_aux_variables'):
self._convertCdmsAuxVariables()
else:
self.cdms_aux_variables = []
self.converted = True
return (self.cdms_variables, self.cdms_aux_variables, self.global_attributes)
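# Illustrative usage sketch (hedged, not part of the original module):
#   converter = NADictToCdmsObjects(na_file_obj)
#   (variables, aux_variables, global_attributes) = converter.convert()
# Each returned variable can then be written to a cdms2 output file, with
# global_attributes applied as file-level attributes.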
def _mapNACommentsToGlobalAttributes(self):
"""
Maps the NASA Ames comments section to global attributes and appends them to the
self.global_attributes list.
"""
glob_atts = dict(self.global_attributes)
for key in na_to_nc_map.keys():
if type(key) == type((1,2)):
if key == ("SCOM", "NCOM"):
# Map special and normal comments into the global comments section
comment_line = ""
# Add Special comments first
if self.na_file_obj.NSCOML > 0:
comment_line += (hp["sc_start"] + "\n")
for i in self.na_file_obj.SCOM:
if i.strip() not in special_comment_known_strings:
comment_line += ("\n" + i)
comment_line += ("\n" + hp["sc_end"] + "\n")
# Now add normal comments
if self.na_file_obj.NNCOML > 0:
comment_line += (hp["nc_start"] + "\n")
for i in self.na_file_obj.NCOM:
if i.strip() not in normal_comment_known_strings:
comment_line += ("\n" + i)
comment_line += ("\n" + hp["nc_end"])
# Tidy comment line then write to global atts dict
comment_line = comment_line.replace("\n\n", "\n")
glob_atts["comment"] = comment_line
elif key == ("ONAME", "ORG"):
# Map the two organisation fields of the NA file to the institution field in CDMS (NetCDF)
institution = "%s (ONAME from NASA Ames file); %s (ORG from NASA Ames file)." % \
(self.na_file_obj.ONAME, self.na_file_obj.ORG)
glob_atts["institution"] = institution
else:
# Any other strange tuple just gets merged into a string
item = (getattr(self.na_file_obj, key[0])) + "\n" + (getattr(self.na_file_obj, key[1]))
glob_atts[na_to_nc_map[key]] = item
elif key == "RDATE":
# RDATE = Revision date - update this and put in history global attribute
date_parts = getattr(self.na_file_obj, "RDATE")
date_string = "%.4d-%.2d-%.2d" % tuple(date_parts)
hist = date_string + " - NASA Ames File created/revised.\n"
time_string = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
version = nappy.utils.getVersion()
hist = "%s\n%s - Converted to CDMS (NetCDF) format using nappy-%s." % (hist, time_string, version)
# self.cdms_file.history = hist
log.debug("No history mapping from na so added it here from global attributes.")
glob_atts["history"] = hist
else:
# Anything else just needs to be stored as a global attribute
glob_atts[na_to_nc_map[key]] = getattr(self.na_file_obj, key)
# Now remake global atts list
new_atts = []
for key, value in self.global_attributes:
new_atts.append( (key, glob_atts[key]) )
used_keys = [i[0] for i in new_atts]
for key in glob_atts.keys():
if key not in used_keys:
new_atts.append( (key, glob_atts[key]) )
self.global_attributes = new_atts[:]
def _convertCdmsVariables(self):
"""
Creates cdms variable list for writing out.
"""
self.cdms_variables = []
if self.variables in (None, "all"):
for var_number in range(self.na_file_obj.NV):
self.cdms_variables.append(self._convertNAToCdmsVariable(var_number))
else:
if type(self.variables[0]) == type(1) or re.match("\d+", str(self.variables[0])): # They are integers = indices
for var_number in self.variables:
vn = int(var_number)
self.cdms_variables.append(self._convertNAToCdmsVariable(vn))
elif type(self.variables[0]) == type("string"): # Vars are strings
for var_name in self.variables:
if var_name in self.na_file_obj.VNAME:
var_number = self.na_file_obj.VNAME.index(var_name)
self.cdms_variables.append(self._convertNAToCdmsVariable(var_number))
else:
raise Exception("Variable name not known: " + var_name)
def _convertNAToCdmsVariable(self, var_number, attributes={}):
"""
Creates a single cdms variable from the variable number provided in the list.
"""
(var_name, units, miss, scal) = self.na_file_obj.getVariable(var_number)
msg = "\nAdding variable: %s" % self.na_file_obj.VNAME[var_number]
log.debug(msg)
self.output_message.append(msg)
array = N.array(self.na_file_obj.V[var_number])
array = array * scal
# Set up axes
if not hasattr(self, 'cdms_axes'):
self._convertCdmsAxes()
# Set up variable
var=cdms.createVariable(array, axes=self.cdms_axes, fill_value=miss, attributes=attributes)
# Sort units etc
if units: var.units=units
# Add the best variable name
if len(var_name) < max_id_length:
var.id=safe_nc_id.sub("_", var_name).lower()
else:
var.id="naVariable_%s" % (var_number)
# Check if mapping provided for renaming this variable
if var_name in self.rename_variables.keys():
var_name = self.rename_variables[var_name]
var.long_name = var.name = var.title = var_name
# Add a NASA Ames variable number (for mapping correctly back to NASA Ames)
var.nasa_ames_var_number = var_number
return var
def _convertCdmsAuxVariables(self):
"""
Creates cdms variables from the auxiliary variables.
"""
self.cdms_aux_variables = []
if self.aux_variables in (None, "all"):
for avar_number in range(self.na_file_obj.NAUXV):
self.cdms_aux_variables.append(self._convertNAAuxToCdmsVariable(avar_number))
else:
if type(self.aux_variables[0]) == type(1): # They are integers = indices
for avar_number in self.aux_variables:
self.cdms_aux_variables.append(self._convertNAAuxToCdmsVariable(avar_number))
elif type(self.aux_variables[0]) == type("string"): # They are strings
for avar_name in self.aux_variables:
if avar_name in self.na_file_obj.ANAME:
avar_number = self.na_file_obj.ANAME.index(avar_name)
self.cdms_aux_variables.append(self._convertNAAuxToCdmsVariable(avar_number))
else:
raise Exception("Auxiliary variable name not known: " + avar_name)
def _convertNAAuxToCdmsVariable(self, avar_number, attributes={}):
"""
Converts an auxiliary variable to a cdms variable.
"""
(var_name, units, miss, scal) = self.na_file_obj.getAuxVariable(avar_number)
array = N.array(self.na_file_obj.A[avar_number])
array = array * scal
msg="\nAdding auxiliary variable: %s" % self.na_file_obj.ANAME[avar_number]
log.debug(msg)
self.output_message.append(msg)
# Set up axes
if not hasattr(self, 'cdms_axes'):
self._convertCdmsAxes()
# Set up variable
var = cdms.createVariable(array, axes=[self.cdms_axes[0]], fill_value=miss,
attributes=attributes)
# Sort units etc
if units: var.units = units
if len(var_name) < max_id_length:
var.id = safe_nc_id.sub("_", var_name).lower()
else:
var.id = "naAuxVariable_%s" % (avar_number)
# Check if mapping provided for renaming this variable
if var_name in self.rename_variables.keys():
var_name = self.rename_variables[var_name]
var.long_name = var.name = var.title = var_name
# Add a NASA Ames auxiliary variable number (for mapping correctly back to NASA Ames)
var.nasa_ames_aux_var_number = avar_number
return var
def _convertCdmsAxes(self):
"""
Creates cdms axes from information provided in the NASA Ames dictionary.
"""
if not hasattr(self, 'cdms_axes'):
self.cdms_axes = []
for ivar_number in range(self.na_file_obj.NIV):
self.cdms_axes.append(self._convertNAIndVarToCdmsAxis(ivar_number))
def _convertNAIndVarToCdmsAxis(self, ivar_number):
"""
Creates a cdms axis from a NASA Ames independent variable.
"""
if self.na_file_obj._normalized_X == False: self.na_file_obj._normalizeIndVars()
if self.na_file_obj.NIV == 1:
array = self.na_file_obj.X
else:
array = self.na_file_obj.X[ivar_number]
axis = cdms.createAxis(array)
axis.id = axis.name = axis.long_name = self.na_file_obj.XNAME[ivar_number]
(var_name, units) = self.na_file_obj.getIndependentVariable(ivar_number)
# Sort units etc
if units: axis.units = units
if len(var_name) < max_id_length:
axis.id = safe_nc_id.sub("_", var_name).lower()
else:
axis.id = "naIndVariable_%s" % (ivar_number)
if units: axis.units = units
axis_types = ("longitude", "latitude", "level", "time")
for axis_type in axis_types:
if re.search(axis_type, var_name, re.IGNORECASE):
axis.standard_name = axis.id = axis_type
# Designate it CF-style if known axis type (e.g. axis.designateTime() etc..)
exec "axis.designate%s()" % axis_type.title()
# Check warning for time units pattern
if axis.isTime() and (not hasattr(axis, "units") or not time_units_pattn.match(axis.units)):
if self.time_units == None:
time_units_input = "I WON'T MATCH"
while time_units_input != "" and not time_units_pattn.match(time_units_input):
message = time_units_warning_message
if self.time_warning == True:
log.debug(message)
time_units_input = raw_input("Please insert your time unit string here (or leave blank):").strip()
else:
time_units_input = ""
self.output_message.append(message)
self.time_units = time_units_input
axis.units = self.time_units
axis.long_name = axis.name = "time (%s)" % self.time_units
if not hasattr(axis, "units") or axis.units == None:
if units:
axis.units = units
else:
axis.units = "Not known"
return axis
|
eufarn7sp/egads
|
egads/thirdparty/nappy/nc_interface/na_to_cdms.py
|
Python
|
gpl-3.0
| 15,829
|
[
"NetCDF"
] |
be46f6f722f9dd037f0168e4f70ca7936c22a5acbfed0797a295f81700bacdac
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/server/database/models.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import datetime
import logging
import operator
from king_phisher import errors
from king_phisher import utilities
from king_phisher.server import signals
import sqlalchemy
import sqlalchemy.event
import sqlalchemy.ext.declarative
import sqlalchemy.orm
DATABASE_TABLE_REGEX = '[a-z_]+'
"""A regular expression which will match all valid database table names."""
SCHEMA_VERSION = 7
"""The schema version of the database, used for compatibility checks."""
database_tables = {}
"""A dictionary which contains all the database tables and their column names."""
database_table_objects = {}
"""A dictionary which contains all the database tables and their primitive objects."""
logger = logging.getLogger('KingPhisher.Server.Database.Models')
def current_timestamp(*args, **kwargs):
"""
The function used for creating the timestamp used by database objects.
:return: The current timestamp.
:rtype: :py:class:`datetime.datetime`
"""
return datetime.datetime.utcnow()
def get_tables_with_column_id(column_id):
"""
Get all tables which contain a column named *column_id*.
:param str column_id: The column name to get all the tables of.
:return: The list of matching tables.
:rtype: set
"""
return set(x[0] for x in database_tables.items() if column_id in x[1])
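# Illustrative sketch (hedged): once the tables below have been registered,
#   get_tables_with_column_id('campaign_id')
# returns the set of table names that carry a campaign_id column,
# e.g. {'messages', 'visits', 'credentials', ...}.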
def forward_signal_delete(mapper, connection, target):
signals.safe_send('db-table-delete', logger, target.__tablename__, mapper=mapper, connection=connection, target=target)
def forward_signal_insert(mapper, connection, target):
signals.safe_send('db-table-insert', logger, target.__tablename__, mapper=mapper, connection=connection, target=target)
def forward_signal_update(mapper, connection, target):
signals.safe_send('db-table-update', logger, target.__tablename__, mapper=mapper, connection=connection, target=target)
def register_table(table):
"""
Register a database table. This will populate the ``database_tables`` dictionary
with the table's column names. This also forwards signals to the appropriate
listeners within the :py:mod:`server.signals` module.
:param cls table: The table to register.
"""
columns = tuple(col.name for col in table.__table__.columns)
database_tables[table.__tablename__] = columns
database_table_objects[table.__tablename__] = table
sqlalchemy.event.listen(table, 'before_delete', forward_signal_delete)
sqlalchemy.event.listen(table, 'before_insert', forward_signal_insert)
sqlalchemy.event.listen(table, 'before_update', forward_signal_update)
return table
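# Illustrative sketch (hedged, hypothetical table name): registering a new table only
# requires decorating its declarative class, e.g.
#   @register_table
#   class Note(Base):
#       __tablename__ = 'notes'
#       id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
# which records its columns in database_tables and attaches the signal forwarding hooks.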
class BaseRowCls(object):
"""
The base class from which other database table objects inherit.
Provides a standard ``__repr__`` method and default permission checks which
are to be overridden as desired by subclasses.
"""
__repr_attributes__ = ()
"""Attributes which should be included in the __repr__ method."""
is_private = False
"""Whether the table is only allowed to be accessed by the server or not."""
def __repr__(self):
description = "<{0} id={1} ".format(self.__class__.__name__, repr(self.id))
for repr_attr in self.__repr_attributes__:
description += "{0}={1!r} ".format(repr_attr, getattr(self, repr_attr))
description += '>'
return description
def assert_session_has_permissions(self, *args, **kwargs):
"""
A convenience function which wraps :py:meth:`~.session_has_permissions`
and raises a :py:exc:`~king_phisher.errors.KingPhisherPermissionError`
if the session does not have the specified permissions.
"""
if self.session_has_permissions(*args, **kwargs):
return
raise errors.KingPhisherPermissionError()
def session_has_permissions(self, access, session):
"""
Check that the authenticated session has the permissions specified in
*access*. The permissions in *access* are abbreviated with the first
letter of create, read, update, and delete.
:param str access: The desired permissions.
:param session: The authenticated session to check access for.
:return: Whether the session has the desired permissions.
:rtype: bool
"""
if self.is_private:
return False
access = access.lower()
for case in utilities.switch(access, comp=operator.contains, swapped=True):
if case('c') and not self.session_has_create_access(session):
break
if case('r') and not self.session_has_read_access(session):
break
if case('u') and not self.session_has_update_access(session):
break
if case('d') and not self.session_has_delete_access(session):
break
else:
return True
return False
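# Illustrative sketch (hedged): callers combine the single-letter flags, e.g.
#   row.session_has_permissions('ru', session)         # read + update access
#   row.assert_session_has_permissions('c', session)   # raises KingPhisherPermissionError on failure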
def session_has_create_access(self, session):
if self.is_private:
return False
return True
def session_has_delete_access(self, session):
if self.is_private:
return False
return True
def session_has_read_access(self, session):
if self.is_private:
return False
return True
def session_has_read_prop_access(self, session, prop):
return self.session_has_read_access(session)
def session_has_update_access(self, session):
if self.is_private:
return False
return True
Base = sqlalchemy.ext.declarative.declarative_base(cls=BaseRowCls)
metadata = Base.metadata
class TagMixIn(object):
__repr_attributes__ = ('name',)
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
name = sqlalchemy.Column(sqlalchemy.String, nullable=False)
description = sqlalchemy.Column(sqlalchemy.String)
@register_table
class AlertSubscription(Base):
__repr_attributes__ = ('campaign_id', 'user_id')
__tablename__ = 'alert_subscriptions'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
user_id = sqlalchemy.Column(sqlalchemy.String, sqlalchemy.ForeignKey('users.id'), nullable=False)
campaign_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('campaigns.id'), nullable=False)
type = sqlalchemy.Column(sqlalchemy.Enum('email', 'sms', name='alert_subscription_type'), default='sms', server_default='sms', nullable=False)
mute_timestamp = sqlalchemy.Column(sqlalchemy.DateTime)
def session_has_create_access(self, session):
return session.user == self.user_id
def session_has_delete_access(self, session):
return session.user == self.user_id
def session_has_read_access(self, session):
return session.user == self.user_id
def session_has_update_access(self, session):
return session.user == self.user_id
@register_table
class AuthenticatedSession(Base):
__repr_attributes__ = ('user_id',)
__tablename__ = 'authenticated_sessions'
is_private = True
id = sqlalchemy.Column(sqlalchemy.String, primary_key=True)
created = sqlalchemy.Column(sqlalchemy.Integer, nullable=False)
last_seen = sqlalchemy.Column(sqlalchemy.Integer, nullable=False)
user_id = sqlalchemy.Column(sqlalchemy.String, sqlalchemy.ForeignKey('users.id'), nullable=False)
@register_table
class Campaign(Base):
__repr_attributes__ = ('name',)
__tablename__ = 'campaigns'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
name = sqlalchemy.Column(sqlalchemy.String, unique=True, nullable=False)
description = sqlalchemy.Column(sqlalchemy.String)
user_id = sqlalchemy.Column(sqlalchemy.String, sqlalchemy.ForeignKey('users.id'), nullable=False)
created = sqlalchemy.Column(sqlalchemy.DateTime, default=current_timestamp)
reject_after_credentials = sqlalchemy.Column(sqlalchemy.Boolean, default=False)
expiration = sqlalchemy.Column(sqlalchemy.DateTime)
campaign_type_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('campaign_types.id'))
company_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('companies.id'))
# relationships
alert_subscriptions = sqlalchemy.orm.relationship('AlertSubscription', backref='campaign', cascade='all, delete-orphan')
credentials = sqlalchemy.orm.relationship('Credential', backref='campaign', cascade='all, delete-orphan')
deaddrop_connections = sqlalchemy.orm.relationship('DeaddropConnection', backref='campaign', cascade='all, delete-orphan')
deaddrop_deployments = sqlalchemy.orm.relationship('DeaddropDeployment', backref='campaign', cascade='all, delete-orphan')
landing_pages = sqlalchemy.orm.relationship('LandingPage', backref='campaign', cascade='all, delete-orphan')
messages = sqlalchemy.orm.relationship('Message', backref='campaign', cascade='all, delete-orphan')
visits = sqlalchemy.orm.relationship('Visit', backref='campaign', cascade='all, delete-orphan')
@property
def has_expired(self):
if self.expiration is None:
return False
if self.expiration > current_timestamp():
return False
return True
@register_table
class CampaignType(TagMixIn, Base):
__tablename__ = 'campaign_types'
# relationships
campaigns = sqlalchemy.orm.relationship('Campaign', backref='campaign_type')
@register_table
class Company(Base):
__repr_attributes__ = ('name',)
__tablename__ = 'companies'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
name = sqlalchemy.Column(sqlalchemy.String, unique=True, nullable=False)
description = sqlalchemy.Column(sqlalchemy.String)
industry_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('industries.id'))
url_main = sqlalchemy.Column(sqlalchemy.String)
url_email = sqlalchemy.Column(sqlalchemy.String)
url_remote_access = sqlalchemy.Column(sqlalchemy.String)
# relationships
campaigns = sqlalchemy.orm.relationship('Campaign', backref='company', cascade='all')
@register_table
class CompanyDepartment(TagMixIn, Base):
__tablename__ = 'company_departments'
# relationships
messages = sqlalchemy.orm.relationship('Message', backref='company_department')
@register_table
class Credential(Base):
__repr_attributes__ = ('campaign_id', 'username')
__tablename__ = 'credentials'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
visit_id = sqlalchemy.Column(sqlalchemy.String, sqlalchemy.ForeignKey('visits.id'), nullable=False)
message_id = sqlalchemy.Column(sqlalchemy.String, sqlalchemy.ForeignKey('messages.id'), nullable=False)
campaign_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('campaigns.id'), nullable=False)
username = sqlalchemy.Column(sqlalchemy.String)
password = sqlalchemy.Column(sqlalchemy.String)
submitted = sqlalchemy.Column(sqlalchemy.DateTime, default=current_timestamp)
@register_table
class DeaddropDeployment(Base):
__repr_attributes__ = ('campaign_id', 'destination')
__tablename__ = 'deaddrop_deployments'
id = sqlalchemy.Column(sqlalchemy.String, default=lambda: utilities.random_string(16), primary_key=True)
campaign_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('campaigns.id'), nullable=False)
destination = sqlalchemy.Column(sqlalchemy.String)
# relationships
deaddrop_connections = sqlalchemy.orm.relationship('DeaddropConnection', backref='deaddrop_deployment', cascade='all, delete-orphan')
@register_table
class DeaddropConnection(Base):
__repr_attributes__ = ('campaign_id', 'deployment_id', 'visitor_ip')
__tablename__ = 'deaddrop_connections'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
deployment_id = sqlalchemy.Column(sqlalchemy.String, sqlalchemy.ForeignKey('deaddrop_deployments.id'), nullable=False)
campaign_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('campaigns.id'), nullable=False)
visit_count = sqlalchemy.Column(sqlalchemy.Integer, default=1)
visitor_ip = sqlalchemy.Column(sqlalchemy.String)
local_username = sqlalchemy.Column(sqlalchemy.String)
local_hostname = sqlalchemy.Column(sqlalchemy.String)
local_ip_addresses = sqlalchemy.Column(sqlalchemy.String)
first_visit = sqlalchemy.Column(sqlalchemy.DateTime, default=current_timestamp)
last_visit = sqlalchemy.Column(sqlalchemy.DateTime, default=current_timestamp)
@register_table
class Industry(TagMixIn, Base):
__tablename__ = 'industries'
# relationships
companies = sqlalchemy.orm.relationship('Company', backref='industry')
@register_table
class LandingPage(Base):
__repr_attributes__ = ('campaign_id', 'hostname', 'page')
__tablename__ = 'landing_pages'
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
campaign_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('campaigns.id'), nullable=False)
hostname = sqlalchemy.Column(sqlalchemy.String, nullable=False)
page = sqlalchemy.Column(sqlalchemy.String, nullable=False)
@register_table
class StorageData(Base):
__repr_attributes__ = ('namespace', 'key', 'value')
__tablename__ = 'storage_data'
is_private = True
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
created = sqlalchemy.Column(sqlalchemy.DateTime, default=current_timestamp)
namespace = sqlalchemy.Column(sqlalchemy.String)
key = sqlalchemy.Column(sqlalchemy.String, nullable=False)
value = sqlalchemy.Column(sqlalchemy.Binary)
@register_table
class Message(Base):
__repr_attributes__ = ('campaign_id', 'target_email')
__tablename__ = 'messages'
id = sqlalchemy.Column(sqlalchemy.String, default=utilities.make_message_uid, primary_key=True)
campaign_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('campaigns.id'), nullable=False)
target_email = sqlalchemy.Column(sqlalchemy.String)
first_name = sqlalchemy.Column(sqlalchemy.String)
last_name = sqlalchemy.Column(sqlalchemy.String)
opened = sqlalchemy.Column(sqlalchemy.DateTime)
opener_ip = sqlalchemy.Column(sqlalchemy.String)
opener_user_agent = sqlalchemy.Column(sqlalchemy.String)
sent = sqlalchemy.Column(sqlalchemy.DateTime, default=current_timestamp)
trained = sqlalchemy.Column(sqlalchemy.Boolean, default=False)
company_department_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('company_departments.id'))
# relationships
credentials = sqlalchemy.orm.relationship('Credential', backref='message', cascade='all, delete-orphan')
visits = sqlalchemy.orm.relationship('Visit', backref='message', cascade='all, delete-orphan')
@register_table
class MetaData(Base):
__repr_attributes__ = ('value_type', 'value')
__tablename__ = 'meta_data'
is_private = True
id = sqlalchemy.Column(sqlalchemy.String, primary_key=True)
value_type = sqlalchemy.Column(sqlalchemy.String, default='str')
value = sqlalchemy.Column(sqlalchemy.String)
@register_table
class User(Base):
__tablename__ = 'users'
id = sqlalchemy.Column(sqlalchemy.String, default=lambda: utilities.random_string(16), primary_key=True)
phone_carrier = sqlalchemy.Column(sqlalchemy.String)
phone_number = sqlalchemy.Column(sqlalchemy.String)
email_address = sqlalchemy.Column(sqlalchemy.String)
otp_secret = sqlalchemy.Column(sqlalchemy.String(16))
# relationships
alert_subscriptions = sqlalchemy.orm.relationship('AlertSubscription', backref='user', cascade='all, delete-orphan')
campaigns = sqlalchemy.orm.relationship('Campaign', backref='user', cascade='all, delete-orphan')
def session_has_create_access(self, session):
return False
def session_has_delete_access(self, session):
return False
def session_has_read_access(self, session):
return session.user == self.id
def session_has_read_prop_access(self, session, prop):
if prop in ('id', 'campaigns'): # everyone can read the id and the campaign list
return True
return self.session_has_read_access(session)
def session_has_update_access(self, session):
return session.user == self.id
@register_table
class Visit(Base):
__repr_attributes__ = ('campaign_id', 'message_id')
__tablename__ = 'visits'
id = sqlalchemy.Column(sqlalchemy.String, default=utilities.make_visit_uid, primary_key=True)
message_id = sqlalchemy.Column(sqlalchemy.String, sqlalchemy.ForeignKey('messages.id'), nullable=False)
campaign_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('campaigns.id'), nullable=False)
visit_count = sqlalchemy.Column(sqlalchemy.Integer, default=1)
visitor_ip = sqlalchemy.Column(sqlalchemy.String)
visitor_details = sqlalchemy.Column(sqlalchemy.String)
first_visit = sqlalchemy.Column(sqlalchemy.DateTime, default=current_timestamp)
last_visit = sqlalchemy.Column(sqlalchemy.DateTime, default=current_timestamp)
# relationships
credentials = sqlalchemy.orm.relationship('Credential', backref='visit', cascade='all, delete-orphan')
|
hdemeyer/king-phisher
|
king_phisher/server/database/models.py
|
Python
|
bsd-3-clause
| 17,588
|
[
"VisIt"
] |
f0764e1ab5b6eca95af71aa3b9d108a00134f01013f0d32301db79954a24ac59
|
"""
This module defines most special functions and mathematical constants
provided by mpmath. [Exception: elliptic functions are currently
in elliptic.py]
Most of the actual computational code is located in the lib* modules
(libelefun, libhyper, ...); this module simply wraps this code to
handle precision management in a user friendly way, provide type
conversions, etc.
In addition, this module defines a number of functions that would
be inconvenient to define in the lib* modules, due to requiring
high level operations (e.g. numerical quadrature) for the computation,
or the need to support multiple arguments of mixed types.
"""
import libmpf
import libelefun
import libmpc
import libmpi
import gammazeta
import libhyper
from settings import dps_to_prec
from mptypes import (\
mpnumeric, mpmathify,
mpf, make_mpf,
mpc, make_mpc,
mpi, make_mpi,
constant,
prec_rounding, mp,
extraprec,
zero, one, inf, ninf, nan, j, isnan, isinf, isint, eps,
ComplexResult,
)
class _pi(constant):
r"""
`\pi`, roughly equal to 3.141592654, represents the area of the unit
circle, the half-period of trigonometric functions, and many other
things in mathematics.
Mpmath can evaluate `\pi` to arbitrary precision::
>>> from sympy.mpmath import *
>>> mp.dps = 50
>>> print pi
3.1415926535897932384626433832795028841971693993751
This shows digits 99991-100000 of `\pi`::
>>> mp.dps = 100000
>>> str(pi)[-10:]
'5549362464'
**Possible issues**
:data:`pi` always rounds to the nearest floating-point
number when used. This means that exact mathematical identities
involving `\pi` will generally not be preserved in floating-point
arithmetic. In particular, multiples of :data:`pi` (except for
the trivial case ``0*pi``) are *not* the exact roots of
:func:`sin`, but differ roughly by the current epsilon::
>>> mp.dps = 15
>>> sin(pi)
mpf('1.2246467991473532e-16')
One solution is to use the :func:`sinpi` function instead::
>>> sinpi(1)
mpf('0.0')
See the documentation of trigonometric functions for additional
details.
"""
class _degree(constant):
"""
Represents one degree of angle, `1^{\circ} = \pi/180`, or
about 0.01745329. This constant may be evaluated to arbitrary
precision::
>>> from sympy.mpmath import *
>>> mp.dps = 50
>>> print degree
0.017453292519943295769236907684886127134428718885417
The :data:`degree` object is convenient for conversion
to radians::
>>> print sin(30 * degree)
0.5
>>> print asin(0.5) / degree
30.0
"""
class _e(constant):
"""
The transcendental number `e` = 2.718281828... is the base of the
natural logarithm (:func:`ln`) and of the exponential function
(:func:`exp`).
Mpmath can evaluate `e` to arbitrary precision::
>>> mp.dps = 50
>>> print e
2.7182818284590452353602874713526624977572470937
This shows digits 99991-100000 of `e`::
>>> mp.dps = 100000
>>> str(e)[-10:]
'2100427165'
**Possible issues**
:data:`e` always rounds to the nearest floating-point number
when used, and mathematical identities involving `e` may not
hold in floating-point arithmetic. For example, ``ln(e)``
might not evaluate exactly to 1.
In particular, don't use ``e**x`` to compute the exponential
function. Use ``exp(x)`` instead; this is both faster and more
accurate.
"""
class _ln2(constant): pass
class _ln10(constant): pass
class _phi(constant):
r"""
Represents the golden ratio `\phi = (1+\sqrt 5)/2`,
approximately equal to 1.6180339887. To high precision,
its value is::
>>> from sympy.mpmath import *
>>> mp.dps = 50
>>> print phi
1.6180339887498948482045868343656381177203091798058
Formulas for the golden ratio include the following::
>>> print (1+sqrt(5))/2
1.6180339887498948482045868343656381177203091798058
>>> print findroot(lambda x: x**2-x-1, 1)
1.6180339887498948482045868343656381177203091798058
>>> print limit(lambda n: fib(n+1)/fib(n), inf)
1.6180339887498948482045868343656381177203091798058
"""
class _euler(constant):
r"""
Euler's constant or the Euler-Mascheroni constant `\gamma`
= 0.57721566... is a number of central importance to
number theory and special functions. It is defined as the limit
.. math ::
\gamma = \lim_{n\to\infty} H_n - \log n
where `H_n = 1 + \frac{1}{2} + \ldots + \frac{1}{n}` is a harmonic
number (see :func:`harmonic`).
Evaluation of `\gamma` is supported at arbitrary precision::
>>> from sympy.mpmath import *
>>> mp.dps = 50
>>> print euler
0.57721566490153286060651209008240243104215933593992
We can also compute `\gamma` directly from the definition,
although this is less efficient::
>>> print limit(lambda n: harmonic(n)-log(n), inf)
0.57721566490153286060651209008240243104215933593992
This shows digits 9991-10000 of `\gamma`::
>>> mp.dps = 10000
>>> str(euler)[-10:]
'4679858165'
Integrals, series, and representations for `\gamma` in terms of
special functions include the following (there are many others)::
>>> mp.dps = 25
>>> print -quad(lambda x: exp(-x)*log(x), [0,inf])
0.5772156649015328606065121
>>> print quad(lambda x,y: (x-1)/(1-x*y)/log(x*y), [0,1], [0,1])
0.5772156649015328606065121
>>> print nsum(lambda k: 1/k-log(1+1/k), [1,inf])
0.5772156649015328606065121
>>> print nsum(lambda k: (-1)**k*zeta(k)/k, [2,inf])
0.5772156649015328606065121
>>> print -diff(gamma, 1)
0.5772156649015328606065121
>>> print limit(lambda x: 1/x-gamma(x), 0)
0.5772156649015328606065121
>>> print limit(lambda x: zeta(x)-1/(x-1), 1)
0.5772156649015328606065121
>>> print (log(2*pi*nprod(lambda n:
... exp(-2+2/n)*(1+2/n)**n, [1,inf]))-3)/2
0.5772156649015328606065121
For generalizations of the identities `\gamma = -\Gamma'(1)`
and `\gamma = \lim_{x\to1} \zeta(x)-1/(x-1)`, see
:func:`psi` and :func:`stieltjes` respectively.
"""
class _catalan(constant):
r"""
Catalan's constant `K` = 0.91596559... is given by the infinite
series
.. math ::
K = \sum_{k=0}^{\infty} \frac{(-1)^k}{(2k+1)^2}.
Mpmath can evaluate it to arbitrary precision::
>>> from sympy.mpmath import *
>>> mp.dps = 50
>>> print catalan
0.91596559417721901505460351493238411077414937428167
One can also compute `K` directly from the definition, although
this is significantly less efficient::
>>> print nsum(lambda k: (-1)**k/(2*k+1)**2, [0, inf])
0.91596559417721901505460351493238411077414937428167
This shows digits 9991-10000 of `K`::
>>> mp.dps = 10000
>>> str(catalan)[-10:]
'9537871503'
Catalan's constant has numerous integral representations::
>>> mp.dps = 50
>>> print quad(lambda x: -log(x)/(1+x**2), [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> print quad(lambda x: atan(x)/x, [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> print quad(lambda x: ellipk(x**2)/2, [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> print quad(lambda x,y: 1/(1+(x*y)**2), [0, 1], [0, 1])
0.91596559417721901505460351493238411077414937428167
As well as series representations::
>>> print pi*log(sqrt(3)+2)/8 + 3*nsum(lambda n:
... (fac(n)/(2*n+1))**2/fac(2*n), [0, inf])/8
0.91596559417721901505460351493238411077414937428167
>>> print 1-nsum(lambda n: n*zeta(2*n+1)/16**n, [1,inf])
0.91596559417721901505460351493238411077414937428167
"""
class _khinchin(constant):
r"""
Khinchin's constant `K` = 2.68542... is a number that
appears in the theory of continued fractions. Mpmath can evaluate
it to arbitrary precision::
>>> from sympy.mpmath import *
>>> mp.dps = 50
>>> print khinchin
2.6854520010653064453097148354817956938203822939945
An integral representation is::
>>> I = quad(lambda x: log((1-x**2)/sincpi(x))/x/(1+x), [0, 1])
>>> print 2*exp(1/log(2)*I)
2.6854520010653064453097148354817956938203822939945
The computation of ``khinchin`` is based on an efficient
implementation of the following series::
>>> f = lambda n: (zeta(2*n)-1)/n*sum((-1)**(k+1)/mpf(k)
... for k in range(1,2*n))
>>> print exp(nsum(f, [1,inf])/log(2))
2.6854520010653064453097148354817956938203822939945
"""
class _glaisher(constant):
r"""
Glaisher's constant `A`, also known as the Glaisher-Kinkelin
constant, is a number approximately equal to 1.282427129 that
sometimes appears in formulas related to gamma and zeta functions.
It is also related to the Barnes G-function (see :func:`barnesg`).
The constant is defined as `A = \exp(1/12-\zeta'(-1))` where
`\zeta'(s)` denotes the derivative of the Riemann zeta function
(see :func:`zeta`).
Mpmath can evaluate Glaisher's constant to arbitrary precision:
>>> from sympy.mpmath import *
>>> mp.dps = 50
>>> print glaisher
1.282427129100622636875342568869791727767688927325
We can verify that the value computed by :data:`glaisher` is
correct using mpmath's facilities for numerical
differentiation and arbitrary evaluation of the zeta function:
>>> print exp(mpf(1)/12 - diff(zeta, -1))
1.282427129100622636875342568869791727767688927325
Here is an example of an integral that can be evaluated in
terms of Glaisher's constant:
>>> mp.dps = 15
>>> print quad(lambda x: log(gamma(x)), [1, 1.5])
-0.0428537406502909
>>> print -0.5 - 7*log(2)/24 + log(pi)/4 + 3*log(glaisher)/2
-0.042853740650291
Mpmath computes Glaisher's constant by applying Euler-Maclaurin
summation to a slowly convergent series. The implementation is
reasonably efficient up to about 10,000 digits. See the source
code for additional details.
References:
http://mathworld.wolfram.com/Glaisher-KinkelinConstant.html
"""
class _apery(constant):
r"""
Represents Apery's constant, which is the irrational number
approximately equal to 1.2020569 given by
.. math ::
\zeta(3) = \sum_{k=1}^\infty\frac{1}{k^3}.
The calculation is based on an efficient hypergeometric
series. To 50 decimal places, the value is given by::
>>> from sympy.mpmath import *
>>> mp.dps = 50
>>> print apery
1.2020569031595942853997381615114499907649862923405
Other ways to evaluate Apery's constant using mpmath
include::
>>> print zeta(3)
1.2020569031595942853997381615114499907649862923405
>>> print -diff(trigamma, 1)/2
1.2020569031595942853997381615114499907649862923405
>>> print 8*nsum(lambda k: 1/(2*k+1)**3, [0,inf])/7
1.2020569031595942853997381615114499907649862923405
>>> f = lambda k: 2/k**3/(exp(2*pi*k)-1)
>>> print 7*pi**3/180 - nsum(f, [1,inf])
1.2020569031595942853997381615114499907649862923405
This shows digits 9991-10000 of Apery's constant::
>>> mp.dps = 10000
>>> str(apery)[-10:]
'3189504235'
"""
# Mathematical constants
pi = _pi(libelefun.mpf_pi, "pi")
degree = _degree(libelefun.mpf_degree, "degree")
e = _e(libelefun.mpf_e, "e")
ln2 = _ln2(libelefun.mpf_ln2, "ln(2)")
ln10 = _ln10(libelefun.mpf_ln10, "ln(10)")
phi = _phi(libelefun.mpf_phi, "Golden ratio (phi)")
euler = _euler(gammazeta.mpf_euler, "Euler's constant (gamma)")
catalan = _catalan(gammazeta.mpf_catalan, "Catalan's constant")
khinchin = _khinchin(gammazeta.mpf_khinchin, "Khinchin's constant")
glaisher = _glaisher(gammazeta.mpf_glaisher, "Glaisher's constant")
apery = _apery(gammazeta.mpf_apery, "Apery's constant")
def funcwrapper(f):
def g(*args, **kwargs):
orig = mp.prec
try:
args = [mpmathify(z) for z in args]
mp.prec = orig + 10
v = f(*args, **kwargs)
finally:
mp.prec = orig
return +v
g.__name__ = f.__name__
g.__doc__ = f.__doc__
return g
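# Added note (illustrative sketch, not part of the original module): funcwrapper
# is intended as a decorator for simple mpmath-level functions. A hypothetical
# use would look like
#
#     @funcwrapper
#     def one_minus_cos(x):
#         return 1 - cos(x)
#
# The wrapper converts all arguments with mpmathify, evaluates the body with
# 10 extra bits of working precision, and the unary plus rounds the result
# back to the caller's precision.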
def mpfunc(name, real_f, complex_f, doc, interval_f=None):
def f(x, **kwargs):
if not isinstance(x, mpnumeric):
x = mpmathify(x)
prec, rounding = prec_rounding
if kwargs:
prec = kwargs.get('prec', prec)
if 'dps' in kwargs:
prec = dps_to_prec(kwargs['dps'])
rounding = kwargs.get('rounding', rounding)
if isinstance(x, mpf):
try:
return make_mpf(real_f(x._mpf_, prec, rounding))
except ComplexResult:
# Handle propagation to complex
if mp.trap_complex:
raise
return make_mpc(complex_f((x._mpf_, libmpf.fzero), prec, rounding))
elif isinstance(x, mpc):
return make_mpc(complex_f(x._mpc_, prec, rounding))
elif isinstance(x, mpi):
if interval_f:
return make_mpi(interval_f(x._val, prec))
raise NotImplementedError("%s of a %s" % (name, type(x)))
f.__name__ = name
f.__doc__ = "Computes the %s of x" % doc
return f
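# Added note: mpfunc builds a public one-argument function from low-level
# routines: real_f handles mpf input, complex_f handles mpc input (and mpf
# input that raises ComplexResult, unless mp.trap_complex is set), and
# interval_f, when supplied, handles mpi intervals. The keyword arguments
# 'prec', 'dps' and 'rounding' override the global settings for a single
# call, e.g. (hypothetical usage)
#
#     sqrt(2, dps=30)   # evaluate at 30 decimal digits regardless of mp.dps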
def altfunc(f, name, desc):
def g(x):
orig = mp.prec
try:
mp.prec = orig + 10
return one/f(x)
finally:
mp.prec = orig
g.__name__ = name
g.__doc__ = "Computes the %s of x, 1/%s(x)" % (desc, f.__name__)
return g
def altinvfunc(f, name, desc):
def g(x):
orig = mp.prec
try:
mp.prec = orig + 10
return f(one/x)
finally:
mp.prec = orig
g.__name__ = name
g.__doc__ = "Computes the inverse %s of x, %s(1/x)" % (desc, f.__name__)
return g
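# Added note: altfunc and altinvfunc generate the reciprocal trigonometric and
# hyperbolic functions from the primary ones; for example sec(x) is produced
# as 1/cos(x) and asec(x) as acos(1/x), each evaluated with 10 guard bits so
# the extra division or inversion does not cost accuracy.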
sqrt = mpfunc('sqrt', libelefun.mpf_sqrt, libmpc.mpc_sqrt, "principal square root", libmpi.mpi_sqrt)
sqrt.__doc__ = r"""
``sqrt(x)`` computes the principal square root of `x`, `\sqrt x`.
For positive real numbers, the principal root is simply the
positive square root. For arbitrary complex numbers, the principal
square root is defined to satisfy `\sqrt x = \exp(\log(x)/2)`.
The function thus has a branch cut along the negative half real axis.
For all mpmath numbers ``x``, calling ``sqrt(x)`` is equivalent to
performing ``x**0.5``.
**Examples**
Basic examples and limits::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print sqrt(10)
3.16227766016838
>>> print sqrt(100)
10.0
>>> print sqrt(-4)
(0.0 + 2.0j)
>>> print sqrt(1+1j)
(1.09868411346781 + 0.455089860562227j)
>>> print sqrt(inf)
+inf
Square root evaluation is fast at huge precision::
>>> mp.dps = 50000
>>> a = sqrt(3)
>>> str(a)[-10:]
'9329332814'
:func:`sqrt` supports interval arguments::
>>> mp.dps = 15
>>> print sqrt(mpi(16, 100))
[4.0, 10.0]
>>> print sqrt(mpi(2))
[1.4142135623730949234, 1.4142135623730951455]
>>> print sqrt(mpi(2)) ** 2
[1.9999999999999995559, 2.0000000000000004441]
"""
cbrt = mpfunc('cbrt', libelefun.mpf_cbrt, libmpc.mpc_cbrt, "principal cube root")
cbrt.__doc__ = r"""
``cbrt(x)`` computes the cube root of `x`, `x^{1/3}`. This
function is faster and more accurate than raising to a floating-point
fraction::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> 125**(mpf(1)/3)
mpf('4.9999999999999991')
>>> cbrt(125)
mpf('5.0')
Every nonzero complex number has three cube roots. This function
returns the cube root defined by `\exp(\log(x)/3)` where the
principal branch of the natural logarithm is used. Note that this
does not give a real cube root for negative real numbers::
>>> print cbrt(-1)
(0.5 + 0.866025403784439j)
"""
exp = mpfunc('exp', libelefun.mpf_exp, libmpc.mpc_exp, "exponential function", libmpi.mpi_exp)
exp.__doc__ = r"""
Computes the exponential function,
.. math ::
\exp(x) = e^x = \sum_{k=0}^{\infty} \frac{x^k}{k!}.
For complex numbers, the exponential function also satisfies
.. math ::
\exp(x+yi) = e^x (\cos y + i \sin y).
**Basic examples**
Some values of the exponential function::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print exp(0)
1.0
>>> print exp(1)
2.718281828459045235360287
>>> print exp(-1)
0.3678794411714423215955238
>>> print exp(inf)
+inf
>>> print exp(-inf)
0.0
Arguments can be arbitrarily large::
>>> print exp(10000)
8.806818225662921587261496e+4342
>>> print exp(-10000)
1.135483865314736098540939e-4343
Evaluation is supported for interval arguments::
>>> print exp(mpi(-inf,0))
[0.0, 1.0]
>>> print exp(mpi(0,1))
[1.0, 2.71828182845904523536028749558]
The exponential function can be evaluated efficiently to arbitrary
precision::
>>> mp.dps = 10000
>>> print exp(pi) #doctest: +ELLIPSIS
23.140692632779269005729...8984304016040616
**Functional properties**
Numerical verification of Euler's identity for the complex
exponential function::
>>> mp.dps = 15
>>> print exp(j*pi)+1
(0.0 + 1.22464679914735e-16j)
>>> print chop(exp(j*pi)+1)
0.0
This recovers the coefficients (reciprocal factorials) in the
Maclaurin series expansion of exp::
>>> nprint(taylor(exp, 0, 5))
[1.0, 1.0, 0.5, 0.166667, 4.16667e-2, 8.33333e-3]
The exponential function is its own derivative and antiderivative::
>>> print exp(pi)
23.1406926327793
>>> print diff(exp, pi)
23.1406926327793
>>> print quad(exp, [-inf, pi])
23.1406926327793
The exponential function can be evaluated using various methods,
including direct summation of the series, limits, and solving
the defining differential equation::
>>> print nsum(lambda k: pi**k/fac(k), [0,inf])
23.1406926327793
>>> print limit(lambda k: (1+pi/k)**k, inf)
23.1406926327793
>>> print odefun(lambda t, x: x, 0, 1)(pi)
23.1406926327793
"""
ln = mpfunc('ln', libelefun.mpf_log, libmpc.mpc_log, "natural logarithm", libmpi.mpi_log)
ln.__doc__ = r"""Computes the natural logarithm of `x`, `\ln x`.
See :func:`log` for additional documentation."""
cos = mpfunc('cos', libelefun.mpf_cos, libmpc.mpc_cos, "cosine", libmpi.mpi_cos)
sin = mpfunc('sin', libelefun.mpf_sin, libmpc.mpc_sin, "sine", libmpi.mpi_sin)
tan = mpfunc('tan', libelefun.mpf_tan, libmpc.mpc_tan, "tangent", libmpi.mpi_tan)
cosh = mpfunc('cosh', libelefun.mpf_cosh, libmpc.mpc_cosh, "hyperbolic cosine")
sinh = mpfunc('sinh', libelefun.mpf_sinh, libmpc.mpc_sinh, "hyperbolic sine")
tanh = mpfunc('tanh', libelefun.mpf_tanh, libmpc.mpc_tanh, "hyperbolic tangent")
acos = mpfunc('acos', libelefun.mpf_acos, libmpc.mpc_acos, "inverse cosine")
asin = mpfunc('asin', libelefun.mpf_asin, libmpc.mpc_asin, "inverse sine")
atan = mpfunc('atan', libelefun.mpf_atan, libmpc.mpc_atan, "inverse tangent")
asinh = mpfunc('asinh', libelefun.mpf_asinh, libmpc.mpc_asinh, "inverse hyperbolic sine")
acosh = mpfunc('acosh', libelefun.mpf_acosh, libmpc.mpc_acosh, "inverse hyperbolic cosine")
atanh = mpfunc('atanh', libelefun.mpf_atanh, libmpc.mpc_atanh, "inverse hyperbolic tangent")
sec = altfunc(cos, 'sec', 'secant')
csc = altfunc(sin, 'csc', 'cosecant')
cot = altfunc(tan, 'cot', 'cotangent')
sech = altfunc(cosh, 'sech', 'hyperbolic secant')
csch = altfunc(sinh, 'csch', 'hyperbolic cosecant')
coth = altfunc(tanh, 'coth', 'hyperbolic cotangent')
asec = altinvfunc(acos, 'asec', 'secant')
acsc = altinvfunc(asin, 'acsc', 'cosecant')
acot = altinvfunc(atan, 'acot', 'cotangent')
asech = altinvfunc(acosh, 'asech', 'hyperbolic secant')
acsch = altinvfunc(asinh, 'acsch', 'hyperbolic cosecant')
acoth = altinvfunc(atanh, 'acoth', 'hyperbolic cotangent')
cospi = mpfunc('cospi', libelefun.mpf_cos_pi, libmpc.mpc_cos_pi, "")
sinpi = mpfunc('sinpi', libelefun.mpf_sin_pi, libmpc.mpc_sin_pi, "")
cosh.__doc__ = r"""
Computes the hyperbolic cosine of `x`,
`\cosh(x) = (e^x + e^{-x})/2`. Values and limits include::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print cosh(0)
1.0
>>> print cosh(1)
1.543080634815243778477906
>>> print cosh(-inf), cosh(+inf)
+inf +inf
The hyperbolic cosine is an even, convex function with
a global minimum at `x = 0`, having a Maclaurin series
that starts::
>>> nprint(chop(taylor(cosh, 0, 5)))
[1.0, 0.0, 0.5, 0.0, 4.16667e-2, 0.0]
Generalized to complex numbers, the hyperbolic cosine is
equivalent to a cosine with the argument rotated
in the imaginary direction, or `\cosh x = \cos ix`::
>>> print cosh(2+3j)
(-3.724545504915322565473971 + 0.5118225699873846088344638j)
>>> print cos(3-2j)
(-3.724545504915322565473971 + 0.5118225699873846088344638j)
"""
sinh.__doc__ = r"""
Computes the hyperbolic sine of `x`,
`\sinh(x) = (e^x - e^{-x})/2`. Values and limits include::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print sinh(0)
0.0
>>> print sinh(1)
1.175201193643801456882382
>>> print sinh(-inf), sinh(+inf)
-inf +inf
The hyperbolic sine is an odd function, with a Maclaurin
series that starts::
>>> nprint(chop(taylor(sinh, 0, 5)))
[0.0, 1.0, 0.0, 0.166667, 0.0, 8.33333e-3]
Generalized to complex numbers, the hyperbolic sine is
essentially a sine with a rotation `i` applied to
the argument; more precisely, `\sinh x = -i \sin ix`::
>>> print sinh(2+3j)
(-3.590564589985779952012565 + 0.5309210862485198052670401j)
>>> print j*sin(3-2j)
(-3.590564589985779952012565 + 0.5309210862485198052670401j)
"""
tanh.__doc__ = r"""
Computes the hyperbolic tangent of `x`,
`\tanh(x) = \sinh(x)/\cosh(x)`. Values and limits include::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print tanh(0)
0.0
>>> print tanh(1)
0.7615941559557648881194583
>>> print tanh(-inf), tanh(inf)
-1.0 1.0
The hyperbolic tangent is an odd, sigmoidal function, similar
to the inverse tangent and error function. Its Maclaurin
series is::
>>> nprint(chop(taylor(tanh, 0, 5)))
[0.0, 1.0, 0.0, -0.333333, 0.0, 0.133333]
Generalized to complex numbers, the hyperbolic tangent is
essentially a tangent with a rotation `i` applied to
the argument; more precisely, `\tanh x = -i \tan ix`::
>>> print tanh(2+3j)
(0.9653858790221331242784803 - 0.009884375038322493720314034j)
>>> print j*tan(3-2j)
(0.9653858790221331242784803 - 0.009884375038322493720314034j)
"""
cos.__doc__ = r"""
Computes the cosine of `x`, `\cos(x)`.
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print cos(pi/3)
0.5
>>> print cos(100000001)
-0.9802850113244713353133243
>>> print cos(2+3j)
(-4.189625690968807230132555 - 9.109227893755336597979197j)
>>> print cos(inf)
nan
>>> nprint(chop(taylor(cos, 0, 6)))
[1.0, 0.0, -0.5, 0.0, 4.16667e-2, 0.0, -1.38889e-3]
>>> print cos(mpi(0,1))
[0.540302305868139717400936602301, 1.0]
>>> print cos(mpi(0,2))
[-0.41614683654714238699756823214, 1.0]
"""
sin.__doc__ = r"""
Computes the sine of `x`, `\sin(x)`.
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print sin(pi/3)
0.8660254037844386467637232
>>> print sin(100000001)
0.1975887055794968911438743
>>> print sin(2+3j)
(9.1544991469114295734673 - 4.168906959966564350754813j)
>>> print sin(inf)
nan
>>> nprint(chop(taylor(sin, 0, 6)))
[0.0, 1.0, 0.0, -0.166667, 0.0, 8.33333e-3, 0.0]
>>> print sin(mpi(0,1))
[0.0, 0.841470984807896506652502331201]
>>> print sin(mpi(0,2))
[0.0, 1.0]
"""
tan.__doc__ = r"""
Computes the tangent of `x`, `\tan(x) = \frac{\sin(x)}{\cos(x)}`.
The tangent function is singular at `x = (n+1/2)\pi`, but
``tan(x)`` always returns a finite result since `(n+1/2)\pi`
cannot be represented exactly using floating-point arithmetic.
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print tan(pi/3)
1.732050807568877293527446
>>> print tan(100000001)
-0.2015625081449864533091058
>>> print tan(2+3j)
(-0.003764025641504248292751221 + 1.003238627353609801446359j)
>>> print tan(inf)
nan
>>> nprint(chop(taylor(tan, 0, 6)))
[0.0, 1.0, 0.0, 0.333333, 0.0, 0.133333, 0.0]
>>> print tan(mpi(0,1))
[0.0, 1.55740772465490223050697482944]
>>> print tan(mpi(0,2)) # Interval includes a singularity
[-inf, +inf]
"""
sec.__doc__ = r"""
Computes the secant of `x`, `\mathrm{sec}(x) = \frac{1}{\cos(x)}`.
The secant function is singular at `x = (n+1/2)\pi`, but
``sec(x)`` always returns a finite result since `(n+1/2)\pi`
cannot be represented exactly using floating-point arithmetic.
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print sec(pi/3)
2.0
>>> print sec(10000001)
-1.184723164360392819100265
>>> print sec(2+3j)
(-0.04167496441114427004834991 + 0.0906111371962375965296612j)
>>> print sec(inf)
nan
>>> nprint(chop(taylor(sec, 0, 6)))
[1.0, 0.0, 0.5, 0.0, 0.208333, 0.0, 8.47222e-2]
>>> print sec(mpi(0,1))
[1.0, 1.85081571768092561791175324143]
>>> print sec(mpi(0,2)) # Interval includes a singularity
[-inf, +inf]
"""
csc.__doc__ = r"""
Computes the cosecant of `x`, `\mathrm{csc}(x) = \frac{1}{\sin(x)}`.
The cosecant function is singular at `x = n \pi`, but with the
exception of the point `x = 0`, ``csc(x)`` returns a finite result
since `n \pi` cannot be represented exactly using floating-point
arithmetic.
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print csc(pi/3)
1.154700538379251529018298
>>> print csc(10000001)
-1.864910497503629858938891
>>> print csc(2+3j)
(0.09047320975320743980579048 + 0.04120098628857412646300981j)
>>> print csc(inf)
nan
>>> print csc(mpi(0,1)) # Interval includes a singularity
[1.18839510577812121626159945235, +inf]
>>> print csc(mpi(0,2))
[1.0, +inf]
"""
cot.__doc__ = r"""
Computes the cotangent of `x`,
`\mathrm{cot}(x) = \frac{1}{\tan(x)} = \frac{\cos(x)}{\sin(x)}`.
The cotangent function is singular at `x = n \pi`, but with the
exception of the point `x = 0`, ``cot(x)`` returns a finite result
since `n \pi` cannot be represented exactly using floating-point
arithmetic.
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print cot(pi/3)
0.5773502691896257645091488
>>> print cot(10000001)
1.574131876209625656003562
>>> print cot(2+3j)
(-0.003739710376336956660117409 - 0.9967577965693583104609688j)
>>> print cot(inf)
nan
>>> print cot(mpi(0,1)) # Interval includes a singularity
[0.642092615934330703006419986575, +inf]
>>> print cot(mpi(1,2))
[-inf, +inf]
"""
acos.__doc__ = r"""
Computes the inverse cosine or arccosine of `x`, `\cos^{-1}(x)`.
Since `-1 \le \cos(x) \le 1` for real `x`, the inverse
cosine is real-valued only for `-1 \le x \le 1`. On this interval,
:func:`acos` is defined to be a monotonically decreasing
function assuming values between `+\pi` and `0`.
Basic values are::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print acos(-1)
3.141592653589793238462643
>>> print acos(0)
1.570796326794896619231322
>>> print acos(1)
0.0
>>> nprint(chop(taylor(acos, 0, 6)))
[1.5708, -1.0, 0.0, -0.166667, 0.0, -7.5e-2, 0.0]
:func:`acos` is defined so as to be a proper inverse function of
`\cos(\theta)` for `0 \le \theta < \pi`.
We have `\cos(\cos^{-1}(x)) = x` for all `x`, but
`\cos^{-1}(\cos(x)) = x` only for `0 \le \Re[x] < \pi`::
>>> for x in [1, 10, -1, 2+3j, 10+3j]:
... print cos(acos(x)), acos(cos(x))
...
1.0 1.0
(10.0 + 0.0j) 2.566370614359172953850574
-1.0 1.0
(2.0 + 3.0j) (2.0 + 3.0j)
(10.0 + 3.0j) (2.566370614359172953850574 - 3.0j)
The inverse cosine has two branch points: `x = \pm 1`. :func:`acos`
places the branch cuts along the line segments `(-\infty, -1)` and
`(+1, +\infty)`. In general,
.. math ::
\cos^{-1}(x) = \frac{\pi}{2} + i \log\left(ix + \sqrt{1-x^2} \right)
where the principal-branch log and square root are implied.
"""
asin.__doc__ = r"""
Computes the inverse sine or arcsine of `x`, `\sin^{-1}(x)`.
Since `-1 \le \sin(x) \le 1` for real `x`, the inverse
sine is real-valued only for `-1 \le x \le 1`.
On this interval, it is defined to be a monotonically increasing
function assuming values between `-\pi/2` and `\pi/2`.
Basic values are::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print asin(-1)
-1.570796326794896619231322
>>> print asin(0)
0.0
>>> print asin(1)
1.570796326794896619231322
>>> nprint(chop(taylor(asin, 0, 6)))
[0.0, 1.0, 0.0, 0.166667, 0.0, 7.5e-2, 0.0]
:func:`asin` is defined so as to be a proper inverse function of
`\sin(\theta)` for `-\pi/2 < \theta < \pi/2`.
We have `\sin(\sin^{-1}(x)) = x` for all `x`, but
`\sin^{-1}(\sin(x)) = x` only for `-\pi/2 < \Re[x] < \pi/2`::
>>> for x in [1, 10, -1, 1+3j, -2+3j]:
... print chop(sin(asin(x))), asin(sin(x))
...
1.0 1.0
10.0 -0.5752220392306202846120698
-1.0 -1.0
(1.0 + 3.0j) (1.0 + 3.0j)
(-2.0 + 3.0j) (-1.141592653589793238462643 - 3.0j)
The inverse sine has two branch points: `x = \pm 1`. :func:`asin`
places the branch cuts along the line segments `(-\infty, -1)` and
`(+1, +\infty)`. In general,
.. math ::
\sin^{-1}(x) = -i \log\left(ix + \sqrt{1-x^2} \right)
where the principal-branch log and square root are implied.
"""
atan.__doc__ = r"""
Computes the inverse tangent or arctangent of `x`, `\tan^{-1}(x)`.
This is a real-valued function for all real `x`, with range
`(-\pi/2, \pi/2)`.
Basic values are::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print atan(-inf)
-1.570796326794896619231322
>>> print atan(-1)
-0.7853981633974483096156609
>>> print atan(0)
0.0
>>> print atan(1)
0.7853981633974483096156609
>>> print atan(inf)
1.570796326794896619231322
>>> nprint(chop(taylor(atan, 0, 6)))
[0.0, 1.0, 0.0, -0.333333, 0.0, 0.2, 0.0]
The inverse tangent is often used to compute angles. However,
the atan2 function is often better for this as it preserves sign
(see :func:`atan2`).
:func:`atan` is defined so as to be a proper inverse function of
`\tan(\theta)` for `-\pi/2 < \theta < \pi/2`.
We have `\tan(\tan^{-1}(x)) = x` for all `x`, but
`\tan^{-1}(\tan(x)) = x` only for `-\pi/2 < \Re[x] < \pi/2`::
>>> mp.dps = 25
>>> for x in [1, 10, -1, 1+3j, -2+3j]:
... print tan(atan(x)), atan(tan(x))
...
1.0 1.0
10.0 0.5752220392306202846120698
-1.0 -1.0
(1.0 + 3.0j) (1.000000000000000000000001 + 3.0j)
(-2.0 + 3.0j) (1.141592653589793238462644 + 3.0j)
The inverse tangent has two branch points: `x = \pm i`. :func:`atan`
places the branch cuts along the line segments `(-i \infty, -i)` and
`(+i, +i \infty)`. In general,
.. math ::
\tan^{-1}(x) = \frac{i}{2}\left(\log(1-ix)-\log(1+ix)\right)
where the principal-branch log is implied.
"""
acot.__doc__ = r"""Computes the inverse cotangent of `x`,
`\mathrm{cot}^{-1}(x) = \tan^{-1}(1/x)`."""
asec.__doc__ = r"""Computes the inverse secant of `x`,
`\mathrm{sec}^{-1}(x) = \cos^{-1}(1/x)`."""
acsc.__doc__ = r"""Computes the inverse cosecant of `x`,
`\mathrm{csc}^{-1}(x) = \sin^{-1}(1/x)`."""
sinpi.__doc__ = r"""
Computes `\sin(\pi x)`, more accurately than the expression
``sin(pi*x)``::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print sinpi(10**10), sin(pi*(10**10))
0.0 -2.23936276195592e-6
>>> print sinpi(10**10+0.5), sin(pi*(10**10+0.5))
1.0 0.999999999998721
"""
cospi.__doc__ = r"""
Computes `\cos(\pi x)`, more accurately than the expression
``cos(pi*x)``::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print cospi(10**10), cos(pi*(10**10))
1.0 0.999999999997493
>>> print cospi(10**10+0.5), cos(pi*(10**10+0.5))
0.0 1.59960492420134e-6
"""
@funcwrapper
def sinc(x):
r"""
``sinc(x)`` computes the unnormalized sinc function, defined as
.. math ::
\mathrm{sinc}(x) = \begin{cases}
\sin(x)/x, & \mbox{if } x \ne 0 \\
1, & \mbox{if } x = 0.
\end{cases}
See :func:`sincpi` for the normalized sinc function.
Simple values and limits include::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print sinc(0)
1.0
>>> print sinc(1)
0.841470984807897
>>> print sinc(inf)
0.0
The integral of the sinc function is the sine integral Si::
>>> print quad(sinc, [0, 1])
0.946083070367183
>>> print si(1)
0.946083070367183
"""
if isinf(x):
return 1/x  # sinc(+inf) = sinc(-inf) = 0
if not x:
return x+1  # sinc(0) = 1, returned as an mpf at the working precision
return sin(x)/x
@funcwrapper
def sincpi(x):
r"""
``sincpi(x)`` computes the normalized sinc function, defined as
.. math ::
\mathrm{sinc}_{\pi}(x) = \begin{cases}
\sin(\pi x)/(\pi x), & \mbox{if } x \ne 0 \\
1, & \mbox{if } x = 0.
\end{cases}
Equivalently, we have
`\mathrm{sinc}_{\pi}(x) = \mathrm{sinc}(\pi x)`.
The normalization entails that the function integrates
to unity over the entire real line::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print quadosc(sincpi, [-inf, inf], period=2.0)
1.0
Like :func:`sinpi`, :func:`sincpi` is evaluated accurately
at its roots::
>>> print sincpi(10)
0.0
"""
if isinf(x):
return 1/x
if not x:
return x+1
return sinpi(x)/(pi*x)
floor = mpfunc('floor', libmpf.mpf_floor, libmpc.mpc_floor, "")
floor.__doc__ = r"""
Computes the floor of `x`, `\lfloor x \rfloor`, defined as
the largest integer less than or equal to `x`::
>>> from sympy.mpmath import *
>>> print floor(3.5)
3.0
Note: :func:`floor` returns a floating-point number, not a
Python ``int``. If `\lfloor x \rfloor` is too large to be
represented exactly at the present working precision, the
result will be rounded, not necessarily in the floor
direction."""
ceil = mpfunc('ceil', libmpf.mpf_ceil, libmpc.mpc_ceil, "")
ceil.__doc__ = r"""
Computes the ceiling of `x`, `\lceil x \rceil`, defined as
the smallest integer greater than or equal to `x`::
>>> from sympy.mpmath import *
>>> print ceil(3.5)
4.0
Note: :func:`ceil` returns a floating-point number, not a
Python ``int``. If `\lceil x \rceil` is too large to be
represented exactly at the present working precision, the
result will be rounded, not necessarily in the ceiling
direction."""
@funcwrapper
def nthroot(x, n):
r"""
``nthroot(x, n)`` computes the principal `n`-th root of `x`,
`x^{1/n}`. Here `n` must be an integer, and can be negative
(`x^{-1/n}` is `1/x^{1/n}`).
For `n = 2` or `n = 3`, using this function is equivalent to
calling :func:`sqrt` or :func:`cbrt`. In general,
``nthroot(x, n)`` is defined to compute `\exp(\log(x)/n)`.
:func:`nthroot` is implemented to use Newton's method for small
`n`. At high precision, this makes `x^{1/n}` not much more
expensive than the regular exponentiation, `x^n`. For very large
`n`, :func:`nthroot` falls back to use the exponential function.
:func:`nthroot` is faster and more accurate than raising to a
floating-point fraction::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> 16807 ** (mpf(1)/5)
mpf('7.0000000000000009')
>>> nthroot(16807, 5)
mpf('7.0')
"""
n = int(n)
if isinstance(x, mpf):
try:
return make_mpf(libelefun.mpf_nthroot(x._mpf_, n, *prec_rounding))
except ComplexResult:
if mp.trap_complex:
raise
x = (x._mpf_, libmpf.fzero)
else:
x = x._mpc_
return make_mpc(libmpc.mpc_nthroot(x, n, *prec_rounding))
def hypot(x, y):
r"""
Computes the Euclidean norm of the vector `(x, y)`, equal
to `\sqrt{x^2 + y^2}`. Both `x` and `y` must be real."""
x = mpmathify(x)
y = mpmathify(y)
return make_mpf(libmpf.mpf_hypot(x._mpf_, y._mpf_, *prec_rounding))
def ldexp(x, n):
r"""
Computes `x 2^n` efficiently. No rounding is performed.
The argument `x` must be a real floating-point number (or
possible to convert into one) and `n` must be a Python ``int``.
>>> from sympy.mpmath import *
>>> ldexp(1, 10)
mpf('1024.0')
>>> ldexp(1, -3)
mpf('0.125')
"""
x = mpmathify(x)
return make_mpf(libmpf.mpf_shift(x._mpf_, n))
def frexp(x):
r"""
Given a real number `x`, returns `(y, n)` with `y \in [0.5, 1)`,
`n` a Python integer, and such that `x = y 2^n`. No rounding is
performed.
>>> from sympy.mpmath import *
>>> frexp(7.5)
(mpf('0.9375'), 3)
"""
x = mpmathify(x)
y, n = libmpf.mpf_frexp(x._mpf_)
return make_mpf(y), n
def sign(x):
r"""
Returns the sign of `x`, defined as `\mathrm{sign}(x) = x / |x|`
(with the special case `\mathrm{sign}(0) = 0`)::
>>> from sympy.mpmath import *
>>> sign(10)
mpf('1.0')
>>> sign(-10)
mpf('-1.0')
>>> sign(0)
mpf('0.0')
Note that the sign function is also defined for complex numbers,
for which it gives the projection onto the unit circle::
>>> mp.dps = 15
>>> print sign(1+j)
(0.707106781186547 + 0.707106781186547j)
"""
x = mpmathify(x)
if not x or isnan(x):
return x
if isinstance(x, mpf):
return mpf(cmp(x, 0))
return x / abs(x)
@extraprec(5)
def arg(x):
r"""
Computes the complex argument (phase) of `x`, defined as the
signed angle between the positive real axis and `x` in the
complex plane::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print arg(3)
0.0
>>> print arg(3+3j)
0.785398163397448
>>> print arg(3j)
1.5707963267949
>>> print arg(-3)
3.14159265358979
>>> print arg(-3j)
-1.5707963267949
The angle is defined to satisfy `-\pi < \arg(x) \le \pi` and
with the sign convention that a nonnegative imaginary part
results in a nonnegative argument.
The value returned by :func:`arg` is an ``mpf`` instance.
"""
x = mpc(x)
return atan2(x.imag, x.real)
def fabs(x):
r"""
Returns the absolute value of `x`, `|x|`. Unlike :func:`abs`,
:func:`fabs` converts non-mpmath numbers (such as ``int``)
into mpmath numbers::
>>> from sympy.mpmath import *
>>> fabs(3)
mpf('3.0')
>>> fabs(-3)
mpf('3.0')
>>> fabs(3+4j)
mpf('5.0')
"""
return abs(mpmathify(x))
def re(x):
r"""
Returns the real part of `x`, `\Re(x)`. Unlike ``x.real``,
:func:`re` converts `x` to an mpmath number::
>>> from sympy.mpmath import *
>>> re(3)
mpf('3.0')
>>> re(-1+4j)
mpf('-1.0')
"""
return mpmathify(x).real
def im(x):
r"""
Returns the imaginary part of `x`, `\Im(x)`. Unlike ``x.imag``,
:func:`im` converts `x` to an mpmath number::
>>> from sympy.mpmath import *
>>> im(3)
mpf('0.0')
>>> im(-1+4j)
mpf('4.0')
"""
return mpmathify(x).imag
def conj(x):
r"""
Returns the complex conjugate of `x`, `\overline{x}`. Unlike
``x.conjugate()``, :func:`conj` converts `x` to an mpmath number::
>>> from sympy.mpmath import *
>>> conj(3)
mpf('3.0')
>>> conj(-1+4j)
mpc(real='-1.0', imag='-4.0')
"""
return mpmathify(x).conjugate()
def log(x, b=None):
r"""
Computes the base-`b` logarithm of `x`, `\log_b(x)`. If `b` is
unspecified, :func:`log` computes the natural (base `e`) logarithm
and is equivalent to :func:`ln`. In general, the base `b` logarithm
is defined in terms of the natural logarithm as
`\log_b(x) = \ln(x)/\ln(b)`.
By convention, we take `\log(0) = -\infty`.
The natural logarithm is real if `x > 0` and complex if `x < 0` or if
`x` is complex. The principal branch of the complex logarithm is
used, meaning that `-\pi < \Im(\ln(x)) = \arg(x) \le \pi`.
**Examples**
Some basic values and limits::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print log(1)
0.0
>>> print log(2)
0.693147180559945
>>> print log(1000,10)
3.0
>>> print log(4, 16)
0.5
>>> print log(j)
(0.0 + 1.5707963267949j)
>>> print log(-1)
(0.0 + 3.14159265358979j)
>>> print log(0)
-inf
>>> print log(inf)
+inf
The natural logarithm is the antiderivative of `1/x`::
>>> print quad(lambda x: 1/x, [1, 5])
1.6094379124341
>>> print log(5)
1.6094379124341
>>> print diff(log, 10)
0.1
The Taylor series expansion of the natural logarithm around
`x = 1` has coefficients `(-1)^{n+1}/n`::
>>> nprint(taylor(log, 1, 7))
[0.0, 1.0, -0.5, 0.333333, -0.25, 0.2, -0.166667, 0.142857]
:func:`log` supports arbitrary precision evaluation::
>>> mp.dps = 50
>>> print log(pi)
1.1447298858494001741434273513530587116472948129153
>>> print log(pi, pi**3)
0.33333333333333333333333333333333333333333333333333
>>> mp.dps = 25
>>> print log(3+4j)
(1.609437912434100374600759 + 0.9272952180016122324285125j)
"""
if b is None:
return ln(x)
wp = mp.prec + 20
return ln(x, prec=wp) / ln(b, prec=wp)
def log10(x):
r"""
Computes the base-10 logarithm of `x`, `\log_{10}(x)`. ``log10(x)``
is equivalent to ``log(x, 10)``.
"""
return log(x, 10)
def power(x, y):
r"""
Converts `x` and `y` to mpmath numbers and evaluates
`x^y = \exp(y \log(x))`::
>>> from sympy.mpmath import *
>>> mp.dps = 30
>>> print power(2, 0.5)
1.41421356237309504880168872421
This shows the leading few digits of a large Mersenne prime
(performing the exact calculation ``2**43112609-1`` and
displaying the result in Python would be very slow)::
>>> print power(2, 43112609)-1
3.16470269330255923143453723949e+12978188
"""
return mpmathify(x) ** mpmathify(y)
def modf(x,y):
r"""
Converts `x` and `y` to mpmath numbers and returns `x \mod y`.
For mpmath numbers, this is equivalent to ``x % y``.
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print modf(100, pi)
2.61062773871641
You can use :func:`modf` to compute fractional parts of numbers::
>>> print modf(10.25, 1)
0.25
"""
x = mpmathify(x)
y = mpmathify(y)
return x % y
def degrees(x):
r"""
Converts the radian angle `x` to a degree angle::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print degrees(pi/3)
60.0
"""
return x / degree
def radians(x):
r"""
Converts the degree angle `x` to radians::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print radians(60)
1.0471975511966
"""
return x * degree
def atan2(y, x):
r"""
Computes the two-argument arctangent, `\mathrm{atan2}(y, x)`,
giving the signed angle between the positive `x`-axis and the
point `(x, y)` in the 2D plane. This function is defined for
real `x` and `y` only.
The two-argument arctangent essentially computes
`\mathrm{atan}(y/x)`, but accounts for the signs of both
`x` and `y` to give the angle for the correct quadrant. The
following examples illustrate the difference::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print atan2(1,1), atan(1/1.)
0.785398163397448 0.785398163397448
>>> print atan2(1,-1), atan(1/-1.)
2.35619449019234 -0.785398163397448
>>> print atan2(-1,1), atan(-1/1.)
-0.785398163397448 -0.785398163397448
>>> print atan2(-1,-1), atan(-1/-1.)
-2.35619449019234 0.785398163397448
The angle convention is the same as that used for the complex
argument; see :func:`arg`.
"""
x = mpmathify(x)
y = mpmathify(y)
return make_mpf(libelefun.mpf_atan2(y._mpf_, x._mpf_, *prec_rounding))
fib = fibonacci = mpfunc('fibonacci', libelefun.mpf_fibonacci, libmpc.mpc_fibonacci, "")
fibonacci.__doc__ = r"""
``fibonacci(n)`` computes the `n`-th Fibonacci number, `F(n)`. The
Fibonacci numbers are defined by the recurrence `F(n) = F(n-1) + F(n-2)`
with the initial values `F(0) = 0`, `F(1) = 1`. :func:`fibonacci`
extends this definition to arbitrary real and complex arguments
using the formula
.. math ::
F(z) = \frac{\phi^z - \cos(\pi z) \phi^{-z}}{\sqrt 5}
where `\phi` is the golden ratio. :func:`fibonacci` also uses this
continuous formula to compute `F(n)` for extremely large `n`, where
calculating the exact integer would be wasteful.
For convenience, :func:`fib` is available as an alias for
:func:`fibonacci`.
**Basic examples**
Some small Fibonacci numbers are::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> for i in range(10):
... print fibonacci(i),
...
0.0 1.0 1.0 2.0 3.0 5.0 8.0 13.0 21.0 34.0
>>> print fibonacci(50)
12586269025.0
The recurrence for `F(n)` extends backwards to negative `n`::
>>> for i in range(10):
... print fibonacci(-i),
...
0.0 1.0 -1.0 2.0 -3.0 5.0 -8.0 13.0 -21.0 34.0
Large Fibonacci numbers will be computed approximately unless
the precision is set high enough::
>>> print fib(200)
2.8057117299251e+41
>>> mp.dps = 45
>>> print fib(200)
280571172992510140037611932413038677189525.0
:func:`fibonacci` can compute approximate Fibonacci numbers
of stupendous size::
>>> mp.dps = 15
>>> print fibonacci(10**25)
3.49052338550226e+2089876402499787337692720
**Real and complex arguments**
The extended Fibonacci function is an analytic function. The
property `F(z) = F(z-1) + F(z-2)` holds for arbitrary `z`::
>>> mp.dps = 15
>>> print fib(pi)
2.1170270579161
>>> print fib(pi-1) + fib(pi-2)
2.1170270579161
>>> print fib(3+4j)
(-5248.51130728372 - 14195.962288353j)
>>> print fib(2+4j) + fib(1+4j)
(-5248.51130728372 - 14195.962288353j)
The Fibonacci function has infinitely many roots on the
negative half-real axis. The first root is at 0, the second is
close to -0.18, and then there are infinitely many roots that
asymptotically approach `-n+1/2`::
>>> print findroot(fib, -0.2)
-0.183802359692956
>>> print findroot(fib, -2)
-1.57077646820395
>>> print findroot(fib, -17)
-16.4999999596115
>>> print findroot(fib, -24)
-23.5000000000479
**Mathematical relationships**
For large `n`, `F(n+1)/F(n)` approaches the golden ratio::
>>> mp.dps = 50
>>> print fibonacci(101)/fibonacci(100)
1.6180339887498948482045868343656381177203127439638
>>> print phi
1.6180339887498948482045868343656381177203091798058
The sum of reciprocal Fibonacci numbers converges to an irrational
number for which no closed form expression is known::
>>> mp.dps = 15
>>> print nsum(lambda n: 1/fib(n), [1, inf])
3.35988566624318
Amazingly, however, the sum of odd-index reciprocal Fibonacci
numbers can be expressed in terms of a Jacobi theta function::
>>> print nsum(lambda n: 1/fib(2*n+1), [0, inf])
1.82451515740692
>>> print sqrt(5)*jtheta(2,0,(3-sqrt(5))/2)**2/4
1.82451515740692
Some related sums can be done in closed form::
>>> print nsum(lambda k: 1/(1+fib(2*k+1)), [0, inf])
1.11803398874989
>>> print phi - 0.5
1.11803398874989
>>> f = lambda k:(-1)**(k+1) / sum(fib(n)**2 for n in range(1,k+1))
>>> print nsum(f, [1, inf])
0.618033988749895
>>> print phi-1
0.618033988749895
**References**
1. http://mathworld.wolfram.com/FibonacciNumber.html
"""
zeta = mpfunc('zeta', gammazeta.mpf_zeta, gammazeta.mpc_zeta, 'Riemann zeta function')
altzeta = mpfunc('zeta', gammazeta.mpf_altzeta, gammazeta.mpc_altzeta, 'Dirichlet eta function')
zeta.__doc__ = r"""
``zeta(s)`` computes the Riemann zeta function, `\zeta(s)`.
The Riemann zeta function is defined for `\Re(s) > 1` by
.. math ::
\zeta(s) = 1+\frac{1}{2^s}+\frac{1}{3^s}+\frac{1}{4^s}+\ldots
and for `\Re(s) \le 1` by analytic continuation. It has a pole
at `s = 1`.
**Examples**
Some exact values of the zeta function are::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print zeta(2)
1.64493406684823
>>> print pi**2 / 6
1.64493406684823
>>> print zeta(0)
-0.5
>>> print zeta(-1)
-0.0833333333333333
>>> print zeta(-2)
0.0
:func:`zeta` supports arbitrary precision evaluation and
complex arguments::
>>> mp.dps = 50
>>> print zeta(pi)
1.1762417383825827588721504519380520911697389900217
>>> print zeta(1+2j) # doctest: +NORMALIZE_WHITESPACE
(0.5981655697623817367034568491742186771747764868876 -
0.35185474521784529049653859679690026505229177886045j)
The Riemann zeta function has so-called nontrivial zeros on
the critical line `s = 1/2 + it`::
>>> mp.dps = 15
>>> print findroot(zeta, 0.5+14j)
(0.5 + 14.1347251417347j)
>>> print findroot(zeta, 0.5+21j)
(0.5 + 21.0220396387716j)
>>> print findroot(zeta, 0.5+25j)
(0.5 + 25.0108575801457j)
For investigation of the zeta function zeros, the Riemann-Siegel
Z-function is often more convenient than working with the Riemann
zeta function directly (see :func:`siegelz`).
For large positive `s`, `\zeta(s)` rapidly approaches 1::
>>> print zeta(30)
1.00000000093133
>>> print zeta(100)
1.0
>>> print zeta(inf)
1.0
The following series converges and in fact has a simple
closed form value::
>>> print nsum(lambda k: zeta(k)-1, [2, inf])
1.0
**Algorithm**
The primary algorithm is Borwein's algorithm for the Dirichlet
eta function. Three separate implementations are used: for general
real arguments, general complex arguments, and for integers. The
reflection formula is applied to arguments in the negative
half-plane. For very large real arguments, either direct
summation or the Euler prime product is used.
It should be noted that computation of `\zeta(s)` gets very slow
when `s` is far away from the real axis.
**References**
1. http://mathworld.wolfram.com/RiemannZetaFunction.html
2. http://www.cecm.sfu.ca/personal/pborwein/PAPERS/P155.pdf
"""
altzeta.__doc__ = r"""
Computes the Dirichlet eta function, `\eta(s)`, also known as the
alternating zeta function. This function is defined in analogy
with the Riemann zeta function as providing the sum of the
alternating series
.. math ::
\eta(s) = 1-\frac{1}{2^s}+\frac{1}{3^s}-\frac{1}{4^s}+\ldots
Note that `\eta(1) = \log(2)` is the alternating harmonic series.
The eta function, unlike the Riemann zeta function, is an entire
function, having a finite value for all complex `s`.
The alternating and non-alternating zeta functions are related
via the simple formula
.. math ::
\eta(s) = (1 - 2^{1-s}) \zeta(s).
This formula can be used to define `\eta(s)` for `\Re(s) \le 0`,
where the series diverges.
**Examples**
Some special values are::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print altzeta(1)
0.693147180559945
>>> print altzeta(0)
0.5
>>> print altzeta(-1)
0.25
>>> print altzeta(-2)
0.0
An example of a sum that can be computed more accurately and
efficiently via :func:`altzeta` than via numerical summation::
>>> sum(-(-1)**n / n**2.5 for n in range(1, 100))
0.86720495150398402
>>> print altzeta(2.5)
0.867199889012184
At positive even integers, the Dirichlet eta function
evaluates to a rational multiple of a power of `\pi`::
>>> print altzeta(2)
0.822467033424113
>>> print pi**2/12
0.822467033424113
Like the Riemann zeta function, `\eta(s)` approaches 1
as `s` approaches positive infinity, although it does
so from below rather than from above::
>>> print altzeta(30)
0.999999999068682
>>> print altzeta(inf)
1.0
>>> altzeta(1000, rounding='d')
mpf('0.99999999999999989')
>>> altzeta(1000, rounding='u')
mpf('1.0')
**References**
1. http://mathworld.wolfram.com/DirichletEtaFunction.html
2. http://en.wikipedia.org/wiki/Dirichlet_eta_function
"""
gamma = mpfunc('gamma', gammazeta.mpf_gamma, gammazeta.mpc_gamma, "gamma function")
factorial = mpfunc('factorial', gammazeta.mpf_factorial, gammazeta.mpc_factorial, "factorial")
fac = factorial
factorial.__doc__ = r"""
Computes the factorial, `x!`. For integers `n \ge 0`, we have
`n! = 1 \cdot 2 \cdots (n-1) \cdot n` and more generally the factorial
is defined for real or complex `x` by `x! = \Gamma(x+1)`.
**Examples**
Basic values and limits::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> for k in range(6):
... print k, fac(k)
...
0 1.0
1 1.0
2 2.0
3 6.0
4 24.0
5 120.0
>>> print fac(inf)
+inf
>>> print fac(0.5), sqrt(pi)/2
0.886226925452758 0.886226925452758
For large positive `x`, `x!` can be approximated by
Stirling's formula::
>>> x = 10**10
>>> print fac(x)
2.32579620567308e+95657055186
>>> print sqrt(2*pi*x)*(x/e)**x
2.32579597597705e+95657055186
:func:`fac` supports evaluation for astronomically large values::
>>> print fac(10**30)
6.22311232304258e+29565705518096748172348871081098
Reciprocal factorials appear in the Taylor series of the
exponential function (among many other contexts)::
>>> print nsum(lambda k: 1/fac(k), [0, inf]), exp(1)
2.71828182845905 2.71828182845905
>>> print nsum(lambda k: pi**k/fac(k), [0, inf]), exp(pi)
23.1406926327793 23.1406926327793
"""
gamma.__doc__ = r"""
Computes the gamma function, `\Gamma(x)`. The gamma function is a
shifted version of the ordinary factorial, satisfying
`\Gamma(n) = (n-1)!` for integers `n > 0`. More generally, it
is defined by
.. math ::
\Gamma(x) = \int_0^{\infty} t^{x-1} e^{-t}\, dt
for any real or complex `x` with `\Re(x) > 0` and for `\Re(x) < 0`
by analytic continuation.
**Examples**
Basic values and limits::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> for k in range(1, 6):
... print k, gamma(k)
...
1 1.0
2 1.0
3 2.0
4 6.0
5 24.0
>>> print gamma(inf)
+inf
>>> print gamma(0)
Traceback (most recent call last):
...
ValueError: gamma function pole
The gamma function of a half-integer is a rational multiple of
`\sqrt{\pi}`::
>>> print gamma(0.5), sqrt(pi)
1.77245385090552 1.77245385090552
>>> print gamma(1.5), sqrt(pi)/2
0.886226925452758 0.886226925452758
We can check the integral definition::
>>> print gamma(3.5)
3.32335097044784
>>> print quad(lambda t: t**2.5*exp(-t), [0,inf])
3.32335097044784
:func:`gamma` supports arbitrary-precision evaluation and
complex arguments::
>>> mp.dps = 50
>>> print gamma(sqrt(3))
0.91510229697308632046045539308226554038315280564184
>>> mp.dps = 25
>>> print gamma(2j)
(0.009902440080927490985955066 - 0.07595200133501806872408048j)
Arguments can also be large. Note that the gamma function grows
very quickly::
>>> mp.dps = 15
>>> print gamma(10**20)
6.33636415517321e+1956570547910964391727
"""
def psi(m, z):
r"""
Gives the polygamma function of order `m` of `z`, `\psi^{(m)}(z)`.
Special cases are known as the *digamma function* (`\psi^{(0)}(z)`),
the *trigamma function* (`\psi^{(1)}(z)`), etc. The polygamma
functions are defined as the logarithmic derivatives of the gamma
function:
.. math ::
\psi^{(m)}(z) = \left(\frac{d}{dz}\right)^{m+1} \log \Gamma(z)
In particular, `\psi^{(0)}(z) = \Gamma'(z)/\Gamma(z)`. In the
present implementation of :func:`psi`, the order `m` must be a
nonnegative integer, while the argument `z` may be an arbitrary
complex number (with the exception of the polygamma function's poles
at `z = 0, -1, -2, \ldots`).
**Examples**
For various rational arguments, the polygamma function reduces to
a combination of standard mathematical constants::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print psi(0, 1), -euler
-0.5772156649015328606065121 -0.5772156649015328606065121
>>> print psi(1, '1/4'), pi**2+8*catalan
17.19732915450711073927132 17.19732915450711073927132
>>> print psi(2, '1/2'), -14*apery
-16.82879664423431999559633 -16.82879664423431999559633
The polygamma functions are derivatives of each other::
>>> print diff(lambda x: psi(3, x), pi), psi(4, pi)
-0.1105749312578862734526952 -0.1105749312578862734526952
>>> print quad(lambda x: psi(4, x), [2, 3]), psi(3,3)-psi(3,2)
-0.375 -0.375
The digamma function diverges logarithmically as `z \to \infty`,
while higher orders tend to zero::
>>> print psi(0,inf), psi(1,inf), psi(2,inf)
+inf 0.0 0.0
Evaluation for a complex argument::
>>> print psi(2, -1-2j)
(0.03902435405364952654838445 + 0.1574325240413029954685366j)
Evaluation is supported for large orders `m` and/or large
arguments `z`::
>>> print psi(3, 10**100)
2.0e-300
>>> print psi(250, 10**30+10**20*j)
(-1.293142504363642687204865e-7010 + 3.232856260909107391513108e-7018j)
**Application to infinite series**
Any infinite series where the summand is a rational function of
the index `k` can be evaluated in closed form in terms of polygamma
functions of the roots and poles of the summand::
>>> a = sqrt(2)
>>> b = sqrt(3)
>>> print nsum(lambda k: 1/((k+a)**2*(k+b)), [0, inf])
0.4049668927517857061917531
>>> print (psi(0,a)-psi(0,b)-a*psi(1,a)+b*psi(1,a))/(a-b)**2
0.4049668927517857061917531
This follows from the series representation (`m > 0`)
.. math ::
\psi^{(m)}(z) = (-1)^{m+1} m! \sum_{k=0}^{\infty}
\frac{1}{(z+k)^{m+1}}.
Since the roots of a polynomial may be complex, it is sometimes
necessary to use the complex polygamma function to evaluate
an entirely real-valued sum::
>>> print nsum(lambda k: 1/(k**2-2*k+3), [0, inf])
1.694361433907061256154665
>>> nprint(polyroots([1,-2,3]))
[(1.0 - 1.41421j), (1.0 + 1.41421j)]
>>> r1 = 1-sqrt(2)*j
>>> r2 = r1.conjugate()
>>> print (psi(0,-r2)-psi(0,-r1))/(r1-r2)
(1.694361433907061256154665 + 0.0j)
"""
z = mpmathify(z)
m = int(m)
if isinstance(z, mpf):
return make_mpf(gammazeta.mpf_psi(m, z._mpf_, *prec_rounding))
else:
return make_mpc(gammazeta.mpc_psi(m, z._mpc_, *prec_rounding))
def psi0(z):
"""Shortcut for psi(0,z) (the digamma function)"""
return psi(0, z)
def psi1(z):
"""Shortcut for psi(1,z) (the trigamma function)"""
return psi(1, z)
def psi2(z):
"""Shortcut for psi(2,z) (the tetragamma function)"""
return psi(2, z)
def psi3(z):
"""Shortcut for psi(3,z) (the pentagamma function)"""
return psi(3, z)
polygamma = psi
digamma = psi0
trigamma = psi1
tetragamma = psi2
pentagamma = psi3
harmonic = mpfunc('harmonic', gammazeta.mpf_harmonic, gammazeta.mpc_harmonic,
"nth harmonic number")
harmonic.__doc__ = r"""
If `n` is an integer, ``harmonic(n)`` gives a floating-point
approximation of the `n`-th harmonic number `H(n)`, defined as
.. math ::
H(n) = 1 + \frac{1}{2} + \frac{1}{3} + \ldots + \frac{1}{n}
The first few harmonic numbers are::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> for n in range(8):
... print n, harmonic(n)
...
0 0.0
1 1.0
2 1.5
3 1.83333333333333
4 2.08333333333333
5 2.28333333333333
6 2.45
7 2.59285714285714
The infinite harmonic series `1 + 1/2 + 1/3 + \ldots` diverges::
>>> print harmonic(inf)
+inf
:func:`harmonic` is evaluated using the digamma function rather
than by summing the harmonic series term by term. It can therefore
be computed quickly for arbitrarily large `n`, and even for
nonintegral arguments::
>>> print harmonic(10**100)
230.835724964306
>>> print harmonic(0.5)
0.613705638880109
>>> print harmonic(3+4j)
(2.24757548223494 + 0.850502209186044j)
:func:`harmonic` supports arbitrary precision evaluation::
>>> mp.dps = 50
>>> print harmonic(11)
3.0198773448773448773448773448773448773448773448773
>>> print harmonic(pi)
1.8727388590273302654363491032336134987519132374152
The harmonic series diverges, but at a glacial pace. It is possible
to calculate the exact number of terms required before the sum
exceeds a given amount, say 100::
>>> mp.dps = 50
>>> v = 10**findroot(lambda x: harmonic(10**x) - 100, 10)
>>> print v
15092688622113788323693563264538101449859496.864101
>>> v = int(ceil(v))
>>> print v
15092688622113788323693563264538101449859497
>>> print harmonic(v-1)
99.999999999999999999999999999999999999999999942747
>>> print harmonic(v)
100.000000000000000000000000000000000000000000009
"""
def bernoulli(n):
r"""
Computes the nth Bernoulli number, `B_n`, for any integer `n \ge 0`.
The Bernoulli numbers are rational numbers, but this function
returns a floating-point approximation. To obtain an exact
fraction, use :func:`bernfrac` instead.
**Examples**
Numerical values of the first few Bernoulli numbers::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> for n in range(15):
... print n, bernoulli(n)
...
0 1.0
1 -0.5
2 0.166666666666667
3 0.0
4 -0.0333333333333333
5 0.0
6 0.0238095238095238
7 0.0
8 -0.0333333333333333
9 0.0
10 0.0757575757575758
11 0.0
12 -0.253113553113553
13 0.0
14 1.16666666666667
Bernoulli numbers can be approximated with arbitrary precision::
>>> mp.dps = 50
>>> print bernoulli(100)
-2.8382249570693706959264156336481764738284680928013e+78
Arbitrarily large `n` are supported::
>>> mp.dps = 15
>>> print bernoulli(10**20 + 2)
3.09136296657021e+1876752564973863312327
The Bernoulli numbers are related to the Riemann zeta function
at integer arguments::
>>> print -bernoulli(8) * (2*pi)**8 / (2*fac(8))
1.00407735619794
>>> print zeta(8)
1.00407735619794
**Algorithm**
For small `n` (`n < 3000`) :func:`bernoulli` uses a recurrence
formula due to Ramanujan. All results in this range are cached,
so sequential computation of small Bernoulli numbers is
guaranteed to be fast.
For larger `n`, `B_n` is evaluated in terms of the Riemann zeta
function.
"""
return make_mpf(gammazeta.mpf_bernoulli(int(n), *prec_rounding))
bernfrac = gammazeta.bernfrac
stieltjes_cache = {}
def stieltjes(n, a=1):
r"""
For a nonnegative integer `n`, ``stieltjes(n)`` computes the
`n`-th Stieltjes constant `\gamma_n`, defined as the
`n`-th coefficient in the Laurent series expansion of the
Riemann zeta function around the pole at `s = 1`. That is,
we have:
.. math ::
\zeta(s) = \frac{1}{s-1} \sum_{n=0}^{\infty}
\frac{(-1)^n}{n!} \gamma_n (s-1)^n
More generally, ``stieltjes(n, a)`` gives the corresponding
coefficient `\gamma_n(a)` for the Hurwitz zeta function
`\zeta(s,a)` (with `\gamma_n = \gamma_n(1)`).
**Examples**
The zeroth Stieltjes constant is just Euler's constant `\gamma`::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print stieltjes(0)
0.577215664901533
Some more values are::
>>> print stieltjes(1)
-0.0728158454836767
>>> print stieltjes(10)
0.000205332814909065
>>> print stieltjes(30)
0.00355772885557316
>>> print stieltjes(1000)
-1.57095384420474e+486
>>> print stieltjes(2000)
2.680424678918e+1109
>>> print stieltjes(1, 2.5)
-0.23747539175716
An alternative way to compute `\gamma_1`::
>>> print diff(extradps(25)(lambda x: 1/(x-1) - zeta(x)), 1)
-0.0728158454836767
:func:`stieltjes` supports arbitrary precision evaluation::
>>> mp.dps = 50
>>> print stieltjes(2)
-0.0096903631928723184845303860352125293590658061013408
**Algorithm**
:func:`stieltjes` numerically evaluates the integral in
the following representation due to Ainsworth, Howell and
Coffey [1], [2]:
.. math ::
\gamma_n(a) = \frac{\log^n a}{2a} - \frac{\log^{n+1}(a)}{n+1} +
\frac{2}{a} \Re \int_0^{\infty}
\frac{(x/a-i)\log^n(a-ix)}{(1+x^2/a^2)(e^{2\pi x}-1)} dx.
For some reference values with `a = 1`, see e.g. [4].
**References**
1. O. R. Ainsworth & L. W. Howell, "An integral representation of
the generalized Euler-Mascheroni constants", NASA Technical
Paper 2456 (1985),
http://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19850014994_1985014994.pdf
2. M. W. Coffey, "The Stieltjes constants, their relation to the
`\eta_j` coefficients, and representation of the Hurwitz
zeta function", arXiv:0706.0343v1 http://arxiv.org/abs/0706.0343
3. http://mathworld.wolfram.com/StieltjesConstants.html
4. http://pi.lacim.uqam.ca/piDATA/stieltjesgamma.txt
"""
n = mpmathify(n)
a = mpmathify(a)
if n < 0:
raise ValueError("Stieltjes constants defined for n >= 0")
if a == 1:
if n == 0:
return +euler
if n in stieltjes_cache:
prec, s = stieltjes_cache[n]
if prec >= mp.prec:
return +s
mag = 1
def f(x):
xa = x/a
v = (xa-j)*log(a-j*x)**n/(1+xa**2)/(exp(2*pi*x)-1)
return v.real / mag
from quadrature import quad
orig = mp.prec
try:
# Normalize integrand by approx. magnitude to
# speed up quadrature (which uses absolute error)
if n > 50:
mp.prec = 20
mag = quad(f, [0,inf], maxdegree=3)
mp.prec = orig + 10 + int(n**0.5)
s = quad(f, [0,inf], maxdegree=20)
v = log(a)**n/(2*a) - log(a)**(n+1)/(n+1) + 2*s/a*mag
finally:
mp.prec = orig
if a == 1 and isint(n):
stieltjes_cache[n] = (mp.prec, v)
return +v
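# Helper: isnpint(x) returns True if x is a nonpositive integer
# (0, -1, -2, ...), which is exactly where the gamma function has poles.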
def isnpint(x):
if not x:
return True
if isinstance(x, mpf):
sign, man, exp, bc = x._mpf_
return sign and exp >= 0
if isinstance(x, mpc):
return not x.imag and isnpint(x.real)
def gammaprod(a, b):
r"""
Given iterables `a` and `b`, ``gammaprod(a, b)`` computes the
product / quotient of gamma functions:
.. math ::
\frac{\Gamma(a_0) \Gamma(a_1) \cdots \Gamma(a_p)}
{\Gamma(b_0) \Gamma(b_1) \cdots \Gamma(b_q)}
Unlike direct calls to :func:`gamma`, :func:`gammaprod` considers
the entire product as a limit and evaluates this limit properly if
any of the numerator or denominator arguments are nonpositive
integers such that poles of the gamma function are encountered.
That is, :func:`gammaprod` evaluates
.. math ::
\lim_{\epsilon \to 0}
\frac{\Gamma(a_0+\epsilon) \Gamma(a_1+\epsilon) \cdots
\Gamma(a_p+\epsilon)}
{\Gamma(b_0+\epsilon) \Gamma(b_1+\epsilon) \cdots
\Gamma(b_q+\epsilon)}
In particular:
* If there are equally many poles in the numerator and the
denominator, the limit is a rational number times the remaining,
regular part of the product.
* If there are more poles in the numerator, :func:`gammaprod`
returns ``+inf``.
* If there are more poles in the denominator, :func:`gammaprod`
returns 0.
**Examples**
The reciprocal gamma function `1/\Gamma(x)` evaluated at `x = 0`::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> gammaprod([], [0])
mpf('0.0')
A limit::
>>> gammaprod([-4], [-3])
mpf('-0.25')
>>> limit(lambda x: gamma(x-1)/gamma(x), -3, direction=1)
mpf('-0.25')
>>> limit(lambda x: gamma(x-1)/gamma(x), -3, direction=-1)
mpf('-0.25')
"""
a = [mpmathify(x) for x in a]
b = [mpmathify(x) for x in b]
poles_num = []
poles_den = []
regular_num = []
regular_den = []
for x in a: [regular_num, poles_num][isnpint(x)].append(x)
for x in b: [regular_den, poles_den][isnpint(x)].append(x)
# One more pole in numerator or denominator gives 0 or inf
if len(poles_num) < len(poles_den): return mpf(0)
if len(poles_num) > len(poles_den): return mpf('+inf')
# All poles cancel
# lim G(i)/G(j) = (-1)**(i+j) * gamma(1-j) / gamma(1-i)
p = mpf(1)
orig = mp.prec
try:
mp.prec = orig + 15
while poles_num:
i = poles_num.pop()
j = poles_den.pop()
p *= (-1)**(i+j) * gamma(1-j) / gamma(1-i)
for x in regular_num: p *= gamma(x)
for x in regular_den: p /= gamma(x)
finally:
mp.prec = orig
return +p
def beta(x, y):
r"""
Computes the beta function,
`B(x,y) = \Gamma(x) \Gamma(y) / \Gamma(x+y)`.
The beta function is also commonly defined by the integral
representation
.. math ::
B(x,y) = \int_0^1 t^{x-1} (1-t)^{y-1} \, dt
**Examples**
For integer and half-integer arguments where all three gamma
functions are finite, the beta function becomes either a rational
number or a rational multiple of `\pi`::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print beta(5, 2)
0.0333333333333333
>>> print beta(1.5, 2)
0.266666666666667
>>> print 16*beta(2.5, 1.5)
3.14159265358979
Where appropriate, :func:`beta` evaluates limits. A pole
of the beta function is taken to result in ``+inf``::
>>> print beta(-0.5, 0.5)
0.0
>>> print beta(-3, 3)
-0.333333333333333
>>> print beta(-2, 3)
+inf
>>> print beta(inf, 1)
0.0
>>> print beta(inf, 0)
nan
:func:`beta` supports complex numbers and arbitrary precision
evaluation::
>>> print beta(1, 2+j)
(0.4 - 0.2j)
>>> mp.dps = 25
>>> print beta(j,0.5)
(1.079424249270925780135675 - 1.410032405664160838288752j)
>>> mp.dps = 50
>>> print beta(pi, e)
0.037890298781212201348153837138927165984170287886464
Various integrals can be computed by means of the
beta function::
>>> mp.dps = 15
>>> print quad(lambda t: t**2.5*(1-t)**2, [0, 1])
0.0230880230880231
>>> print beta(3.5, 3)
0.0230880230880231
>>> print quad(lambda t: sin(t)**4 * sqrt(cos(t)), [0, pi/2])
0.319504062596158
>>> print beta(2.5, 0.75)/2
0.319504062596158
"""
x = mpmathify(x)
y = mpmathify(y)
if isinf(y):
x, y = y, x
if isinf(x):
if x == inf and not y.imag:
if y == -inf:
return nan
if y > 0:
return zero
if isint(y):
return nan
if y < 0:
return sign(gamma(y)) * inf
return nan
return gammaprod([x, y], [x+y])
def binomial(n, k):
r"""
Computes the binomial coefficient
.. math ::
{n \choose k} = \frac{n!}{k!(n-k)!}.
The binomial coefficient gives the number of ways that `k` items
can be chosen from a set of `n` items. More generally, the binomial
coefficient is a well-defined function of arbitrary real or
complex `n` and `k`, via the gamma function.
**Examples**
Generate Pascal's triangle::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> for n in range(5):
... nprint([binomial(n,k) for k in range(n+1)])
...
[1.0]
[1.0, 1.0]
[1.0, 2.0, 1.0]
[1.0, 3.0, 3.0, 1.0]
[1.0, 4.0, 6.0, 4.0, 1.0]
There is 1 way to select 0 items from the empty set, and 0 ways to
select 1 item from the empty set::
>>> print binomial(0, 0)
1.0
>>> print binomial(0, 1)
0.0
:func:`binomial` supports large arguments::
>>> print binomial(10**20, 10**20-5)
8.33333333333333e+157
>>> print binomial(10**20, 10**10)
2.60784095465201e+104342944813
Nonintegral binomial coefficients find use in series
expansions::
>>> nprint(taylor(lambda x: (1+x)**0.25, 0, 4))
[1.0, 0.25, -9.375e-2, 5.46875e-2, -3.75977e-2]
>>> nprint([binomial(0.25, k) for k in range(5)])
[1.0, 0.25, -9.375e-2, 5.46875e-2, -3.75977e-2]
An integral representation::
>>> n, k = 5, 3
>>> f = lambda t: exp(-j*k*t)*(1+exp(j*t))**n
>>> print chop(quad(f, [-pi,pi])/(2*pi))
10.0
>>> print binomial(n,k)
10.0
"""
return gammaprod([n+1], [k+1, n-k+1])
def rf(x, n):
r"""
Computes the rising factorial or Pochhammer symbol,
.. math ::
x^{(n)} = x (x+1) \cdots (x+n-1) = \frac{\Gamma(x+n)}{\Gamma(x)}
where the rightmost expression is valid for nonintegral `n`.
**Examples**
For integral `n`, the rising factorial is a polynomial::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> for n in range(5):
... nprint(taylor(lambda x: rf(x,n), 0, n))
...
[1.0]
[0.0, 1.0]
[0.0, 1.0, 1.0]
[0.0, 2.0, 3.0, 1.0]
[0.0, 6.0, 11.0, 6.0, 1.0]
Evaluation is supported for arbitrary arguments::
>>> print rf(2+3j, 5.5)
(-7202.03920483347 - 3777.58810701527j)
"""
return gammaprod([x+n], [x])
def ff(x, n):
r"""
Computes the falling factorial,
.. math ::
(x)_n = x (x-1) \cdots (x-n+1) = \frac{\Gamma(x+1)}{\Gamma(x-n+1)}
where the rightmost expression is valid for nonintegral `n`.
**Examples**
For integral `n`, the falling factorial is a polynomial::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> for n in range(5):
... nprint(taylor(lambda x: ff(x,n), 0, n))
...
[1.0]
[0.0, 1.0]
[0.0, -1.0, 1.0]
[0.0, 2.0, -3.0, 1.0]
[0.0, -6.0, 11.0, -6.0, 1.0]
Evaluation is supported for arbitrary arguments::
>>> print ff(2+3j, 5.5)
(-720.41085888203 + 316.101124983878j)
"""
return gammaprod([x+1], [x-n+1])
@funcwrapper
def fac2(x):
r"""
Computes the double factorial `x!!`, defined for integers
`x > 0` by
.. math ::
x!! = \begin{cases}
1 \cdot 3 \cdots (x-2) \cdot x & x \;\mathrm{odd} \\
2 \cdot 4 \cdots (x-2) \cdot x & x \;\mathrm{even}
\end{cases}
and more generally by [1]
.. math ::
x!! = 2^{x/2} \left(\frac{\pi}{2}\right)^{(\cos(\pi x)-1)/4}
\Gamma\left(\frac{x}{2}+1\right).
**Examples**
The integer sequence of double factorials begins::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> nprint([fac2(n) for n in range(10)])
[1.0, 1.0, 2.0, 3.0, 8.0, 15.0, 48.0, 105.0, 384.0, 945.0]
For large `x`, double factorials follow a Stirling-like asymptotic
approximation::
>>> x = mpf(10000)
>>> print fac2(x)
5.97272691416282e+17830
>>> print sqrt(pi)*x**((x+1)/2)*exp(-x/2)
5.97262736954392e+17830
The recurrence formula `x!! = x (x-2)!!` can be reversed to
define the double factorial of negative odd integers (but
not negative even integers)::
>>> print fac2(-1), fac2(-3), fac2(-5), fac2(-7)
1.0 -1.0 0.333333333333333 -0.0666666666666667
>>> fac2(-2)
Traceback (most recent call last):
...
ValueError: gamma function pole
With the exception of the poles at negative even integers,
:func:`fac2` supports evaluation for arbitrary complex arguments.
The recurrence formula is valid generally::
>>> print fac2(pi+2j)
(-1.3697207890154e-12 + 3.93665300979176e-12j)
>>> print (pi+2j)*fac2(pi-2+2j)
(-1.3697207890154e-12 + 3.93665300979176e-12j)
Double factorials should not be confused with nested factorials,
which are immensely larger::
>>> print fac(fac(20))
5.13805976125208e+43675043585825292774
>>> print fac2(20)
3715891200.0
Double factorials appear, among other things, in series expansions
of Gaussian functions and the error function. Infinite series
include::
>>> print nsum(lambda k: 1/fac2(k), [0, inf])
3.05940740534258
>>> print sqrt(e)*(1+sqrt(pi/2)*erf(sqrt(2)/2))
3.05940740534258
>>> print nsum(lambda k: 2**k/fac2(2*k-1), [1, inf])
4.06015693855741
>>> print e * erf(1) * sqrt(pi)
4.06015693855741
A beautiful Ramanujan sum::
>>> print nsum(lambda k: (-1)**k*(fac2(2*k-1)/fac2(2*k))**3, [0,inf])
0.90917279454693
>>> print (gamma('9/8')/gamma('5/4')/gamma('7/8'))**2
0.90917279454693
**References**
1. http://functions.wolfram.com/GammaBetaErf/Factorial2/27/01/0002/
2. http://mathworld.wolfram.com/DoubleFactorial.html
"""
if isinf(x):
if x == inf:
return x
return nan
return 2**(x/2)*(pi/2)**((cospi(x)-1)/4)*gamma(x/2+1)
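# Illustrative plain-float check of the closed form used above (not part of
# the mpmath API; assumes math.gamma, available in Python >= 2.7). For
# nonnegative integers it reproduces the elementary double factorial.
def _fac2_float_sketch(n):
    import math
    return 2.0**(n/2.0) * (math.pi/2.0)**((math.cos(math.pi*n)-1)/4.0) * \
        math.gamma(n/2.0 + 1)
# e.g. _fac2_float_sketch(5) ~= 15.0 and _fac2_float_sketch(6) ~= 48.0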
#---------------------------------------------------------------------------#
# #
# Hypergeometric functions #
# #
#---------------------------------------------------------------------------#
class _mpq(tuple):
@property
def _mpf_(self):
return (mpf(self[0])/self[1])._mpf_
def __add__(self, other):
if isinstance(other, _mpq):
a, b = self
c, d = other
return _mpq((a*d+b*c, b*d))
return NotImplemented
def __sub__(self, other):
if isinstance(other, _mpq):
a, b = self
c, d = other
return _mpq((a*d-b*c, b*d))
return NotImplemented
mpq_1 = _mpq((1,1))
mpq_0 = _mpq((0,1))
def parse_param(x):
if isinstance(x, tuple):
p, q = x
return [[p, q]], [], []
if isinstance(x, (int, long)):
return [[x, 1]], [], []
x = mpmathify(x)
if isinstance(x, mpf):
return [], [x._mpf_], []
if isinstance(x, mpc):
return [], [], [x._mpc_]
def _as_num(x):
if isinstance(x, list):
return _mpq(x)
return x
def hypsum(ar, af, ac, br, bf, bc, x):
prec, rnd = prec_rounding
if hasattr(x, "_mpf_") and not (ac or bc):
v = libhyper.hypsum_internal(ar, af, ac, br, bf, bc, x._mpf_, None, prec, rnd)
return make_mpf(v)
else:
if hasattr(x, "_mpc_"):
re, im = x._mpc_
else:
re, im = x._mpf_, libmpf.fzero
v = libhyper.hypsum_internal(ar, af, ac, br, bf, bc, re, im, prec, rnd)
return make_mpc(v)
def eval_hyp2f1(a,b,c,z):
prec, rnd = prec_rounding
ar, af, ac = parse_param(a)
br, bf, bc = parse_param(b)
cr, cf, cc = parse_param(c)
absz = abs(z)
if absz == 1:
# TODO: determine whether the series actually converges for
# |z| = 1 and, if it does not, return infinity instead
print "Warning: 2F1 might not converge for |z| = 1"
if absz <= 1:
# All rational
if ar and br and cr:
return sum_hyp2f1_rat(ar[0], br[0], cr[0], z)
return hypsum(ar+br, af+bf, ac+bc, cr, cf, cc, z)
# Use 1/z transformation
a = (ar and _as_num(ar[0])) or mpmathify(a)
b = (br and _as_num(br[0])) or mpmathify(b)
c = (cr and _as_num(cr[0])) or mpmathify(c)
orig = mp.prec
try:
mp.prec = orig + 15
h1 = eval_hyp2f1(a, mpq_1-c+a, mpq_1-b+a, 1/z)
h2 = eval_hyp2f1(b, mpq_1-c+b, mpq_1-a+b, 1/z)
#s1 = G(c)*G(b-a)/G(b)/G(c-a) * (-z)**(-a) * h1
#s2 = G(c)*G(a-b)/G(a)/G(c-b) * (-z)**(-b) * h2
f1 = gammaprod([c,b-a],[b,c-a])
f2 = gammaprod([c,a-b],[a,c-b])
s1 = f1 * (-z)**(mpq_0-a) * h1
s2 = f2 * (-z)**(mpq_0-b) * h2
v = s1 + s2
finally:
mp.prec = orig
return +v
def sum_hyp0f1_rat(a, z):
prec, rnd = prec_rounding
if hasattr(z, "_mpf_"):
return make_mpf(libhyper.mpf_hyp0f1_rat(a, z._mpf_, prec, rnd))
else:
return make_mpc(libhyper.mpc_hyp0f1_rat(a, z._mpc_, prec, rnd))
def sum_hyp1f1_rat(a, b, z):
prec, rnd = prec_rounding
if hasattr(z, "_mpf_"):
return make_mpf(libhyper.mpf_hyp1f1_rat(a, b, z._mpf_, prec, rnd))
else:
return make_mpc(libhyper.mpc_hyp1f1_rat(a, b, z._mpc_, prec, rnd))
def sum_hyp2f1_rat(a, b, c, z):
prec, rnd = prec_rounding
if hasattr(z, "_mpf_"):
return make_mpf(libhyper.mpf_hyp2f1_rat(a, b, c, z._mpf_, prec, rnd))
else:
return make_mpc(libhyper.mpc_hyp2f1_rat(a, b, c, z._mpc_, prec, rnd))
#---------------------------------------------------------------------------#
# And now the user-friendly versions #
#---------------------------------------------------------------------------#
def hyper(a_s, b_s, z):
r"""
Evaluates the generalized hypergeometric function
.. math ::
\,_pF_q(a_1,\ldots,a_p; b_1,\ldots,b_q; z) =
\sum_{n=0}^\infty \frac{(a_1)_n (a_2)_n \ldots (a_p)_n}
{(b_1)_n(b_2)_n\ldots(b_q)_n} \frac{z^n}{n!}
where `(x)_n` denotes the rising factorial (see :func:`rf`).
The parameters lists ``a_s`` and ``b_s`` may contain integers,
real numbers, complex numbers, as well as exact fractions given in
the form of tuples `(p, q)`. :func:`hyper` is optimized to handle
integers and fractions more efficiently than arbitrary
floating-point parameters (since rational parameters are by
far the most common).
**Examples**
We can compare the output of :func:`hyper` with :func:`nsum`::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> a,b,c,d = 2,3,4,5
>>> x = 0.25
>>> print hyper([a,b],[c,d],x)
1.078903941164934876086237
>>> fn = lambda n: rf(a,n)*rf(b,n)/rf(c,n)/rf(d,n)*x**n/fac(n)
>>> print nsum(fn, [0, inf])
1.078903941164934876086237
The parameters can be any combination of integers, fractions,
floats and complex numbers::
>>> a, b, c, d, e = 1, (-1,2), pi, 3+4j, (2,3)
>>> x = 0.2j
>>> print hyper([a,b],[c,d,e],x)
(0.9923571616434024810831887 - 0.005753848733883879742993122j)
>>> b, e = -0.5, mpf(2)/3
>>> fn = lambda n: rf(a,n)*rf(b,n)/rf(c,n)/rf(d,n)/rf(e,n)*x**n/fac(n)
>>> print nsum(fn, [0, inf])
(0.9923571616434024810831887 - 0.005753848733883879742993122j)
"""
p = len(a_s)
q = len(b_s)
z = mpmathify(z)
degree = p, q
if degree == (0, 1):
br, bf, bc = parse_param(b_s[0])
if br:
return sum_hyp0f1_rat(br[0], z)
return hypsum([], [], [], br, bf, bc, z)
if degree == (1, 1):
ar, af, ac = parse_param(a_s[0])
br, bf, bc = parse_param(b_s[0])
if ar and br:
a, b = ar[0], br[0]
return sum_hyp1f1_rat(a, b, z)
return hypsum(ar, af, ac, br, bf, bc, z)
if degree == (2, 1):
return eval_hyp2f1(a_s[0], a_s[1], b_s[0], z)
ars, afs, acs, brs, bfs, bcs = [], [], [], [], [], []
for a in a_s:
r, f, c = parse_param(a)
ars += r
afs += f
acs += c
for b in b_s:
r, f, c = parse_param(b)
brs += r
bfs += f
bcs += c
return hypsum(ars, afs, acs, brs, bfs, bcs, z)
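# Illustrative plain-float partial sum of the defining series of pFq (not
# part of the mpmath API; only sensible for small |z| and generic real
# parameters, with a fixed truncation instead of adaptive precision).
def _hyper_series_float_sketch(a_s, b_s, z, terms=60):
    s = term = 1.0
    for n in range(1, terms):
        for a in a_s:
            term *= a + n - 1   # rising factorials (a)_n in the numerator
        for b in b_s:
            term /= b + n - 1   # rising factorials (b)_n in the denominator
        term *= z / float(n)    # z**n / n!
        s += term
    return s
# _hyper_series_float_sketch([2,3], [4,5], 0.25) ~= 1.07890394116493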
def hyp0f1(a, z):
r"""Hypergeometric function `\,_0F_1`. ``hyp0f1(a,z)`` is equivalent
to ``hyper([],[a],z)``; see documentation for :func:`hyper` for more
information."""
return hyper([], [a], z)
def hyp1f1(a,b,z):
r"""Hypergeometric function `\,_1F_1`. ``hyp1f1(a,b,z)`` is equivalent
to ``hyper([a],[b],z)``; see documentation for :func:`hyper` for more
information."""
return hyper([a], [b], z)
def hyp2f1(a,b,c,z):
r"""Hypergeometric function `\,_2F_1`. ``hyp2f1(a,b,c,z)`` is equivalent
to ``hyper([a,b],[c],z)``; see documentation for :func:`hyper` for more
information."""
return hyper([a,b], [c], z)
def _lower_gamma(z, b):
return hyp1f1(1, 1+z, b) * b**z * exp(-b) / z
def _check_pos(x):
return isinstance(x, mpf) and x > 0
@funcwrapper
def gammainc(z, a=0, b=inf, regularized=False):
r"""
``gammainc(z, a=0, b=inf)`` computes the (generalized) incomplete
gamma function with integration limits `[a, b]`:
.. math ::
\Gamma(z,a,b) = \int_a^b t^{z-1} e^{-t} \, dt
The generalized incomplete gamma function reduces to the
following special cases when one or both endpoints are fixed:
* `\Gamma(z,0,\infty)` is the standard ("complete")
gamma function, `\Gamma(z)` (available directly
as the mpmath function :func:`gamma`)
* `\Gamma(z,a,\infty)` is the "upper" incomplete gamma
function, `\Gamma(z,a)`
* `\Gamma(z,0,b)` is the "lower" incomplete gamma
function, `\gamma(z,b)`.
Of course, we have
`\Gamma(z,0,x) + \Gamma(z,x,\infty) = \Gamma(z)`
for all `z` and `x`.
Note however that some authors reverse the order of the
arguments when defining the lower and upper incomplete
gamma function, so one should be careful to get the correct
definition.
If also given the keyword argument ``regularized=True``,
:func:`gammainc` computes the "regularized" incomplete gamma
function
.. math ::
P(z,a,b) = \frac{\Gamma(z,a,b)}{\Gamma(z)}.
**Examples**
We can compare with numerical quadrature to verify that
:func:`gammainc` computes the integral in the definition::
>>> from sympy.mpmath import *
>>> mp.dps = 20
>>> print gammainc(2+3j, 4, 10)
(0.009772126686277051606 - 0.077063730631298989245j)
>>> print quad(lambda t: t**(2+3j-1) * exp(-t), [4, 10])
(0.009772126686277051606 - 0.077063730631298989245j)
The incomplete gamma functions satisfy simple recurrence
relations::
>>> mp.dps = 15
>>> z = 3.5
>>> a = 2
>>> print gammainc(z+1, a), z*gammainc(z,a) + a**z*exp(-a)
10.6013029693353 10.6013029693353
>>> print gammainc(z+1,0,a), z*gammainc(z,0,a) - a**z*exp(-a)
1.03042542723211 1.03042542723211
If `z` is an integer, the recurrence reduces the incomplete gamma
function to `P(a) \exp(-a) + Q(b) \exp(-b)` where `P` and
`Q` are polynomials::
>>> mp.dps = 15
>>> print gammainc(1, 2), exp(-2)
0.135335283236613 0.135335283236613
>>> mp.dps = 50
>>> identify(gammainc(6, 1, 2), ['exp(-1)', 'exp(-2)'])
'(326*exp(-1) + (-872)*exp(-2))'
The incomplete gamma functions reduce to functions such as
the exponential integral Ei and the error function for special
arguments::
>>> mp.dps = 15
>>> print gammainc(0, 4), -ei(-4)
0.00377935240984891 0.00377935240984891
>>> print gammainc(0.5, 0, 2), sqrt(pi)*erf(sqrt(2))
1.6918067329452 1.6918067329452
"""
if b == inf:
if a == 0:
v = gamma(z)
else:
if z == 0:
# Reduces to exponential integral. Mind branch cuts.
if _check_pos(a):
return -ei(-a)
else:
return -ei(-a) + (log(-a)-log(-1/a))/2-log(a)
# XXX: avoid poles
v = gamma(z) - _lower_gamma(z, a)
elif a == 0:
v = _lower_gamma(z, b)
else:
if z == 0:
# Reduces to exponential integral
if _check_pos(a) and _check_pos(b):
return ei(-b) - ei(-a)
else:
return ei(-b)-ei(-a) + \
(log(-a)-log(-1/a))/2-log(a) + \
(log(-1/b)-log(-b))/2+log(b)
# XXX: avoid poles
v = _lower_gamma(z, b) - _lower_gamma(z, a)
if regularized:
return v / gamma(z)
else:
return v
erf = mpfunc("erf", libhyper.mpf_erf, libhyper.mpc_erf,
"Error function, erf(z)")
erf.__doc__ = r"""
Computes the error function, `\mathrm{erf}(x)`. The error
function is the normalized antiderivative of the Gaussian function
`\exp(-t^2)`. More precisely,
.. math::
\mathrm{erf}(x) = \frac{2}{\sqrt \pi} \int_0^x \exp(-t^2) \,dt
**Basic examples**
Simple values and limits include::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print erf(0)
0.0
>>> print erf(1)
0.842700792949715
>>> print erf(-1)
-0.842700792949715
>>> print erf(inf)
1.0
>>> print erf(-inf)
-1.0
For large real `x`, `\mathrm{erf}(x)` approaches 1 very
rapidly::
>>> print erf(3)
0.999977909503001
>>> print erf(5)
0.999999999998463
The error function is an odd function::
>>> nprint(chop(taylor(erf, 0, 5)))
[0.0, 1.12838, 0.0, -0.376126, 0.0, 0.112838]
:func:`erf` implements arbitrary-precision evaluation and
supports complex numbers::
>>> mp.dps = 50
>>> print erf(0.5)
0.52049987781304653768274665389196452873645157575796
>>> mp.dps = 25
>>> print erf(1+j)
(1.316151281697947644880271 + 0.1904534692378346862841089j)
**Related functions**
See also :func:`erfc`, which is more accurate for large `x`,
and :func:`erfi` which gives the antiderivative of
`\exp(t^2)`.
The Fresnel integrals :func:`fresnels` and :func:`fresnelc`
are also related to the error function.
"""
erfc = mpfunc("erfc", libhyper.mpf_erfc, libhyper.mpc_erfc,
"Complementary error function, erfc(z) = 1-erf(z)")
erfc.__doc__ = r"""
Computes the complementary error function,
`\mathrm{erfc}(x) = 1-\mathrm{erf}(x)`.
This function avoids cancellation that occurs when naively
computing the complementary error function as ``1-erf(x)``::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print 1 - erf(10)
0.0
>>> print erfc(10)
2.08848758376254e-45
:func:`erfc` works accurately even for ludicrously large
arguments::
>>> print erfc(10**10)
4.3504398860243e-43429448190325182776
"""
@funcwrapper
def erfi(z):
r"""
Computes the imaginary error function, `\mathrm{erfi}(x)`.
The imaginary error function is defined in analogy with the
error function, but with a positive sign in the integrand:
.. math ::
\mathrm{erfi}(x) = \frac{2}{\sqrt \pi} \int_0^x \exp(t^2) \,dt
Whereas the error function rapidly converges to 1 as `x` grows,
the imaginary error function rapidly diverges to infinity.
The functions are related as
`\mathrm{erfi}(x) = -i\,\mathrm{erf}(ix)` for all complex
numbers `x`.
**Examples**
Basic values and limits::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print erfi(0)
0.0
>>> print erfi(1)
1.65042575879754
>>> print erfi(-1)
-1.65042575879754
>>> print erfi(inf)
+inf
>>> print erfi(-inf)
-inf
Note the symmetry between erf and erfi::
>>> print erfi(3j)
(0.0 + 0.999977909503001j)
>>> print erf(3)
0.999977909503001
>>> print erf(1+2j)
(-0.536643565778565 - 5.04914370344703j)
>>> print erfi(2+1j)
(-5.04914370344703 - 0.536643565778565j)
**Possible issues**
The current implementation of :func:`erfi` is much less efficient
and accurate than the one for erf.
"""
return (2/sqrt(pi)*z) * sum_hyp1f1_rat((1,2),(3,2), z**2)
@funcwrapper
def erfinv(x):
r"""
Computes the inverse error function, satisfying
.. math ::
\mathrm{erf}(\mathrm{erfinv}(x)) =
\mathrm{erfinv}(\mathrm{erf}(x)) = x.
This function is defined only for `-1 \le x \le 1`.
**Examples**
Special values include::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print erfinv(0)
0.0
>>> print erfinv(1)
+inf
>>> print erfinv(-1)
-inf
The domain is limited to the standard interval::
>>> erfinv(2)
Traceback (most recent call last):
...
ValueError: erfinv(x) is defined only for -1 <= x <= 1
It is simple to check that :func:`erfinv` computes inverse values of
:func:`erf` as promised::
>>> print erf(erfinv(0.75))
0.75
>>> print erf(erfinv(-0.995))
-0.995
:func:`erfinv` supports arbitrary-precision evaluation::
>>> mp.dps = 50
>>> erf(3)
mpf('0.99997790950300141455862722387041767962015229291260075')
>>> erfinv(_)
mpf('3.0')
A definite integral involving the inverse error function::
>>> mp.dps = 15
>>> print quad(erfinv, [0, 1])
0.564189583547756
>>> print 1/sqrt(pi)
0.564189583547756
The inverse error function can be used to generate random numbers
with a Gaussian distribution (although this is a relatively
inefficient algorithm)::
>>> nprint([erfinv(2*rand()-1) for n in range(6)]) # doctest: +SKIP
[-0.586747, 1.10233, -0.376796, 0.926037, -0.708142, -0.732012]
"""
if x.imag or (x < -1) or (x > 1):
raise ValueError("erfinv(x) is defined only for -1 <= x <= 1")
if isnan(x): return x
if not x: return x
if x == 1: return inf
if x == -1: return -inf
if abs(x) < 0.9:
a = 0.53728*x**3 + 0.813198*x
else:
# An asymptotic formula
u = log(2/pi/(abs(x)-1)**2)
a = sign(x) * sqrt(u - log(u))/sqrt(2)
from optimization import findroot
return findroot(lambda t: erf(t)-x, a)
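# Illustrative plain-float version of the same inversion idea (not part of
# the mpmath API; assumes math.erf, available in Python >= 2.7): start from
# a crude guess and polish with Newton's method, using
# erf'(t) = 2/sqrt(pi)*exp(-t**2).
def _erfinv_float_sketch(x, steps=50):
    import math
    t = x  # crude starting guess, adequate for moderate |x|
    for _ in range(steps):
        err = math.erf(t) - x
        t -= err / (2.0/math.sqrt(math.pi) * math.exp(-t*t))
    return t
# math.erf(_erfinv_float_sketch(0.75)) ~= 0.75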
@funcwrapper
def npdf(x, mu=0, sigma=1):
r"""
``npdf(x, mu=0, sigma=1)`` evaluates the probability density
function of a normal distribution with mean value `\mu`
and variance `\sigma^2`.
Elementary properties of the probability distribution can
be verified using numerical integration::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print quad(npdf, [-inf, inf])
1.0
>>> print quad(lambda x: npdf(x, 3), [3, inf])
0.5
>>> print quad(lambda x: npdf(x, 3, 2), [3, inf])
0.5
See also :func:`ncdf`, which gives the cumulative
distribution.
"""
sigma = mpmathify(sigma)
return exp(-(x-mu)**2/(2*sigma**2)) / (sigma*sqrt(2*pi))
@funcwrapper
def ncdf(x, mu=0, sigma=1):
r"""
``ncdf(x, mu=0, sigma=1)`` evaluates the cumulative distribution
function of a normal distribution with mean value `\mu`
and variance `\sigma^2`.
See also :func:`npdf`, which gives the probability density.
Elementary properties include::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print ncdf(pi, mu=pi)
0.5
>>> print ncdf(-inf)
0.0
>>> print ncdf(+inf)
1.0
The cumulative distribution is the integral of the density
function having identical mu and sigma::
>>> mp.dps = 15
>>> print diff(ncdf, 2)
0.053990966513188
>>> print npdf(2)
0.053990966513188
>>> print diff(lambda x: ncdf(x, 1, 0.5), 0)
0.107981933026376
>>> print npdf(0, 1, 0.5)
0.107981933026376
"""
a = (x-mu)/(sigma*sqrt(2))
if a < 0:
return erfc(-a)/2
else:
return (1+erf(a))/2
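# Illustrative plain-float check of the erf/erfc reduction used above (not
# part of the mpmath API; assumes math.erfc, available in Python >= 2.7):
# the normal CDF is erfc(-a)/2 with a = (x-mu)/(sigma*sqrt(2)).
def _ncdf_float_sketch(x, mu=0.0, sigma=1.0):
    import math
    a = (x - mu) / (sigma * math.sqrt(2.0))
    return 0.5 * math.erfc(-a)   # same as (1+erf(a))/2, but stable for a << 0
# _ncdf_float_sketch(0.0) == 0.5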
def ei_as(a):
extra = 10
mp.dps += extra
s = k = p = 1
while abs(p) > eps:
p = (p*k)/a
s += p
k += 1
s = (s * exp(a))/a
mp.dps -= extra
return s
@funcwrapper
def ei(z):
r"""
Computes the exponential integral or Ei-function, `\mathrm{Ei}(x)`.
The exponential integral is defined as
.. math ::
\mathrm{Ei}(x) = \int_{-\infty\,}^x \frac{e^t}{t} \, dt.
When the integration range includes `t = 0`, the exponential
integral is interpreted as providing the Cauchy principal value.
For real `x`, the Ei-function behaves roughly like
`\mathrm{Ei}(x) \approx \exp(x) + \log(|x|)`.
This function should not be confused with the family of related
functions denoted by `E_n` which are also called "exponential
integrals".
**Basic examples**
Some basic values and limits are::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print ei(0)
-inf
>>> print ei(1)
1.89511781635594
>>> print ei(inf)
+inf
>>> print ei(-inf)
0.0
For `x < 0`, the defining integral can be evaluated
numerically as a reference::
>>> print ei(-4)
-0.00377935240984891
>>> print quad(lambda t: exp(t)/t, [-inf, -4])
-0.00377935240984891
:func:`ei` supports complex arguments and arbitrary
precision evaluation::
>>> mp.dps = 50
>>> print ei(pi)
10.928374389331410348638445906907535171566338835056
>>> mp.dps = 25
>>> print ei(3+4j)
(-4.154091651642689822535359 + 4.294418620024357476985535j)
**Related functions**
The exponential integral is closely related to the logarithmic
integral. See :func:`li` for additional information.
The exponential integral is related to the hyperbolic
and trigonometric integrals (see :func:`chi`, :func:`shi`,
:func:`ci`, :func:`si`) similarly to how the ordinary
exponential function is related to the hyperbolic and
trigonometric functions::
>>> mp.dps = 15
>>> print ei(3)
9.93383257062542
>>> print chi(3) + shi(3)
9.93383257062542
>>> print ci(3j) - j*si(3j) - pi*j/2
(9.93383257062542 + 0.0j)
Beware that logarithmic corrections, as in the last example
above, are required to obtain the correct branch in general.
For details, see [1].
The exponential integral is also a special case of the
hypergeometric function `\,_2F_2`::
>>> z = 0.6
>>> print z*hyper([1,1],[2,2],z) + (ln(z)-ln(1/z))/2 + euler
0.769881289937359
>>> print ei(z)
0.769881289937359
**Algorithm**
For sufficiently large `x`, the asymptotic expansion
`\mathrm{Ei}(x) \approx \frac{e^x}{x} \sum_{k=0}^{\infty} \frac{k!}{x^k}`
is used (see ``ei_as`` above). The term `k!/x^k` behaves like `e^{f(k)}`
with `f(k) = k\log(k/(xe)) + \log(k)/2`, whose extremum satisfies
`\log(k/x) + 1/(2k) = 0`; the smallest term of the asymptotic series is
therefore `k!/x^k \approx e^{-k-1/2}`. Requiring this to be comparable
to the target accuracy `2^{-\mathrm{prec}}` gives
`x \approx k \approx \mathrm{prec} \cdot \log 2`, so the asymptotic
expansion is used for `x > \mathrm{prec} \cdot \log 2`.
**References**
1. Relations between Ei and other functions:
http://functions.wolfram.com/GammaBetaErf/ExpIntegralEi/27/01/
2. Abramowitz & Stegun, section 5:
http://www.math.sfu.ca/~cbm/aands/page_228.htm
3. Asymptotic expansion for Ei:
http://mathworld.wolfram.com/En-Function.html
"""
if z == inf:
return z
if z == -inf:
return -mpf(0)
if not z:
return -inf
if abs(z) > mp.prec * 0.7 + 50:
r = ei_as(z)
if z.imag > 0:
r += j*pi
elif z.imag < 0:
r -= j*pi
return r
v = z*hypsum([[1,1],[1,1]],[],[],[[2,1],[2,1]],[],[],z) + \
(log(z)-log(1/z))/2 + euler
if isinstance(z, mpf) and z < 0:
return v.real
return v
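# Illustrative plain-float version of the truncated asymptotic series used
# in ei_as above (not part of the mpmath API; only meaningful for large
# positive x, where the series is truncated near its smallest term).
def _ei_asymptotic_float_sketch(x, max_terms=50):
    import math
    s = p = 1.0
    for k in range(1, max_terms):
        p *= k / float(x)          # p = k!/x**k
        if abs(p) < 1e-17:
            break                  # stop near the smallest term
        s += p
    return s * math.exp(x) / x
# For x around 60 or larger this matches ei(x) to roughly double precision.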
@funcwrapper
def li(z):
r"""
Computes the logarithmic integral or li-function
`\mathrm{li}(x)`, defined by
.. math ::
\mathrm{li}(x) = \int_0^x \frac{1}{\log t} \, dt
The logarithmic integral has a singularity at `x = 1`.
Note that there is a second logarithmic integral, the Li
function, defined by
.. math ::
\mathrm{Li}(x) = \int_2^x \frac{1}{\log t} \, dt
This "offset logarithmic integral" can be computed via
:func:`li` using the simple identity
`\mathrm{Li}(x) = \mathrm{li}(x) - \mathrm{li}(2)`.
The logarithmic integral should also not be confused with
the polylogarithm (also denoted by Li), which is implemented
as :func:`polylog`.
**Examples**
Some basic values and limits::
>>> from sympy.mpmath import *
>>> mp.dps = 30
>>> print li(0)
0.0
>>> print li(1)
-inf
>>> print li(2)
1.04516378011749278484458888919
>>> print findroot(li, 2)
1.45136923488338105028396848589
>>> print li(inf)
+inf
The logarithmic integral can be evaluated for arbitrary
complex arguments::
>>> mp.dps = 20
>>> print li(3+4j)
(3.1343755504645775265 + 2.6769247817778742392j)
The logarithmic integral is related to the exponential integral::
>>> print ei(log(3))
2.1635885946671919729
>>> print li(3)
2.1635885946671919729
The logarithmic integral grows like `O(x/\log(x))`::
>>> mp.dps = 15
>>> x = 10**100
>>> print x/log(x)
4.34294481903252e+97
>>> print li(x)
4.3619719871407e+97
The prime number theorem states that the number of primes less
than `x` is asymptotic to `\mathrm{li}(x)`. For example,
it is known that there are exactly 1,925,320,391,606,803,968,923
prime numbers less than `10^{23}` [1]. The logarithmic integral
provides a very accurate estimate::
>>> print li(2) + li(10**23)
1.92532039161405e+21
A definite integral is::
>>> print quad(li, [0, 1])
-0.693147180559945
>>> print -ln(2)
-0.693147180559945
**References**
1. http://mathworld.wolfram.com/PrimeCountingFunction.html
2. http://mathworld.wolfram.com/LogarithmicIntegral.html
"""
if not z:
return z
if z == 1:
return -inf
return ei(log(z))
ci = mpfunc('ci', libhyper.mpf_ci, libhyper.mpc_ci, '')
si = mpfunc('si', libhyper.mpf_si, libhyper.mpc_si, '')
ci.__doc__ = r"""
Computes the cosine integral,
.. math ::
\mathrm{Ci}(x) = -\int_x^{\infty} \frac{\cos t}{t}\,dt
= \gamma + \log x + \int_0^x \frac{\cos t - 1}{t}\,dt
**Examples**
Some values and limits::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print ci(0)
-inf
>>> print ci(1)
0.3374039229009681346626462
>>> print ci(pi)
0.07366791204642548599010096
>>> print ci(inf)
0.0
>>> print ci(-inf)
(0.0 + 3.141592653589793238462643j)
>>> print ci(2+3j)
(1.408292501520849518759125 - 2.983617742029605093121118j)
The cosine integral behaves roughly like the sinc function
(see :func:`sinc`) for large real `x`::
>>> print ci(10**10)
-4.875060251748226537857298e-11
>>> print sinc(10**10)
-4.875060250875106915277943e-11
>>> print chop(limit(ci, inf))
0.0
It has infinitely many roots on the positive real axis::
>>> print findroot(ci, 1)
0.6165054856207162337971104
>>> print findroot(ci, 2)
3.384180422551186426397851
We can evaluate the defining integral as a reference::
>>> mp.dps = 15
>>> print -quadosc(lambda t: cos(t)/t, [5, inf], omega=1)
-0.190029749656644
>>> print ci(5)
-0.190029749656644
Some infinite series can be evaluated using the
cosine integral::
>>> print nsum(lambda k: (-1)**k/(fac(2*k)*(2*k)), [1,inf])
-0.239811742000565
>>> print ci(1) - euler
-0.239811742000565
"""
si.__doc__ = r"""
Computes the sine integral,
.. math ::
\mathrm{Si}(x) = \int_0^x \frac{\sin t}{t}\,dt.
The sine integral is thus the antiderivative of the sinc
function (see :func:`sinc`).
**Examples**
Some values and limits::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print si(0)
0.0
>>> print si(1)
0.9460830703671830149413533
>>> print si(-1)
-0.9460830703671830149413533
>>> print si(pi)
1.851937051982466170361053
>>> print si(inf)
1.570796326794896619231322
>>> print si(-inf)
-1.570796326794896619231322
>>> print si(2+3j)
(4.547513889562289219853204 + 1.399196580646054789459839j)
The sine integral approaches `\pi/2` for large real `x`::
>>> print si(10**10)
1.570796326707584656968511
>>> print pi/2
1.570796326794896619231322
We can evaluate the defining integral as a reference::
>>> mp.dps = 15
>>> print quad(sinc, [0, 5])
1.54993124494467
>>> print si(5)
1.54993124494467
Some infinite series can be evaluated using the
sine integral::
>>> print nsum(lambda k: (-1)**k/(fac(2*k+1)*(2*k+1)), [0,inf])
0.946083070367183
>>> print si(1)
0.946083070367183
"""
@funcwrapper
def chi(z):
r"""
Computes the hyperbolic cosine integral, defined
in analogy with the cosine integral (see :func:`ci`) as
.. math ::
\mathrm{Chi}(x) = \gamma + \log x + \int_0^x \frac{\cosh t - 1}{t}\,dt
Some values and limits::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print chi(0)
-inf
>>> print chi(1)
0.8378669409802082408946786
>>> print chi(inf)
+inf
>>> print findroot(chi, 0.5)
0.5238225713898644064509583
>>> print chi(2+3j)
(-0.1683628683277204662429321 + 2.625115880451325002151688j)
"""
if not z:
return -inf
z2 = (z/2)**2
return euler + log(z) + \
z2*hypsum([[1,1],[1,1]],[],[],[[2,1],[2,1],[3,2]],[],[],z2)
@funcwrapper
def shi(z):
r"""
Computes the hyperbolic sine integral, defined
in analogy with the sine integral (see :func:`si`) as
.. math ::
\mathrm{Shi}(x) = \int_0^x \frac{\sinh t}{t}\,dt.
Some values and limits::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print shi(0)
0.0
>>> print shi(1)
1.057250875375728514571842
>>> print shi(-1)
-1.057250875375728514571842
>>> print shi(inf)
+inf
>>> print shi(2+3j)
(-0.1931890762719198291678095 + 2.645432555362369624818525j)
"""
z2 = (z/2)**2
return z*hypsum([[1,2]],[],[],[[3,2],[3,2]],[],[],z2)
@funcwrapper
def fresnels(z):
r"""
Computes the Fresnel sine integral
.. math ::
S(x) = \int_0^x \sin\left(\frac{\pi t^2}{2}\right) \,dt
Note that some sources define this function
without the normalization factor `\pi/2`.
**Examples**
Some basic values and limits::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print fresnels(0)
0.0
>>> print fresnels(inf)
0.5
>>> print fresnels(-inf)
-0.5
>>> print fresnels(1)
0.4382591473903547660767567
>>> print fresnels(1+2j)
(36.72546488399143842838788 + 15.58775110440458732748279j)
Comparing with the definition::
>>> print fresnels(3)
0.4963129989673750360976123
>>> print quad(lambda t: sin(pi*t**2/2), [0,3])
0.4963129989673750360976123
"""
if z == inf:
return mpf(0.5)
if z == -inf:
return mpf(-0.5)
return pi*z**3/6*hypsum([[3,4]],[],[],[[3,2],[7,4]],[],[],-pi**2*z**4/16)
@funcwrapper
def fresnelc(z):
r"""
Computes the Fresnel cosine integral
.. math ::
C(x) = \int_0^x \cos\left(\frac{\pi t^2}{2}\right) \,dt
Note that some sources define this function
without the normalization factor `\pi/2`.
**Examples**
Some basic values and limits::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print fresnelc(0)
0.0
>>> print fresnelc(inf)
0.5
>>> print fresnelc(-inf)
-0.5
>>> print fresnelc(1)
0.7798934003768228294742064
>>> print fresnelc(1+2j)
(16.08787137412548041729489 - 36.22568799288165021578758j)
Comparing with the definition::
>>> print fresnelc(3)
0.6057207892976856295561611
>>> print quad(lambda t: cos(pi*t**2/2), [0,3])
0.6057207892976856295561611
"""
if z == inf:
return mpf(0.5)
if z == -inf:
return mpf(-0.5)
return z*hypsum([[1,4]],[],[],[[1,2],[5,4]],[],[],-pi**2*z**4/16)
@funcwrapper
def airyai(z):
r"""
Computes the Airy function `\mathrm{Ai}(x)`, which is
a solution of the Airy differential equation `y''-xy=0`.
The Ai-function behaves roughly like a slowly decaying
sine wave for `x < 0` and like a decreasing exponential for
`x > 0`.
Limits and values include::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print airyai(0), 1/(3**(2/3.)*gamma(2/3.))
0.355028053887817 0.355028053887817
>>> print airyai(1)
0.135292416312881
>>> print airyai(-1)
0.535560883292352
>>> print airyai(inf)
0.0
>>> print airyai(-inf)
0.0
:func:`airyai` uses a series expansion around `x = 0`,
so it is slow for extremely large arguments. Here are
some evaluations for moderately large arguments::
>>> print airyai(-100)
0.176753393239553
>>> print airyai(100)
2.63448215208818e-291
>>> print airyai(50+50j)
(-5.31790195707456e-68 - 1.16358800377071e-67j)
>>> print airyai(-50+50j)
(1.04124253736317e+158 + 3.3475255449236e+157j)
The first negative root is::
>>> print findroot(airyai, -2)
-2.33810741045977
We can verify the differential equation::
>>> for x in [-3.4, 0, 2.5, 1+2j]:
... print abs(diff(airyai, x, 2) - x*airyai(x)) < eps
...
True
True
True
True
The Taylor series expansion around `x = 0` starts with
the following coefficients (note that every third term
is zero)::
>>> nprint(chop(taylor(airyai, 0, 5)))
[0.355028, -0.258819, 0.0, 5.91713e-2, -2.15683e-2, 0.0]
The Airy functions are a special case of Bessel functions.
For `x < 0`, we have::
>>> x = 3
>>> print airyai(-x)
-0.378814293677658
>>> p = 2*(x**1.5)/3
>>> print sqrt(x)*(besselj(1/3.,p) + besselj(-1/3.,p))/3
-0.378814293677658
"""
if z == inf or z == -inf:
return 1/z
if z.real > 2:
# cancellation: both terms are ~ 2^(z^1.5),
# result is ~ 2^(-z^1.5), so need ~2*z^1.5 extra bits
mp.prec += 2*int(z.real**1.5)
z3 = z**3 / 9
a = sum_hyp0f1_rat((2,3), z3) / (cbrt(9) * gamma(mpf(2)/3))
b = z * sum_hyp0f1_rat((4,3), z3) / (cbrt(3) * gamma(mpf(1)/3))
return a - b
@funcwrapper
def airybi(z):
r"""
Computes the Airy function `\mathrm{Bi}(x)`, which is
a solution of the Airy differential equation `y''-xy=0`.
The Bi-function behaves roughly like a slowly decaying
sine wave for `x < 0` and like an increasing exponential
for `x > 0`.
Limits and values include::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print airybi(0), 1/(3**(1/6.)*gamma(2/3.))
0.614926627446001 0.614926627446001
>>> print airybi(1)
1.20742359495287
>>> print airybi(-1)
0.103997389496945
>>> print airybi(inf)
+inf
>>> print airybi(-inf)
0.0
:func:`airybi` uses a series expansion around `x = 0`,
so it is slow for extremely large arguments. Here are
some evaluations for moderately large arguments::
>>> print airybi(-100)
0.0242738876801601
>>> print airybi(100)
6.0412239966702e+288
>>> print airybi(50+50j)
(-5.32207626732144e+63 + 1.47845029116524e+65j)
>>> print airybi(-50+50j)
(-3.3475255449236e+157 + 1.04124253736317e+158j)
The first negative root is::
>>> print findroot(airybi, -1)
-1.17371322270913
We can verify the differential equation::
>>> for x in [-3.4, 0, 2.5, 1+2j]:
... print abs(diff(airybi, x, 2) - x*airybi(x)) < eps
...
True
True
True
True
The Taylor series expansion around `x = 0` starts with
the following coefficients (note that every third term
is zero)::
>>> nprint(chop(taylor(airybi, 0, 5)))
[0.614927, 0.448288, 0.0, 0.102488, 3.73574e-2, 0.0]
The Airy functions are a special case of Bessel functions.
For `x < 0`, we have::
>>> x = 3
>>> print airybi(-x)
-0.198289626374927
>>> p = 2*(x**1.5)/3
>>> print sqrt(x/3)*(besselj(-1/3.,p) - besselj(1/3.,p))
-0.198289626374926
"""
if z == inf:
return z
if z == -inf:
return 1/z
z3 = z**3 / 9
rt = nthroot(3, 6)
a = sum_hyp0f1_rat((2,3), z3) / (rt * gamma(mpf(2)/3))
b = z * rt * sum_hyp0f1_rat((4,3), z3) / gamma(mpf(1)/3)
return a + b
ellipk = mpfunc('ellipk', libhyper.mpf_ellipk, libhyper.mpc_ellipk, '')
ellipe = mpfunc('ellipe', libhyper.mpf_ellipe, libhyper.mpc_ellipe, '')
ellipk.__doc__ = \
r"""
Evaluates the complete elliptic integral of the first kind,
`K(m)`, defined by
.. math ::
K(m) = \int_0^{\pi/2} \frac{1}{\sqrt{1-m \sin^2 t}} dt.
Note that the argument is the parameter `m = k^2`,
not the modulus `k` which is sometimes used.
Alternatively, in terms of a hypergeometric function,
we have:
.. math ::
K(m) = \frac{\pi}{2} \,_2F_1(1/2, 1/2, 1, m)
**Examples**
Values and limits include::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print ellipk(0)
1.570796326794896619231322
>>> print ellipk(inf)
(0.0 + 0.0j)
>>> print ellipk(-inf)
0.0
>>> print ellipk(1)
+inf
>>> print ellipk(-1)
1.31102877714605990523242
>>> print ellipk(2)
(1.31102877714605990523242 - 1.31102877714605990523242j)
Verifying the defining integral and hypergeometric
representation::
>>> print ellipk(0.5)
1.85407467730137191843385
>>> print quad(lambda t: (1-0.5*sin(t)**2)**-0.5, [0, pi/2])
1.85407467730137191843385
>>> print pi/2*hyp2f1(0.5,0.5,1,0.5)
1.85407467730137191843385
Evaluation is supported for arbitrary complex `m`::
>>> print ellipk(3+4j)
(0.9111955638049650086562171 + 0.6313342832413452438845091j)
A definite integral::
>>> print quad(ellipk, [0, 1])
2.0
"""
ellipe.__doc__ = \
r"""
Evaluates the complete elliptic integral of the second kind,
`E(m)`, defined by
.. math ::
E(m) = \int_0^{\pi/2} \sqrt{1-m \sin^2 t} dt.
Note that the argument is the parameter `m = k^2`,
not the modulus `k` which is sometimes used.
Alternatively, in terms of a hypergeometric function,
we have:
.. math ::
E(m) = \frac{\pi}{2} \,_2F_1(1/2, -1/2, 1, m)
**Examples**
Basic values and limits::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print ellipe(0)
1.570796326794896619231322
>>> print ellipe(1)
1.0
>>> print ellipe(-1)
1.910098894513856008952381
>>> print ellipe(2)
(0.5990701173677961037199612 + 0.5990701173677961037199612j)
>>> print ellipe(inf)
(0.0 + +infj)
>>> print ellipe(-inf)
+inf
Verifying the defining integral and hypergeometric
representation::
>>> print ellipe(0.5)
1.350643881047675502520175
>>> print quad(lambda t: sqrt(1-0.5*sin(t)**2), [0, pi/2])
1.350643881047675502520175
>>> print pi/2*hyp2f1(0.5,-0.5,1,0.5)
1.350643881047675502520175
Evaluation is supported for arbitrary complex `m`::
>>> print ellipe(0.5+0.25j)
(1.360868682163129682716687 - 0.1238733442561786843557315j)
>>> print ellipe(3+4j)
(1.499553520933346954333612 - 1.577879007912758274533309j)
A definite integral::
>>> print quad(ellipe, [0,1])
1.333333333333333333333333
"""
def agm(a, b=1):
r"""
``agm(a, b)`` computes the arithmetic-geometric mean of `a` and
`b`, defined as the limit of the following iteration:
.. math ::
a_0 = a
b_0 = b
a_{n+1} = \frac{a_n+b_n}{2}
b_{n+1} = \sqrt{a_n b_n}
This function can be called with a single argument, computing
`\mathrm{agm}(a,1) = \mathrm{agm}(1,a)`.
**Examples**
It is a well-known theorem that the geometric mean of
two distinct positive numbers is less than the arithmetic
mean. It follows that the arithmetic-geometric mean lies
between the two means::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> a = mpf(3)
>>> b = mpf(4)
>>> print sqrt(a*b)
3.46410161513775
>>> print agm(a,b)
3.48202767635957
>>> print (a+b)/2
3.5
The arithmetic-geometric mean is scale-invariant::
>>> print agm(10*e, 10*pi)
29.261085515723
>>> print 10*agm(e, pi)
29.261085515723
As an order-of-magnitude estimate, `\mathrm{agm}(1,x) \approx x`
for large `x`::
>>> print agm(10**10)
643448704.760133
>>> print agm(10**50)
1.34814309345871e+48
For tiny `x`, `\mathrm{agm}(1,x) \approx -\pi/(2 \log(x/4))`::
>>> print agm('0.01')
0.262166887202249
>>> print -pi/2/log('0.0025')
0.262172347753122
The arithmetic-geometric mean can also be computed for complex
numbers::
>>> print agm(3, 2+j)
(2.51055133276184 + 0.547394054060638j)
The AGM iteration converges very quickly (each step doubles
the number of correct digits), so :func:`agm` supports efficient
high-precision evaluation::
>>> mp.dps = 10000
>>> a = agm(1,2)
>>> str(a)[-10:]
'1679581912'
**Mathematical relations**
The arithmetic-geometric mean may be used to evaluate the
following two parametric definite integrals:
.. math ::
I_1 = \int_0^{\infty}
\frac{1}{\sqrt{(x^2+a^2)(x^2+b^2)}} \,dx
I_2 = \int_0^{\pi/2}
\frac{1}{\sqrt{a^2 \cos^2(x) + b^2 \sin^2(x)}} \,dx
We have::
>>> mp.dps = 15
>>> a = 3
>>> b = 4
>>> f1 = lambda x: ((x**2+a**2)*(x**2+b**2))**-0.5
>>> f2 = lambda x: ((a*cos(x))**2 + (b*sin(x))**2)**-0.5
>>> print quad(f1, [0, inf])
0.451115405388492
>>> print quad(f2, [0, pi/2])
0.451115405388492
>>> print pi/(2*agm(a,b))
0.451115405388492
A formula for `\Gamma(1/4)`::
>>> print gamma(0.25)
3.62560990822191
>>> print sqrt(2*sqrt(2*pi**3)/agm(1,sqrt(2)))
3.62560990822191
**Possible issues**
The branch cut chosen for complex `a` and `b` is somewhat
arbitrary.
"""
if b == 1:
return agm1(a)
a = mpmathify(a)
b = mpmathify(b)
prec, rounding = prec_rounding
if isinstance(a, mpf) and isinstance(b, mpf):
try:
v = libhyper.mpf_agm(a._mpf_, b._mpf_, prec, rounding)
return make_mpf(v)
except ComplexResult:
pass
if isinstance(a, mpf): a = (a._mpf_, libmpf.fzero)
else: a = a._mpc_
if isinstance(b, mpf): b = (b._mpf_, libmpf.fzero)
else: b = b._mpc_
return make_mpc(libhyper.mpc_agm(a, b, prec, rounding))
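# Illustrative plain-float version of the defining iteration (not part of
# the mpmath API; positive real a, b only -- the routine above uses the
# correctly-rounded mpf/mpc code in libhyper instead).
def _agm_float_sketch(a, b, tol=1e-15):
    import math
    while abs(a - b) > tol * abs(a):
        a, b = (a + b) / 2.0, math.sqrt(a * b)
    return a
# _agm_float_sketch(1.0, 2.0) ~= 1.456791031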
agm1 = mpfunc('agm1', libhyper.mpf_agm1, libhyper.mpc_agm1,
'Fast alias for agm(1,a) = agm(a,1)')
@funcwrapper
def jacobi(n, a, b, x):
r"""
``jacobi(n, a, b, x)`` evaluates the Jacobi polynomial
`P_n^{(a,b)}(x)`. The Jacobi polynomials are a special
case of the hypergeometric function `\,_2F_1` given by:
.. math ::
P_n^{(a,b)}(x) = {n+a \choose n}
\,_2F_1\left(-n,1+a+b+n,a+1,\frac{1-x}{2}\right).
Note that this definition generalizes to nonintegral values
of `n`. When `n` is an integer, the hypergeometric series
terminates after a finite number of terms, giving
a polynomial in `x`.
**Evaluation of Jacobi polynomials**
A special evaluation is `P_n^{(a,b)}(1) = {n+a \choose n}`::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print jacobi(4, 0.5, 0.25, 1)
2.4609375
>>> print binomial(4+0.5, 4)
2.4609375
A Jacobi polynomial of degree `n` is equal to its
Taylor polynomial of degree `n`. The explicit
coefficients of Jacobi polynomials can therefore
be recovered easily using :func:`taylor`::
>>> for n in range(5):
... nprint(taylor(lambda x: jacobi(n,1,2,x), 0, n))
...
[1.0]
[-0.5, 2.5]
[-0.75, -1.5, 5.25]
[0.5, -3.5, -3.5, 10.5]
[0.625, 2.5, -11.25, -7.5, 20.625]
For nonintegral `n`, the Jacobi "polynomial" is no longer
a polynomial::
>>> nprint(taylor(lambda x: jacobi(0.5,1,2,x), 0, 4))
[0.309983, 1.84119, -1.26933, 1.26699, -1.34808]
**Orthogonality**
The Jacobi polynomials are orthogonal on the interval
`[-1, 1]` with respect to the weight function
`w(x) = (1-x)^a (1+x)^b`. That is,
`w(x) P_n^{(a,b)}(x) P_m^{(a,b)}(x)` integrates to
zero if `m \ne n` and to a nonzero number if `m = n`.
The orthogonality is easy to verify using numerical
quadrature::
>>> P = jacobi
>>> f = lambda x: (1-x)**a * (1+x)**b * P(m,a,b,x) * P(n,a,b,x)
>>> a = 2
>>> b = 3
>>> m, n = 3, 4
>>> print chop(quad(f, [-1, 1]), 1)
0.0
>>> m, n = 4, 4
>>> print quad(f, [-1, 1])
1.9047619047619
**Differential equation**
The Jacobi polynomials are solutions of the differential
equation
.. math ::
(1-x^2) y'' + (b-a-(a+b+2)x) y' + n (n+a+b+1) y = 0.
We can verify that :func:`jacobi` approximately satisfies
this equation::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> a = 2.5
>>> b = 4
>>> n = 3
>>> y = lambda x: jacobi(n,a,b,x)
>>> x = pi
>>> A0 = n*(n+a+b+1)*y(x)
>>> A1 = (b-a-(a+b+2)*x)*diff(y,x)
>>> A2 = (1-x**2)*diff(y,x,2)
>>> nprint(A2 + A1 + A0, 1)
4.0e-12
The difference of order `10^{-12}` is as close to zero as
it could be at 15-digit working precision, since the terms
are large::
>>> print A0, A1, A2
26560.2328981879 -21503.7641037294 -5056.46879445852
"""
return binomial(n+a,n) * hyp2f1(-n,1+n+a+b,a+1,(1-x)/2)
@funcwrapper
def legendre(n, x):
r"""
``legendre(n, x)`` evaluates the Legendre polynomial `P_n(x)`.
The Legendre polynomials are given by the formula
.. math ::
P_n(x) = \frac{1}{2^n n!} \frac{d^n}{dx^n} (x^2 -1)^n.
Alternatively, they can be computed recursively using
.. math ::
P_0(x) = 1
P_1(x) = x
(n+1) P_{n+1}(x) = (2n+1) x P_n(x) - n P_{n-1}(x).
A third definition is in terms of the hypergeometric function
`\,_2F_1`, whereby they can be generalized to arbitrary `n`:
.. math ::
P_n(x) = \,_2F_1\left(-n, n+1, 1, \frac{1-x}{2}\right)
**Basic evaluation**
The Legendre polynomials assume fixed values at the points
`x = -1` and `x = 1`::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> nprint([legendre(n, 1) for n in range(6)])
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
>>> nprint([legendre(n, -1) for n in range(6)])
[1.0, -1.0, 1.0, -1.0, 1.0, -1.0]
The coefficients of Legendre polynomials can be recovered
using degree-`n` Taylor expansion::
>>> for n in range(5):
... nprint(chop(taylor(lambda x: legendre(n, x), 0, n)))
...
[1.0]
[0.0, 1.0]
[-0.5, 0.0, 1.5]
[0.0, -1.5, 0.0, 2.5]
[0.375, 0.0, -3.75, 0.0, 4.375]
The roots of Legendre polynomials are located symmetrically
on the interval `[-1, 1]`::
>>> for n in range(5):
... nprint(polyroots(taylor(lambda x: legendre(n, x), 0, n)[::-1]))
...
[]
[0.0]
[-0.57735, 0.57735]
[-0.774597, 0.0, 0.774597]
[-0.861136, -0.339981, 0.339981, 0.861136]
An example of an evaluation for arbitrary `n`::
>>> print legendre(0.75, 2+4j)
(1.94952805264875 + 2.1071073099422j)
**Orthogonality**
The Legendre polynomials are orthogonal on `[-1, 1]` with respect
to the trivial weight `w(x) = 1`. That is, `P_m(x) P_n(x)`
integrates to zero if `m \ne n` and to `2/(2n+1)` if `m = n`::
>>> m, n = 3, 4
>>> print quad(lambda x: legendre(m,x)*legendre(n,x), [-1, 1])
0.0
>>> m, n = 4, 4
>>> print quad(lambda x: legendre(m,x)*legendre(n,x), [-1, 1])
0.222222222222222
**Differential equation**
The Legendre polynomials satisfy the differential equation
.. math ::
((1-x^2) y')' + n(n+1) y = 0.
We can verify this numerically::
>>> n = 3.6
>>> x = 0.73
>>> P = legendre
>>> A = diff(lambda t: (1-t**2)*diff(lambda u: P(n,u), t), x)
>>> B = n*(n+1)*P(n,x)
>>> nprint(A+B,1)
9.0e-16
"""
if isint(n):
n = int(n)
if x == -1:
# TODO: hyp2f1 should handle this
if isint(n):
return (-1)**(n + (n>=0)) * mpf(-1)
if not int(floor(re(n))) % 2:
return -inf
return inf
return hyp2f1(-n,n+1,1,(1-x)/2)
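# Illustrative plain-float version of the three-term recurrence quoted in
# the docstring above (not part of the mpmath API; integer n >= 0 only).
def _legendre_recurrence_float_sketch(n, x):
    p_prev, p = 1.0, x             # P_0 and P_1
    if n == 0:
        return p_prev
    for k in range(1, n):
        # (k+1) P_{k+1}(x) = (2k+1) x P_k(x) - k P_{k-1}(x)
        p_prev, p = p, ((2*k + 1) * x * p - k * p_prev) / (k + 1)
    return p
# _legendre_recurrence_float_sketch(4, 0.5) ~= -0.2890625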
@funcwrapper
def chebyt(n, x):
r"""
``chebyt(n, x)`` evaluates the Chebyshev polynomial of the first
kind `T_n(x)`, defined by the identity
.. math ::
T_n(\cos x) = \cos(n x).
The Chebyshev polynomials of the first kind are a special
case of the Jacobi polynomials, and by extension of the
hypergeometric function `\,_2F_1`. They can thus also be
evaluated for nonintegral `n`.
**Basic evaluation**
The coefficients of the `n`-th polynomial can be recovered
using degree-`n` Taylor expansion::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> for n in range(5):
... nprint(chop(taylor(lambda x: chebyt(n, x), 0, n)))
...
[1.0]
[0.0, 1.0]
[-1.0, 0.0, 2.0]
[0.0, -3.0, 0.0, 4.0]
[1.0, 0.0, -8.0, 0.0, 8.0]
**Orthogonality**
The Chebyshev polynomials of the first kind are orthogonal
on the interval `[-1, 1]` with respect to the weight
function `w(x) = 1/\sqrt{1-x^2}`::
>>> f = lambda x: chebyt(m,x)*chebyt(n,x)/sqrt(1-x**2)
>>> m, n = 3, 4
>>> nprint(quad(f, [-1, 1]),1)
0.0
>>> m, n = 4, 4
>>> print quad(f, [-1, 1])
1.57079632596448
"""
return hyp2f1(-n,n,0.5,(1-x)/2)
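# Illustrative plain-float version of the standard Chebyshev recurrence
# T_{k+1}(x) = 2 x T_k(x) - T_{k-1}(x) (not part of the mpmath API;
# integer n >= 0 only).
def _chebyt_recurrence_float_sketch(n, x):
    t_prev, t = 1.0, x             # T_0 and T_1
    if n == 0:
        return t_prev
    for _ in range(n - 1):
        t_prev, t = t, 2.0 * x * t - t_prev
    return t
# _chebyt_recurrence_float_sketch(4, 0.3) ~= 0.3448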
@funcwrapper
def chebyu(n, x):
r"""
``chebyu(n, x)`` evaluates the Chebyshev polynomial of the second
kind `U_n(x)`, defined by the identity
.. math ::
U_n(\cos x) = \frac{\sin((n+1)x)}{\sin(x)}.
The Chebyshev polynomials of the second kind are a special
case of the Jacobi polynomials, and by extension of the
hypergeometric function `\,_2F_1`. They can thus also be
evaluated for nonintegral `n`.
**Basic evaluation**
The coefficients of the `n`-th polynomial can be recovered
using degree-`n` Taylor expansion::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> for n in range(5):
... nprint(chop(taylor(lambda x: chebyu(n, x), 0, n)))
...
[1.0]
[0.0, 2.0]
[-1.0, 0.0, 4.0]
[0.0, -4.0, 0.0, 8.0]
[1.0, 0.0, -12.0, 0.0, 16.0]
**Orthogonality**
The Chebyshev polynomials of the second kind are orthogonal
on the interval `[-1, 1]` with respect to the weight
function `w(x) = \sqrt{1-x^2}`::
>>> f = lambda x: chebyu(m,x)*chebyu(n,x)*sqrt(1-x**2)
>>> m, n = 3, 4
>>> print quad(f, [-1, 1])
0.0
>>> m, n = 4, 4
>>> print quad(f, [-1, 1])
1.5707963267949
"""
return (n+1) * hyp2f1(-n, n+2, 1.5, (1-x)/2)
@funcwrapper
def besselj(v, x):
r"""
``besselj(n,x)`` computes the Bessel function of the first kind
`J_n(x)`. Bessel functions of the first kind are defined as
solutions of the differential equation
.. math ::
x^2 y'' + x y' + (x^2 - n^2) y = 0
which appears, among other things, when solving the radial
part of Laplace's equation in cylindrical coordinates. This
equation has two solutions for given `n`, where the
`J_n`-function is the solution that is nonsingular at `x = 0`.
For positive integer `n`, `J_n(x)` behaves roughly like a sine
(odd `n`) or cosine (even `n`) multiplied by a magnitude factor
that decays slowly as `x \to \pm\infty`.
Generally, `J_n` is a special case of the hypergeometric
function `\,_0F_1`:
.. math ::
J_n(x) = \frac{x^n}{2^n \Gamma(n+1)}
\,_0F_1\left(n+1,-\frac{x^2}{4}\right)
**Examples**
Evaluation is supported for arbitrary arguments, and at
arbitrary precision::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print besselj(2, 1000)
-0.024777229528606
>>> print besselj(4, 0.75)
0.000801070086542314
>>> print besselj(2, 1000j)
(-2.48071721019185e+432 + 0.0j)
>>> mp.dps = 25
>>> print besselj(0.75j, 3+4j)
(-2.778118364828153309919653 - 1.5863603889018621585533j)
>>> mp.dps = 50
>>> print besselj(1, pi)
0.28461534317975275734531059968613140570981118184947
The Bessel functions of the first kind satisfy simple
symmetries around `x = 0`::
>>> mp.dps = 15
>>> nprint([besselj(n,0) for n in range(5)])
[1.0, 0.0, 0.0, 0.0, 0.0]
>>> nprint([besselj(n,pi) for n in range(5)])
[-0.304242, 0.284615, 0.485434, 0.333458, 0.151425]
>>> nprint([besselj(n,-pi) for n in range(5)])
[-0.304242, -0.284615, 0.485434, -0.333458, 0.151425]
Roots of Bessel functions are often used::
>>> nprint([findroot(j0, k) for k in [2, 5, 8, 11, 14]])
[2.40483, 5.52008, 8.65373, 11.7915, 14.9309]
>>> nprint([findroot(j1, k) for k in [3, 7, 10, 13, 16]])
[3.83171, 7.01559, 10.1735, 13.3237, 16.4706]
The roots are not periodic, but the distance between successive
roots asymptotically approaches `2 \pi`. Bessel functions of
the first kind have the following normalization::
>>> print quadosc(j0, [0, inf], period=2*pi)
1.0
>>> print quadosc(j1, [0, inf], period=2*pi)
1.0
For `n = 1/2` or `n = -1/2`, the Bessel function reduces to a
trigonometric function::
>>> x = 10
>>> print besselj(0.5, x), sqrt(2/(pi*x))*sin(x)
-0.13726373575505 -0.13726373575505
>>> print besselj(-0.5, x), sqrt(2/(pi*x))*cos(x)
-0.211708866331398 -0.211708866331398
"""
if isint(v):
v = int(v)
if isinstance(x, mpf):
return make_mpf(libhyper.mpf_besseljn(v, x._mpf_, mp.prec))
if isinstance(x, mpc):
return make_mpc(libhyper.mpc_besseljn(v, x._mpc_, mp.prec))
hx = x/2
return hx**v * hyp0f1(v+1, -hx**2) / factorial(v)
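# Illustrative plain-float partial sum of the defining series
# J_n(x) = sum_k (-1)**k (x/2)**(n+2k) / (k! (n+k)!) (not part of the
# mpmath API; integer n >= 0 and moderate |x| only).
def _besselj_series_float_sketch(n, x, terms=40):
    import math
    hx = x / 2.0
    s = term = hx**n / math.factorial(n)
    for k in range(1, terms):
        term *= -hx * hx / (k * (k + n))
        s += term
    return s
# _besselj_series_float_sketch(0, 2.0) ~= 0.22389077914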
def j0(x):
"""Computes the Bessel function `J_0(x)`. See :func:`besselj`."""
return besselj(0, x)
def j1(x):
"""Computes the Bessel function `J_1(x)`. See :func:`besselj`."""
return besselj(1, x)
@funcwrapper
def bessely(n,x):
r"""
``bessely(n,x)`` computes the Bessel function of the second kind,
.. math ::
Y_n(x) = \frac{J_n(x) \cos(\pi n) - J_{-n}(x)}{\sin(\pi n)}.
For `n` an integer, this formula should be understood as a
limit.
**Examples**
Some values of `Y_n(x)`::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print bessely(0,0), bessely(1,0), bessely(2,0)
-inf -inf -inf
>>> print bessely(1, pi)
0.3588729167767189594679827
>>> print bessely(0.5, 3+4j)
(9.242861436961450520325216 - 3.085042824915332562522402j)
"""
intdist = abs(n.imag) + abs(n.real-floor(n.real+0.5))
if not intdist:
h = +eps
mp.prec *= 2
n += h
else:
mp.prec += -int(log(intdist, 2)+1)
return (besselj(n,x)*cospi(n) - besselj(-n,x))/sinpi(n)
@funcwrapper
def besseli(n,x):
r"""
``besseli(n,x)`` computes the modified Bessel function of the first
kind,
.. math ::
I_n(x) = i^{-n} J_n(ix)
**Examples**
Some values of `I_n(x)`::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print besseli(0,0)
1.0
>>> print besseli(1,0)
0.0
>>> print besseli(0,1)
1.266065877752008335598245
>>> print besseli(3.5, 2+3j)
(-0.2904369752642538144289025 - 0.4469098397654815837307006j)
For integers `n`, the following integral representation holds::
>>> mp.dps = 15
>>> n = 3
>>> x = 2.3
>>> print quad(lambda t: exp(x*cos(t))*cos(n*t), [0,pi])/pi
0.349223221159309
>>> print besseli(n,x)
0.349223221159309
"""
if isint(n):
n = abs(int(n))
hx = x/2
return hx**n * hyp0f1(n+1, hx**2) / factorial(n)
@funcwrapper
def besselk(n,x):
r"""
``besselk(n,x)`` computes the modified Bessel function of the
second kind,
.. math ::
K_n(x) = \frac{\pi}{2} \frac{I_{-n}(x)-I_{n}(x)}{\sin(\pi n)}
For `n` an integer, this formula should be understood as a
limit.
**Examples**
Some values and limits of `K_n(x)`::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print besselk(0,0)
+inf
>>> print besselk(1,0)
+inf
>>> print besselk(0,1)
0.4210244382407083333356274
>>> print besselk(3.5, 2+3j)
(-0.02090732889633760668464128 + 0.2464022641351420167819697j)
"""
intdist = abs(n.imag) + abs(n.real-floor(n.real+0.5))
if not intdist:
h = +eps
mp.prec *= 2
n += h
else:
mp.prec += -int(log(intdist, 2)+1)
return pi*(besseli(-n,x)-besseli(n,x))/(2*sinpi(n))
def hankel1(n,x):
r"""
``hankel1(n,x)`` computes the Hankel function of the first kind,
which is the complex combination of Bessel functions given by
.. math ::
H_n^{(1)}(x) = J_n(x) + i Y_n(x).
**Examples**
The Hankel function is generally complex-valued::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print hankel1(2, pi)
(0.4854339326315091097054957 - 0.0999007139290278787734903j)
>>> print hankel1(3.5, pi)
(0.2340002029630507922628888 - 0.6419643823412927142424049j)
"""
return besselj(n,x) + j*bessely(n,x)
def hankel2(n,x):
r"""
``hankel2(n,x)`` computes the Hankel function of the second kind,
which is the complex combination of Bessel functions given by
.. math ::
H_n^{(2)}(x) = J_n(x) - i Y_n(x).
**Examples**
The Hankel function is generally complex-valued::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print hankel2(2, pi)
(0.4854339326315091097054957 + 0.0999007139290278787734903j)
>>> print hankel2(3.5, pi)
(0.2340002029630507922628888 + 0.6419643823412927142424049j)
"""
return besselj(n,x) - j*bessely(n,x)
@funcwrapper
def lambertw(z, k=0, approx=None):
r"""
The Lambert W function `W(z)` is defined as the inverse function
of `w \exp(w)`. In other words, the value of `W(z)` is such that
`z = W(z) \exp(W(z))` for any complex number `z`.
The Lambert W function is a multivalued function with infinitely
many branches. Each branch gives a separate solution of the
equation `w \exp(w) = z`. All branches are supported by
:func:`lambertw`:
* ``lambertw(z)`` gives the principal solution (branch 0)
* ``lambertw(z, k)`` gives the solution on branch `k`
The Lambert W function has two partially real branches: the
principal branch (`k = 0`) is real for real `z > -1/e`, and the
`k = -1` branch is real for `-1/e < z < 0`. All branches except
`k = 0` have a logarithmic singularity at `z = 0`.
**Basic examples**
The Lambert W function is the inverse of `w \exp(w)`::
>>> from sympy.mpmath import *
>>> mp.dps = 35
>>> w = lambertw(1)
>>> print w
0.56714329040978387299996866221035555
>>> print w*exp(w)
1.0
Any branch gives a valid inverse::
>>> w = lambertw(1, k=3)
>>> print w # doctest: +NORMALIZE_WHITESPACE
(-2.8535817554090378072068187234910812 +
17.113535539412145912607826671159289j)
>>> print w*exp(w)
(1.0 + 3.5075477124212226194278700785075126e-36j)
**Applications to equation-solving**
The Lambert W function may be used to solve various kinds of
equations, such as finding the value of the infinite power
tower `z^{z^{z^{\ldots}}}`::
>>> def tower(z, n):
... if n == 0:
... return z
... return z ** tower(z, n-1)
...
>>> tower(0.5, 100)
0.641185744504986
>>> mp.dps = 50
>>> print -lambertw(-log(0.5))/log(0.5)
0.6411857445049859844862004821148236665628209571911
**Properties**
The Lambert W function grows roughly like the natural logarithm
for large arguments::
>>> mp.dps = 15
>>> print lambertw(1000)
5.2496028524016
>>> print log(1000)
6.90775527898214
>>> print lambertw(10**100)
224.843106445119
>>> print log(10**100)
230.258509299405
The principal branch of the Lambert W function has a rational
Taylor series expansion around `z = 0`::
>>> nprint(taylor(lambertw, 0, 6), 10)
[0.0, 1.0, -1.0, 1.5, -2.666666667, 5.208333333, -10.8]
Some special values and limits are::
>>> mp.dps = 15
>>> print lambertw(0)
0.0
>>> print lambertw(1)
0.567143290409784
>>> print lambertw(e)
1.0
>>> print lambertw(inf)
+inf
>>> print lambertw(0, k=-1)
-inf
>>> print lambertw(0, k=3)
-inf
>>> print lambertw(inf, k=3)
(+inf + 18.8495559215388j)
The `k = 0` and `k = -1` branches join at `z = -1/e` where
`W(z) = -1` for both branches. Since `-1/e` can only be represented
approximately with mpmath numbers, evaluating the Lambert W function
at this point only gives `-1` approximately::
>>> mp.dps = 25
>>> print lambertw(-1/e, 0)
-0.999999999999837133022867
>>> print lambertw(-1/e, -1)
-1.00000000000016286697718
If `-1/e` happens to round in the negative direction, there might be
a small imaginary part::
>>> mp.dps = 15
>>> print lambertw(-1/e)
(-1.0 + 8.22007971511612e-9j)
**Possible issues**
The evaluation can become inaccurate very close to the branch point
at `-1/e`. In some corner cases, :func:`lambertw` might currently
fail to converge, or can end up on the wrong branch.
**Algorithm**
Halley's iteration is used to invert `w \exp(w)`, using a first-order
asymptotic approximation (`O(\log(w))` or `O(w)`) as the initial
estimate.
The definition, implementation and choice of branches is based
on Corless et al, "On the Lambert W function", Adv. Comp. Math. 5
(1996) 329-359, available online here:
http://www.apmaths.uwo.ca/~djeffrey/Offprints/W-adv-cm.pdf
TODO: use a series expansion when extremely close to the branch point
at `-1/e` and make sure that the proper branch is chosen there
"""
if isnan(z):
return z
mp.prec += 20
# We must be extremely careful near the singularities at -1/e and 0
u = exp(-1)
if abs(z) <= u:
if not z:
# w(0,0) = 0; for all other branches we hit the pole
if not k:
return z
return -inf
if not k:
w = z
# For small real z < 0, the -1 branch behaves roughly like log(-z)
elif k == -1 and not z.imag and z.real < 0:
w = log(-z)
# Use a simple asymptotic approximation.
else:
w = log(z)
# The branches are roughly logarithmic. This approximation
# gets better for large |k|; need to check that this always
# works for k ~= -1, 0, 1.
if k: w += k * 2*pi*j
elif k == 0 and z.imag and abs(z) <= 0.6:
w = z
else:
if z == inf:
if k == 0:
return z
else:
return z + 2*k*pi*j
if z == -inf:
return (-z) + (2*k+1)*pi*j
# Simple asymptotic approximation as above
w = log(z)
if k: w += k * 2*pi*j
# Use Halley iteration to solve w*exp(w) = z
two = mpf(2)
weps = ldexp(eps, 15)
for i in xrange(100):
ew = exp(w)
wew = w*ew
wewz = wew-z
wn = w - wewz/(wew+ew-(w+two)*wewz/(two*w+two))
if abs(wn-w) < weps*abs(wn):
return wn
else:
w = wn
print "Warning: Lambert W iteration failed to converge:", z
return wn
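# Illustrative plain-float version of the same Halley iteration for the
# principal branch (not part of the mpmath API; real z >= 0 only).
def _lambertw_float_sketch(z, steps=50):
    import math
    w = math.log(1.0 + z)          # crude starting guess
    for _ in range(steps):
        ew = math.exp(w)
        f = w * ew - z
        w -= f / (ew * (w + 1.0) - (w + 2.0) * f / (2.0 * w + 2.0))
    return w
# _lambertw_float_sketch(1.0) ~= 0.567143290409784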
@funcwrapper
def barnesg(z):
r"""
Evaluates the Barnes G-function, which generalizes the
superfactorial (:func:`superfac`) and by extension also the
hyperfactorial (:func:`hyperfac`) to the complex numbers
in an analogous way to how the gamma function generalizes
the ordinary factorial.
The Barnes G-function may be defined in terms of a Weierstrass
product:
.. math ::
G(z+1) = (2\pi)^{z/2} e^{-[z(z+1)+\gamma z^2]/2}
\prod_{n=1}^\infty
\left[\left(1+\frac{z}{n}\right)^ne^{-z+z^2/(2n)}\right]
For positive integers `n`, we have the relation to superfactorials
`G(n) = \mathrm{sf}(n-2) = 0! \cdot 1! \cdots (n-2)!`.
**Examples**
Some elementary values and limits of the Barnes G-function::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print barnesg(1), barnesg(2), barnesg(3)
1.0 1.0 1.0
>>> print barnesg(4)
2.0
>>> print barnesg(5)
12.0
>>> print barnesg(6)
288.0
>>> print barnesg(7)
34560.0
>>> print barnesg(8)
24883200.0
>>> print barnesg(inf)
+inf
>>> print barnesg(0), barnesg(-1), barnesg(-2)
0.0 0.0 0.0
Closed-form values are known for some rational arguments::
>>> print barnesg('1/2')
0.603244281209446
>>> print sqrt(exp(0.25+log(2)/12)/sqrt(pi)/glaisher**3)
0.603244281209446
>>> print barnesg('1/4')
0.29375596533861
>>> print nthroot(exp('3/8')/exp(catalan/pi)/
... gamma(0.25)**3/sqrt(glaisher)**9, 4)
0.29375596533861
The Barnes G-function satisfies the functional equation
`G(z+1) = \Gamma(z) G(z)`::
>>> z = pi
>>> print barnesg(z+1)
2.39292119327948
>>> print gamma(z)*barnesg(z)
2.39292119327948
The asymptotic growth rate of the Barnes G-function is related to
the Glaisher-Kinkelin constant::
>>> print limit(lambda n: barnesg(n+1)/(n**(n**2/2-mpf(1)/12)*
... (2*pi)**(n/2)*exp(-3*n**2/4)), inf)
0.847536694177301
>>> print exp('1/12')/glaisher
0.847536694177301
The Barnes G-function can be differentiated in closed form::
>>> z = 3
>>> print diff(barnesg, z)
0.264507203401607
>>> print barnesg(z)*((z-1)*psi(0,z)-z+(log(2*pi)+1)/2)
0.264507203401607
Evaluation is supported for arbitrary arguments and at arbitrary
precision::
>>> print barnesg(6.5)
2548.7457695685
>>> print barnesg(-pi)
0.00535976768353037
>>> print barnesg(3+4j)
(-0.000676375932234244 - 4.42236140124728e-5j)
>>> mp.dps = 50
>>> print barnesg(1/sqrt(2))
0.81305501090451340843586085064413533788206204124732
>>> q = barnesg(10j)
>>> print q.real
0.000000000021852360840356557241543036724799812371995850552234
>>> print q.imag
-0.00000000000070035335320062304849020654215545839053210041457588
**References**
1. Whittaker & Watson, *A Course of Modern Analysis*,
Cambridge University Press, 4th edition (1927), p.264
2. http://en.wikipedia.org/wiki/Barnes_G-function
3. http://mathworld.wolfram.com/BarnesG-Function.html
"""
if isinf(z):
if z == inf:
return z
return nan
if isnan(z):
return z
if (not z.imag) and z.real <= 0 and isint(z.real):
return z*0
# Account for size (would not be needed if computing log(G))
if abs(z) > 5:
mp.dps += 2*log(abs(z),2)
# Estimate terms for asymptotic expansion
N = mp.dps // 2 + 5
G = 1
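# Shift z into the regime re(z) >= N where the asymptotic series applies,
# using the recurrence G(z+1) = gamma(z)*G(z); the divided-out gamma
# factors accumulate in G and are multiplied back in at the end.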
while re(z) < N:
G /= gamma(z)
z += 1
z -= 1
s = mpf(1)/12
s -= log(glaisher)
s += z*log(2*pi)/2
s += (z**2/2-mpf(1)/12)*log(z)
s -= 3*z**2/4
z2k = z2 = z**2
for k in xrange(1, N+1):
t = bernoulli(2*k+2) / (4*k*(k+1)*z2k)
if abs(t) < eps:
#print k, N # check how many terms were needed
break
z2k *= z2
s += t
#if k == N:
# print "warning: series for barnesg failed to converge"
return G*exp(s)
def superfac(z):
r"""
Computes the superfactorial, defined as the product of
consecutive factorials
.. math ::
\mathrm{sf}(n) = \prod_{k=1}^n k!
For general complex `z`, `\mathrm{sf}(z)` is defined
in terms of the Barnes G-function (see :func:`barnesg`).
**Examples**
The first few superfactorials are (OEIS A000178)::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> for n in range(10):
... print n, superfac(n)
...
0 1.0
1 1.0
2 2.0
3 12.0
4 288.0
5 34560.0
6 24883200.0
7 125411328000.0
8 5.05658474496e+15
9 1.83493347225108e+21
Superfactorials grow very rapidly::
>>> print superfac(1000)
3.24570818422368e+1177245
>>> print superfac(10**10)
2.61398543581249e+467427913956904067453
Evaluation is supported for arbitrary arguments::
>>> mp.dps = 25
>>> print superfac(pi)
17.20051550121297985285333
>>> print superfac(2+3j)
(-0.005915485633199789627466468 + 0.008156449464604044948738263j)
>>> print diff(superfac, 1)
0.2645072034016070205673056
**References**
1. http://www.research.att.com/~njas/sequences/A000178
"""
return barnesg(z+2)
@funcwrapper
def hyperfac(z):
r"""
Computes the hyperfactorial, defined for integers as the product
.. math ::
H(n) = \prod_{k=1}^n k^k.
The hyperfactorial satisfies the recurrence formula `H(z) = z^z H(z-1)`.
It can be defined more generally in terms of the Barnes G-function (see
:func:`barnesg`) and the gamma function by the formula
.. math ::
H(z) = \frac{\Gamma(z+1)^z}{G(z)}.
The extension to complex numbers can also be done via
the integral representation
.. math ::
H(z) = (2\pi)^{-z/2} \exp \left[
{z+1 \choose 2} + \int_0^z \log(t!)\,dt
\right].
**Examples**
The rapidly-growing sequence of hyperfactorials begins
(OEIS A002109)::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> for n in range(10):
... print n, hyperfac(n)
...
0 1.0
1 1.0
2 4.0
3 108.0
4 27648.0
5 86400000.0
6 4031078400000.0
7 3.3197663987712e+18
8 5.56964379417266e+25
9 2.15779412229419e+34
Some even larger hyperfactorials are::
>>> print hyperfac(1000)
5.46458120882585e+1392926
>>> print hyperfac(10**10)
4.60408207642219e+489142638002418704309
The hyperfactorial can be evaluated for arbitrary arguments::
>>> print hyperfac(0.5)
0.880449235173423
>>> print diff(hyperfac, 1)
0.581061466795327
>>> print hyperfac(pi)
205.211134637462
>>> print hyperfac(-10+1j)
(3.01144471378225e+46 - 2.45285242480185e+46j)
The recurrence property of the hyperfactorial holds
generally::
>>> z = 3-4*j
>>> print hyperfac(z)
(-4.49795891462086e-7 - 6.33262283196162e-7j)
>>> print z**z * hyperfac(z-1)
(-4.49795891462086e-7 - 6.33262283196162e-7j)
>>> z = mpf(-0.6)
>>> print chop(z**z * hyperfac(z-1))
1.28170142849352
>>> print hyperfac(z)
1.28170142849352
The hyperfactorial may also be computed using the integral
definition::
>>> z = 2.5
>>> print hyperfac(z)
15.9842119922237
>>> print (2*pi)**(-z/2)*exp(binomial(z+1,2) +
... quad(lambda t: loggamma(t+1), [0, z]))
15.9842119922237
:func:`hyperfac` supports arbitrary-precision evaluation::
>>> mp.dps = 50
>>> print hyperfac(10)
215779412229418562091680268288000000000000000.0
>>> print hyperfac(1/sqrt(2))
0.89404818005227001975423476035729076375705084390942
**References**
1. http://www.research.att.com/~njas/sequences/A002109
2. http://mathworld.wolfram.com/Hyperfactorial.html
"""
# XXX: estimate needed extra bits accurately
if z == inf:
return z
if abs(z) > 5:
extra = 4*int(log(abs(z),2))
else:
extra = 0
mp.prec += extra
if not z.imag and z.real < 0 and isint(z.real):
n = int(re(z))
h = hyperfac(-n-1)
if ((n+1)//2) & 1:
h = -h
if isinstance(z, mpc):
return h + 0j
return h
zp1 = z+1
# Wrong branch cut
#v = gamma(zp1)**z
#mp.prec -= extra
#return v / barnesg(zp1)
v = exp(z*loggamma(zp1))
mp.prec -= extra
return v / barnesg(zp1)
@funcwrapper
def loggamma(z):
r"""
Computes the log-gamma function. Unlike `\ln(\Gamma(z))`, which
has infinitely many complex branch cuts, the log-gamma function
only has a single branch cut along the negative half-axis.
The functions are identical only on (and very close to) the positive
half-axis; elsewhere they differ by `2 n \pi i` (the real parts
agree)::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print loggamma(13.2), log(gamma(13.2))
20.494004194566 20.494004194566
>>> print loggamma(3+4j)
(-1.75662678460378 + 4.74266443803466j)
>>> print log(gamma(3+4j))
(-1.75662678460378 - 1.54052086914493j)
Note: this is a placeholder implementation. It is slower than
:func:`gamma`, and is in particular *not* faster than :func:`gamma`
for large arguments.
"""
a = z.real
b = z.imag
if not b and a > 0:
return log(gamma(z))
u = arg(z)
w = log(gamma(z))
if b:
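# gi is the Stirling-type estimate of Im(loggamma(z)), namely
# Im((z-1/2)*log(z) - z); the correction below shifts log(gamma(z)) by
# the multiple of 2*pi*i that places its imaginary part on this branch.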
gi = -b - u/2 + a*u + b*log(abs(z))
n = floor((gi-w.imag)/(2*pi)+0.5) * (2*pi)
return w + n*j
elif a < 0:
n = int(floor(a))
w += (n-(n%2))*pi*j
return w
@funcwrapper
def siegeltheta(t):
r"""
Computes the Riemann-Siegel theta function,
.. math ::
\theta(t) = \frac{
\log\Gamma\left(\frac{1+2it}{4}\right) -
\log\Gamma\left(\frac{1-2it}{4}\right)
}{2i} - \frac{\log \pi}{2} t.
The Riemann-Siegel theta function is important in
providing the phase factor for the Z-function
(see :func:`siegelz`). Evaluation is supported for real and
complex arguments::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print siegeltheta(0)
0.0
>>> print siegeltheta(inf)
+inf
>>> print siegeltheta(-inf)
-inf
>>> print siegeltheta(1)
-1.767547952812290388302216
>>> print siegeltheta(10+0.25j)
(-3.068638039426838572528867 + 0.05804937947429712998395177j)
The Riemann-Siegel theta function has odd symmetry around `t = 0`,
two local extreme points and three real roots including 0 (located
symmetrically)::
>>> nprint(chop(taylor(siegeltheta, 0, 5)))
[0.0, -2.68609, 0.0, 2.69433, 0.0, -6.40218]
>>> print findroot(diffun(siegeltheta), 7)
6.28983598883690277966509
>>> print findroot(siegeltheta, 20)
17.84559954041086081682634
For large `t`, there is a famous asymptotic formula
for `\theta(t)`, to first order given by::
>>> t = mpf(10**6)
>>> print siegeltheta(t)
5488816.353078403444882823
>>> print -t*log(2*pi/t)/2-t/2
5488816.745777464310273645
"""
if t.imag:
# XXX: cancellation occurs
a = loggamma(0.25+0.5j*t)
b = loggamma(0.25-0.5j*t)
return -log(pi)/2*t - 0.5j*(a-b)
else:
if isinf(t):
return t
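# For real t, loggamma(0.25-0.5j*t) is the complex conjugate of
# loggamma(0.25+0.5j*t), so the difference divided by 2i in the defining
# formula reduces to the imaginary part of a single loggamma evaluation.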
return loggamma(0.25+0.5j*t).imag - log(pi)/2*t
@funcwrapper
def grampoint(n):
r"""
Gives the `n`-th Gram point `g_n`, defined as the solution
to the equation `\theta(g_n) = \pi n` where `\theta(t)`
is the Riemann-Siegel theta function (:func:`siegeltheta`).
The first few Gram points are::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print grampoint(0)
17.84559954041086081682634
>>> print grampoint(1)
23.17028270124630927899664
>>> print grampoint(2)
27.67018221781633796093849
>>> print grampoint(3)
31.71797995476405317955149
Checking the definition::
>>> print siegeltheta(grampoint(3))
9.42477796076937971538793
>>> print 3*pi
9.42477796076937971538793
A large Gram point::
>>> print grampoint(10**10)
3293531632.728335454561153
Gram points are useful when studying the Z-function
(:func:`siegelz`). See the documentation of that function
for additional examples.
:func:`grampoint` can solve the defining equation for
nonintegral `n`. There is a fixed point where `g(x) = x`::
>>> print findroot(lambda x: grampoint(x) - x, 10000)
9146.698193171459265866198
**References**
1. http://mathworld.wolfram.com/GramPoint.html
"""
# Asymptotic expansion, from
# http://mathworld.wolfram.com/GramPoint.html
g = 2*pi*exp(1+lambertw((8*n+1)/(8*e)))
from optimization import findroot
return findroot(lambda t: siegeltheta(t)-pi*n, g)
@funcwrapper
def siegelz(t):
r"""
Computes the Z-function, also known as the Riemann-Siegel Z function,
.. math ::
Z(t) = e^{i \theta(t)} \zeta(1/2+it)
where `\zeta(s)` is the Riemann zeta function (:func:`zeta`)
and where `\theta(t)` denotes the Riemann-Siegel theta function
(see :func:`siegeltheta`).
Evaluation is supported for real and complex arguments::
>>> from sympy.mpmath import *
>>> mp.dps = 25
>>> print siegelz(1)
-0.7363054628673177346778998
>>> print siegelz(3+4j)
(-0.1852895764366314976003936 - 0.2773099198055652246992479j)
The Z-function has a Maclaurin expansion::
>>> nprint(chop(taylor(siegelz, 0, 4)))
[-1.46035, 0.0, 2.73588, 0.0, -8.39357]
The Z-function `Z(t)` is equal to `\pm |\zeta(s)|` on the
critical line `s = 1/2+it` (i.e. for real arguments `t`
to `Z`). Its zeros coincide with those of the Riemann zeta
function::
>>> print findroot(siegelz, 14)
14.13472514173469379045725
>>> print findroot(siegelz, 20)
21.02203963877155499262848
>>> print findroot(zeta, 0.5+14j)
(0.5 + 14.13472514173469379045725j)
>>> print findroot(zeta, 0.5+20j)
(0.5 + 21.02203963877155499262848j)
Since the Z-function is real-valued on the critical line
(and unlike `|\zeta(s)|` analytic), it is useful for
investigating the zeros of the Riemann zeta function.
For example, one can use a root-finding algorithm based
on sign changes::
>>> print findroot(siegelz, [100, 200], solver='bisect')
176.4414342977104188888926
To locate roots, Gram points `g_n` which can be computed
by :func:`grampoint` are useful. If `(-1)^n Z(g_n)` is
positive for two consecutive `n`, then `Z(t)` must have
a zero between those points::
>>> g10 = grampoint(10)
>>> g11 = grampoint(11)
>>> (-1)**10 * siegelz(g10) > 0
True
>>> (-1)**11 * siegelz(g11) > 0
True
>>> print findroot(siegelz, [g10, g11], solver='bisect')
56.44624769706339480436776
>>> print g10, g11
54.67523744685325626632663 57.54516517954725443703014
"""
v = exp(j*siegeltheta(t))*zeta(0.5+j*t)
if isinstance(t, mpf):
return v.real
return v
@funcwrapper
def bernpoly(n, z):
"""
Evaluates the Bernoulli polynomial `B_n(z)`.
The first few Bernoulli polynomials are::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> for n in range(6):
... nprint(chop(taylor(lambda x: bernpoly(n,x), 0, n)))
...
[1.0]
[-0.5, 1.0]
[0.166667, -1.0, 1.0]
[0.0, 0.5, -1.5, 1.0]
[-3.33333e-2, 0.0, 1.0, -2.0, 1.0]
[0.0, -0.166667, 0.0, 1.66667, -2.5, 1.0]
At `z = 0`, the Bernoulli polynomial evaluates to a
Bernoulli number (see :func:`bernoulli`)::
>>> print bernpoly(12, 0), bernoulli(12)
-0.253113553113553 -0.253113553113553
>>> print bernpoly(13, 0), bernoulli(13)
0.0 0.0
"""
n = int(n)
assert n >= 0
# XXX: optimize
return sum(binomial(n,k)*bernoulli(k)*z**(n-k) for k in xrange(0,n+1))
# TODO: this should be implemented low-level
def polylog_series(s, z):
tol = +eps
l = mpf(0)
k = 1
zk = z
while 1:
term = zk / k**s
l += term
if abs(term) < tol:
break
zk *= z
k += 1
return l
def polylog_continuation(n, z):
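# Correction term for the analytic continuation at integer order n: a
# Bernoulli polynomial in log(z)/(2*pi*i), plus a branch-dependent
# 2*pi*i*log(z)**(n-1)/(n-1)! adjustment on one side of the branch cut.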
if n < 0:
return z*0
a = -(2*pi*j)**n/fac(n) * bernpoly(n, log(z)/(2*pi*j))
if isinstance(z, mpf) and z < 0:
a = a.real
if z.imag < 0 or (z.imag == 0 and z.real >= 1):
a -= 2*pi*j*log(z)**(n-1)/fac(n-1)
return a
def polylog_unitcircle(n, z):
tol = +eps
if n > 1:
l = mpf(0)
logz = log(z)
logmz = mpf(1)
m = 0
while 1:
if (n-m) != 1:
term = zeta(n-m) * logmz / fac(m)
if term and abs(term) < tol:
break
l += term
logmz *= logz
m += 1
l += log(z)**(n-1)/fac(n-1)*(harmonic(n-1)-log(-log(z)))
elif n < 1: # else
l = fac(-n)*(-log(z))**(n-1)
logz = log(z)
logkz = mpf(1)
k = 0
while 1:
b = bernoulli(k-n+1)
if b:
term = b*logkz/(fac(k)*(k-n+1))
if abs(term) < tol:
break
l -= term
logkz *= logz
k += 1
else:
raise ValueError
if isinstance(z, mpf) and z < 0:
l = l.real
return l
@funcwrapper
def polylog(s, z):
r"""
Computes the polylogarithm, defined by the sum
.. math ::
\mathrm{Li}_s(z) = \sum_{k=1}^{\infty} \frac{z^k}{k^s}.
This series is convergent only for `|z| < 1`, so elsewhere
the analytic continuation is implied.
The polylogarithm should not be confused with the logarithmic
integral (also denoted by Li or li), which is implemented
as :func:`li`.
**Examples**
The polylogarithm satisfies a huge number of functional identities.
A sample of polylogarithm evaluations is shown below::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> print polylog(1,0.5), log(2)
0.693147180559945 0.693147180559945
>>> print polylog(2,0.5), (pi**2-6*log(2)**2)/12
0.582240526465012 0.582240526465012
>>> print polylog(2,-phi), -log(phi)**2-pi**2/10
-1.21852526068613 -1.21852526068613
>>> print polylog(3,0.5), 7*zeta(3)/8-pi**2*log(2)/12+log(2)**3/6
0.53721319360804 0.53721319360804
:func:`polylog` can evaluate the analytic continuation of the
polylogarithm when `s` is an integer::
>>> print polylog(2, 10)
(0.536301287357863 - 7.23378441241546j)
>>> print polylog(2, -10)
-4.1982778868581
>>> print polylog(2, 10j)
(-3.05968879432873 + 3.71678149306807j)
>>> print polylog(-2, 10)
-0.150891632373114
>>> print polylog(-2, -10)
0.067618332081142
>>> print polylog(-2, 10j)
(0.0384353698579347 + 0.0912451798066779j)
Some more examples, with arguments on the unit circle (note that
the series definition cannot be used for computation here)::
>>> print polylog(2,j)
(-0.205616758356028 + 0.915965594177219j)
>>> print j*catalan-pi**2/48
(-0.205616758356028 + 0.915965594177219j)
>>> print polylog(3,exp(2*pi*j/3))
(-0.534247512515375 + 0.765587078525922j)
>>> print -4*zeta(3)/9 + 2*j*pi**3/81
(-0.534247512515375 + 0.765587078525921j)
Polylogarithms of different order are related by integration
and differentiation::
>>> s, z = 3, 0.5
>>> print polylog(s+1, z)
0.517479061673899
>>> print quad(lambda t: polylog(s,t)/t, [0, z])
0.517479061673899
>>> print z*diff(lambda t: polylog(s+2,t), z)
0.517479061673899
Taylor series expansions around `z = 0` are::
>>> for n in range(-3, 4):
... nprint(taylor(lambda x: polylog(n,x), 0, 5))
...
[0.0, 1.0, 8.0, 27.0, 64.0, 125.0]
[0.0, 1.0, 4.0, 9.0, 16.0, 25.0]
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
[0.0, 1.0, 1.0, 1.0, 1.0, 1.0]
[0.0, 1.0, 0.5, 0.333333, 0.25, 0.2]
[0.0, 1.0, 0.25, 0.111111, 6.25e-2, 4.0e-2]
[0.0, 1.0, 0.125, 3.7037e-2, 1.5625e-2, 8.0e-3]
The series defining the polylogarithm is simultaneously
a Taylor series and an L-series. For certain values of `z`, the
polylogarithm reduces to a pure zeta function::
>>> print polylog(pi, 1), zeta(pi)
1.17624173838258 1.17624173838258
>>> print polylog(pi, -1), -altzeta(pi)
-0.909670702980385 -0.909670702980385
Evaluation for arbitrary, nonintegral `s` is supported
for `z` within the unit circle::
>>> print polylog(3+4j, 0.25)
(0.24258605789446 - 0.00222938275488344j)
>>> print nsum(lambda k: 0.25**k / k**(3+4j), [1,inf])
(0.24258605789446 - 0.00222938275488344j)
**References**
1. Richard Crandall, "Note on fast polylogarithm computation"
http://people.reed.edu/~crandall/papers/Polylog.pdf
2. http://en.wikipedia.org/wiki/Polylogarithm
3. http://mathworld.wolfram.com/Polylogarithm.html
"""
if z == 1:
return zeta(s)
if z == -1:
return -altzeta(s)
if s == 0:
return z/(1-z)
if s == 1:
return -log(1-z)
if s == -1:
return z/(1-z)**2
if abs(z) <= 0.75 or (not isint(s) and abs(z) < 0.99):
return polylog_series(s, z)
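# For integer s and |z| well outside the unit circle, use the inversion
# formula: the series for Li_s(1/z) converges, and polylog_continuation
# supplies the polynomial-in-log(z) correction term.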
if abs(z) >= 1.4 and isint(s):
return (-1)**(s+1)*polylog_series(s, 1/z) + polylog_continuation(s, z)
if isint(s):
return polylog_unitcircle(int(s), z)
raise NotImplementedError("polylog for arbitrary s and z")
# This could perhaps be used in some cases
#from quadrature import quad
#return quad(lambda t: t**(s-1)/(exp(t)/z-1),[0,inf])/gamma(s)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
gnulinooks/sympy
|
sympy/mpmath/functions.py
|
Python
|
bsd-3-clause
| 166,515
|
[
"Gaussian"
] |
9da3d36483a990e4b6ee2708f6c9fdf093965c3d22f8c44db8faa152945f5093
|
################################################################################
#
# Copyright (c) 2012 The MadGraph Development team and Contributors
#
# This file is a part of the MadGraph 5 project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph license which should accompany this
# distribution.
#
# For more information, please visit: http://madgraph.phys.ucl.ac.be
#
################################################################################
""" This part is not part of the UFO Model but only of MG5 suite.
This files defines how the restrict card can be build automatically """
import models.build_restriction_lib as build_restrict_lib
all_categories = []
first_category = build_restrict_lib.Category('sm customization')
all_categories.append(first_category)
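# Each rule below is a (block name, index list, value) triple; selecting an
# option fixes the corresponding parameter-card entries to the given value.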
first_category.add_options(name='light mass = 0 (u d s c e mu)', # name
default=True, # default
rules=[('MASS',[1], 0.0),
('MASS',[2], 0.0),
('MASS',[3], 0.0),
('MASS',[11], 0.0),
('MASS',[13], 0.0)]
)
first_category.add_options(name='b mass = 0',
default=False,
rules=[('MASS',[5], 0.0)]
)
first_category.add_options(name='tau mass = 0',
default=False,
rules=[('MASS',[15], 0.0)]
)
sec_category = build_restrict_lib.Category('mssm customization')
all_categories.append(sec_category)
sec_category.add_options(name='diagonal usqmix matrices',
default=False, # default
rules=[('USQMIX',[1,1], 1.0),
('USQMIX',[2,2], 1.0),
('USQMIX',[3,3], 1.0),
('USQMIX',[4,4], 1.0),
('USQMIX',[5,5], 1.0),
('USQMIX',[6,6], 1.0),
('USQMIX',[3,6], 0.0),
('USQMIX',[6,3], 0.0)]
)
sec_category.add_options(name='diagonal dsqmix matrices',
default=False, # default
rules=[('DSQMIX',[1,1], 1.0),
('DSQMIX',[2,2], 1.0),
('DSQMIX',[3,3], 1.0),
('DSQMIX',[4,4], 1.0),
('DSQMIX',[5,5], 1.0),
('DSQMIX',[6,6], 1.0),
('DSQMIX',[3,6], 0.0),
('DSQMIX',[6,3], 0.0)]
)
sec_category.add_options(name='diagonal selmix matrices',
default=False, # default
rules=[('SELMIX',[1,1], 1.0),
('SELMIX',[2,2], 1.0),
('SELMIX',[3,3], 1.0),
('SELMIX',[4,4], 1.0),
('SELMIX',[5,5], 1.0),
('SELMIX',[6,6], 1.0),
('SELMIX',[3,6], 0.0),
('SELMIX',[6,3], 0.0)]
)
|
cms-externals/sherpa
|
Examples/UFO_MSSM/MSSM/build_restrict.py
|
Python
|
gpl-3.0
| 3,624
|
[
"VisIt"
] |
3f3e74a186d66137fb903453705c6ace4ccb5430600ba7c1c63869fb973a0429
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Analysis classes for batteries
"""
import math
from collections import defaultdict
import scipy.constants as const
from pymatgen.core.periodic_table import Element, Species
from pymatgen.core.structure import Composition
__author__ = "Anubhav Jain"
__copyright__ = "Copyright 2011, The Materials Project"
__credits__ = ["Shyue Ping Ong", "Geoffroy Hautier"]
__version__ = "1.0"
__maintainer__ = "Anubhav Jain"
__email__ = "[email protected]"
__date__ = "Sep 20, 2011"
EV_PER_ATOM_TO_J_PER_MOL = const.e * const.N_A
ELECTRON_TO_AMPERE_HOURS = EV_PER_ATOM_TO_J_PER_MOL / 3600
class BatteryAnalyzer:
"""
A suite of methods for starting with an oxidized structure and determining its potential as a battery
"""
def __init__(self, struc_oxid, cation="Li"):
"""
Pass in a structure for analysis
Arguments:
struc_oxid: a Structure object; oxidation states *must* be assigned for this structure; disordered
structures should be OK
cation: a String symbol or Element for the cation. It must be positively charged, but can be 1+/2+/3+ etc.
"""
for site in struc_oxid:
if not hasattr(site.specie, "oxi_state"):
raise ValueError("BatteryAnalyzer requires oxidation states assigned to structure!")
self.struc_oxid = struc_oxid
self.comp = self.struc_oxid.composition # shortcut for later
# Accept either an Element instance or a string symbol for the cation.
self.cation = cation if isinstance(cation, Element) else Element(cation)
self.cation_charge = self.cation.max_oxidation_state
@property
def max_cation_removal(self):
"""
Maximum number of cation A that can be removed while maintaining charge-balance.
Returns:
integer amount of cation. Depends on cell size (this is an 'extrinsic' function!)
"""
# how much 'spare charge' is left in the redox metals for oxidation?
oxid_pot = sum(
(Element(spec.symbol).max_oxidation_state - spec.oxi_state) * self.comp[spec]
for spec in self.comp
if is_redox_active_intercalation(Element(spec.symbol))
)
oxid_limit = oxid_pot / self.cation_charge
# the number of A that exist in the structure for removal
num_cation = self.comp[Species(self.cation.symbol, self.cation_charge)]
return min(oxid_limit, num_cation)
@property
def max_cation_insertion(self):
"""
Maximum number of cation A that can be inserted while maintaining charge-balance.
No consideration is given to whether there are (geometrically speaking) Li sites available to accommodate the
extra Li.
Returns:
integer amount of cation. Depends on cell size (this is an 'extrinsic' function!)
"""
# how much 'spare charge' is left in the redox metals for reduction?
lowest_oxid = defaultdict(lambda: 2, {"Cu": 1}) # only Cu can go down to 1+
oxid_pot = sum(
(spec.oxi_state - min(e for e in Element(spec.symbol).oxidation_states if e >= lowest_oxid[spec.symbol]))
* self.comp[spec]
for spec in self.comp
if is_redox_active_intercalation(Element(spec.symbol))
)
return oxid_pot / self.cation_charge
def _get_max_cap_ah(self, remove, insert):
"""
Give max capacity in mAh for inserting and removing a charged cation
This method does not normalize the capacity and intended as a helper method
"""
num_cations = 0
if remove:
num_cations += self.max_cation_removal
if insert:
num_cations += self.max_cation_insertion
return num_cations * self.cation_charge * ELECTRON_TO_AMPERE_HOURS
def get_max_capgrav(self, remove=True, insert=True):
"""
Give max capacity in mAh/g for inserting and removing a charged cation
Note that the weight is normalized to the most lithiated state,
thus removal of 1 Li from LiFePO4 gives the same capacity as insertion of 1 Li into FePO4.
Args:
remove: (bool) whether to allow cation removal
insert: (bool) whether to allow cation insertion
Returns:
max grav capacity in mAh/g
"""
weight = self.comp.weight
if insert:
weight += self.max_cation_insertion * self.cation.atomic_mass
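# _get_max_cap_ah returns Ah per mole of formula units; dividing by the
# molar mass in kg/mol (weight / 1000) gives Ah/kg, which equals mAh/g.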
return self._get_max_cap_ah(remove, insert) / (weight / 1000)
def get_max_capvol(self, remove=True, insert=True, volume=None):
"""
Give max capacity in mAh/cc for inserting and removing a charged cation into base structure.
Args:
remove: (bool) whether to allow cation removal
insert: (bool) whether to allow cation insertion
volume: (float) volume to use for normalization (default=volume of initial structure)
Returns:
max vol capacity in mAh/cc
"""
vol = volume if volume else self.struc_oxid.volume
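# Convert Ah per mole of formula units to mAh (factor 1000) and divide by
# the molar volume: vol is in Angstrom^3, so vol * N_A / 1e24 is cm^3/mol.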
return self._get_max_cap_ah(remove, insert) * 1000 * 1e24 / (vol * const.N_A)
def get_removals_int_oxid(self):
"""
Returns a set of delithiation steps, e.g. set([1.0, 2.0, 4.0]), in order to
produce integer oxidation states of the redox metals.
If multiple redox metals are present, all combinations of reduction/oxidation are tested.
Note that having more than 3 redox metals will likely slow down the algorithm.
Examples:
LiFePO4 will return [1.0]
Li4Fe3Mn1(PO4)4 will return [1.0, 2.0, 3.0, 4.0]
Li6V4(PO4)6 will return [4.0, 6.0] *note that this example is not normalized*
Returns:
array of integer cation removals. If you double the unit cell, your answers will be twice as large!
"""
# the elements that can possibly be oxidized
oxid_els = [Element(spec.symbol) for spec in self.comp if is_redox_active_intercalation(spec)]
numa = set()
for oxid_el in oxid_els:
numa = numa.union(self._get_int_removals_helper(self.comp.copy(), oxid_el, oxid_els, numa))
# convert from num A in structure to num A removed
num_cation = self.comp[Species(self.cation.symbol, self.cation_charge)]
return {num_cation - a for a in numa}
def _get_int_removals_helper(self, spec_amts_oxi, oxid_el, oxid_els, numa):
"""
This is a helper method for get_removals_int_oxid!
Args:
spec_amts_oxi - a dict of species to their amounts in the structure
oxid_el - the element to oxidize
oxid_els - the full list of elements that might be oxidized
numa - a running set of numbers of A cation at integer oxidation steps
Returns:
a set of numbers A; steps for oxidizing oxid_el first, then the other oxid_els in this list
"""
# If Mn is the oxid_el, we have a mixture of Mn2+, Mn3+, determine the minimum oxidation state for Mn
# this is the state we want to oxidize!
oxid_old = min(spec.oxi_state for spec in spec_amts_oxi if spec.symbol == oxid_el.symbol)
oxid_new = math.floor(oxid_old + 1)
# if this is not a valid solution, break out of here and don't add anything to the list
if oxid_new > oxid_el.max_oxidation_state:
return numa
# update the spec_amts_oxi map to reflect that the oxidation took place
spec_old = Species(oxid_el.symbol, oxid_old)
spec_new = Species(oxid_el.symbol, oxid_new)
specamt = spec_amts_oxi[spec_old]
spec_amts_oxi = {sp: amt for sp, amt in spec_amts_oxi.items() if sp != spec_old}
spec_amts_oxi[spec_new] = specamt
spec_amts_oxi = Composition(spec_amts_oxi)
# determine the amount of cation A in the structure needed for charge balance and add it to the list
oxi_noA = sum(
spec.oxi_state * spec_amts_oxi[spec] for spec in spec_amts_oxi if spec.symbol not in self.cation.symbol
)
a = max(0, -oxi_noA / self.cation_charge)
numa = numa.union({a})
# recursively try the other oxidation states
if a == 0:
return numa
for ox in oxid_els:
numa = numa.union(self._get_int_removals_helper(spec_amts_oxi.copy(), ox, oxid_els, numa))
return numa
def is_redox_active_intercalation(element):
"""
True if element is redox active and interesting for intercalation materials
Args:
element: Element object
"""
ns = [
"Ti",
"V",
"Cr",
"Mn",
"Fe",
"Co",
"Ni",
"Cu",
"Nb",
"Mo",
"W",
"Sb",
"Sn",
"Bi",
]
return element.symbol in ns
|
vorwerkc/pymatgen
|
pymatgen/apps/battery/analyzer.py
|
Python
|
mit
| 8,942
|
[
"pymatgen"
] |
c771ce5efde01c889efe773aa67c750e4c34e8bfb006acce36c3986613ad2376
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Seasonal model."""
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import distribution_util as dist_util
from tensorflow_probability.python.internal import docstring_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.sts.internal import util as sts_util
from tensorflow_probability.python.sts.structural_time_series import Parameter
from tensorflow_probability.python.sts.structural_time_series import StructuralTimeSeries
seasonal_init_args = """
Args:
num_timesteps: Scalar `int` `Tensor` number of timesteps to model
with this distribution.
num_seasons: Scalar Python `int` number of seasons.
drift_scale: Scalar (any additional dimensions are treated as batch
dimensions) `float` `Tensor` indicating the standard deviation of the
change in effect between consecutive occurrences of a given season.
This is assumed to be the same for all seasons.
initial_state_prior: instance of `tfd.MultivariateNormal`
representing the prior distribution on latent states; must
have event shape `[num_seasons]`.
observation_noise_scale: Scalar (any additional dimensions are
treated as batch dimensions) `float` `Tensor` indicating the standard
deviation of the observation noise.
Default value: 0.
num_steps_per_season: Python `int` number of steps in each
season. This may be either a scalar (shape `[]`), in which case all
seasons have the same length, or a NumPy array of shape `[num_seasons]`,
in which case seasons may have different lengths that stay the same from
cycle to cycle, or a NumPy array of shape `[num_cycles, num_seasons]`,
in which case the length of each season may also vary from cycle to
cycle (e.g., a four-year cycle containing a leap day).
Default value: 1.
name: Python `str` name prefixed to ops created by this class.
Default value: "SeasonalStateSpaceModel".
**linear_gaussian_ssm_kwargs: Optional additional keyword arguments to pass
to the base `tfd.LinearGaussianStateSpaceModel` constructor.
Raises:
ValueError: if `num_steps_per_season` has invalid shape (neither
scalar nor `[num_seasons]`).
"""
class SeasonalStateSpaceModel(tfd.LinearGaussianStateSpaceModel):
"""State space model for a seasonal effect.
A state space model (SSM) posits a set of latent (unobserved) variables that
evolve over time with dynamics specified by a probabilistic transition model
`p(z[t+1] | z[t])`. At each timestep, we observe a value sampled from an
observation model conditioned on the current state, `p(x[t] | z[t])`. The
special case where both the transition and observation models are Gaussians
with mean specified as a linear function of the inputs, is known as a linear
Gaussian state space model and supports tractable exact probabilistic
calculations; see `tfp.distributions.LinearGaussianStateSpaceModel` for
details.
A seasonal effect model is a special case of a linear Gaussian SSM. The
latent states represent an unknown effect from each of several 'seasons';
these are generally not meteorological seasons, but represent regular
recurring patterns such as hour-of-day or day-of-week effects. The effect of
each season drifts from one occurrence to the next, following a Gaussian
random walk:
```python
effects[season, occurrence[i]] = (
effects[season, occurrence[i-1]] + Normal(loc=0., scale=drift_scale))
```
The latent state has dimension `num_seasons`, containing one effect for each
seasonal component. The parameters `drift_scale` and
`observation_noise_scale` are each (a batch of) scalars. The batch shape of
this `Distribution` is the broadcast batch shape of these parameters and of
the `initial_state_prior`.
Note: there is no requirement that the effects sum to zero.
#### Mathematical Details
The seasonal effect model implements a
`tfp.distributions.LinearGaussianStateSpaceModel` with
`latent_size = num_seasons` and `observation_size = 1`. The latent state
is organized so that the *current* seasonal effect is always in the first
(zeroth) dimension. The transition model rotates the latent state to shift
to a new effect at the end of each season:
```
transition_matrix[t] = (permutation_matrix([1, 2, ..., num_seasons-1, 0])
if season_is_changing(t)
else eye(num_seasons))
transition_noise[t] ~ Normal(loc=0., scale_diag=(
[drift_scale, 0, ..., 0]
if season_is_changing(t)
else [0, 0, ..., 0]))
```
where `season_is_changing(t)` is `True` if `t % sum(num_steps_per_season)`
is in the set of final days for each season,
given by `cumsum(num_steps_per_season) - 1`. The observation model always
picks out the effect for the current season, i.e., the first element of
the latent state:
```
observation_matrix = [[1., 0., ..., 0.]]
observation_noise ~ Normal(loc=0, scale=observation_noise_scale)
```
#### Examples
A state-space model with day-of-week seasonality on hourly data:
```python
day_of_week = SeasonalStateSpaceModel(
num_timesteps=30,
num_seasons=7,
drift_scale=0.1,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=tf.ones([7], dtype=tf.float32)),
num_steps_per_season=24)
```
A model with basic month-of-year seasonality on daily data, demonstrating
seasons of varying length:
```python
month_of_year = SeasonalStateSpaceModel(
num_timesteps=2 * 365, # 2 years
num_seasons=12,
drift_scale=0.1,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=tf.ones([12], dtype=tf.float32)),
num_steps_per_season=[31, 28, 31, 30, 30, 31, 31, 31, 30, 31, 30, 31],
initial_step=22)
```
Note that we've used `initial_step=22` to denote that the model begins
on January 23 (steps are zero-indexed). This version works over time periods
not involving a leap year. A general implementation of month-of-year
seasonality would require additional logic:
```python
num_days_per_month = np.array(
[[31, 28, 31, 30, 30, 31, 31, 31, 30, 31, 30, 31],
[31, 29, 31, 30, 30, 31, 31, 31, 30, 31, 30, 31], # year with leap day
[31, 28, 31, 30, 30, 31, 31, 31, 30, 31, 30, 31],
[31, 28, 31, 30, 30, 31, 31, 31, 30, 31, 30, 31]])
month_of_year = SeasonalStateSpaceModel(
num_timesteps=4 * 365 + 2, # 8 years with leap days
num_seasons=12,
drift_scale=0.1,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=tf.ones([12], dtype=tf.float32)),
num_steps_per_season=num_days_per_month,
initial_step=22)
```
"""
@docstring_util.expand_docstring(seasonal_init_args=seasonal_init_args)
def __init__(self,
num_timesteps,
num_seasons,
drift_scale,
initial_state_prior,
observation_noise_scale=0.,
num_steps_per_season=1,
name=None,
**linear_gaussian_ssm_kwargs): # pylint: disable=g-doc-args
"""Build a seasonal effect state space model.
{seasonal_init_args}
"""
parameters = dict(locals())
parameters.update(linear_gaussian_ssm_kwargs)
del parameters['linear_gaussian_ssm_kwargs']
with tf.name_scope(name or 'SeasonalStateSpaceModel') as name:
# The initial state prior determines the dtype of sampled values.
# Other model parameters must have the same dtype.
dtype = initial_state_prior.dtype
drift_scale = tf.convert_to_tensor(
value=drift_scale, name='drift_scale', dtype=dtype)
observation_noise_scale = tf.convert_to_tensor(
value=observation_noise_scale,
name='observation_noise_scale',
dtype=dtype)
# Coerce `num_steps_per_season` to a canonical form, an array of
# `num_seasons` integers.
num_steps_per_season = np.squeeze(np.asarray(num_steps_per_season))
if num_steps_per_season.ndim == 0: # scalar case
num_steps_per_season = np.tile(num_steps_per_season, num_seasons)
elif ((num_steps_per_season.ndim <= 2) # 1D and 2D case
and (num_steps_per_season.shape[-1] != num_seasons)):
raise ValueError('num_steps_per_season must either be scalar (shape [])'
' or have the last dimension equal to [num_seasons] = '
'[{}] (saw: shape {})'.format(
num_seasons, num_steps_per_season.shape))
is_last_day_of_season = build_is_last_day_of_season(num_steps_per_season)
seasonal_transition_matrix = build_seasonal_transition_matrix(
num_seasons=num_seasons,
is_last_day_of_season=is_last_day_of_season,
dtype=dtype)
seasonal_transition_noise = build_seasonal_transition_noise(
drift_scale, num_seasons, is_last_day_of_season)
observation_matrix = tf.concat([
tf.ones([1, 1], dtype=dtype),
tf.zeros([1, num_seasons-1], dtype=dtype)], axis=-1)
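# The observation matrix [[1., 0., ..., 0.]] reads off the current season's
# effect, which the transition model keeps in the first latent dimension.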
self._drift_scale = drift_scale
self._observation_noise_scale = observation_noise_scale
self._num_seasons = num_seasons
self._num_steps_per_season = num_steps_per_season
super(SeasonalStateSpaceModel, self).__init__(
num_timesteps=num_timesteps,
transition_matrix=seasonal_transition_matrix,
transition_noise=seasonal_transition_noise,
observation_matrix=observation_matrix,
observation_noise=tfd.MultivariateNormalDiag(
scale_diag=observation_noise_scale[..., tf.newaxis]),
initial_state_prior=initial_state_prior,
name=name,
**linear_gaussian_ssm_kwargs)
self._parameters = parameters
@property
def drift_scale(self):
"""Standard deviation of the drift in effects between seasonal cycles."""
return self._drift_scale
@property
def observation_noise_scale(self):
"""Standard deviation of the observation noise."""
return self._observation_noise_scale
@property
def num_seasons(self):
"""Number of seasons."""
return self._num_seasons
@property
def num_steps_per_season(self):
"""Number of steps in each season."""
return self._num_steps_per_season
class ConstrainedSeasonalStateSpaceModel(tfd.LinearGaussianStateSpaceModel):
"""Seasonal state space model with effects constrained to sum to zero.
See `SeasonalStateSpaceModel` for background.
#### Mathematical details
The constrained model implements a reparameterization of the
naive `SeasonalStateSpaceModel`. Instead of directly representing the
seasonal effects in the latent space, the latent space of the constrained
model represents the difference between each effect and the mean effect.
The following discussion assumes familiarity with the mathematical details
of `SeasonalStateSpaceModel`.
*Reparameterization and constraints*: let the seasonal effects at a given
timestep be `E = [e_1, ..., e_N]`. The difference between each effect `e_i`
and the mean effect is `z_i = e_i - sum_i(e_i)/N`. By itself, this
transformation is not invertible because recovering the absolute effects
requires that we know the mean as well. To fix this, we'll define
`z_N = sum_i(e_i)/N` as the mean effect. It's easy to see that this is
invertible: given the mean effect and the differences of the first `N - 1`
effects from the mean, it's easy to solve for all `N` effects. Formally,
we've defined the invertible linear reparameterization `Z = R E`, where
```
R = [1 - 1/N, -1/N, ..., -1/N
-1/N, 1 - 1/N, ..., -1/N,
...
1/N, 1/N, ..., 1/N]
```
represents the change of basis from 'effect coordinates' E to
'residual coordinates' Z. The `Z`s form the latent space of the
`ConstrainedSeasonalStateSpaceModel`.
To constrain the mean effect `z_N` to zero, we fix the prior to zero,
`p(z_N) ~ N(0., 0)`, and after the transition at each timestep we project
`z_N` back to zero. Note that this projection is linear: to set the Nth
dimension to zero, we simply multiply by the identity matrix with a missing
element in the bottom right, i.e., `Z_constrained = P Z`,
where `P = eye(N) - scatter((N-1, N-1), 1)`.
*Model*: concretely, suppose a naive seasonal effect model has initial state
prior `N(m, S)`, transition matrix `F` and noise covariance
`Q`, and observation matrix `H`. Then the corresponding constrained seasonal
effect model has initial state prior `N(P R m, P R S R' P')`,
transition matrix `P R F R^-1` and noise covariance `P R Q R' P'`, and
observation matrix `H R^-1`, where the change-of-basis matrix `R` and
constraint projection matrix `P` are as defined above. This follows
directly from applying the reparameterization `Z = R E`, and then enforcing
the zero-sum constraint on the prior and transition noise covariances.
In practice, because the sum of effects `z_N` is constrained to be zero, it
will never contribute a term to any linear operation on the latent space,
so we can drop that dimension from the model entirely.
`ConstrainedSeasonalStateSpaceModel` does this, so that it implements the
`N - 1`-dimensional latent space `z_1, ..., z_[N-1]`.
Note that since we constrained the mean effect to be zero, the latent
`z_i`'s now recover their interpretation as the *actual* effects,
`z_i = e_i` for `i = 1, ..., N - 1`, even though they were originally
defined as residuals. The `N`th effect is represented only implicitly, as
the nonzero mean of the first `N - 1` effects. Although the computational
representation is not symmetric across all `N` effects, we derived the
`ConstrainedSeasonalStateSpaceModel` by starting with a symmetric
representation and imposing only a symmetric constraint (the zero-sum
constraint), so the probability model remains symmetric over all `N`
seasonal effects.
#### Examples
A constrained state-space model with day-of-week seasonality on hourly data:
```python
day_of_week = ConstrainedSeasonalStateSpaceModel(
num_timesteps=30,
num_seasons=7,
drift_scale=0.1,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=tf.ones([7-1], dtype=tf.float32)),
num_steps_per_season=24)
```
A model with basic month-of-year seasonality on daily data, demonstrating
seasons of varying length:
```python
month_of_year = ConstrainedSeasonalStateSpaceModel(
num_timesteps=2 * 365, # 2 years
num_seasons=12,
drift_scale=0.1,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=tf.ones([12-1], dtype=tf.float32)),
num_steps_per_season=[31, 28, 31, 30, 30, 31, 31, 31, 30, 31, 30, 31],
initial_step=22)
```
Note that we've used `initial_step=22` to denote that the model begins
on January 23 (steps are zero-indexed). This version works over time periods
not involving a leap year. A general implementation of month-of-year
seasonality would require additional logic:
```python
num_days_per_month = np.array(
[[31, 28, 31, 30, 30, 31, 31, 31, 30, 31, 30, 31],
[31, 29, 31, 30, 30, 31, 31, 31, 30, 31, 30, 31], # year with leap day
[31, 28, 31, 30, 30, 31, 31, 31, 30, 31, 30, 31],
[31, 28, 31, 30, 30, 31, 31, 31, 30, 31, 30, 31]])
month_of_year = ConstrainedSeasonalStateSpaceModel(
num_timesteps=4 * 365 + 2, # 8 years with leap days
num_seasons=12,
drift_scale=0.1,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=tf.ones([12-1], dtype=tf.float32)),
num_steps_per_season=num_days_per_month,
initial_step=22)
```
"""
def __init__(self,
num_timesteps,
num_seasons,
drift_scale,
initial_state_prior,
observation_noise_scale=1e-4, # Avoid degeneracy.
num_steps_per_season=1,
name=None,
**linear_gaussian_ssm_kwargs): # pylint: disable=g-doc-args
"""Build a seasonal effect state space model with a zero-sum constraint.
{seasonal_init_args}
"""
parameters = dict(locals())
parameters.update(linear_gaussian_ssm_kwargs)
del parameters['linear_gaussian_ssm_kwargs']
with tf.name_scope(name or 'ConstrainedSeasonalStateSpaceModel') as name:
# The initial state prior determines the dtype of sampled values.
# Other model parameters must have the same dtype.
dtype = initial_state_prior.dtype
drift_scale = tf.convert_to_tensor(
value=drift_scale, name='drift_scale', dtype=dtype)
observation_noise_scale = tf.convert_to_tensor(
value=observation_noise_scale,
name='observation_noise_scale',
dtype=dtype)
# Coerce `num_steps_per_season` to a canonical form, an array of
# `num_seasons` integers.
num_steps_per_season = np.squeeze(np.asarray(num_steps_per_season))
if num_steps_per_season.ndim == 0: # scalar case
num_steps_per_season = np.tile(num_steps_per_season, num_seasons)
elif ((num_steps_per_season.ndim <= 2) # 1D and 2D case
and (num_steps_per_season.shape[-1] != num_seasons)):
raise ValueError('num_steps_per_season must either be scalar (shape [])'
' or have the last dimension equal to [num_seasons] = '
'[{}] (saw: shape {})'.format(
num_seasons, num_steps_per_season.shape))
is_last_day_of_season = build_is_last_day_of_season(num_steps_per_season)
[
effects_to_residuals,
residuals_to_effects
] = build_effects_to_residuals_matrix(num_seasons, dtype=dtype)
seasonal_transition_matrix = build_seasonal_transition_matrix(
num_seasons=num_seasons,
is_last_day_of_season=is_last_day_of_season,
dtype=dtype,
basis_change_matrix=effects_to_residuals,
basis_change_matrix_inv=residuals_to_effects)
seasonal_transition_noise = build_constrained_seasonal_transition_noise(
drift_scale, num_seasons, is_last_day_of_season)
observation_matrix = tf.concat(
[tf.ones([1, 1], dtype=dtype),
tf.zeros([1, num_seasons-1], dtype=dtype)], axis=-1)
observation_matrix = tf.matmul(observation_matrix, residuals_to_effects)
self._drift_scale = drift_scale
self._observation_noise_scale = observation_noise_scale
self._num_seasons = num_seasons
self._num_steps_per_season = num_steps_per_season
super(ConstrainedSeasonalStateSpaceModel, self).__init__(
num_timesteps=num_timesteps,
transition_matrix=seasonal_transition_matrix,
transition_noise=seasonal_transition_noise,
observation_matrix=observation_matrix,
observation_noise=tfd.MultivariateNormalDiag(
scale_diag=observation_noise_scale[..., tf.newaxis]),
initial_state_prior=initial_state_prior,
name=name,
**linear_gaussian_ssm_kwargs)
self._parameters = parameters
@property
def drift_scale(self):
"""Standard deviation of the drift in effects between seasonal cycles."""
return self._drift_scale
@property
def observation_noise_scale(self):
"""Standard deviation of the observation noise."""
return self._observation_noise_scale
@property
def num_seasons(self):
"""Number of seasons."""
return self._num_seasons
@property
def num_steps_per_season(self):
"""Number of steps in each season."""
return self._num_steps_per_season
def build_is_last_day_of_season(num_steps_per_season):
"""Build utility method to compute whether the season is changing."""
num_steps_per_cycle = np.sum(num_steps_per_season)
changepoints = np.cumsum(np.ravel(num_steps_per_season)) - 1
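# Within each cycle of length sum(num_steps_per_season), a season ends at
# the zero-indexed steps cumsum(num_steps_per_season) - 1.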
def is_last_day_of_season(t):
t_ = dist_util.maybe_get_static_value(t)
if t_ is not None: # static case
step_in_cycle = t_ % num_steps_per_cycle
return any(step_in_cycle == changepoints)
else:
step_in_cycle = tf.math.floormod(t, num_steps_per_cycle)
return tf.reduce_any(tf.equal(step_in_cycle, changepoints))
return is_last_day_of_season
def build_effects_to_residuals_matrix(num_seasons, dtype):
"""Build change-of-basis matrices for constrained seasonal effects.
This method builds the matrix that transforms seasonal effects into
effect residuals (differences from the mean effect), and additionally
projects these residuals onto the subspace where the mean effect is zero.
See `ConstrainedSeasonalStateSpaceModel` for mathematical details.
Args:
num_seasons: scalar `int` number of seasons.
dtype: TensorFlow `dtype` for the returned values.
Returns:
effects_to_residuals: `Tensor` of shape
`[num_seasons-1, num_seasons]`, such that `differences_from_mean_effect =
matmul(effects_to_residuals, seasonal_effects)`. In the
notation of `ConstrainedSeasonalStateSpaceModel`, this is
`effects_to_residuals = P * R`.
residuals_to_effects: the (pseudo)-inverse of the above; a
`Tensor` of shape `[num_seasons, num_seasons-1]`. In the
notation of `ConstrainedSeasonalStateSpaceModel`, this is
`residuals_to_effects = R^{-1} * P'`.
"""
# Build the matrix that converts effects `e_i` into differences from the mean
# effect `e_i - sum(e_j)/num_seasons`, with the mean effect in the last
# row so that the transformation is invertible.
effects_to_residuals_fullrank = np.eye(num_seasons) - 1./num_seasons
effects_to_residuals_fullrank[-1, :] = 1./num_seasons # compute mean effect
residuals_to_effects_fullrank = np.linalg.inv(effects_to_residuals_fullrank)
# Drop the final dimension, effectively setting the mean effect to zero.
effects_to_residuals = effects_to_residuals_fullrank[:-1, :]
residuals_to_effects = residuals_to_effects_fullrank[:, :-1]
# Return Tensor values of the specified dtype.
effects_to_residuals = tf.cast(
effects_to_residuals, dtype=dtype, name='effects_to_residuals')
residuals_to_effects = tf.cast(
residuals_to_effects, dtype=dtype, name='residuals_to_effects')
return effects_to_residuals, residuals_to_effects
def build_seasonal_transition_matrix(
num_seasons, is_last_day_of_season, dtype,
basis_change_matrix=None, basis_change_matrix_inv=None):
"""Build a function computing transitions for a seasonal effect model."""
with tf.name_scope('build_seasonal_transition_matrix'):
# If the season is changing, the transition matrix permutes the latent
# state to shift all seasons up by a dimension, and sends the current
# season's effect to the bottom.
seasonal_permutation = np.concatenate(
[np.arange(1, num_seasons), [0]], axis=0)
seasonal_permutation_matrix = tf.constant(
np.eye(num_seasons)[seasonal_permutation], dtype=dtype)
# Optionally transform the transition matrix into a reparameterized space,
# enforcing the zero-sum constraint for ConstrainedSeasonalStateSpaceModel.
if basis_change_matrix is not None:
seasonal_permutation_matrix = tf.matmul(
basis_change_matrix,
tf.matmul(seasonal_permutation_matrix, basis_change_matrix_inv))
identity_matrix = tf.eye(
ps.shape(seasonal_permutation_matrix)[-1], dtype=dtype)
def seasonal_transition_matrix(t):
return tf.linalg.LinearOperatorFullMatrix(
matrix=dist_util.pick_scalar_condition(
is_last_day_of_season(t),
seasonal_permutation_matrix,
identity_matrix))
return seasonal_transition_matrix
def build_seasonal_transition_noise(
drift_scale, num_seasons, is_last_day_of_season):
"""Build the transition noise model for a SeasonalStateSpaceModel."""
# If the current season has just ended, increase the variance of its effect
# following drift_scale. (the just-ended seasonal effect will always be the
# bottom element of the vector). Otherwise, do nothing.
drift_scale_diag = tf.stack(
[tf.zeros_like(drift_scale)] * (num_seasons - 1) + [drift_scale],
axis=-1)
def seasonal_transition_noise(t):
noise_scale_diag = dist_util.pick_scalar_condition(
is_last_day_of_season(t),
drift_scale_diag,
tf.zeros_like(drift_scale_diag))
return tfd.MultivariateNormalDiag(
loc=tf.zeros(num_seasons, dtype=drift_scale.dtype),
scale_diag=noise_scale_diag)
return seasonal_transition_noise
def build_constrained_seasonal_transition_noise(
drift_scale, num_seasons, is_last_day_of_season):
"""Build transition noise distribution for a ConstrainedSeasonalSSM."""
# Conceptually, this method takes the noise covariance on effects L @ L'
# computed by `build_seasonal_transition_noise`, with scale factor
# L = [ 0, 0, ..., 0
# ...
# 0, 0, ..., drift_scale],
# and transforms it to act on the constrained-residual representation.
#
# The resulting noise covariance M @ M' is equivalent to
# M @ M' = effects_to_residuals @ LL' @ residuals_to_effects
# where `@` is matrix multiplication. However because this matrix is
# rank-deficient, we can't take its Cholesky decomposition directly, so we'll
# construct its lower-triangular scale factor `M` by hand instead.
#
# Concretely, let `M = P @ R @ L` be the scale factor in the
# transformed space, with matrices `R`, `P` applying the reparameterization
# and zero-mean constraint respectively as defined in the
# "Mathematical Details" section of `ConstrainedSeasonalStateSpaceModel`. It's
# easy to see (*) that the implied covariance
# `M @ M' = P @ R @ L @ L' @ R' @ P'` is just the constant matrix
# `M @ M' = [ 1, 1, ..., 1, 0
# 1, 1, ..., 1, 0
# ...
# 1, 1, ..., 1, 0
# 0, 0, ..., 0, 0] * (drift_scale / num_seasons)**2`
# with zeros in the final row and column. So we can directly construct
# the lower-triangular factor
# `Q = [ 1, 0, ... 0
# 1, 0, ..., 0
# ...
# 1, 0, ..., 0
# 0, 0, ..., 0 ] * drift_scale/num_seasons`
# such that Q @ Q' = M @ M'. In practice, we don't reify the final row and
# column full of zeroes, i.e., we construct
# `Q[:num_seasons-1, :num_seasons-1]` as the scale-TriL covariance factor.
#
# (*) Argument: `L` is zero everywhere but the last column, so `R @ L` will be
# too. Since the last column of `R` is the constant `-1/num_seasons`, `R @ L`
# is simply the matrix with constant `-drift_scale/num_seasons` in the final
# column (except the final row, which is negated) and zero in all other
# columns, and `M = P @ R @ L` additionally zeroes out the final row. Then
# M @ M' is just the outer product of that final column with itself (since all
# other columns are zero), which gives the matrix shown above.
drift_scale_tril_nonzeros = tf.concat([
tf.ones([num_seasons - 1, 1], dtype=drift_scale.dtype),
tf.zeros([num_seasons - 1, num_seasons - 2], dtype=drift_scale.dtype)],
axis=-1)
drift_scale_tril = (drift_scale_tril_nonzeros *
drift_scale[..., tf.newaxis, tf.newaxis] / num_seasons)
# Inject transition noise iff it is the last day of the season.
def seasonal_transition_noise(t):
noise_scale_tril = dist_util.pick_scalar_condition(
is_last_day_of_season(t),
drift_scale_tril,
tf.zeros_like(drift_scale_tril))
return tfd.MultivariateNormalTriL(
loc=tf.zeros(num_seasons-1, dtype=drift_scale.dtype),
scale_tril=noise_scale_tril)
return seasonal_transition_noise
class Seasonal(StructuralTimeSeries):
"""Formal representation of a seasonal effect model.
A seasonal effect model posits a fixed set of recurring, discrete 'seasons',
each of which is active for a fixed number of timesteps and, while active,
contributes a different effect to the time series. These are generally not
meteorological seasons, but represent regular recurring patterns such as
hour-of-day or day-of-week effects. Each season lasts for a fixed number of
timesteps. The effect of each season drifts from one occurrence to the next
following a Gaussian random walk:
```python
effects[season, occurrence[i]] = (
effects[season, occurrence[i-1]] + Normal(loc=0., scale=drift_scale))
```
The `drift_scale` parameter governs the standard deviation of the random walk;
for example, in a day-of-week model it governs the change in effect from this
Monday to next Monday.
#### Examples
A seasonal effect model representing day-of-week seasonality on hourly data:
```python
day_of_week = tfp.sts.Seasonal(num_seasons=7,
num_steps_per_season=24,
observed_time_series=y,
name='day_of_week')
```
A seasonal effect model representing month-of-year seasonality on daily data,
with explicit priors:
```python
month_of_year = tfp.sts.Seasonal(
num_seasons=12,
num_steps_per_season=[31, 28, 31, 30, 30, 31, 31, 31, 30, 31, 30, 31],
drift_scale_prior=tfd.LogNormal(loc=-1., scale=0.1),
initial_effect_prior=tfd.Normal(loc=0., scale=5.),
name='month_of_year')
```
Note that this version works over time periods not involving a leap year. A
general implementation of month-of-year seasonality would require additional
logic:
```python
num_days_per_month = np.array(
[[31, 28, 31, 30, 30, 31, 31, 31, 30, 31, 30, 31],
[31, 29, 31, 30, 30, 31, 31, 31, 30, 31, 30, 31], # year with leap day
[31, 28, 31, 30, 30, 31, 31, 31, 30, 31, 30, 31],
[31, 28, 31, 30, 30, 31, 31, 31, 30, 31, 30, 31]])
month_of_year = tfp.sts.Seasonal(
num_seasons=12,
num_steps_per_season=num_days_per_month,
drift_scale_prior=tfd.LogNormal(loc=-1., scale=0.1),
initial_effect_prior=tfd.Normal(loc=0., scale=5.),
name='month_of_year')
```
A model representing both day-of-week and hour-of-day seasonality, on hourly
data:
```
day_of_week = tfp.sts.Seasonal(num_seasons=7,
num_steps_per_season=24,
observed_time_series=y,
name='day_of_week')
hour_of_day = tfp.sts.Seasonal(num_seasons=24,
num_steps_per_season=1,
observed_time_series=y,
name='hour_of_day')
model = tfp.sts.Sum(components=[day_of_week, hour_of_day],
observed_time_series=y)
```
"""
def __init__(self,
num_seasons,
num_steps_per_season=1,
allow_drift=True,
drift_scale_prior=None,
initial_effect_prior=None,
constrain_mean_effect_to_zero=True,
observed_time_series=None,
name=None):
"""Specify a seasonal effects model.
Args:
num_seasons: Scalar Python `int` number of seasons.
num_steps_per_season: Python `int` number of steps in each
season. This may be either a scalar (shape `[]`), in which case all
seasons have the same length, or a NumPy array of shape `[num_seasons]`,
in which case seasons may have different lengths that stay the same from
cycle to cycle, or a NumPy array of shape `[num_cycles, num_seasons]`,
in which case the length of each season may also vary from cycle to
cycle (e.g., a four-year cycle containing a leap day).
Default value: 1.
allow_drift: optional Python `bool` specifying whether the seasonal
effects can drift over time. Setting this to `False`
removes the `drift_scale` parameter from the model. This is
mathematically equivalent to
`drift_scale_prior = tfd.Deterministic(0.)`, but removing drift
directly is preferred because it avoids the use of a degenerate prior.
Default value: `True`.
drift_scale_prior: optional `tfd.Distribution` instance specifying a prior
on the `drift_scale` parameter. If `None`, a heuristic default prior is
constructed based on the provided `observed_time_series`.
Default value: `None`.
initial_effect_prior: optional `tfd.Distribution` instance specifying a
normal prior on the initial effect of each season. This may be either
a scalar `tfd.Normal` prior, in which case it applies independently to
every season, or it may be multivariate normal (e.g.,
`tfd.MultivariateNormalDiag`) with event shape `[num_seasons]`, in
which case it specifies a joint prior across all seasons. If `None`, a
heuristic default prior is constructed based on the provided
`observed_time_series`.
Default value: `None`.
constrain_mean_effect_to_zero: if `True`, use a model parameterization
that constrains the mean effect across all seasons to be zero. This
constraint is generally helpful in identifying the contributions of
different model components and can lead to more interpretable
posterior decompositions. It may be undesirable if you plan to directly
examine the latent space of the underlying state space model.
Default value: `True`.
observed_time_series: optional `float` `Tensor` of shape
`batch_shape + [T, 1]` (omitting the trailing unit dimension is also
supported when `T > 1`), specifying an observed time series. Any `NaN`s
        are interpreted as missing observations; missingness may also be
explicitly specified by passing a `tfp.sts.MaskedTimeSeries` instance.
Any priors not explicitly set will be given default values according to
the scale of the observed time series (or batch of time series).
Default value: `None`.
name: the name of this model component.
Default value: 'Seasonal'.
"""
init_parameters = dict(locals())
with tf.name_scope(name or 'Seasonal') as name:
_, observed_stddev, observed_initial = (
sts_util.empirical_statistics(observed_time_series)
if observed_time_series is not None else (0., 1., 0.))
# Heuristic default priors. Overriding these may dramatically
# change inference performance and results.
if initial_effect_prior is None:
initial_effect_prior = tfd.Normal(
loc=observed_initial,
scale=tf.abs(observed_initial) + observed_stddev)
dtype = initial_effect_prior.dtype
if drift_scale_prior is None:
scale_factor = tf.convert_to_tensor(.01, dtype=dtype)
drift_scale_prior = tfd.LogNormal(
loc=tf.math.log(scale_factor * observed_stddev),
scale=3.)
if isinstance(initial_effect_prior, tfd.Normal):
initial_state_prior = tfd.MultivariateNormalDiag(
loc=tf.stack([initial_effect_prior.mean()] * num_seasons, axis=-1),
scale_diag=tf.stack([initial_effect_prior.stddev()] * num_seasons,
axis=-1))
else:
initial_state_prior = initial_effect_prior
if constrain_mean_effect_to_zero:
# Transform the prior to the residual parameterization used by
# `ConstrainedSeasonalStateSpaceModel`, imposing a zero-sum constraint.
# This doesn't change the marginal prior on individual effects, but
# does introduce dependence between the effects.
(effects_to_residuals, _) = build_effects_to_residuals_matrix(
num_seasons, dtype=dtype)
effects_to_residuals_linop = tf.linalg.LinearOperatorFullMatrix(
effects_to_residuals) # Use linop so that matmul broadcasts.
initial_state_prior_loc = effects_to_residuals_linop.matvec(
initial_state_prior.mean())
scale_linop = effects_to_residuals_linop.matmul(
initial_state_prior.scale) # returns LinearOperator
initial_state_prior = tfd.MultivariateNormalTriL(
loc=initial_state_prior_loc,
scale_tril=tf.linalg.cholesky(
scale_linop.matmul(scale_linop.to_dense(), adjoint_arg=True)))
self._constrain_mean_effect_to_zero = constrain_mean_effect_to_zero
self._initial_state_prior = initial_state_prior
self._num_seasons = num_seasons
self._num_steps_per_season = num_steps_per_season
parameters = []
if allow_drift:
parameters.append(Parameter(
'drift_scale', drift_scale_prior,
tfb.Chain([tfb.Scale(scale=observed_stddev),
tfb.Softplus(low=dtype_util.eps(dtype))])))
self._allow_drift = allow_drift
super(Seasonal, self).__init__(
parameters,
latent_size=(num_seasons - 1
if self.constrain_mean_effect_to_zero else num_seasons),
init_parameters=init_parameters,
name=name)
@property
def allow_drift(self):
"""Whether the seasonal effects are allowed to drift over time."""
return self._allow_drift
@property
def constrain_mean_effect_to_zero(self):
"""Whether to constrain the mean effect to zero."""
return self._constrain_mean_effect_to_zero
@property
def num_seasons(self):
"""Number of seasons."""
return self._num_seasons
@property
def num_steps_per_season(self):
"""Number of steps per season."""
return self._num_steps_per_season
@property
def initial_state_prior(self):
"""Prior distribution on the initial latent state (level and scale)."""
return self._initial_state_prior
def _make_state_space_model(self,
num_timesteps,
param_map,
initial_state_prior=None,
**linear_gaussian_ssm_kwargs):
if initial_state_prior is None:
initial_state_prior = self.initial_state_prior
if not self.allow_drift:
param_map['drift_scale'] = 0.
linear_gaussian_ssm_kwargs.update(param_map)
if self.constrain_mean_effect_to_zero:
return ConstrainedSeasonalStateSpaceModel(
num_timesteps=num_timesteps,
num_seasons=self.num_seasons,
num_steps_per_season=self.num_steps_per_season,
initial_state_prior=initial_state_prior,
**linear_gaussian_ssm_kwargs)
else:
return SeasonalStateSpaceModel(
num_timesteps=num_timesteps,
num_seasons=self.num_seasons,
num_steps_per_season=self.num_steps_per_season,
initial_state_prior=initial_state_prior,
**linear_gaussian_ssm_kwargs)
|
tensorflow/probability
|
tensorflow_probability/python/sts/components/seasonal.py
|
Python
|
apache-2.0
| 40,275
|
[
"Gaussian"
] |
276e3113b9347e8216305fe68abcc8a685fc5166a173e8c745100fdc84f0d9fc
|
# $Id$
#
# Copyright (C) 2007,2008 Greg Landrum
#
# @@ All Rights Reserved @@
#
import os,sys
import io
import unittest
from rdkit.six.moves import cPickle
from rdkit import RDConfig
from rdkit import DataStructs as ds
def feq(v1,v2,tol=1e-4):
return abs(v1-v2)<tol
class TestCase(unittest.TestCase):
def setUp(self) :
pass
def test1Int(self):
"""
"""
v1 = ds.IntSparseIntVect(5)
self.assertRaises(IndexError,lambda:v1[5])
v1[0]=1
v1[2]=2
v1[3]=3
self.assertTrue(v1==v1)
self.assertTrue(v1.GetLength()==5)
v2= ds.IntSparseIntVect(5)
self.assertTrue(v1!=v2)
v2|=v1
self.assertTrue(v2==v1)
v3=v2|v1
self.assertTrue(v3==v1)
onVs = v1.GetNonzeroElements()
self.assertTrue(onVs=={0:1,2:2,3:3})
def test2Long(self):
"""
"""
l=1<<42
v1 = ds.LongSparseIntVect(l)
self.assertRaises(IndexError,lambda:v1[l])
v1[0]=1
v1[2]=2
v1[1<<35]=3
self.assertTrue(v1==v1)
self.assertTrue(v1.GetLength()==l)
v2= ds.LongSparseIntVect(l)
self.assertTrue(v1!=v2)
v2|=v1
self.assertTrue(v2==v1)
v3=v2|v1
self.assertTrue(v3==v1)
onVs = v1.GetNonzeroElements()
self.assertTrue(onVs=={0:1,2:2,1<<35:3})
def test3Pickle1(self):
"""
"""
l=1<<42
v1 = ds.LongSparseIntVect(l)
self.assertRaises(IndexError,lambda:v1[l+1])
v1[0]=1
v1[2]=2
v1[1<<35]=3
self.assertTrue(v1==v1)
v2= cPickle.loads(cPickle.dumps(v1))
self.assertTrue(v2==v1)
v3= ds.LongSparseIntVect(v2.ToBinary())
self.assertTrue(v2==v3)
self.assertTrue(v1==v3)
#cPickle.dump(v1,file('lsiv.pkl','wb+'))
with open(
os.path.join(RDConfig.RDBaseDir,
'Code/DataStructs/Wrap/testData/lsiv.pkl'),
'r'
) as tf:
buf = tf.read().replace('\r\n', '\n').encode('utf-8')
tf.close()
with io.BytesIO(buf) as f:
v3 = cPickle.load(f)
self.assertTrue(v3==v1)
def test3Pickle2(self):
"""
"""
l=1<<21
v1 = ds.IntSparseIntVect(l)
self.assertRaises(IndexError,lambda:v1[l+1])
v1[0]=1
v1[2]=2
v1[1<<12]=3
self.assertTrue(v1==v1)
v2= cPickle.loads(cPickle.dumps(v1))
self.assertTrue(v2==v1)
v3= ds.IntSparseIntVect(v2.ToBinary())
self.assertTrue(v2==v3)
self.assertTrue(v1==v3)
#cPickle.dump(v1,file('isiv.pkl','wb+'))
with open(
os.path.join(RDConfig.RDBaseDir,
'Code/DataStructs/Wrap/testData/isiv.pkl'),
'r'
) as tf:
buf = tf.read().replace('\r\n', '\n').encode('utf-8')
tf.close()
with io.BytesIO(buf) as f:
v3 = cPickle.load(f)
self.assertTrue(v3==v1)
def test4Update(self):
"""
"""
v1 = ds.IntSparseIntVect(5)
self.assertRaises(IndexError,lambda:v1[6])
v1[0]=1
v1[2]=2
v1[3]=3
self.assertTrue(v1==v1)
v2 = ds.IntSparseIntVect(5)
v2.UpdateFromSequence((0,2,3,3,2,3))
self.assertTrue(v1==v2)
def test5Dice(self):
"""
"""
v1 = ds.IntSparseIntVect(5)
v1[4]=4;
v1[0]=2;
v1[3]=1;
self.assertTrue(feq(ds.DiceSimilarity(v1,v1),1.0))
v1 = ds.IntSparseIntVect(5)
v1[0]=2;
v1[2]=1;
v1[3]=4;
v1[4]=6;
v2 = ds.IntSparseIntVect(5)
v2[1]=2;
v2[2]=3;
v2[3]=4;
v2[4]=4;
self.assertTrue(feq(ds.DiceSimilarity(v1,v2),18.0/26.))
self.assertTrue(feq(ds.DiceSimilarity(v2,v1),18.0/26.))
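    # Dice similarity on sparse count vectors is
    #   2 * sum_k min(v1[k], v2[k]) / (sum(v1) + sum(v2));
    # here the min-overlap is 1 + 4 + 4 = 9 and each vector sums to 13,
    # which gives the 18/26 asserted above.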
def test6BulkDice(self):
"""
"""
sz=10
nToSet=5
nVs=6
import random
vs = []
for i in range(nVs):
v = ds.IntSparseIntVect(sz)
for j in range(nToSet):
v[random.randint(0,sz-1)]=random.randint(1,10)
vs.append(v)
baseDs = [ds.DiceSimilarity(vs[0],vs[x]) for x in range(1,nVs)]
bulkDs = ds.BulkDiceSimilarity(vs[0],vs[1:])
for i in range(len(baseDs)):
self.assertTrue(feq(baseDs[i],bulkDs[i]))
def test6BulkTversky(self):
"""
"""
sz=10
nToSet=5
nVs=6
import random
vs = []
for i in range(nVs):
v = ds.IntSparseIntVect(sz)
for j in range(nToSet):
v[random.randint(0,sz-1)]=random.randint(1,10)
vs.append(v)
baseDs = [ds.TverskySimilarity(vs[0],vs[x],.5,.5) for x in range(1,nVs)]
bulkDs = ds.BulkTverskySimilarity(vs[0],vs[1:],0.5,0.5)
diceDs = [ds.DiceSimilarity(vs[0],vs[x]) for x in range(1,nVs)]
for i in range(len(baseDs)):
self.assertTrue(feq(baseDs[i],bulkDs[i]))
self.assertTrue(feq(baseDs[i],diceDs[i]))
bulkDs = ds.BulkTverskySimilarity(vs[0],vs[1:],1.0,1.0)
taniDs = [ds.TanimotoSimilarity(vs[0],vs[x]) for x in range(1,nVs)]
for i in range(len(bulkDs)):
self.assertTrue(feq(bulkDs[i],taniDs[i]))
taniDs = ds.BulkTanimotoSimilarity(vs[0],vs[1:])
for i in range(len(bulkDs)):
self.assertTrue(feq(bulkDs[i],taniDs[i]))
if __name__ == '__main__':
unittest.main()
|
soerendip42/rdkit
|
Code/DataStructs/Wrap/testSparseIntVect.py
|
Python
|
bsd-3-clause
| 4,950
|
[
"RDKit"
] |
63cb78c8bd2cdf90fb2c4cb5519011a6d477da100c05f1090a592b4bbd38875b
|
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import output, OneTypeList, Struct
from sfepy.discrete.fem.mesh import Mesh
from sfepy.discrete.fem.meshio import MeshIO
from sfepy.solvers.ts import TimeStepper
from sfepy.base.ioutils import get_trunk, write_dict_hdf5
import six
from six.moves import range
def _linearize(out, fields, linearization):
new = {}
for key, val in six.iteritems(out):
field = fields[val.field_name]
new.update(field.create_output(val.data, var_name=key,
dof_names=val.dofs, key=key,
linearization=linearization))
return new
def dump_to_vtk(filename, output_filename_trunk=None, step0=0, steps=None,
fields=None, linearization=None):
"""Dump a multi-time-step results file into a sequence of VTK files."""
def _save_step(suffix, out, mesh):
if linearization is not None:
output('linearizing...')
out = _linearize(out, fields, linearization)
output('...done')
for key, val in six.iteritems(out):
lmesh = val.get('mesh', mesh)
lmesh.write(output_filename_trunk + '_' + key + suffix,
io='auto', out={key : val})
if hasattr(val, 'levels'):
output('max. refinement per group:', val.levels)
else:
mesh.write(output_filename_trunk + suffix, io='auto', out=out)
output('dumping to VTK...')
io = MeshIO.any_from_filename(filename)
mesh = Mesh.from_file(filename, io=io)
if output_filename_trunk is None:
output_filename_trunk = get_trunk(filename)
try:
ts = TimeStepper(*io.read_time_stepper())
all_steps, times, nts, dts = extract_times(filename)
except ValueError:
output('no time stepping info found, assuming single step')
out = io.read_data(0)
if out is not None:
_save_step('.vtk', out, mesh)
ret = None
else:
ts.times = times
ts.n_step = times.shape[0]
if steps is None:
ii0 = nm.searchsorted(all_steps, step0)
iterator = ((all_steps[ii], times[ii])
for ii in range(ii0, len(times)))
else:
iterator = [(step, ts.times[step]) for step in steps]
max_step = all_steps.max()
for step, time in iterator:
output(ts.format % (step, max_step))
out = io.read_data(step)
if out is None: break
_save_step('.' + ts.suffix % step + '.vtk', out, mesh)
ret = ts.suffix
output('...done')
return ret
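# Example call (sketch; 'results.h5' is a hypothetical multi-time-step
# results file produced by a previous simulation run):
#   suffix = dump_to_vtk('results.h5', output_filename_trunk='results_vtk')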
def extract_times(filename):
"""
Read true time step data from individual time steps.
Returns
-------
steps : array
The time steps.
times : array
The times of the time steps.
nts : array
The normalized times of the time steps, in [0, 1].
dts : array
The true time deltas.
"""
io = MeshIO.any_from_filename(filename)
steps, times, nts = io.read_times()
dts = nm.ediff1d(times, to_end=0)
return steps, times, nts, dts
def extract_time_history(filename, extract, verbose=True):
"""Extract time history of a variable from a multi-time-step results file.
Parameters
----------
filename : str
The name of file to extract from.
extract : str
The description of what to extract in a string of comma-separated
description items. A description item consists of: name of the variable
to extract, mode ('e' for elements, 'n' for nodes), ids of the nodes or
elements (given by the mode). Example: 'u n 10 15, p e 0' means
variable 'u' in nodes 10, 15 and variable 'p' in element 0.
verbose : bool
Verbosity control.
Returns
-------
ths : dict
The time histories in a dict with variable names as keys. If a nodal
variable is requested in elements, its value is a dict of histories in
the element nodes.
ts : TimeStepper instance
The time stepping information.
"""
output('extracting selected data...', verbose=verbose)
output('selection:', extract, verbose=verbose)
##
# Parse extractions.
pes = OneTypeList(Struct)
for chunk in extract.split(','):
aux = chunk.strip().split()
pes.append(Struct(var=aux[0],
mode=aux[1],
                          indx=list(map(int, aux[2:]))))
##
# Verify array limits.
mesh = Mesh.from_file(filename)
for pe in pes:
if pe.mode == 'n':
for ii in pe.indx:
if (ii < 0) or (ii >= mesh.n_nod):
raise ValueError('node index 0 <= %d < %d!'
% (ii, mesh.n_nod))
if pe.mode == 'e':
for ii, ie in enumerate(pe.indx[:]):
if (ie < 0) or (ie >= mesh.n_el):
raise ValueError('element index 0 <= %d < %d!'
% (ie, mesh.n_el))
pe.indx[ii] = ie
##
# Extract data.
io = MeshIO.any_from_filename(filename)
ths = {}
for pe in pes:
mode, nname = io.read_data_header(pe.var)
output(mode, nname, verbose=verbose)
if ((pe.mode == 'n' and mode == 'vertex') or
(pe.mode == 'e' and mode == 'cell')):
th = io.read_time_history(nname, pe.indx)
elif pe.mode == 'e' and mode == 'vertex':
conn = mesh.conns[0]
th = {}
for iel in pe.indx:
ips = conn[iel]
th[iel] = io.read_time_history(nname, ips)
else:
raise ValueError('cannot extract cell data %s in nodes!' % pe.var)
ths[pe.var] = th
output('...done', verbose=verbose)
ts = TimeStepper(*io.read_time_stepper())
# Force actual times.
steps, times, nts, dts = extract_times(filename)
ts.times = times
ts.nt = nts
return ths, ts
def average_vertex_var_in_cells(ths_in):
"""Average histories in the element nodes for each nodal variable
originally requested in elements."""
ths = dict.fromkeys(list(ths_in.keys()))
for var, th in six.iteritems(ths_in):
aux = dict.fromkeys(list(th.keys()))
for ir, data in six.iteritems(th):
if isinstance(data, dict):
for ic, ndata in six.iteritems(data):
if aux[ir] is None:
aux[ir] = ndata
else:
aux[ir] += ndata
aux[ir] /= float(len(data))
else:
aux[ir] = data
ths[var] = aux
return ths
def save_time_history(ths, ts, filename_out):
"""Save time history and time-stepping information in a HDF5 file."""
ths.update({'times' : ts.times, 'dt' : ts.dt})
write_dict_hdf5(filename_out, ths)
def guess_time_units(times):
"""
Given a vector of times in seconds, return suitable time units and
new vector of times suitable for plotting.
Parameters
----------
times : array
The vector of times in seconds.
Returns
-------
new_times : array
The vector of times in `units`.
units : str
The time units.
"""
times = nm.asarray(times)
if (times[-1] / 60.0 / 60.0) > 10.0:
units = 'hours'
new_times = times / 60.0 / 60.0
elif (times[-1] / 60.0) > 10.0:
units = 'min.'
new_times = times / 60.0
else:
units = 's'
new_times = times
return new_times, units
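# For example, guess_time_units(nm.linspace(0.0, 7200.0, 10)) rescales the
# times to minutes and returns 'min.' as the unit string.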
|
vlukes/sfepy
|
sfepy/postprocess/time_history.py
|
Python
|
bsd-3-clause
| 7,716
|
[
"VTK"
] |
1d8cfda61776faccdb6ff12237cc6cb0d850521cfdbccda1048fd56966141924
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein, Ant1, Marius van Voorden
#
# This code is subject to the (new) BSD license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Module images2gif
Provides functionality for reading and writing animated GIF images.
Use writeGif to write a series of numpy arrays or PIL images as an
animated GIF. Use readGif to read an animated gif as a series of numpy
arrays.
Note that since July 2004, all patents on the LZW compression algorithm have
expired. Therefore the GIF format may now be used freely.
Acknowledgements
----------------
Many thanks to Ant1 for:
* noting the use of "palette=PIL.Image.ADAPTIVE", which significantly
improves the results.
* the modifications to save each image with its own palette, or optionally
the global palette (if its the same).
Many thanks to Marius van Voorden for porting the NeuQuant quantization
algorithm of Anthony Dekker to Python (See the NeuQuant class for its
license).
Many thanks to Alex Robinson for implementing the concept of subrectangles,
which (depending on image content) can give a very significant reduction in
file size.
This code is based on gifmaker (in the scripts folder of the source
distribution of PIL)
Useful links
-------------
* http://tronche.com/computer-graphics/gif/
* http://en.wikipedia.org/wiki/Graphics_Interchange_Format
* http://www.w3.org/Graphics/GIF/spec-gif89a.txt
"""
# todo: This module should be part of imageio (or at least based on)
import os, time
try:
import PIL
from PIL import Image
from PIL.GifImagePlugin import getheader, getdata
except ImportError:
PIL = None
try:
import numpy as np
except ImportError:
np = None
def get_cKDTree():
try:
from scipy.spatial import cKDTree
except ImportError:
cKDTree = None
return cKDTree
# getheader gives a 87a header and a color palette (two elements in a list).
# getdata()[0] gives the Image Descriptor up to (including) "LZW min code size".
# getdata()[1:] is the image data itself in chunks of 256 bytes (well
# technically the first byte says how many bytes follow, after which that
# amount (max 255) follows).
def checkImages(images):
""" checkImages(images)
Check numpy images and correct intensity range etc.
The same for all movie formats.
"""
# Init results
images2 = []
for im in images:
if PIL and isinstance(im, PIL.Image.Image):
            # We assume PIL images are all right
images2.append(im)
elif np and isinstance(im, np.ndarray):
# Check and convert dtype
if im.dtype == np.uint8:
images2.append(im) # Ok
elif im.dtype in [np.float32, np.float64]:
im = im.copy()
im[im<0] = 0
im[im>1] = 1
im *= 255
images2.append( im.astype(np.uint8) )
else:
im = im.astype(np.uint8)
images2.append(im)
# Check size
if im.ndim == 2:
pass # ok
elif im.ndim == 3:
if im.shape[2] not in [3,4]:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('Invalid image type: ' + str(type(im)))
# Done
return images2
def intToBin(i):
""" Integer to two bytes """
    # divide into two parts (bytes)
i1 = i % 256
i2 = int( i/256)
# make string (little endian)
return chr(i1) + chr(i2)
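# For example, intToBin(320) == '\x40\x01': the low byte (0x40) is written
# first, i.e. little endian.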
class GifWriter:
""" GifWriter()
Class that contains methods for helping write the animated GIF file.
"""
def getheaderAnim(self, im):
""" getheaderAnim(im)
Get animation header. To replace PILs getheader()[0]
"""
bb = "GIF89a"
bb += intToBin(im.size[0])
bb += intToBin(im.size[1])
bb += "\x87\x00\x00"
return bb
def getImageDescriptor(self, im, xy=None):
""" getImageDescriptor(im, xy=None)
Used for the local color table properties per image.
Otherwise global color table applies to all frames irrespective of
whether additional colors comes in play that require a redefined
palette. Still a maximum of 256 color per frame, obviously.
Written by Ant1 on 2010-08-22
        Modified by Alex Robinson in January 2011 to implement subrectangles.
"""
        # Default: use full image and place at upper left
if xy is None:
xy = (0,0)
# Image separator,
bb = '\x2C'
# Image position and size
bb += intToBin( xy[0] ) # Left position
bb += intToBin( xy[1] ) # Top position
bb += intToBin( im.size[0] ) # image width
bb += intToBin( im.size[1] ) # image height
# packed field: local color table flag1, interlace0, sorted table0,
# reserved00, lct size111=7=2^(7+1)=256.
bb += '\x87'
        # LZW minimum size code now comes later, at the beginning of [image data] blocks
return bb
def getAppExt(self, loops=float('inf')):
""" getAppExt(loops=float('inf'))
        Application extension. This part specifies the number of loops.
If loops is 0 or inf, it goes on infinitely.
"""
if loops==0 or loops==float('inf'):
loops = 2**16-1
#bb = "" # application extension should not be used
# (the extension interprets zero loops
# to mean an infinite number of loops)
# Mmm, does not seem to work
if True:
bb = "\x21\xFF\x0B" # application extension
bb += "NETSCAPE2.0"
bb += "\x03\x01"
bb += intToBin(loops)
bb += '\x00' # end
return bb
def getGraphicsControlExt(self, duration=0.1, dispose=2):
""" getGraphicsControlExt(duration=0.1, dispose=2)
Graphics Control Extension. A sort of header at the start of
        each image. Specifies duration and transparency.
Dispose
-------
* 0 - No disposal specified.
* 1 - Do not dispose. The graphic is to be left in place.
* 2 - Restore to background color. The area used by the graphic
must be restored to the background color.
* 3 - Restore to previous. The decoder is required to restore the
area overwritten by the graphic with what was there prior to
rendering the graphic.
* 4-7 -To be defined.
"""
bb = '\x21\xF9\x04'
bb += chr((dispose & 3) << 2) # low bit 1 == transparency,
# 2nd bit 1 == user input , next 3 bits, the low two of which are used,
# are dispose.
bb += intToBin( int(duration*100) ) # in 100th of seconds
        bb += '\x00' # no transparent color
bb += '\x00' # end
return bb
def handleSubRectangles(self, images, subRectangles):
""" handleSubRectangles(images)
Handle the sub-rectangle stuff. If the rectangles are given by the
user, the values are checked. Otherwise the subrectangles are
calculated automatically.
"""
if isinstance(subRectangles, (tuple,list)):
# xy given directly
# Check xy
xy = subRectangles
if xy is None:
xy = (0,0)
if hasattr(xy, '__len__'):
if len(xy) == len(images):
xy = [xxyy for xxyy in xy]
else:
raise ValueError("len(xy) doesn't match amount of images.")
else:
xy = [xy for im in images]
xy[0] = (0,0)
else:
# Calculate xy using some basic image processing
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy to use auto-subRectangles.")
# First make numpy arrays if required
for i in range(len(images)):
im = images[i]
if isinstance(im, Image.Image):
tmp = im.convert() # Make without palette
a = np.asarray(tmp)
if len(a.shape)==0:
raise MemoryError("Too little memory to convert PIL image to array")
images[i] = a
# Determine the sub rectangles
images, xy = self.getSubRectangles(images)
# Done
return images, xy
def getSubRectangles(self, ims):
""" getSubRectangles(ims)
Calculate the minimal rectangles that need updating each frame.
Returns a two-element tuple containing the cropped images and a
list of x-y positions.
Calculating the subrectangles takes extra time, obviously. However,
if the image sizes were reduced, the actual writing of the GIF
goes faster. In some cases applying this method produces a GIF faster.
"""
# Check image count
if len(ims) < 2:
return ims, [(0,0) for i in ims]
# We need numpy
if np is None:
raise RuntimeError("Need Numpy to calculate sub-rectangles. ")
# Prepare
ims2 = [ims[0]]
xy = [(0,0)]
t0 = time.time()
# Iterate over images
prev = ims[0]
for im in ims[1:]:
# Get difference, sum over colors
diff = np.abs(im-prev)
if diff.ndim==3:
diff = diff.sum(2)
# Get begin and end for both dimensions
X = np.argwhere(diff.sum(0))
Y = np.argwhere(diff.sum(1))
# Get rect coordinates
if X.size and Y.size:
x0, x1 = X[0], X[-1]+1
y0, y1 = Y[0], Y[-1]+1
else: # No change ... make it minimal
x0, x1 = 0, 2
y0, y1 = 0, 2
# Cut out and store
im2 = im[y0:y1,x0:x1]
prev = im
ims2.append(im2)
xy.append((x0,y0))
# Done
#print('%1.2f seconds to determine subrectangles of %i images' %
# (time.time()-t0, len(ims2)) )
return ims2, xy
def convertImagesToPIL(self, images, dither, nq=0):
""" convertImagesToPIL(images, nq=0)
Convert images to Paletted PIL images, which can then be
        written to a single animated GIF.
"""
# Convert to PIL images
images2 = []
for im in images:
if isinstance(im, Image.Image):
images2.append(im)
elif np and isinstance(im, np.ndarray):
if im.ndim==3 and im.shape[2]==3:
im = Image.fromarray(im,'RGB')
elif im.ndim==3 and im.shape[2]==4:
im = Image.fromarray(im[:,:,:3],'RGB')
elif im.ndim==2:
im = Image.fromarray(im,'L')
images2.append(im)
# Convert to paletted PIL images
images, images2 = images2, []
if nq >= 1:
# NeuQuant algorithm
for im in images:
im = im.convert("RGBA") # NQ assumes RGBA
nqInstance = NeuQuant(im, int(nq)) # Learn colors from image
if dither:
im = im.convert("RGB").quantize(palette=nqInstance.paletteImage())
else:
im = nqInstance.quantize(im) # Use to quantize the image itself
images2.append(im)
else:
# Adaptive PIL algorithm
AD = Image.ADAPTIVE
for im in images:
im = im.convert('P', palette=AD, dither=dither)
images2.append(im)
# Done
return images2
def writeGifToFile(self, fp, images, durations, loops, xys, disposes):
""" writeGifToFile(fp, images, durations, loops, xys, disposes)
Given a set of images writes the bytes to the specified stream.
"""
        # Obtain palette for all images and count each occurrence
palettes, occur = [], []
for im in images:
palettes.append( getheader(im)[1] )
for palette in palettes:
occur.append( palettes.count( palette ) )
# Select most-used palette as the global one (or first in case no max)
globalPalette = palettes[ occur.index(max(occur)) ]
# Init
frames = 0
firstFrame = True
for im, palette in zip(images, palettes):
if firstFrame:
# Write header
# Gather info
header = self.getheaderAnim(im)
appext = self.getAppExt(loops)
# Write
fp.write(header.encode('utf-8'))
fp.write(globalPalette)
fp.write(appext.encode('utf-8'))
# Next frame is not the first
firstFrame = False
if True:
# Write palette and image data
# Gather info
data = getdata(im)
imdes, data = data[0], data[1:]
graphext = self.getGraphicsControlExt(durations[frames],
disposes[frames])
# Make image descriptor suitable for using 256 local color palette
lid = self.getImageDescriptor(im, xys[frames])
# Write local header
if (palette != globalPalette) or (disposes[frames] != 2):
# Use local color palette
fp.write(graphext.encode('utf-8'))
fp.write(lid) # write suitable image descriptor
fp.write(palette) # write local color table
fp.write('\x08') # LZW minimum size code
else:
# Use global color palette
fp.write(graphext.encode('utf-8'))
fp.write(imdes) # write suitable image descriptor
# Write image data
for d in data:
fp.write(d)
# Prepare for next round
frames = frames + 1
fp.write(";".encode('utf-8')) # end gif
return frames
## Exposed functions
def writeGif(filename, images, duration=0.1, repeat=True, dither=False,
nq=0, subRectangles=True, dispose=None):
""" writeGif(filename, images, duration=0.1, repeat=True, dither=False,
nq=0, subRectangles=True, dispose=None)
Write an animated gif from the specified images.
Parameters
----------
filename : string
The name of the file to write the image to.
images : list
Should be a list consisting of PIL images or numpy arrays.
The latter should be between 0 and 255 for integer types, and
between 0 and 1 for float types.
duration : scalar or list of scalars
The duration for all frames, or (if a list) for each frame.
repeat : bool or integer
        The number of loops. If True, loops infinitely.
dither : bool
Whether to apply dithering
nq : integer
If nonzero, applies the NeuQuant quantization algorithm to create
the color palette. This algorithm is superior, but slower than
the standard PIL algorithm. The value of nq is the quality
parameter. 1 represents the best quality. 10 is in general a
good tradeoff between quality and speed. When using this option,
better results are usually obtained when subRectangles is False.
subRectangles : False, True, or a list of 2-element tuples
Whether to use sub-rectangles. If True, the minimal rectangle that
is required to update each frame is automatically detected. This
can give significant reductions in file size, particularly if only
a part of the image changes. One can also give a list of x-y
coordinates if you want to do the cropping yourself. The default
is True.
dispose : int
How to dispose each frame. 1 means that each frame is to be left
in place. 2 means the background color should be restored after
each frame. 3 means the decoder should restore the previous frame.
If subRectangles==False, the default is 2, otherwise it is 1.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to write animated gif files.")
# Check images
images = checkImages(images)
# Instantiate writer object
gifWriter = GifWriter()
# Check loops
if repeat is False:
loops = 1
elif repeat is True:
loops = 0 # zero means infinite
else:
loops = int(repeat)
# Check duration
if hasattr(duration, '__len__'):
if len(duration) == len(images):
duration = [d for d in duration]
else:
raise ValueError("len(duration) doesn't match amount of images.")
else:
duration = [duration for im in images]
# Check subrectangles
if subRectangles:
images, xy = gifWriter.handleSubRectangles(images, subRectangles)
defaultDispose = 1 # Leave image in place
else:
# Normal mode
xy = [(0,0) for im in images]
defaultDispose = 2 # Restore to background color.
# Check dispose
if dispose is None:
dispose = defaultDispose
if hasattr(dispose, '__len__'):
if len(dispose) != len(images):
raise ValueError("len(xy) doesn't match amount of images.")
else:
dispose = [dispose for im in images]
# Make images in a format that we can write easy
images = gifWriter.convertImagesToPIL(images, dither, nq)
# Write
fp = open(filename, 'wb')
try:
gifWriter.writeGifToFile(fp, images, duration, loops, xy, dispose)
finally:
fp.close()
def readGif(filename, asNumpy=True):
""" readGif(filename, asNumpy=True)
Read images from an animated GIF file. Returns a list of numpy
    arrays, or, if asNumpy is false, a list of PIL images.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to read animated gif files.")
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy to read animated gif files.")
# Check whether it exists
if not os.path.isfile(filename):
raise IOError('File not found: '+str(filename))
# Load file using PIL
pilIm = PIL.Image.open(filename)
pilIm.seek(0)
# Read all images inside
images = []
try:
while True:
# Get image as numpy array
tmp = pilIm.convert() # Make without palette
a = np.asarray(tmp)
if len(a.shape)==0:
raise MemoryError("Too little memory to convert PIL image to array")
# Store, and next
images.append(a)
pilIm.seek(pilIm.tell()+1)
except EOFError:
pass
# Convert to normal PIL images if needed
if not asNumpy:
images2 = images
images = []
for im in images2:
images.append( PIL.Image.fromarray(im) )
# Done
return images
class NeuQuant:
""" NeuQuant(image, samplefac=10, colors=256)
    samplefac should be an integer of 1 or higher, 1
    being the highest quality, but the slowest performance.
    With a value of 10, one tenth of all pixels are used during
    training. This value seems a nice tradeoff between speed
    and quality.
    colors is the number of colors to reduce the image to. This
    should preferably be a power of two.
See also:
http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
License of the NeuQuant Neural-Net Quantization Algorithm
---------------------------------------------------------
Copyright (c) 1994 Anthony Dekker
Ported to python by Marius van Voorden in 2010
NEUQUANT Neural-Net quantization algorithm by Anthony Dekker, 1994.
See "Kohonen neural networks for optimal colour quantization"
in "network: Computation in Neural Systems" Vol. 5 (1994) pp 351-367.
for a discussion of the algorithm.
See also http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
Any party obtaining a copy of these files from the author, directly or
indirectly, is granted, free of charge, a full and unrestricted irrevocable,
world-wide, paid up, royalty-free, nonexclusive right and license to deal
in this software and documentation files (the "Software"), including without
limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons who receive
copies from any such party to do so, with the only requirement being
that this copyright notice remain intact.
"""
NCYCLES = None # Number of learning cycles
NETSIZE = None # Number of colours used
SPECIALS = None # Number of reserved colours used
BGCOLOR = None # Reserved background colour
CUTNETSIZE = None
MAXNETPOS = None
INITRAD = None # For 256 colours, radius starts at 32
RADIUSBIASSHIFT = None
RADIUSBIAS = None
INITBIASRADIUS = None
RADIUSDEC = None # Factor of 1/30 each cycle
ALPHABIASSHIFT = None
INITALPHA = None # biased by 10 bits
GAMMA = None
BETA = None
BETAGAMMA = None
network = None # The network itself
colormap = None # The network itself
netindex = None # For network lookup - really 256
bias = None # Bias and freq arrays for learning
freq = None
pimage = None
# Four primes near 500 - assume no image has a length so large
# that it is divisible by all four primes
PRIME1 = 499
PRIME2 = 491
PRIME3 = 487
PRIME4 = 503
MAXPRIME = PRIME4
pixels = None
samplefac = None
a_s = None
def setconstants(self, samplefac, colors):
self.NCYCLES = 100 # Number of learning cycles
self.NETSIZE = colors # Number of colours used
self.SPECIALS = 3 # Number of reserved colours used
self.BGCOLOR = self.SPECIALS-1 # Reserved background colour
self.CUTNETSIZE = self.NETSIZE - self.SPECIALS
self.MAXNETPOS = self.NETSIZE - 1
self.INITRAD = self.NETSIZE/8 # For 256 colours, radius starts at 32
self.RADIUSBIASSHIFT = 6
self.RADIUSBIAS = 1 << self.RADIUSBIASSHIFT
self.INITBIASRADIUS = self.INITRAD * self.RADIUSBIAS
self.RADIUSDEC = 30 # Factor of 1/30 each cycle
self.ALPHABIASSHIFT = 10 # Alpha starts at 1
self.INITALPHA = 1 << self.ALPHABIASSHIFT # biased by 10 bits
self.GAMMA = 1024.0
self.BETA = 1.0/1024.0
self.BETAGAMMA = self.BETA * self.GAMMA
self.network = np.empty((self.NETSIZE, 3), dtype='float64') # The network itself
self.colormap = np.empty((self.NETSIZE, 4), dtype='int32') # The network itself
self.netindex = np.empty(256, dtype='int32') # For network lookup - really 256
self.bias = np.empty(self.NETSIZE, dtype='float64') # Bias and freq arrays for learning
self.freq = np.empty(self.NETSIZE, dtype='float64')
self.pixels = None
self.samplefac = samplefac
self.a_s = {}
def __init__(self, image, samplefac=10, colors=256):
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy for the NeuQuant algorithm.")
# Check image
if image.size[0] * image.size[1] < NeuQuant.MAXPRIME:
raise IOError("Image is too small")
if image.mode != "RGBA":
raise IOError("Image mode should be RGBA.")
# Initialize
self.setconstants(samplefac, colors)
self.pixels = np.fromstring(image.tostring(), np.uint32)
self.setUpArrays()
self.learn()
self.fix()
self.inxbuild()
def writeColourMap(self, rgb, outstream):
for i in range(self.NETSIZE):
bb = self.colormap[i,0];
gg = self.colormap[i,1];
rr = self.colormap[i,2];
outstream.write(rr if rgb else bb)
outstream.write(gg)
outstream.write(bb if rgb else rr)
return self.NETSIZE
def setUpArrays(self):
self.network[0,0] = 0.0 # Black
self.network[0,1] = 0.0
self.network[0,2] = 0.0
self.network[1,0] = 255.0 # White
self.network[1,1] = 255.0
self.network[1,2] = 255.0
# RESERVED self.BGCOLOR # Background
for i in range(self.SPECIALS):
self.freq[i] = 1.0 / self.NETSIZE
self.bias[i] = 0.0
for i in range(self.SPECIALS, self.NETSIZE):
p = self.network[i]
p[:] = (255.0 * (i-self.SPECIALS)) / self.CUTNETSIZE
self.freq[i] = 1.0 / self.NETSIZE
self.bias[i] = 0.0
# Omitted: setPixels
def altersingle(self, alpha, i, b, g, r):
"""Move neuron i towards biased (b,g,r) by factor alpha"""
n = self.network[i] # Alter hit neuron
n[0] -= (alpha*(n[0] - b))
n[1] -= (alpha*(n[1] - g))
n[2] -= (alpha*(n[2] - r))
def geta(self, alpha, rad):
try:
return self.a_s[(alpha, rad)]
except KeyError:
length = rad*2-1
mid = length/2
q = np.array(list(range(mid-1,-1,-1))+list(range(-1,mid)))
a = alpha*(rad*rad - q*q)/(rad*rad)
a[mid] = 0
self.a_s[(alpha, rad)] = a
return a
def alterneigh(self, alpha, rad, i, b, g, r):
if i-rad >= self.SPECIALS-1:
lo = i-rad
start = 0
else:
lo = self.SPECIALS-1
start = (self.SPECIALS-1 - (i-rad))
if i+rad <= self.NETSIZE:
hi = i+rad
end = rad*2-1
else:
hi = self.NETSIZE
end = (self.NETSIZE - (i+rad))
a = self.geta(alpha, rad)[start:end]
p = self.network[lo+1:hi]
p -= np.transpose(np.transpose(p - np.array([b, g, r])) * a)
#def contest(self, b, g, r):
# """ Search for biased BGR values
# Finds closest neuron (min dist) and updates self.freq
# finds best neuron (min dist-self.bias) and returns position
# for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
# self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
#
# i, j = self.SPECIALS, self.NETSIZE
# dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
# bestpos = i + np.argmin(dists)
# biasdists = dists - self.bias[i:j]
# bestbiaspos = i + np.argmin(biasdists)
# self.freq[i:j] -= self.BETA * self.freq[i:j]
# self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
# self.freq[bestpos] += self.BETA
# self.bias[bestpos] -= self.BETAGAMMA
# return bestbiaspos
def contest(self, b, g, r):
""" Search for biased BGR values
Finds closest neuron (min dist) and updates self.freq
finds best neuron (min dist-self.bias) and returns position
for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
i, j = self.SPECIALS, self.NETSIZE
dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
bestpos = i + np.argmin(dists)
biasdists = dists - self.bias[i:j]
bestbiaspos = i + np.argmin(biasdists)
self.freq[i:j] *= (1-self.BETA)
self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
self.freq[bestpos] += self.BETA
self.bias[bestpos] -= self.BETAGAMMA
return bestbiaspos
def specialFind(self, b, g, r):
for i in range(self.SPECIALS):
n = self.network[i]
if n[0] == b and n[1] == g and n[2] == r:
return i
return -1
def learn(self):
biasRadius = self.INITBIASRADIUS
alphadec = 30 + ((self.samplefac-1)/3)
lengthcount = self.pixels.size
samplepixels = lengthcount / self.samplefac
delta = samplepixels / self.NCYCLES
alpha = self.INITALPHA
i = 0;
rad = biasRadius >> self.RADIUSBIASSHIFT
if rad <= 1:
rad = 0
print("Beginning 1D learning: samplepixels = %1.2f rad = %i" %
(samplepixels, rad) )
step = 0
pos = 0
if lengthcount%NeuQuant.PRIME1 != 0:
step = NeuQuant.PRIME1
elif lengthcount%NeuQuant.PRIME2 != 0:
step = NeuQuant.PRIME2
elif lengthcount%NeuQuant.PRIME3 != 0:
step = NeuQuant.PRIME3
else:
step = NeuQuant.PRIME4
i = 0
printed_string = ''
while i < samplepixels:
if i%100 == 99:
tmp = '\b'*len(printed_string)
printed_string = str((i+1)*100/samplepixels)+"%\n"
print(tmp + printed_string)
p = self.pixels[pos]
r = (p >> 16) & 0xff
g = (p >> 8) & 0xff
b = (p ) & 0xff
if i == 0: # Remember background colour
self.network[self.BGCOLOR] = [b, g, r]
j = self.specialFind(b, g, r)
if j < 0:
j = self.contest(b, g, r)
if j >= self.SPECIALS: # Don't learn for specials
a = (1.0 * alpha) / self.INITALPHA
self.altersingle(a, j, b, g, r)
if rad > 0:
self.alterneigh(a, rad, j, b, g, r)
pos = (pos+step)%lengthcount
i += 1
if i%delta == 0:
alpha -= alpha / alphadec
biasRadius -= biasRadius / self.RADIUSDEC
rad = biasRadius >> self.RADIUSBIASSHIFT
if rad <= 1:
rad = 0
finalAlpha = (1.0*alpha)/self.INITALPHA
print("Finished 1D learning: final alpha = %1.2f!" % finalAlpha)
def fix(self):
for i in range(self.NETSIZE):
for j in range(3):
x = int(0.5 + self.network[i,j])
x = max(0, x)
x = min(255, x)
self.colormap[i,j] = x
self.colormap[i,3] = i
def inxbuild(self):
previouscol = 0
startpos = 0
for i in range(self.NETSIZE):
p = self.colormap[i]
q = None
smallpos = i
smallval = p[1] # Index on g
# Find smallest in i..self.NETSIZE-1
for j in range(i+1, self.NETSIZE):
q = self.colormap[j]
if q[1] < smallval: # Index on g
smallpos = j
smallval = q[1] # Index on g
q = self.colormap[smallpos]
# Swap p (i) and q (smallpos) entries
if i != smallpos:
p[:],q[:] = q, p.copy()
# smallval entry is now in position i
if smallval != previouscol:
self.netindex[previouscol] = (startpos+i) >> 1
for j in range(previouscol+1, smallval):
self.netindex[j] = i
previouscol = smallval
startpos = i
self.netindex[previouscol] = (startpos+self.MAXNETPOS) >> 1
for j in range(previouscol+1, 256): # Really 256
self.netindex[j] = self.MAXNETPOS
def paletteImage(self):
""" PIL weird interface for making a paletted image: create an image which
already has the palette, and use that in Image.quantize. This function
returns this palette image. """
if self.pimage is None:
palette = []
for i in range(self.NETSIZE):
palette.extend(self.colormap[i][:3])
palette.extend([0]*(256-self.NETSIZE)*3)
# a palette image to use for quant
self.pimage = Image.new("P", (1, 1), 0)
self.pimage.putpalette(palette)
return self.pimage
def quantize(self, image):
""" Use a kdtree to quickly find the closest palette colors for the pixels """
if get_cKDTree():
return self.quantize_with_scipy(image)
else:
print('Scipy not available, falling back to slower version.')
return self.quantize_without_scipy(image)
def quantize_with_scipy(self, image):
w,h = image.size
px = np.asarray(image).copy()
px2 = px[:,:,:3].reshape((w*h,3))
cKDTree = get_cKDTree()
kdtree = cKDTree(self.colormap[:,:3],leafsize=10)
result = kdtree.query(px2)
colorindex = result[1]
print("Distance: %1.2f" % (result[0].sum()/(w*h)) )
px2[:] = self.colormap[colorindex,:3]
return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def quantize_without_scipy(self, image):
"""" This function can be used if no scipy is availabe.
It's 7 times slower though.
"""
w,h = image.size
px = np.asarray(image).copy()
memo = {}
for j in range(w):
for i in range(h):
key = (px[i,j,0],px[i,j,1],px[i,j,2])
try:
val = memo[key]
except KeyError:
val = self.convert(*key)
memo[key] = val
px[i,j,0],px[i,j,1],px[i,j,2] = val
return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def convert(self, *color):
i = self.inxsearch(*color)
return self.colormap[i,:3]
def inxsearch(self, r, g, b):
"""Search for BGR values 0..255 and return colour index"""
dists = (self.colormap[:,:3] - np.array([r,g,b]))
a= np.argmin((dists*dists).sum(1))
return a
if __name__ == '__main__':
im = np.zeros((200,200), dtype=np.uint8)
im[10:30,:] = 100
im[:,80:120] = 255
im[-50:-40,:] = 50
images = [im*1.0, im*0.8, im*0.6, im*0.4, im*0]
writeGif('lala3.gif',images, duration=0.5, dither=0)
|
wangtai/py_gif_processor
|
images2gif.py
|
Python
|
apache-2.0
| 37,041
|
[
"NEURON"
] |
ca51e40e66ce11c34ec9064a87fd98d694c56cd3601a928578ceaef75c786917
|
"""Classes to handle SAXS (Small Angle X-ray Scattering) data"""
import _modeller
from modeller.util.modobject import modobject
from modeller.util import modlist, array
__docformat__ = "epytext en"
class SAXSList(modlist.LinkList):
"""A list of L{saxsdata} objects"""
def __init__(self, edat):
self.__edat = edat
self.__list = []
modlist.LinkList.__init__(self)
def _insfunc(self, indx, obj):
_modeller.mod_saxsdata_pt_new(self.__edat, indx, obj.modpt)
self.__list.insert(indx, obj)
def __len__(self):
return len(self.__list)
def _getfunc(self, indx):
return self.__list[indx]
def _delfunc(self, indx):
del self.__list[indx]
_modeller.mod_saxsdata_pt_del(self.__edat, indx)
class saxsdata(modobject):
"""Holds all SAXS (Small Angle X-ray Scattering) data"""
__modpt = None
env = None
def __init__(self, env, **vars):
self.__modpt = _modeller.mod_saxsdata_new()
self.env = env.copy()
def __setstate__(self, d):
self.__dict__.update(d)
self.__modpt = _modeller.mod_saxsdata_new()
def __del__(self):
if self.__modpt:
_modeller.mod_saxsdata_free(self.__modpt)
def __get_modpt(self):
return self.__modpt
def write(self, file):
"""Write SAXS data, which is currently in memory """
fh=open(file,'w')
for ii in range(0,self.ns):
fh.write('%10.7f ' % self.s[ii] + '%15.5f ' % self.intensity[ii] +'%15.5f\n' % self.int_exp[ii] )
fh.close()
def pr_score(self, mdl, maxr, filename=None, use_err=False, rfact=False):
"""Calculates P(r) score of a model by comparing model P(r) to expt
data saxs.pr_exp.
@param mdl: model
@param maxr: maximum radius to score P(r) in A
@param filename: filename of P(r) model to write (scaled to expt data)
@param use_err: use experimental error?
@param rfact: use rfactor as score?
@return: (pr score,
scaling factor of model P(r) to match expt. P(r))"""
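        # With the defaults (use_err=False, rfact=False) the model P(r) is
        # scaled by scf = <P_exp, P_model> / <P_model, P_model> and the score
        # is the normalized squared residual
        #   psc = || scf * P_model - P_exp ||^2 / || P_exp ||^2.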
from math import sqrt
sum_e = 0.0
sum_mm = 0.0
sum_em = 0.0
sum_ee = 0.0
mdl.saxs_pr(self, filename='None')
imaxr = min( int(maxr/self.dr_exp)+1, len(self.p_r_exp))
if (rfact):
for ii in range(0, imaxr):
sum_mm = sum_mm + self.p_r_resamp[ii]
sum_ee = sum_ee + self.p_r_exp[ii]
scf = sum_ee/sum_mm
sum_ee = 0.0
for ii in range(0, imaxr):
sum_em = sum_em + abs(self.p_r_exp[ii]-scf*self.p_r_resamp[ii])
sum_ee = sum_ee + abs(self.p_r_exp[ii])
psc = sum_em/sum_ee
else:
if (use_err):
for ii in range(0, imaxr):
sum_mm = sum_mm + self.p_r_resamp[ii]*self.p_r_resamp[ii]/self.p_r_sig[ii]
sum_em = sum_em + self.p_r_exp[ii]*self.p_r_resamp[ii]/self.p_r_sig[ii]
sum_ee = sum_ee + self.p_r_exp[ii]*self.p_r_exp[ii]/self.p_r_sig[ii]
else:
for ii in range(0, imaxr):
sum_mm = sum_mm + self.p_r_resamp[ii]*self.p_r_resamp[ii]
sum_em = sum_em + self.p_r_exp[ii]*self.p_r_resamp[ii]
sum_ee = sum_ee + self.p_r_exp[ii]*self.p_r_exp[ii]
norm_e = sqrt(sum_ee)
scf = sum_em / sum_mm
scf_norm = scf / norm_e
#psc = sum_mm*scf*scf + sum_ee - 2.*scf * sum_em
#psc = psc / (sqrt(sum_ee))
psc = sum_mm*scf_norm*scf_norm + 1 - 2.*scf_norm * sum_em / norm_e
if (filename):
fhandle=open(filename, 'w')
for ii in range(0, len(self.p_r_exp)):
tmp = scf*self.p_r_resamp[ii]
fhandle.write('%10.5f ' % self.r_exp[ii] +
' %15.6f\n' % tmp)
fhandle.close()
return (psc, scf)
def ini_saxs(self, atmsel,
filename='$(LIB)/formfactors-int_tab_solvation.lib',
s_min=0.0, s_max=2.0, maxs=100, nmesh=100, natomtyp=15,
represtyp='heav', wswitch='uniform', s_hybrid=0.0,
s_low=0.0, s_hi=2.0, spaceflag='real', rho_solv=0.334,
use_lookup=True, nr=5000, dr=0.1, nr_exp=300, dr_exp=1.0,
use_offset=False, use_rolloff=False, use_conv=False,
mixflag=False, pr_smooth=False):
"""Initialize saxsdata
@param atmsel: selection of atoms
@param s_min: minimum frequency in reciprocal space in A^-1
@param s_max: maximum frequency in reciprocal space in A^-1
@param maxs: maximum number of frequencies
@param nmesh: actual number of frequencies (<= maxs)
@param natomtyp: number of 'atoms', i.e. scattering centers
@param represtyp: representation: 'heav', 'allh', or 'CA'
@param filename: filename of the library for formfactors
@param wswitch: character for filter of scoring function options:
'unity', 'sq', or 'hybrid'
@param s_hybrid: frequency above which $ s^2$ weighting is applied
if wswitch='hybrid'
@param s_low: bandpass filter in A^-1 - lower cutoff
@param s_hi: bandpass filter in A^-1 - higher cutoff
@param spaceflag: how should I(s) be computed? 'real' space via P(r)
                          or 'reciprocal'? 'real' is more than an order of
                          magnitude faster but less accurate for high resolution
@param rho_solv: electron density of solvent;
default=0.334 e/A^-3 (H_2O)
@param use_lookup: use lookup tables for SINC and COS function -
significant increase in speed for 'reciprocal' mode
@param nr: number of points for P(r) sampling
@param dr: spacing (sampling) of P(r) in A
@param nr_exp: number of points for P_exp(r) sampling
@param dr_exp: spacing (sampling) of P(r) in A
@param use_offset: allow for additive constant in expt. spectrum
@param use_rolloff: allow for Gaussian rolloff of model spectrum
@param use_conv: convolute with nitrogen formfactor to mimic hydr
layer
@param mixflag: different conformations present? implemented for
HtpG project
@param pr_smooth: smoothing of P(r)"""
(inds, mdl) = atmsel.get_atom_indices()
return _modeller.mod_saxs_ini(self.modpt, mdl.modpt, inds, s_min,
s_max, maxs, nmesh, natomtyp, represtyp,
filename, wswitch, s_hybrid, s_low, s_hi,
spaceflag, rho_solv, use_lookup, nr, dr,
nr_exp, dr_exp, use_offset, use_rolloff,
use_conv, mixflag, pr_smooth)
def saxs_read(self, filename):
"""Read in experimental SAXS data"""
return _modeller.mod_saxs_read(self.modpt, filename)
def read(self,
saxsfilename,
atmsel,
formfacfilename='$(LIB)/formfactors-int_tab_solvation.lib',
natomtyp=15,
represtyp='heav', wswitch='uniform', s_hybrid=0.0,
s_low=None, s_hi=None,
spaceflag='real', rho_solv=0.334,
use_lookup=True, nr=5000, dr=0.1, nr_exp=300, dr_exp=1.0,
use_offset=False, use_rolloff=False, use_conv=False,
mixflag=False, pr_smooth=False):
"""Read in experimental SAXS data and initialize saxsdata
@param saxsfilename: Name of file containing SAXS spectrum
@param atmsel: selection of atoms
@param s_min: minimum frequency in reciprocal space in A^-1
@param s_max: maximum frequency in reciprocal space in A^-1
@param natomtyp: number of 'atoms', i.e. scattering centers
@param represtyp: representation: 'heav', 'allh', or 'CA'
@param formfacfilename: filename of the library for formfactors
@param wswitch: character for filter of scoring function options:
'unity', 'sq', or 'hybrid'
@param s_hybrid: frequency above which $ s^2$ weighting is applied
if wswitch='hybrid'
@param s_low: bandpass filter in A^-1 - lower cutoff
@param s_hi: bandpass filter in A^-1 - higher cutoff
@param spaceflag: how should I(s) be computed? 'real' space via P(r)
                          or 'reciprocal'? 'real' is more than an order of
                          magnitude faster but less accurate for high resolution
@param rho_solv: electron density of solvent;
default=0.334 e/A^-3 (H_2O)
@param use_lookup: use lookup tables for SINC and COS function -
significant increase in speed for 'reciprocal' mode
@param nr: number of points for P(r) sampling
@param dr: spacing (sampling) of P(r) in A
@param nr_exp: number of points for P_exp(r) sampling
@param dr_exp: spacing (sampling) of P(r) in A
@param use_offset: allow for additive constant in expt. spectrum
@param use_rolloff: allow for Gaussian rolloff of model spectrum
@param use_conv: convolute with nitrogen formfactor to mimic hydr
layer
@param mixflag: different conformations present? implemented for
HtpG project
@param pr_smooth: smoothing of P(r)"""
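        # The spectrum file is parsed as whitespace-separated columns with the
        # scattering vector s in the first column; lines starting with '#' are
        # skipped as comments. A hypothetical layout:
        #   # s         I(s)       sigma
        #   0.0100      1523.4     12.1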
try:
fh = open(saxsfilename,'r')
        except IOError:
            print("file " + saxsfilename + " not found :(")
            return
fh.close()
ns = 0
s_min = 10.
s_max = 0.
for line in open(saxsfilename,'r'):
s = line.split()
# '#' is comment
if (not s[0][0] == '#'):
ns = ns +1
if ( float(s[0]) > s_max):
s_max = float(s[0])
if ( float(s[0]) < s_min):
s_min = float(s[0])
if (not s_low):
s_low = s_min - .001
if (not s_hi):
s_hi = s_max + .001
print "s_min="+str(s_min)+", s_max="+str(s_max)
print "s_low="+str(s_low)+", s_hi="+str(s_hi)
self.ini_saxs(atmsel,
filename=formfacfilename,
s_min=s_min, s_max=s_max, maxs=ns, nmesh=ns, natomtyp=natomtyp,
represtyp=represtyp, wswitch=wswitch, s_hybrid=s_hybrid,
s_low=s_low, s_hi=s_hi, spaceflag=spaceflag, rho_solv=rho_solv,
use_lookup=use_lookup, nr=nr, dr=dr, nr_exp=nr_exp, dr_exp=dr_exp,
use_offset=use_offset, use_rolloff=use_rolloff, use_conv=use_conv,
mixflag=mixflag, pr_smooth=pr_smooth)
self.saxs_read(saxsfilename)
def saxs_pr_read(self, filename):
"""Read in experimental P(r)"""
return _modeller.mod_saxs_pr_read(self.modpt, filename)
def __get_s_hybrid(self):
return _modeller.mod_saxsdata_s_hybrid_get(self.modpt)
def __set_s_hybrid(self, val):
return _modeller.mod_saxsdata_s_hybrid_set(self.modpt, val)
def __get_s_max(self):
return _modeller.mod_saxsdata_s_max_get(self.modpt)
def __set_s_max(self, val):
return _modeller.mod_saxsdata_s_max_set(self.modpt, val)
def __get_s_min(self):
return _modeller.mod_saxsdata_s_min_get(self.modpt)
def __set_s_min(self, val):
return _modeller.mod_saxsdata_s_min_set(self.modpt, val)
def __get_s_low(self):
return _modeller.mod_saxsdata_s_low_get(self.modpt)
def __set_s_low(self, val):
return _modeller.mod_saxsdata_s_low_set(self.modpt, val)
def __get_s_hi(self):
return _modeller.mod_saxsdata_s_hi_get(self.modpt)
def __set_s_hi(self, val):
return _modeller.mod_saxsdata_s_hi_set(self.modpt, val)
def __get_normsq_exp(self):
return _modeller.mod_saxsdata_normsq_exp_get(self.modpt)
def __set_normsq_exp(self, val):
return _modeller.mod_saxsdata_normsq_exp_set(self.modpt, val)
def __get_ns(self):
return _modeller.mod_saxsdata_ns_get(self.modpt)
def __get_nr(self):
return _modeller.mod_saxsdata_nr_get(self.modpt)
def __get_nr_exp(self):
return _modeller.mod_saxsdata_nr_exp_get(self.modpt)
    def __set_nr_exp(self, val):
return _modeller.mod_saxsdata_nr_exp_set(self.modpt, val)
def __get_dr(self):
return _modeller.mod_saxsdata_dr_get(self.modpt)
def __get_dr_exp(self):
return _modeller.mod_saxsdata_dr_exp_get(self.modpt)
    def __set_dr_exp(self, val):
return _modeller.mod_saxsdata_dr_exp_set(self.modpt, val)
def __get_c(self):
return _modeller.mod_saxsdata_c_get(self.modpt)
def __set_c(self, val):
return _modeller.mod_saxsdata_c_set(self.modpt, val)
def __get_rolloff(self):
return _modeller.mod_saxsdata_rolloff_get(self.modpt)
    def __set_rolloff(self, val):
return _modeller.mod_saxsdata_rolloff(self.modpt, val)
def __get_bfac(self):
return _modeller.mod_saxsdata_bfac_get(self.modpt)
    def __set_bfac(self, val):
return _modeller.mod_saxsdata_bfac(self.modpt, val)
def __get_chi_sq(self):
return _modeller.mod_saxsdata_chi_sq_get(self.modpt)
def __set_chi_sq(self, val):
return _modeller.mod_saxsdata_chi_sq_set(self.modpt, val)
def __get_rho_solv(self):
return _modeller.mod_saxsdata_rho_solv_get(self.modpt)
def __set_rho_solv(self, val):
return _modeller.mod_saxsdata_rho_solv_set(self.modpt, val)
def __get_offset(self):
return _modeller.mod_saxsdata_offset_get(self.modpt)
    def __set_offset(self, val):
return _modeller.mod_saxsdata_offset(self.modpt, val)
def __get_intensity(self):
ptarr = _modeller.mod_saxsdata_intensity_get(self.modpt)
return array.Double1DArray(ptarr, self.__get_ns)
def __set_intensity(self, val):
modlist.set_fixlist(self.intensity, val)
def __get_int_exp(self):
ptarr = _modeller.mod_saxsdata_int_exp_get(self.modpt)
return array.Double1DArray(ptarr, self.__get_ns)
def __set_int_exp(self, val):
modlist.set_fixlist(self.int_exp, val)
def __get_s(self):
ptarr = _modeller.mod_saxsdata_s_get(self.modpt)
return array.Double1DArray(ptarr, self.__get_ns)
def __set_s(self, val):
modlist.set_fixlist(self.s, val)
def __get_sigma_exp(self):
ptarr = _modeller.mod_saxsdata_sigma_exp_get(self.modpt)
return array.Double1DArray(ptarr, self.__get_ns)
def __set_sigma_exp(self, val):
modlist.set_fixlist(self.sigma_exp, val)
def __get_p_r(self):
ptarr = _modeller.mod_saxsdata_p_r_get(self.modpt)
return array.Double1DArray(ptarr, self.__get_nr)
    def __set_p_r(self, val):
modlist.set_fixlist(self.p_r, val)
def __get_p_r_exp(self):
ptarr = _modeller.mod_saxsdata_p_r_exp_get(self.modpt)
return array.Double1DArray(ptarr, self.__get_nr_exp)
    def __set_p_r_exp(self, val):
modlist.set_fixlist(self.p_r_exp, val)
def __get_r_exp(self):
ptarr = _modeller.mod_saxsdata_r_exp_get(self.modpt)
return array.Double1DArray(ptarr, self.__get_nr_exp)
    def __set_r_exp(self, val):
modlist.set_fixlist(self.r_exp, val)
def __get_p_r_resamp(self):
ptarr = _modeller.mod_saxsdata_p_r_resamp_get(self.modpt)
return array.Double1DArray(ptarr, self.__get_nr_exp)
    def __set_p_r_resamp(self, val):
modlist.set_fixlist(self.p_r_resamp, val)
def __get_p_r_sig(self):
ptarr = _modeller.mod_saxsdata_p_r_sig_get(self.modpt)
return array.Double1DArray(ptarr, self.__get_nr_exp)
    def __set_p_r_sig(self, val):
modlist.set_fixlist(self.p_r_sig, val)
modpt = property(__get_modpt)
s_hybrid = property(__get_s_hybrid, __set_s_hybrid)
s_max = property(__get_s_max, __set_s_max)
s_min = property(__get_s_min, __set_s_min)
s_low = property(__get_s_low, __set_s_low)
    s_hi = property(__get_s_hi, __set_s_hi)
normsq_exp = property(__get_normsq_exp, __set_normsq_exp)
c = property(__get_c, __set_c)
ns = property(__get_ns)
nr = property(__get_nr)
nr_exp = property(__get_nr_exp, __set_nr_exp)
dr = property(__get_dr)
dr_exp = property(__get_dr_exp, __set_dr_exp)
intensity = property(__get_intensity, __set_intensity)
s = property(__get_s, __set_s)
int_exp = property(__get_int_exp, __set_int_exp)
sigma_exp = property(__get_sigma_exp, __set_sigma_exp)
p_r = property(__get_p_r, __set_p_r)
p_r_exp = property(__get_p_r_exp, __set_p_r_exp)
r_exp = property(__get_r_exp, __set_r_exp)
p_r_resamp = property(__get_p_r_resamp, __set_p_r_resamp)
p_r_sig = property(__get_p_r_sig, __set_p_r_sig)
chi_sq = property(__get_chi_sq, __set_chi_sq)
rho_solv = property(__get_rho_solv, __set_rho_solv)
    rolloff = property(__get_rolloff, __set_rolloff)
bfac = property(__get_bfac, __set_bfac)
offset = property(__get_offset, __set_offset)
|
bjornwallner/proq2-server
|
apps/modeller9v8/modlib/modeller/saxsdata.py
|
Python
|
gpl-3.0
| 17,392
|
[
"Gaussian"
] |
f7aae6f3c28974ed705f1be51d0481101774518836586faa953aae10ece1eb55
|
"""Gaussian normalization to normalize standalone modality."""
import numpy as np
from joblib import Parallel, delayed
from scipy.linalg import norm
from .mrsi_normalization import MRSINormalization
KNOWN_KIND = ('l2', 'l1')
class LNormNormalization(MRSINormalization):
"""Normalization of MRSI data using l-norm.
Normalization of the MRSI spectrum. Only the real part will be normalized.
Parameters
----------
base_modality : object
The base modality on which the normalization will be applied. The base
    modality should inherit from MRSIModality class.
kind : str, optional (default='l2')
The type of l-norm to use. Can be either 'l2' or 'l1'.
Attributes
----------
base_modality_ : object
The base modality on which the normalization will be applied. The base
    modality should inherit from MRSIModality class.
fit_params_ : ndarray, shape (size_x, size_y, size_z)
The constant to apply to each spectrum.
is_fitted_ : bool
Boolean to know if the `fit` function has been already called.
"""
def __init__(self, base_modality, kind='l2'):
super(LNormNormalization, self).__init__(base_modality)
self.kind = kind
self.is_fitted_ = False
def fit(self, modality, ground_truth=None, cat=None):
"""Method to find the parameters needed to apply the normalization.
Parameters
----------
modality : object of type MRSIModality
The modality object of interest.
        ground_truth : object of type GTModality or None
The ground-truth of GTModality. If None, the whole data will be
considered.
cat : str or None
            String corresponding to the ground-truth of interest. Cannot be
            None if ground_truth is not None.
Returns
-------
self : object
Return self.
"""
super(LNormNormalization, self).fit(modality=modality,
ground_truth=ground_truth,
cat=cat)
# Check that the type of l-norm is known
if self.kind not in KNOWN_KIND:
raise ValueError('The type of l-norm is not known.')
# Allocate the parameters array
self.fit_params_ = np.zeros((modality.data_.shape[1],
modality.data_.shape[2],
modality.data_.shape[3]))
for y in range(modality.data_.shape[1]):
for x in range(modality.data_.shape[2]):
for z in range(modality.data_.shape[3]):
if self.kind == 'l1':
self.fit_params_[y, x, z] = norm(np.real(
modality.data_[:, y, x, z]), 1)
if self.kind == 'l2':
self.fit_params_[y, x, z] = norm(np.real(
modality.data_[:, y, x, z]), 2)
self.is_fitted_ = True
return self
def normalize(self, modality):
"""Method to normalize the given modality using the fitted parameters.
Parameters
----------
modality: object of type MRSIModality
The modality object from which the data need to be normalized.
Returns
-------
modality: object of type MRSIModality
The modality object in which the data will be normalized.
"""
super(LNormNormalization, self).normalize(modality)
# Check that the parameters have been fitted
if not self.is_fitted_:
            raise ValueError('Fit the parameters before normalizing'
                             ' the data.')
for y in range(modality.data_.shape[1]):
for x in range(modality.data_.shape[2]):
for z in range(modality.data_.shape[3]):
modality.data_[:, y, x, z] = ((
np.real(modality.data_[:, y, x, z]) /
self.fit_params_[y, x, z]) + (
1j * np.imag(modality.data_[:, y, x, z])))
return modality
def denormalize(self, modality):
"""Denormalize the given modality using the fitted parameters.
Parameters
----------
        modality: object of type MRSIModality
            The modality object from which the data need to be denormalized.
        Returns
        -------
        modality: object of type MRSIModality
            The modality object in which the data will be denormalized.
"""
super(LNormNormalization, self).denormalize(modality)
# Check that the parameters have been fitted
if not self.is_fitted_:
            raise ValueError('Fit the parameters before denormalizing'
                             ' the data.')
for y in range(modality.data_.shape[1]):
for x in range(modality.data_.shape[2]):
for z in range(modality.data_.shape[3]):
modality.data_[:, y, x, z] = ((
np.real(modality.data_[:, y, x, z]) *
self.fit_params_[y, x, z]) + (
1j * np.imag(modality.data_[:, y, x, z])))
return modality
|
glemaitre/protoclass
|
protoclass/preprocessing/l_norm_normalization.py
|
Python
|
gpl-2.0
| 5,323
|
[
"Gaussian"
] |
a28b254d72b3e8443616cf65d782fbb7708bc6c9c3b5da9217eb3f708cd5cd8a
|
import numpy as np
from matplotlib import pyplot
import rft1d
eps = np.finfo(float).eps
def here_anova1(Y, X, X0, Xi, X0i, df):
Y = np.matrix(Y)
### estimate parameters:
b = Xi*Y
eij = Y - X*b
R = eij.T*eij
### reduced design:
b0 = X0i*Y
eij0 = Y - X0*b0
R0 = eij0.T*eij0
### compute F statistic:
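    ### one-way ANOVA: F = ((RSS_reduced - RSS_full)/df[0]) / (RSS_full/df[1]),
    ### with df = (nGroups-1, nTotal-nGroups); eps keeps the denominator non-zero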
F = ((np.diag(R0)-np.diag(R))/df[0]) / (np.diag(R+eps)/df[1])
return F
def here_design_matrices(nResponses, nGroups):
nTotal = sum(nResponses)
X = np.zeros((nTotal,nGroups))
i0 = 0
for i,n in enumerate(nResponses):
X[i0:i0+n,i] = 1
i0 += n
X = np.matrix(X)
X0 = np.matrix(np.ones(nTotal)).T #reduced design matrix
Xi,X0i = np.linalg.pinv(X), np.linalg.pinv(X0) #pseudo-inverses
return X,X0,Xi,X0i
#(0) Set parameters:
np.random.seed(0)
nResponses = 9,8,9
nTestStatFields = 3
nNodes = 101
nIterations = 2000
FWHM = 8.0
### derived parameters:
nGroups = len(nResponses)
nTotal = sum(nResponses)
df = nGroups-1, nTotal-nGroups
X,X0,Xi,X0i = here_design_matrices(nResponses, nGroups)
### initialize RFT calculator:
rftcalc = rft1d.prob.RFTCalculator(STAT='F', df=df, nodes=nNodes, FWHM=FWHM, n=nTestStatFields)
#(1) Generate Gaussian 1D fields, compute test stat:
Fmax = []
generator = rft1d.random.Generator1D(nTotal, nNodes, FWHM)
for i in range(nIterations):
F = []
for i in range(nTestStatFields):
y = generator.generate_sample()
f = here_anova1(y, X, X0, Xi, X0i, df)
F.append( f )
Fconj = np.min(F, axis=0) #minimum across the test stat fields
Fmax.append( Fconj.max() )
Fmax = np.array(Fmax)
#(2) Survival functions:
heights = np.linspace(2, 5, 21)
sf = np.array( [ (Fmax>h).mean() for h in heights] )
sfE = rftcalc.sf(heights) #theoretical
#(3) Plot results:
pyplot.close('all')
ax = pyplot.axes()
ax.plot(heights, sf, 'o', label='Simulated')
ax.plot(heights, sfE, '-', label='Theoretical')
ax.set_xlabel('$u$', size=20)
ax.set_ylabel('$P(F_\mathrm{conj} > u)$', size=20)
ax.legend()
ax.set_title('Conjunction validation (F fields)', size=20)
pyplot.show()
|
0todd0000/rft1d
|
rft1d/examples/val_conj_2_F.py
|
Python
|
gpl-3.0
| 2,258
|
[
"Gaussian"
] |
b84a17b6d0a804166d0b1924fc5f35aa387e752327ab0177a69ec3a656df6950
|
"""Tests for dials.report.analysis module"""
from __future__ import annotations
from unittest import mock
import pytest
from cctbx import miller
from dxtbx.model import Crystal
from dxtbx.serialize import load
from dials.algorithms.scaling.scaling_library import (
merging_stats_from_scaled_array,
scaled_data_as_miller_array,
)
from dials.array_family import flex
from dials.report.analysis import (
batch_dependent_properties,
combined_table_to_batch_dependent_properties,
i_sig_i_vs_batch,
reflection_tables_to_batch_dependent_properties,
rmerge_vs_batch,
scales_vs_batch,
table_1_summary,
)
@pytest.fixture
def example_crystal():
exp_dict = {
"__id__": "crystal",
"real_space_a": [1.0, 0.0, 0.0],
"real_space_b": [0.0, 1.0, 0.0],
"real_space_c": [0.0, 0.0, 1.0],
"space_group_hall_symbol": "-P 2yb",
}
crystal = Crystal.from_dict(exp_dict)
return crystal
@pytest.fixture
def example_miller_set(example_crystal):
"""Generate an example miller set."""
ms = miller.set(
crystal_symmetry=example_crystal.get_crystal_symmetry(),
indices=flex.miller_index([(1, 1, 1)] * 8 + [(2, 2, 2)]),
anomalous_flag=False,
)
return ms
@pytest.fixture
def batch_array(example_miller_set):
"""Generate an example batch array."""
batches_ = flex.double([1, 1, 1, 2, 2, 2, 3, 3, 4])
batches = miller.array(example_miller_set, data=batches_)
return batches
@pytest.fixture
def data_array(example_miller_set):
"""Generate an example data array."""
data_ = flex.double([1.0 + (i * 0.1) for i in range(0, 9)])
data = miller.array(example_miller_set, data=data_)
return data
expected_results = {
"bins": [1, 2, 3, 4],
"svb": [1.1, 1.4, 1.65, 1.8],
"isigivb": [1.1 / 2.0, 1.4 / 2.0, 1.65 / 2.0, 1.8 / 2.0],
"rmergevb": [0.22727, 0.059524, 0.18182, 0.0],
}
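# These expectations follow from the fixture data above: intensities 1.0..1.8 in
# batches (1, 1, 1, 2, 2, 2, 3, 3, 4), so e.g. svb[0] = mean(1.0, 1.1, 1.2) = 1.1,
# and with sigmas of 2.0 the I/sigma values are svb / 2.0. For rmerge, the eight
# (1, 1, 1) observations have mean 1.35, so batch 1 gives
# (0.35 + 0.25 + 0.15) / (1.0 + 1.1 + 1.2) ~= 0.22727, while the lone (2, 2, 2)
# observation in batch 4 contributes 0.0.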
def test_scales_vs_batch(batch_array, data_array):
"""Test the scales_vs_batch function."""
bins, svb = scales_vs_batch(data_array, batch_array)
assert bins == expected_results["bins"]
assert svb == pytest.approx(expected_results["svb"], 1e-6)
def test_IsigI_vs_batch(batch_array, data_array):
"""Test the IsigI_vs_batch function."""
with pytest.raises(AssertionError):
bins, isigivb = i_sig_i_vs_batch(data_array, batch_array)
Is = data_array.customized_copy()
Is.set_sigmas(flex.double(9, 2.0))
bins, isigivb = i_sig_i_vs_batch(Is, batch_array)
assert bins == expected_results["bins"]
assert isigivb == pytest.approx(expected_results["isigivb"], 1e-6)
def test_Rmerge_vs_batch(batch_array, data_array):
"""Test the Rmerge_vs_batch function."""
bins, rmergevsb = rmerge_vs_batch(data_array, batch_array)
assert bins == expected_results["bins"]
assert rmergevsb == pytest.approx(expected_results["rmergevb"], 1e-4)
def test_reflections_to_batch_properties(
data_array, example_miller_set, example_crystal
):
"""Test the helper functions that provide the batch properties from reflection
tables and experiments."""
# first make a reflection table.
reflections = flex.reflection_table()
reflections["intensity.scale.value"] = data_array.data() * flex.double(9, 2.0)
reflections["inverse_scale_factor"] = flex.double(9, 2.0)
reflections["intensity.scale.variance"] = flex.double(9, 4.0) * flex.double(9, 4.0)
reflections["xyzobs.px.value"] = flex.vec3_double(
[(0, 0, 0.1)] * 3 + [(0, 0, 1.1)] * 3 + [(0, 0, 2.1)] * 2 + [(0, 0, 3.1)]
)
reflections["miller_index"] = example_miller_set.indices()
reflections["id"] = flex.int(9, 1)
reflections.set_flags(flex.bool(9, True), reflections.flags.integrated)
reflections.set_flags(flex.bool(9, True), reflections.flags.scaled)
experiments = [mock.Mock()]
experiments[0].scan.get_image_range.return_value = [1, 10]
experiments[0].crystal = example_crystal
experiments[0].beam.get_wavelength.return_value = 1
(
bins,
rmerge,
isigi,
scalesvsbatch,
batch_data,
) = reflection_tables_to_batch_dependent_properties( # pylint: disable=unbalanced-tuple-unpacking
[reflections], experiments
)
assert bins == expected_results["bins"]
assert rmerge == pytest.approx(expected_results["rmergevb"], 1e-4)
assert isigi == pytest.approx(expected_results["isigivb"], 1e-4)
assert scalesvsbatch == pytest.approx([2.0] * 4, 1e-4)
assert batch_data == [{"range": (1, 10), "id": 0}]
# now try a two experiment dataset in a combined table.
import copy
reflections_2 = copy.deepcopy(reflections)
reflections_2["id"] = flex.int(9, 2)
reflections.extend(reflections_2)
experiments = [mock.Mock(), mock.Mock()]
experiments[0].scan.get_image_range.return_value = [1, 10]
experiments[0].crystal = example_crystal
experiments[0].beam.get_wavelength.return_value = 1
experiments[1].scan.get_image_range.return_value = [1, 10]
experiments[1].crystal = example_crystal
experiments[1].beam.get_wavelength.return_value = 1
(
bins,
rmerge,
isigi,
scalesvsbatch,
batch_data,
) = combined_table_to_batch_dependent_properties( # pylint: disable=unbalanced-tuple-unpacking
reflections, experiments
)
assert bins == [1, 2, 3, 4, 101, 102, 103, 104]
assert rmerge == pytest.approx(expected_results["rmergevb"] * 2, 1e-4)
assert isigi == pytest.approx(expected_results["isigivb"] * 2, 1e-4)
assert scalesvsbatch == pytest.approx([2.0] * 8, 1e-4)
assert batch_data == [{"range": (1, 10), "id": 0}, {"range": (101, 110), "id": 1}]
def test_batch_dependent_properties(batch_array, data_array):
"""Test the interface function that manages the calculations."""
Is = data_array.customized_copy()
Is.set_sigmas(flex.double(9, 2.0))
bins, rmerge, isigi, scalesvsbatch = batch_dependent_properties(
batch_array, Is, scales=data_array
)
assert bins == expected_results["bins"]
assert rmerge == pytest.approx(expected_results["rmergevb"], 1e-4)
assert isigi == pytest.approx(expected_results["isigivb"], 1e-4)
assert scalesvsbatch == pytest.approx(expected_results["svb"], 1e-4)
# test for no scales given
bins, rmerge, isigi, scalesvsbatch = batch_dependent_properties(batch_array, Is)
assert bins == expected_results["bins"]
assert rmerge == pytest.approx(expected_results["rmergevb"], 1e-4)
assert isigi == pytest.approx(expected_results["isigivb"], 1e-4)
assert scalesvsbatch is None
# test for bad input - batches should reduce for all data
Is.set_sigmas(flex.double([2.0] * 8 + [-1.0] * 1))
bins, rmerge, isigi, scalesvsbatch = batch_dependent_properties(
batch_array, Is, scales=data_array
)
assert bins == expected_results["bins"][0:3]
assert rmerge == pytest.approx(expected_results["rmergevb"][0:3], 1e-4)
assert isigi == pytest.approx(expected_results["isigivb"][0:3], 1e-4)
assert scalesvsbatch == pytest.approx(expected_results["svb"][0:3], 1e-4)
bins, rmerge, isigi, scalesvsbatch = batch_dependent_properties(batch_array, Is)
assert bins == expected_results["bins"][0:3]
assert rmerge == pytest.approx(expected_results["rmergevb"][0:3], 1e-4)
assert isigi == pytest.approx(expected_results["isigivb"][0:3], 1e-4)
assert scalesvsbatch is None
# test for mismatched array sizes
with pytest.raises(AssertionError):
_ = batch_dependent_properties(batch_array, Is, data_array[0:-1])
with pytest.raises(AssertionError):
_ = batch_dependent_properties(batch_array[0:-1], Is)
def test_table_1_summary(dials_data):
location = dials_data("l_cysteine_4_sweeps_scaled")
expts = load.experiment_list(location.join("scaled_20_25.expt"), check_format=False)
refls = flex.reflection_table.from_file(location.join("scaled_20_25.refl"))
# Get a miller array of real data and calculate an iotbx.merging_statistics
ma = scaled_data_as_miller_array([refls], expts)
arr, anom = merging_stats_from_scaled_array(ma)
# Test that something is returned in each case
### Case of overall statistics summary
out = table_1_summary(arr, anom)
assert out
assert all(a in out for a in ("Overall", "Low", "High"))
assert "Suggested" not in out
### Case of overall and suggested statistics summary (with anom)
out = table_1_summary(arr, anom, arr, anom)
assert out
assert all(a in out for a in ("Overall", "Suggested", "Low", "High"))
### Case of no anomalous, but with suggested as well as overall.
out = table_1_summary(arr, selected_statistics=arr)
assert out
assert all(a in out for a in ("Overall", "Suggested", "Low", "High"))
|
dials/dials
|
tests/report/test_analysis.py
|
Python
|
bsd-3-clause
| 8,850
|
[
"CRYSTAL"
] |
dbae99c6dc89739aeee56ccb6bd61cf3ce5accc779bdf466e747d6974b9ccb9b
|
#!/usr/bin/python
import os
import sys
import argparse
from argparse import RawTextHelpFormatter
import subprocess
import fileinput
CWD = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
ESPRESSO_TEMPLATE_ZIP = 'EspressoProjectTestTemplate.zip'
PROJECT_NAME_TEMPLATE_TEXT = 'PROJECT_NAME_TEMPLATE'
PKG_NAME_TEMPLATE_TEXT = 'PACKAGE_NAME_TEMPLATE'
DEFAULT_ACTIVITY_TEXT = 'DEFAULT_ACTIVITY'
class Espresso:
@staticmethod
def show_espresso_first_usage_help(test_project_path, project_name, steps_definition_path):
print('\n\n'\
'Espresso & Cucumber basic documentation\n'\
'---------------------------------------\n'\
'\n'\
'> Cucumber\n'\
              'Start writing test scenarios and feature tests using Given-When-Then language in feature file:\n'\
'{0}\n'\
'\n'\
'> Espresso\n'\
'Implement Test Steps Definitions in file:\n'\
'{1}\n'\
'\n'\
'For Espresso library examples and documentation visit: https://code.google.com/p/android-test-kit/wiki/EspressoSamples\n'.format(
os.path.join(test_project_path, 'src', 'assets', 'features', project_name + '.feature'),
os.path.join(test_project_path, steps_definition_path, project_name + 'TestSteps.java')))
@staticmethod
def replace_in_file(original_text, new_text, file_to_replace):
if not os.path.exists(file_to_replace):
sys.exit("Error: Destination file to replace in new test project does not exist: {0}".format(file_to_replace))
for line in fileinput.input(file_to_replace, inplace=True):
print(line.replace(original_text, new_text)),
@staticmethod
def move_source_code_to_package_path(package, path):
original_path = os.path.join(path, 'src', 'java', 'test')
relative_dest_path = os.path.join('src', 'java', *package.split('.'))
destination_path = os.path.join(path, relative_dest_path, 'test')
os.makedirs(destination_path)
os.rename(original_path, destination_path)
return os.path.join(relative_dest_path, 'test')
@staticmethod
def check_test_project_dependencies(test_project_path):
# Check dependencies in project.properties
project_properties_file = os.path.join(test_project_path, 'project.properties')
properties = Espresso.read_properties(project_properties_file)
for key in properties:
if ('dir' in key or 'library.reference' in key):
dep_path = os.path.join(test_project_path, properties[key])
if not os.path.exists(dep_path):
print "\nWarning!\n Dependency path for compilation does not exist: {0}\
\n Fix dependencies path in file {1}".format(dep_path, project_properties_file)
@staticmethod
def read_properties(file_to_read):
properties = {}
for line in open(file_to_read):
#H = dict(line.strip().split('=')
if (len(line.strip()) == 0) or (line.strip()[0] == '#'): continue
properties[line.strip().split('=')[0]] = line.strip().split('=')[1]
return properties
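    # read_properties turns lines such as "android.library.reference.1=../some/lib"
    # (an illustrative Android project.properties entry) into
    # {'android.library.reference.1': '../some/lib'}; check_test_project_dependencies
    # then scans those keys for 'dir' or 'library.reference' and verifies the paths.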
@staticmethod
def generate_test(package, project_name, destination_path, activity_name):
if not os.path.isdir(destination_path):
sys.exit("Error: Destination path to place the new test project does not exist" +
", first create it: {0}".format(destination_path))
        espresso_template_zip = os.path.abspath(os.path.join(CWD, ESPRESSO_TEMPLATE_ZIP))
if not os.path.exists(espresso_template_zip):
sys.exit("Error: Espresso template zip file does not exist, please check: {0}".format(espresso_template_zip))
template_path = os.path.join(destination_path, 'PROJECT_NAME_TEMPLATETest')
test_project_path = os.path.join(destination_path, project_name + 'EspressoTest')
if os.path.isdir(template_path):
sys.exit("Error: Temporal destination path already exists, delete or rename it first: {0}".format(template_path))
if os.path.isdir(test_project_path):
sys.exit("Error: Destination path already exists, delete or rename it first: {0}".format(test_project_path))
print "Extracting template test project..."
cmd = ['unzip', espresso_template_zip, '-d', destination_path]
p = subprocess.Popen(cmd)
output = p.communicate()
if p.returncode != 0:
sys.exit("Error unzipping Espresso template zip: \n"
+ '$ '+ ' '.join(cmd) + "\n"
+ "Exiting.")
if not os.path.isdir(destination_path):
sys.exit("Error unzipping Espresso template zip ({0}) to {1}".format(espresso_template_zip, template_path))
print "\nConfiguring test template project..."
# Fill classes and variable names with target package and project names
Espresso.replace_in_file(PROJECT_NAME_TEMPLATE_TEXT, project_name, os.path.join(template_path, 'build.xml'))
Espresso.replace_in_file(PROJECT_NAME_TEMPLATE_TEXT, project_name, os.path.join(template_path, '.project'))
Espresso.replace_in_file(PKG_NAME_TEMPLATE_TEXT, package, os.path.join(template_path, 'AndroidManifest.xml'))
Espresso.replace_in_file(PROJECT_NAME_TEMPLATE_TEXT, project_name, os.path.join(template_path, 'project.properties'))
Espresso.replace_in_file(PROJECT_NAME_TEMPLATE_TEXT, project_name, os.path.join(template_path, '.classpath'))
Espresso.replace_in_file(PKG_NAME_TEMPLATE_TEXT, package, os.path.join(template_path,
'src', 'java', 'test', 'RunCucumberTest.java'))
Espresso.replace_in_file(PKG_NAME_TEMPLATE_TEXT, package, os.path.join(template_path,
'src', 'java', 'test', 'PROJECT_NAME_TEMPLATETestSteps.java'))
Espresso.replace_in_file(PROJECT_NAME_TEMPLATE_TEXT, project_name, os.path.join(template_path,
'src', 'java', 'test', 'PROJECT_NAME_TEMPLATETestSteps.java'))
if activity_name: Espresso.replace_in_file(DEFAULT_ACTIVITY_TEXT,
activity_name, os.path.join(template_path, 'src', 'java', 'test', 'PROJECT_NAME_TEMPLATETestSteps.java'))
Espresso.replace_in_file(PROJECT_NAME_TEMPLATE_TEXT, project_name, os.path.join(template_path,
'src', 'assets', 'features', PROJECT_NAME_TEMPLATE_TEXT + '.feature'))
# Rename files and folders to align with target package and project names
("path/to/current/file.foo", "path/to/new/desination/for/file.foo")
os.rename(os.path.join(template_path, 'src', 'java', 'test', 'PROJECT_NAME_TEMPLATETestSteps.java'),
os.path.join(template_path, 'src', 'java', 'test', project_name + 'TestSteps.java'))
os.rename(os.path.join(template_path, 'src', 'assets', 'features', 'PROJECT_NAME_TEMPLATE.feature'),
os.path.join(template_path, 'src', 'assets', 'features', project_name + '.feature'))
# Generate package tree from 'package' under src/java/ and move test package folder
steps_definition_path = Espresso.move_source_code_to_package_path(package, template_path)
os.rename(template_path, test_project_path)
# Check relative paths to app and libraries
Espresso.check_test_project_dependencies(test_project_path)
print "Test project template configured"
Espresso.show_espresso_first_usage_help(test_project_path, project_name, steps_definition_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Create test project with Cucumber and Espresso from template\
\nFor Espresso library examples and documentation visit: https://code.google.com/p/android-test-kit/wiki/EspressoSamples',
formatter_class=RawTextHelpFormatter)
parser.add_argument('action', help='The action to perform (e.g. \'generate\' or just \'g\')')
parser.add_argument('element', help='The element to perform the action (e.g. \'test\' or just \'t\')')
parser.add_argument('package', help='The package of the target application to test (e.g. com.tomtom.pnd.firstrunwizard)')
parser.add_argument('-d', '--destination-path', dest='destination_path', default=os.path.abspath(os.path.dirname('.')),
help='Path inside which the test project will be created and placed (e.g. .../MyAppProject/test)')
parser.add_argument('-p', '--project-name', dest='project_name',
help='Name of the project to test (last part of package name is used by default)')
parser.add_argument('-a', '--default-activity', dest='activity',
help='Name of the main activity to test (test will instrument this activity as starting point, e.g. HomeActivity)')
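    # Example invocation (package, paths and activity name are hypothetical):
    #   ./espresso.py generate test com.example.myapp -d ./test -a MainActivity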
# Parse the arguments
options = parser.parse_args()
if not ((options.action == 'generate') or (options.action == 'g')):
sys.exit("Error: Unrecognized or missing action (e.g. generate)")
if not ((options.element == 'test') or (options.element == 't')):
sys.exit("Error: Unrecognized or missing element (e.g. test)")
if not (options.package):
sys.exit("Error: Missing input package to test")
if not options.project_name:
options.project_name = options.package.split('.')[-1]
if (len(options.project_name) > 1):
options.project_name = options.project_name[0].upper() + options.project_name[1:]
options.destination_path = os.path.abspath(options.destination_path)
Espresso.generate_test(options.package, options.project_name, options.destination_path, options.activity)
|
neoranga55/espresso-cucumber
|
espresso-test-lib/espresso.py
|
Python
|
apache-2.0
| 9,644
|
[
"ESPResSo",
"VisIt"
] |
aadd0e7e334b2989409ed016b3c0b71123d181f1e8913d3d34149013d35eb8dc
|
# Copyright (C) 2016-2018 The ESPResSo project
# Copyright (C) 2014 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script generates gen_pxiconfig.cpp, which in turn generates myconfig.pxi.
#
import inspect
import sys
import os
# find featuredefs.py
moduledir = os.path.dirname(inspect.getfile(inspect.currentframe()))
sys.path.append(os.path.join(moduledir, '..', '..', 'config'))
import featuredefs
if len(sys.argv) != 3:
print("Usage: {} DEFFILE CPPFILE".format(sys.argv[0]), file=sys.stderr)
exit(2)
deffilename, cfilename = sys.argv[1:3]
print("Reading definitions from " + deffilename + "...")
defs = featuredefs.defs(deffilename)
print("Done.")
# generate cpp-file
print("Writing " + cfilename + "...")
cfile = open(cfilename, 'w')
cfile.write("""
#include "config.hpp"
#include <iostream>
using namespace std;
int main() {
cout << "# This file was autogenerated." << endl
<< "# Do not modify it or your changes will be overwritten!" << endl;
""")
template = """
#ifdef {0}
cout << "DEF {0} = 1" << endl;
#else
cout << "DEF {0} = 0" << endl;
#endif
"""
for feature in defs.allfeatures:
cfile.write(template.format(feature))
cfile.write("""
}
""")
cfile.close()
print("Done.")
|
mkuron/espresso
|
src/python/espressomd/gen_pxiconfig.py
|
Python
|
gpl-3.0
| 1,845
|
[
"ESPResSo"
] |
12ffb1c7a776a768accfbf7b19684225db3f24298fd050908e353f8242f1effe
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RBiocneighbors(RPackage):
"""Nearest Neighbor Detection for Bioconductor Packages.
Implements exact and approximate methods for nearest neighbor detection,
in a framework that allows them to be easily switched within
Bioconductor packages or workflows. Exact searches can be performed
using the k-means for k-nearest neighbors algorithm or with vantage
point trees. Approximate searches can be performed using the Annoy or
HNSW libraries. Searching on either Euclidean or Manhattan distances is
supported. Parallelization is achieved for all methods by using
BiocParallel. Functions are also provided to search for all neighbors
within a given distance."""
homepage = "https://bioconductor.org/packages/BiocNeighbors"
git = "https://git.bioconductor.org/packages/BiocNeighbors.git"
version('1.2.0', commit='f754c6300f835142536a4594ddf750481e0fe273')
version('1.0.0', commit='e252fc04b6d22097f2c5f74406e77d85e7060770')
depends_on('[email protected]:', when='@1.0.0', type=('build', 'run'))
depends_on('r-biocparallel', type=('build', 'run'))
depends_on('r-rcpp', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-rcppannoy', type=('build', 'run'))
depends_on('r-biocgenerics', when='@1.2.0:', type=('build', 'run'))
depends_on('r-rcpphnsw', when='@1.2.0:', type=('build', 'run'))
|
rspavel/spack
|
var/spack/repos/builtin/packages/r-biocneighbors/package.py
|
Python
|
lgpl-2.1
| 1,650
|
[
"Bioconductor"
] |
336cb261278ec65b2bda5c3700e26e9dce74539dd38f87cb6487dcb61ce48ba8
|
"""Helper functions for formating and rendering units."""
from typing import Optional, Union
import libsbml
import numpy as np
import pint
from sbmlutils.console import console
ureg = pint.UnitRegistry()
ureg.define("item = dimensionless")
ureg.define("avogadro = 6.02214179E23 dimensionless")
Q_ = ureg.Quantity
short_names = {
"metre": "m",
"meter": "m",
"liter": "l",
"litre": "l",
"dimensionless": "-",
"second": "s",
}
def udef_to_string(
udef: Optional[Union[libsbml.UnitDefinition, str]],
model: Optional[libsbml.Model] = None,
format: str = "latex",
) -> Optional[str]:
"""Render formatted string for units.
Format can be either 'str' or 'latex'
Units have the general format
(multiplier * 10^scale *ukind)^exponent
(m * 10^s *k)^e
Returns None if udef is None or no units in UnitDefinition.
:param udef: unit definition which is to be converted to string
"""
if udef is None:
return None
ud: libsbml.UnitDefinition
if isinstance(udef, str):
# check for internal unit
if libsbml.UnitKind_forName(udef) != libsbml.UNIT_KIND_INVALID:
return short_names.get(udef, udef)
else:
ud = model.getUnitDefinition(udef) # type: ignore
else:
ud = udef
    # collect the numerator and denominator terms
nom: str = ""
denom: str = ""
if ud:
for u in ud.getListOfUnits():
m = u.getMultiplier()
s: int = u.getScale()
e = u.getExponent()
k = libsbml.UnitKind_toString(u.getKind())
# (m * 10^s *k)^e
# parse with pint
term = Q_(float(m) * 10**s, k) ** float(abs(e))
try:
term = term.to_compact()
except KeyError:
pass
if np.isclose(term.magnitude, 1.0):
term = Q_(1, term.units)
us = f"{term:~}" # short formating
# handle min and hr
us = us.replace("60.0 s", "1 min")
us = us.replace("3600.0 s", "1 hr")
us = us.replace("3.6 ks", "1 hr")
us = us.replace("86.4 ks", "1 day")
us = us.replace("10.0 mm", "1 cm")
# remove 1.0 prefixes
us = us.replace("1 ", "")
# exponent
us = us.replace(" ** ", "^")
if e >= 0.0:
nom = us if nom == "" else f"{nom}*{us}"
else:
denom = us if denom == "" else f"{denom}*{us}"
else:
nom = "-"
if format == "str":
denom = denom.replace("*", "/")
if nom and denom:
ustr = f"{nom}/{denom}"
elif nom and not denom:
ustr = nom
elif not nom and denom:
ustr = f"1/{denom}"
elif not nom and not denom:
ustr = "-"
elif format == "latex":
nom = nom.replace("*", " \\cdot ")
denom = denom.replace("*", " \\cdot ")
if nom and denom:
ustr = f"\\frac{{{nom}}}{{{denom}}}"
elif nom and not denom:
ustr = nom
elif not nom and denom:
ustr = f"\\frac{{1}}{{{denom}}}"
elif not nom and not denom:
ustr = "-"
else:
raise ValueError
if ustr == "1":
ustr = "-"
# json escape
return ustr
if __name__ == "__main__":
import libsbml
from sbmlutils.factory import *
doc: libsbml.SBMLDocument = libsbml.SBMLDocument()
model: libsbml.Model = doc.createModel()
for (key, definition, _, _) in [
# ("mmole_per_min", "mmole/min", "str", "mmol/min"),
# ("m3", "meter^3", "str", "m^3"),
# ("m3", "meter^3/second", "str", "m^3/s"),
# ("mM", "mmole/liter", "str", "mmol/l"),
# ("ml_per_s_kg", "ml/s/kg", "str", "ml/s/kg"),
# ("dimensionless", "dimensionless", "str", "dimensionless"),
("item", "item", "str", "item"),
# ("mM", "mmole/min", "latex", "\\frac{mmol}/{min}"),
]:
ud = UnitDefinition(key, definition=definition)
# ud = UnitDefinition("item")
udef: libsbml.UnitDefinition = ud.create_sbml(model=model)
console.rule()
console.print(udef)
console.print(udef_to_string(udef, format="str"))
console.print(udef_to_string(udef, format="latex"))
|
matthiaskoenig/sbmlutils
|
src/sbmlutils/report/units.py
|
Python
|
lgpl-3.0
| 4,366
|
[
"Avogadro"
] |
7fc8654312fd756a6dae31ce90122ac86a48c9e9832fc4b9cc450678f24f7b5c
|
#! /usr/bin/env python
#coding=utf-8
""" DESCRIPTION
"""
import glob, sys, csv
from tabulate import tabulate
from Bio.Blast.Applications import NcbiblastnCommandline
"""--- FUNCTIONS ---"""
def carga_csv(file_name):
""" creates a list of lists with a csv file """
tabla = list()
archivo = open(file_name+'.csv',"rU")
csvreader = csv.reader(archivo, dialect=csv.excel_tab, delimiter = ',')
for row in csvreader:
tabla.append(row)
return tabla
def crea_comparacion(tabla_ref, estructura = 'star', comparacion = 'uni'):
""" creates comparisons lists (code) depending on arguments """
lista = []
tabla = list(tabla_ref)
if estructura == 'star':
nodo = tabla.pop(0)
print nodo
for organismo in tabla:
lista.append([nodo[1],organismo[1]])
if comparacion == 'bi':
lista.append([organismo[1], nodo[1]])
else:
comps = estructura.split(',')
for comp in comps:
pareja = comp.split('-')
query = tabla[int(pareja[0])][1]
db = tabla[int(pareja[1])][1]
lista.append([query, db])
if comparacion == 'bi':
lista.append([db, query])
return lista
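# For example, with CSV rows whose codes (second column) are 'OA', 'OB', 'OC'
# (hypothetical codes):
#   crea_comparacion(tabla, 'star', 'bi')     -> [['OA','OB'], ['OB','OA'], ['OA','OC'], ['OC','OA']]
#   crea_comparacion(tabla, '0-1,1-2', 'uni') -> [['OA','OB'], ['OB','OC']]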
def imprime_comparacion(listas):
""" prints the comparison as a readable format"""
print 'COMPARISONS\n-----------\n'
for lista in listas:
print lista[0] + ' --> ' + lista[1]
print '\n'
def imprime_referencia(claves):
""" prints the comparison as a readable format"""
print 'REFERENCE\n---------'
n = 0
for key, val in claves.items():
print n, '. ', key, '\t', val
n=n+1
print '\n'
def crea_diccionario(tabla):
""" creates a dictionary of code:organism"""
diccionario={}
for row in tabla:
diccionario[row[1]]=row[0]
return diccionario
"""--- PROGRAM BODY ---"""
print '----------------\nBLAST EVALUATION\n----------------'
blast_eval = 1e-05
comparison_list = []
# charge csv file
nombre_csv = raw_input('Please enter the CSV file name: ')
organismos = carga_csv(nombre_csv)
referencia = crea_diccionario(organismos)
comparison_list = crea_comparacion(organismos)
# present csv data
print '\nCSV data\n--------'
print tabulate(organismos, headers=["Organism","Code", "Genome File", "Database folder"]) + '\n'
# present options: blast parameters, comparison parameters, run
while 1:
imprime_referencia(referencia)
imprime_comparacion(comparison_list)
print 'CHOOSE AN OPTION\n----------------\n1) Comparisons\n2) Run\n3) Quit'
user_in = raw_input('Option: ')
if user_in == '1':
imprime_referencia(referencia)
print ('Please enter the comparisons using the organism index.\n' +
'Format: "-" between indices; "," between comparisons; no spaces.\n')
nueva_comparacion = raw_input('Comparisons: ')
print 'Choose "bi" for bidirectional or "uni" for unidirectional; no quotation marks.'
tipo_comparacion = raw_input('Direction: ')
comparison_list = crea_comparacion(organismos, nueva_comparacion, tipo_comparacion)
elif user_in == '2':
blast_eval = raw_input('\nPlease write the desired E value for BLAST runs; 1e-5 suggested.\nE_value: ')
        print '\nBLAST+ commands to be run...\n'
break
elif user_in == '3': quit()
else: print ('Incorrect option, try again.\n')
# create commands for comparisons
comandos = []
for pair in comparison_list:
nombre = referencia[pair[0]].split()
comandos.append([(nombre[0]+'_'+nombre[1]+'.fasta'), ('db_'+pair[1]+'/db_'+pair[1]), (pair[0]+'_'+pair[1]+'.xml')])
print tabulate(comandos, headers=["Genome file","Database", "Product file"]) + '\n'
raw_input('Press ENTER to continue')
# run commands, inform data created
# qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore
for comando in comandos:
blastn_cline = NcbiblastnCommandline(query=comando[0], db=comando[1], evalue=blast_eval ,outfmt=5, out=comando[2])
    print 'File ' + comando[2] + ' is currently in progress...'
stdout, stderr = blastn_cline()
print 'WORK COMPLETED\n--------------'
|
fernan9/LANGEBIO-Internship
|
BLAST_tricho_run.py
|
Python
|
gpl-2.0
| 4,183
|
[
"BLAST"
] |
ce21af6f5587d29898369174fc8c457771be50acd35dbead0da4afdef121e508
|
# -*- coding: iso-8859-1 -*-
############################################################
#Example 1: Simple basin hopping
############################################################
import numpy as np
import pygmin.potentials.lj as lj
import pygmin.basinhopping as bh
from pygmin.takestep import displace
natoms = 12
# random initial coordinates
coords=np.random.random(3*natoms)
potential = lj.LJ()
step = displace.RandomDisplacement(stepsize=0.5)
opt = bh.BasinHopping(coords, potential, takeStep=step)
opt.run(100)
# some visualization
try:
import pygmin.utils.pymolwrapper as pym
pym.start()
pym.draw_spheres(opt.coords, "A", 1)
except:
print "Could not draw using pymol, skipping this step"
|
js850/PyGMIN
|
examples/basinhopping_no_system_class/1_basic.py
|
Python
|
gpl-3.0
| 721
|
[
"PyMOL"
] |
bb86eaad84c4a7582c7ba95a4ec7b7c8d5cf2cc867312a9b87cc46dbc1845cec
|
"""
Tests for discussion pages
"""
import datetime
from pytz import UTC
from uuid import uuid4
from nose.plugins.attrib import attr
from .helpers import BaseDiscussionTestCase
from ..helpers import UniqueCourseTest
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.discussion import (
DiscussionTabSingleThreadPage,
InlineDiscussionPage,
InlineDiscussionThreadPage,
DiscussionUserProfilePage,
DiscussionTabHomePage,
DiscussionSortPreferencePage,
)
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ...fixtures.discussion import (
SingleThreadViewFixture,
UserProfileViewFixture,
SearchResultFixture,
Thread,
Response,
Comment,
SearchResult,
)
from .helpers import BaseDiscussionMixin
class DiscussionResponsePaginationTestMixin(BaseDiscussionMixin):
"""
A mixin containing tests for response pagination for use by both inline
discussion and the discussion tab
"""
def assert_response_display_correct(self, response_total, displayed_responses):
"""
Assert that various aspects of the display of responses are all correct:
* Text indicating total number of responses
* Presence of "Add a response" button
* Number of responses actually displayed
* Presence and text of indicator of how many responses are shown
* Presence and text of button to load more responses
"""
self.assertEqual(
self.thread_page.get_response_total_text(),
str(response_total) + " responses"
)
self.assertEqual(self.thread_page.has_add_response_button(), response_total != 0)
self.assertEqual(self.thread_page.get_num_displayed_responses(), displayed_responses)
self.assertEqual(
self.thread_page.get_shown_responses_text(),
(
None if response_total == 0 else
"Showing all responses" if response_total == displayed_responses else
"Showing first {} responses".format(displayed_responses)
)
)
self.assertEqual(
self.thread_page.get_load_responses_button_text(),
(
None if response_total == displayed_responses else
"Load all responses" if response_total - displayed_responses < 100 else
"Load next 100 responses"
)
)
def test_pagination_no_responses(self):
self.setup_thread(0)
self.assert_response_display_correct(0, 0)
def test_pagination_few_responses(self):
self.setup_thread(5)
self.assert_response_display_correct(5, 5)
def test_pagination_two_response_pages(self):
self.setup_thread(50)
self.assert_response_display_correct(50, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(50, 50)
def test_pagination_exactly_two_response_pages(self):
self.setup_thread(125)
self.assert_response_display_correct(125, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(125, 125)
def test_pagination_three_response_pages(self):
self.setup_thread(150)
self.assert_response_display_correct(150, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(150, 125)
self.thread_page.load_more_responses()
self.assert_response_display_correct(150, 150)
def test_add_response_button(self):
self.setup_thread(5)
self.assertTrue(self.thread_page.has_add_response_button())
self.thread_page.click_add_response_button()
def test_add_response_button_closed_thread(self):
self.setup_thread(5, closed=True)
self.assertFalse(self.thread_page.has_add_response_button())
@attr('shard_1')
class DiscussionHomePageTest(UniqueCourseTest):
"""
Tests for the discussion home page.
"""
SEARCHED_USERNAME = "gizmo"
def setUp(self):
super(DiscussionHomePageTest, self).setUp()
CourseFixture(**self.course_info).install()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.page = DiscussionTabHomePage(self.browser, self.course_id)
self.page.visit()
def test_new_post_button(self):
"""
Scenario: I can create new posts from the Discussion home page.
Given that I am on the Discussion home page
When I click on the 'New Post' button
Then I should be shown the new post form
"""
self.assertIsNotNone(self.page.new_post_button)
self.page.click_new_post_button()
self.assertIsNotNone(self.page.new_post_form)
@attr('shard_1')
class DiscussionTabSingleThreadTest(BaseDiscussionTestCase, DiscussionResponsePaginationTestMixin):
"""
Tests for the discussion page displaying a single thread
"""
def setUp(self):
super(DiscussionTabSingleThreadTest, self).setUp()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def setup_thread_page(self, thread_id):
self.thread_page = self.create_single_thread_page(thread_id) # pylint: disable=attribute-defined-outside-init
self.thread_page.visit()
def test_marked_answer_comments(self):
thread_id = "test_thread_{}".format(uuid4().hex)
response_id = "test_response_{}".format(uuid4().hex)
comment_id = "test_comment_{}".format(uuid4().hex)
thread_fixture = SingleThreadViewFixture(
Thread(id=thread_id, commentable_id=self.discussion_id, thread_type="question")
)
thread_fixture.addResponse(
Response(id=response_id, endorsed=True),
[Comment(id=comment_id)]
)
thread_fixture.push()
self.setup_thread_page(thread_id)
self.assertFalse(self.thread_page.is_comment_visible(comment_id))
self.assertFalse(self.thread_page.is_add_comment_visible(response_id))
self.assertTrue(self.thread_page.is_show_comments_visible(response_id))
self.thread_page.show_comments(response_id)
self.assertTrue(self.thread_page.is_comment_visible(comment_id))
self.assertTrue(self.thread_page.is_add_comment_visible(response_id))
self.assertFalse(self.thread_page.is_show_comments_visible(response_id))
@attr('shard_1')
class DiscussionOpenClosedThreadTest(BaseDiscussionTestCase):
"""
Tests for checking the display of attributes on open and closed threads
"""
def setUp(self):
super(DiscussionOpenClosedThreadTest, self).setUp()
self.thread_id = "test_thread_{}".format(uuid4().hex)
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self, **thread_kwargs):
thread_kwargs.update({'commentable_id': self.discussion_id})
view = SingleThreadViewFixture(
Thread(id=self.thread_id, **thread_kwargs)
)
view.addResponse(Response(id="response1"))
view.push()
def setup_openclosed_thread_page(self, closed=False):
self.setup_user(roles=['Moderator'])
if closed:
self.setup_view(closed=True)
else:
self.setup_view()
page = self.create_single_thread_page(self.thread_id)
page.visit()
page.close_open_thread()
return page
def test_originally_open_thread_vote_display(self):
page = self.setup_openclosed_thread_page()
self.assertFalse(page._is_element_visible('.forum-thread-main-wrapper .action-vote'))
self.assertTrue(page._is_element_visible('.forum-thread-main-wrapper .display-vote'))
self.assertFalse(page._is_element_visible('.response_response1 .action-vote'))
self.assertTrue(page._is_element_visible('.response_response1 .display-vote'))
def test_originally_closed_thread_vote_display(self):
page = self.setup_openclosed_thread_page(True)
self.assertTrue(page._is_element_visible('.forum-thread-main-wrapper .action-vote'))
self.assertFalse(page._is_element_visible('.forum-thread-main-wrapper .display-vote'))
self.assertTrue(page._is_element_visible('.response_response1 .action-vote'))
self.assertFalse(page._is_element_visible('.response_response1 .display-vote'))
@attr('shard_1')
class DiscussionCommentDeletionTest(BaseDiscussionTestCase):
"""
Tests for deleting comments displayed beneath responses in the single thread view.
"""
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self):
view = SingleThreadViewFixture(Thread(id="comment_deletion_test_thread", commentable_id=self.discussion_id))
view.addResponse(
Response(id="response1"),
[Comment(id="comment_other_author", user_id="other"), Comment(id="comment_self_author", user_id=self.user_id)])
view.push()
def test_comment_deletion_as_student(self):
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("comment_deletion_test_thread")
page.visit()
self.assertTrue(page.is_comment_deletable("comment_self_author"))
self.assertTrue(page.is_comment_visible("comment_other_author"))
self.assertFalse(page.is_comment_deletable("comment_other_author"))
page.delete_comment("comment_self_author")
def test_comment_deletion_as_moderator(self):
self.setup_user(roles=['Moderator'])
self.setup_view()
page = self.create_single_thread_page("comment_deletion_test_thread")
page.visit()
self.assertTrue(page.is_comment_deletable("comment_self_author"))
self.assertTrue(page.is_comment_deletable("comment_other_author"))
page.delete_comment("comment_self_author")
page.delete_comment("comment_other_author")
@attr('shard_1')
class DiscussionResponseEditTest(BaseDiscussionTestCase):
"""
Tests for editing responses displayed beneath thread in the single thread view.
"""
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self):
view = SingleThreadViewFixture(Thread(id="response_edit_test_thread", commentable_id=self.discussion_id))
view.addResponse(
Response(id="response_other_author", user_id="other", thread_id="response_edit_test_thread"),
)
view.addResponse(
Response(id="response_self_author", user_id=self.user_id, thread_id="response_edit_test_thread"),
)
view.push()
def edit_response(self, page, response_id):
self.assertTrue(page.is_response_editable(response_id))
page.start_response_edit(response_id)
new_response = "edited body"
page.set_response_editor_value(response_id, new_response)
page.submit_response_edit(response_id, new_response)
def test_edit_response_as_student(self):
"""
        Scenario: Students should be able to edit the response they created, but not the responses of other users
Given that I am on discussion page with student logged in
When I try to edit the response created by student
Then the response should be edited and rendered successfully
        And responses from other users should still be shown
        And the student should not be able to edit the responses of other people
"""
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
self.assertTrue(page.is_response_visible("response_other_author"))
self.assertFalse(page.is_response_editable("response_other_author"))
self.edit_response(page, "response_self_author")
def test_edit_response_as_moderator(self):
"""
Scenario: Moderator should be able to edit the response they created and responses of other users
Given that I am on discussion page with moderator logged in
When I try to edit the response created by moderator
Then the response should be edited and rendered successfully
And I try to edit the response created by other users
Then the response should be edited and rendered successfully
"""
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
self.edit_response(page, "response_self_author")
self.edit_response(page, "response_other_author")
def test_vote_report_endorse_after_edit(self):
"""
Scenario: Moderator should be able to vote, report or endorse after editing the response.
Given that I am on discussion page with moderator logged in
When I try to edit the response created by moderator
Then the response should be edited and rendered successfully
And I try to edit the response created by other users
Then the response should be edited and rendered successfully
And I try to vote the response created by moderator
Then the response should be voted successfully
And I try to vote the response created by other users
Then the response should be voted successfully
And I try to report the response created by moderator
Then the response should be reported successfully
And I try to report the response created by other users
Then the response should be reported successfully
And I try to endorse the response created by moderator
Then the response should be endorsed successfully
And I try to endorse the response created by other users
Then the response should be endorsed successfully
"""
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
self.edit_response(page, "response_self_author")
self.edit_response(page, "response_other_author")
page.vote_response('response_self_author')
page.vote_response('response_other_author')
page.report_response('response_self_author')
page.report_response('response_other_author')
page.endorse_response('response_self_author')
page.endorse_response('response_other_author')
@attr('shard_1')
class DiscussionCommentEditTest(BaseDiscussionTestCase):
"""
Tests for editing comments displayed beneath responses in the single thread view.
"""
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self):
view = SingleThreadViewFixture(Thread(id="comment_edit_test_thread", commentable_id=self.discussion_id))
view.addResponse(
Response(id="response1"),
[Comment(id="comment_other_author", user_id="other"), Comment(id="comment_self_author", user_id=self.user_id)])
view.push()
def edit_comment(self, page, comment_id):
page.start_comment_edit(comment_id)
new_comment = "edited body"
page.set_comment_editor_value(comment_id, new_comment)
page.submit_comment_edit(comment_id, new_comment)
def test_edit_comment_as_student(self):
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_visible("comment_other_author"))
self.assertFalse(page.is_comment_editable("comment_other_author"))
self.edit_comment(page, "comment_self_author")
def test_edit_comment_as_moderator(self):
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_editable("comment_other_author"))
self.edit_comment(page, "comment_self_author")
self.edit_comment(page, "comment_other_author")
def test_cancel_comment_edit(self):
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
page.set_comment_editor_value("comment_self_author", "edited body")
page.cancel_comment_edit("comment_self_author", original_body)
def test_editor_visibility(self):
"""Only one editor should be visible at a time within a single response"""
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_editable("comment_other_author"))
self.assertTrue(page.is_add_comment_visible("response1"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
self.assertFalse(page.is_add_comment_visible("response1"))
self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
page.set_comment_editor_value("comment_self_author", "edited body")
page.start_comment_edit("comment_other_author")
self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
self.assertTrue(page.is_comment_editor_visible("comment_other_author"))
self.assertEqual(page.get_comment_body("comment_self_author"), original_body)
page.start_response_edit("response1")
self.assertFalse(page.is_comment_editor_visible("comment_other_author"))
self.assertTrue(page.is_response_editor_visible("response1"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
self.assertFalse(page.is_response_editor_visible("response1"))
self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
page.cancel_comment_edit("comment_self_author", original_body)
self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
self.assertTrue(page.is_add_comment_visible("response1"))
@attr('shard_1')
class InlineDiscussionTest(UniqueCourseTest, DiscussionResponsePaginationTestMixin):
"""
Tests for inline discussions
"""
def setUp(self):
super(InlineDiscussionTest, self).setUp()
self.discussion_id = "test_discussion_{}".format(uuid4().hex)
self.additional_discussion_id = "test_discussion_{}".format(uuid4().hex)
self.course_fix = CourseFixture(**self.course_info).add_children(
XBlockFixtureDesc("chapter", "Test Section").add_children(
XBlockFixtureDesc("sequential", "Test Subsection").add_children(
XBlockFixtureDesc("vertical", "Test Unit").add_children(
XBlockFixtureDesc(
"discussion",
"Test Discussion",
metadata={"discussion_id": self.discussion_id}
),
XBlockFixtureDesc(
"discussion",
"Test Discussion 1",
metadata={"discussion_id": self.additional_discussion_id}
)
)
)
)
).install()
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.courseware_page.visit()
self.discussion_page = InlineDiscussionPage(self.browser, self.discussion_id)
self.additional_discussion_page = InlineDiscussionPage(self.browser, self.additional_discussion_id)
def setup_thread_page(self, thread_id):
self.discussion_page.expand_discussion()
self.assertEqual(self.discussion_page.get_num_displayed_threads(), 1)
self.thread_page = InlineDiscussionThreadPage(self.browser, thread_id) # pylint: disable=attribute-defined-outside-init
self.thread_page.expand()
def test_initial_render(self):
self.assertFalse(self.discussion_page.is_discussion_expanded())
def test_expand_discussion_empty(self):
self.discussion_page.expand_discussion()
self.assertEqual(self.discussion_page.get_num_displayed_threads(), 0)
def check_anonymous_to_peers(self, is_staff):
thread = Thread(id=uuid4().hex, anonymous_to_peers=True, commentable_id=self.discussion_id)
thread_fixture = SingleThreadViewFixture(thread)
thread_fixture.push()
self.setup_thread_page(thread.get("id"))
self.assertEqual(self.thread_page.is_thread_anonymous(), not is_staff)
def test_anonymous_to_peers_threads_as_staff(self):
AutoAuthPage(self.browser, course_id=self.course_id, roles="Administrator").visit()
self.courseware_page.visit()
self.check_anonymous_to_peers(True)
def test_anonymous_to_peers_threads_as_peer(self):
self.check_anonymous_to_peers(False)
def test_discussion_blackout_period(self):
now = datetime.datetime.now(UTC)
self.course_fix.add_advanced_settings(
{
u"discussion_blackouts": {
"value": [
[
(now - datetime.timedelta(days=14)).isoformat(),
(now + datetime.timedelta(days=2)).isoformat()
]
]
}
}
)
self.course_fix._add_advanced_settings()
self.browser.refresh()
thread = Thread(id=uuid4().hex, commentable_id=self.discussion_id)
thread_fixture = SingleThreadViewFixture(thread)
thread_fixture.addResponse(
Response(id="response1"),
[Comment(id="comment1", user_id="other"), Comment(id="comment2", user_id=self.user_id)])
thread_fixture.push()
self.setup_thread_page(thread.get("id"))
self.assertFalse(self.discussion_page.element_exists(".new-post-btn"))
self.assertFalse(self.thread_page.has_add_response_button())
self.assertFalse(self.thread_page.is_response_editable("response1"))
self.assertFalse(self.thread_page.is_add_comment_visible("response1"))
self.assertFalse(self.thread_page.is_comment_editable("comment1"))
self.assertFalse(self.thread_page.is_comment_editable("comment2"))
self.assertFalse(self.thread_page.is_comment_deletable("comment1"))
self.assertFalse(self.thread_page.is_comment_deletable("comment2"))
def test_dual_discussion_module(self):
"""
        Scenario: Two discussion modules in one unit shouldn't override each other's actions
        Given that I'm on a courseware page where there are two inline discussions
When I click on one discussion module new post button
Then it should add new post form of that module in DOM
And I should be shown new post form of that module
And I shouldn't be shown second discussion module new post form
And I click on second discussion module new post button
Then it should add new post form of second module in DOM
And I should be shown second discussion new post form
And I shouldn't be shown first discussion module new post form
And I have two new post form in the DOM
When I click back on first module new post button
And I should be shown new post form of that module
And I shouldn't be shown second discussion module new post form
"""
self.discussion_page.wait_for_page()
self.additional_discussion_page.wait_for_page()
self.discussion_page.click_new_post_button()
with self.discussion_page.handle_alert():
self.discussion_page.click_cancel_new_post()
self.additional_discussion_page.click_new_post_button()
self.assertFalse(self.discussion_page._is_element_visible(".new-post-article"))
with self.additional_discussion_page.handle_alert():
self.additional_discussion_page.click_cancel_new_post()
self.discussion_page.click_new_post_button()
self.assertFalse(self.additional_discussion_page._is_element_visible(".new-post-article"))
@attr('shard_1')
class DiscussionUserProfileTest(UniqueCourseTest):
"""
Tests for user profile page in discussion tab.
"""
PAGE_SIZE = 20 # django_comment_client.forum.views.THREADS_PER_PAGE
PROFILED_USERNAME = "profiled-user"
def setUp(self):
super(DiscussionUserProfileTest, self).setUp()
CourseFixture(**self.course_info).install()
# The following line creates a user enrolled in our course, whose
# threads will be viewed, but not the one who will view the page.
# It isn't necessary to log them in, but using the AutoAuthPage
# saves a lot of code.
self.profiled_user_id = AutoAuthPage(
self.browser,
username=self.PROFILED_USERNAME,
course_id=self.course_id
).visit().get_user_id()
# now create a second user who will view the profile.
self.user_id = AutoAuthPage(
self.browser,
course_id=self.course_id
).visit().get_user_id()
def check_pages(self, num_threads):
# set up the stub server to return the desired amount of thread results
threads = [Thread(id=uuid4().hex) for _ in range(num_threads)]
UserProfileViewFixture(threads).push()
# navigate to default view (page 1)
page = DiscussionUserProfilePage(
self.browser,
self.course_id,
self.profiled_user_id,
self.PROFILED_USERNAME
)
page.visit()
current_page = 1
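# Python 2 integer division: the expression below works out to ceil(num_threads / PAGE_SIZE), with a minimum of one page.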
total_pages = max(num_threads - 1, 1) / self.PAGE_SIZE + 1
all_pages = range(1, total_pages + 1)
return page
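# NOTE: because of the unconditional return above, the nested _check_page helper and the
# page-stepping loops below are never executed.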
def _check_page():
# ensure the page being displayed as "current" is the expected one
self.assertEqual(page.get_current_page(), current_page)
# ensure the expected threads are being shown in the right order
threads_expected = threads[(current_page - 1) * self.PAGE_SIZE:current_page * self.PAGE_SIZE]
self.assertEqual(page.get_shown_thread_ids(), [t["id"] for t in threads_expected])
# ensure the clickable page numbers are the expected ones
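# (pages within two of the current page, excluding the current page itself, plus the first and
# last pages when they fall outside that window; note that `and` binds tighter than `or` below)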
self.assertEqual(page.get_clickable_pages(), [
p for p in all_pages
if p != current_page
and p - 2 <= current_page <= p + 2
or (current_page > 2 and p == 1)
or (current_page < total_pages and p == total_pages)
])
# ensure the previous button is shown, but only if it should be.
# when it is shown, make sure it works.
if current_page > 1:
self.assertTrue(page.is_prev_button_shown(current_page - 1))
page.click_prev_page()
self.assertEqual(page.get_current_page(), current_page - 1)
page.click_next_page()
self.assertEqual(page.get_current_page(), current_page)
else:
self.assertFalse(page.is_prev_button_shown())
# ensure the next button is shown, but only if it should be.
if current_page < total_pages:
self.assertTrue(page.is_next_button_shown(current_page + 1))
else:
self.assertFalse(page.is_next_button_shown())
# click all the way up through each page
for i in range(current_page, total_pages):
_check_page()
if current_page < total_pages:
page.click_on_page(current_page + 1)
current_page += 1
# click all the way back down
for i in range(current_page, 0, -1):
_check_page()
if current_page > 1:
page.click_on_page(current_page - 1)
current_page -= 1
def test_0_threads(self):
self.check_pages(0)
def test_1_thread(self):
self.check_pages(1)
def test_20_threads(self):
self.check_pages(20)
def test_21_threads(self):
self.check_pages(21)
def test_151_threads(self):
self.check_pages(151)
def test_pagination_window_reposition(self):
page = self.check_pages(50)
page.click_next_page()
page.wait_for_ajax()
self.assertTrue(page.is_window_on_top())
@attr('shard_1')
class DiscussionSearchAlertTest(UniqueCourseTest):
"""
Tests for spawning and dismissing alerts related to user search actions and their results.
"""
SEARCHED_USERNAME = "gizmo"
def setUp(self):
super(DiscussionSearchAlertTest, self).setUp()
CourseFixture(**self.course_info).install()
# first auto auth call sets up a user that we will search for in some tests
self.searched_user_id = AutoAuthPage(
self.browser,
username=self.SEARCHED_USERNAME,
course_id=self.course_id
).visit().get_user_id()
# this auto auth call creates the actual session user
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.page = DiscussionTabHomePage(self.browser, self.course_id)
self.page.visit()
def setup_corrected_text(self, text):
SearchResultFixture(SearchResult(corrected_text=text)).push()
def check_search_alert_messages(self, expected):
actual = self.page.get_search_alert_messages()
self.assertTrue(all(map(lambda msg, sub: msg.lower().find(sub.lower()) >= 0, actual, expected)))
def test_no_rewrite(self):
self.setup_corrected_text(None)
self.page.perform_search()
self.check_search_alert_messages(["no threads"])
def test_rewrite_dismiss(self):
self.setup_corrected_text("foo")
self.page.perform_search()
self.check_search_alert_messages(["foo"])
self.page.dismiss_alert_message("foo")
self.check_search_alert_messages([])
def test_new_search(self):
self.setup_corrected_text("foo")
self.page.perform_search()
self.check_search_alert_messages(["foo"])
self.setup_corrected_text("bar")
self.page.perform_search()
self.check_search_alert_messages(["bar"])
self.setup_corrected_text(None)
self.page.perform_search()
self.check_search_alert_messages(["no threads"])
def test_rewrite_and_user(self):
self.setup_corrected_text("foo")
self.page.perform_search(self.SEARCHED_USERNAME)
self.check_search_alert_messages(["foo", self.SEARCHED_USERNAME])
def test_user_only(self):
self.setup_corrected_text(None)
self.page.perform_search(self.SEARCHED_USERNAME)
self.check_search_alert_messages(["no threads", self.SEARCHED_USERNAME])
# make sure clicking the link leads to the user profile page
UserProfileViewFixture([]).push()
self.page.get_search_alert_links().first.click()
DiscussionUserProfilePage(
self.browser,
self.course_id,
self.searched_user_id,
self.SEARCHED_USERNAME
).wait_for_page()
@attr('shard_1')
class DiscussionSortPreferenceTest(UniqueCourseTest):
"""
Tests for the user's discussion sort preference.
"""
def setUp(self):
super(DiscussionSortPreferenceTest, self).setUp()
# Create a course to register for.
CourseFixture(**self.course_info).install()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.sort_page = DiscussionSortPreferencePage(self.browser, self.course_id)
self.sort_page.visit()
def test_default_sort_preference(self):
"""
Check the user's default sorting preference (default is "date").
"""
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, "date")
def test_change_sort_preference(self):
"""
Test that the user's sorting preference changes properly.
"""
selected_sort = ""
for sort_type in ["votes", "comments", "date"]:
self.assertNotEqual(selected_sort, sort_type)
self.sort_page.change_sort_preference(sort_type)
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
def test_last_preference_saved(self):
"""
Test that the user's last sorting preference is saved across a page refresh.
"""
selected_sort = ""
for sort_type in ["votes", "comments", "date"]:
self.assertNotEqual(selected_sort, sort_type)
self.sort_page.change_sort_preference(sort_type)
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
self.sort_page.refresh_page()
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
|
beni55/edx-platform
|
common/test/acceptance/tests/discussion/test_discussion.py
|
Python
|
agpl-3.0
| 34,315
|
[
"VisIt"
] |
fd2e3d6373965d5a71ddfbad54f5aa7301ead216f3ea2a580efaf54541ca32c0
|
#####################################################################
# File: ReqManagerHandler.py
########################################################################
"""
:mod: ReqManagerHandler
.. module: ReqManagerHandler
:synopsis: Implementation of the RequestDB service in the DISET framework
"""
__RCSID__ = "$Id$"
# # imports
import json
import datetime
import math
from types import DictType, IntType, LongType, ListType, StringTypes
# # from DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
# # from RMS
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator
from DIRAC.RequestManagementSystem.DB.RequestDB import RequestDB
class ReqManagerHandler( RequestHandler ):
"""
.. class:: ReqManagerHandler
RequestDB interface in the DISET framework.
"""
# # request validator
__validator = None
# # request DB instance
__requestDB = None
@classmethod
def initializeHandler( cls, serviceInfoDict ):
""" initialize handler """
try:
cls.__requestDB = RequestDB()
except RuntimeError, error:
gLogger.exception( error )
return S_ERROR( error )
# # create tables for empty db
return cls.__requestDB.createTables()
# # helper functions
@classmethod
def validate( cls, request ):
""" request validation """
if not cls.__validator:
cls.__validator = RequestValidator()
return cls.__validator.validate( request )
types_getRequestIDForName = [ StringTypes ]
@classmethod
def export_getRequestIDForName( cls, requestName ):
""" get requestID for given :requestName: """
if type( requestName ) in StringTypes:
result = cls.__requestDB.getRequestIDForName( requestName )
if not result["OK"]:
return result
requestID = result["Value"]
return S_OK( requestID )
types_cancelRequest = [ ( IntType, LongType ) ]
@classmethod
def export_cancelRequest( cls , requestID ):
""" Cancel a request """
return cls.__requestDB.cancelRequest( requestID )
types_putRequest = [ StringTypes ]
@classmethod
def export_putRequest( cls, requestJSON ):
""" put a new request into RequestDB
:param cls: class ref
:param str requestJSON: request serialized to JSON format
"""
requestDict = json.loads( requestJSON )
requestName = requestDict.get( "RequestID", requestDict.get( 'RequestName', "***UNKNOWN***" ) )
request = Request( requestDict )
optimized = request.optimize()
if optimized.get( "Value", False ):
gLogger.debug( "putRequest: request was optimized" )
else:
gLogger.debug( "putRequest: request unchanged", optimized.get( "Message", "Nothing could be optimized" ) )
valid = cls.validate( request )
if not valid["OK"]:
gLogger.error( "putRequest: request %s not valid: %s" % ( requestName, valid["Message"] ) )
return valid
# If NotBefore is not set or is already in the past, we calculate its value
now = datetime.datetime.utcnow().replace( microsecond = 0 )
extraDelay = datetime.timedelta( 0 )
if request.Status not in Request.FINAL_STATES and ( not request.NotBefore or request.NotBefore < now ) :
op = request.getWaiting().get( 'Value' )
# If there is a waiting Operation with Files
if op and len( op ):
attemptList = [ opFile.Attempt for opFile in op if opFile.Status == "Waiting" ]
if attemptList:
maxWaitingAttempt = max( [ opFile.Attempt for opFile in op if opFile.Status == "Waiting" ] )
# In case it is the first attempt, extraDelay is 0
# maxWaitingAttempt can be None if the operation has no File, like the ForwardDiset
extraDelay = datetime.timedelta( minutes = 2 * math.log( maxWaitingAttempt ) if maxWaitingAttempt else 0 )
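# e.g. a single prior waiting attempt adds no delay ( 2 * ln( 1 ) = 0 ), while ten prior
# waiting attempts add roughly 2 * ln( 10 ) ~ 4.6 minutes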
request.NotBefore = now + extraDelay
gLogger.info( "putRequest: request %s not before %s (extra delay %s)" % ( request.RequestName, request.NotBefore, extraDelay ) )
requestName = request.RequestName
gLogger.info( "putRequest: Attempting to set request '%s'" % requestName )
return cls.__requestDB.putRequest( request )
types_getScheduledRequest = [ ( IntType, LongType ) ]
@classmethod
def export_getScheduledRequest( cls , operationID ):
""" read scheduled request given operationID """
scheduled = cls.__requestDB.getScheduledRequest( operationID )
if not scheduled["OK"]:
gLogger.error( "getScheduledRequest: %s" % scheduled["Message"] )
return scheduled
if not scheduled["Value"]:
return S_OK()
requestJSON = scheduled["Value"].toJSON()
if not requestJSON["OK"]:
gLogger.error( "getScheduledRequest: %s" % requestJSON["Message"] )
return requestJSON
types_getDBSummary = []
@classmethod
def export_getDBSummary( cls ):
""" Get the summary of requests in the Request DB """
return cls.__requestDB.getDBSummary()
types_getRequest = [ ( LongType, IntType ) ]
@classmethod
def export_getRequest( cls, requestID = 0 ):
""" Get a request of given type from the database """
getRequest = cls.__requestDB.getRequest( requestID )
if not getRequest["OK"]:
gLogger.error( "getRequest: %s" % getRequest["Message"] )
return getRequest
if getRequest["Value"]:
getRequest = getRequest["Value"]
toJSON = getRequest.toJSON()
if not toJSON["OK"]:
gLogger.error( toJSON["Message"] )
return toJSON
return S_OK()
types_getBulkRequests = [ IntType ]
@classmethod
def export_getBulkRequests( cls, numberOfRequest = 10 ):
""" Get a request of given type from the database
:param numberOfRequest : size of the bulk (default 10)
:return S_OK( {Failed : message, Successful : list of Request.toJSON()} )
"""
getRequests = cls.__requestDB.getBulkRequests( numberOfRequest )
if not getRequests["OK"]:
gLogger.error( "getRequests: %s" % getRequests["Message"] )
return getRequests
if getRequests["Value"]:
getRequests = getRequests["Value"]
toJSONDict = {"Successful" : {}, "Failed" : {}}
for rId in getRequests:
toJSON = getRequests[rId].toJSON()
if not toJSON["OK"]:
gLogger.error( toJSON["Message"] )
toJSONDict["Failed"][rId] = toJSON["Message"]
else:
toJSONDict["Successful"][rId] = toJSON["Value"]
return S_OK( toJSONDict )
return S_OK()
types_peekRequest = [ ( LongType, IntType ) ]
@classmethod
def export_peekRequest( cls, requestID = 0 ):
""" peek request given its id """
peekRequest = cls.__requestDB.peekRequest( requestID )
if not peekRequest["OK"]:
gLogger.error( "peekRequest: %s" % peekRequest["Message"] )
return peekRequest
if peekRequest["Value"]:
peekRequest = peekRequest["Value"].toJSON()
if not peekRequest["OK"]:
gLogger.error( peekRequest["Message"] )
return peekRequest
types_getRequestSummaryWeb = [ DictType, ListType, IntType, IntType ]
@classmethod
def export_getRequestSummaryWeb( cls, selectDict, sortList, startItem, maxItems ):
""" Returns a list of Request for the web portal
:param dict selectDict: parameter on which to restrain the query {key : Value}
key can be any of the Request columns, 'Type' (interpreted as Operation.Type)
and 'FromData' and 'ToData' are matched against the LastUpdate field
:param list sortList: [sorting column, ASC/DESC]
:param int startItem: start item (for pagination)
:param int maxItems: max items (for pagination)
"""
return cls.__requestDB.getRequestSummaryWeb( selectDict, sortList, startItem, maxItems )
types_getDistinctValuesWeb = [ StringTypes ]
@classmethod
def export_getDistinctValuesWeb( cls, attribute ):
""" Get distinct values for a given request attribute. 'Type' is interpreted as
the operation type """
tableName = 'Request'
if attribute == 'Type':
tableName = 'Operation'
return cls.__requestDB.getDistinctValues( tableName, attribute )
types_getRequestCountersWeb = [ StringTypes, DictType ]
@classmethod
def export_getRequestCountersWeb( cls, groupingAttribute, selectDict ):
""" For the web portal.
Returns a dictionary {value : counts} for a given key.
The key can be any field from the RequestTable, or "Type",
which will be interpreted as 'Operation.Type'
:param groupingAttribute : attribute used for grouping
:param selectDict : selection criteria
"""
return cls.__requestDB.getRequestCountersWeb( groupingAttribute, selectDict )
types_deleteRequest = [ ( IntType, LongType ) ]
@classmethod
def export_deleteRequest( cls, requestID ):
""" Delete the request with the supplied ID"""
return cls.__requestDB.deleteRequest( requestID )
types_getRequestIDsList = [ ListType, IntType, StringTypes ]
@classmethod
def export_getRequestIDsList( cls, statusList = None, limit = None, since = None, until = None ):
""" get requests' IDs with status in :statusList: """
statusList = statusList if statusList else list( Request.FINAL_STATES )
limit = limit if limit else 100
since = since if since else ""
until = until if until else ""
reqIDsList = cls.__requestDB.getRequestIDsList( statusList, limit, since = since, until = until )
if not reqIDsList["OK"]:
gLogger.error( "getRequestIDsList: %s" % reqIDsList["Message"] )
return reqIDsList
types_getRequestIDsForJobs = [ ListType ]
@classmethod
def export_getRequestIDsForJobs( cls, jobIDs ):
""" Select the request IDs for supplied jobIDs """
return cls.__requestDB.getRequestIDsForJobs( jobIDs )
types_readRequestsForJobs = [ ListType ]
@classmethod
def export_readRequestsForJobs( cls, jobIDs ):
""" read requests for jobs given list of jobIDs """
requests = cls.__requestDB.readRequestsForJobs( jobIDs )
if not requests["OK"]:
gLogger.error( "readRequestsForJobs: %s" % requests["Message"] )
return requests
for jobID, request in requests["Value"]["Successful"].items():
requests["Value"]["Successful"][jobID] = request.toJSON()["Value"]
return requests
types_getDigest = [ ( IntType, LongType ) ]
@classmethod
def export_getDigest( cls, requestID ):
""" get digest for a request given its id
:param str requestID: request's id
:return: S_OK( json_str )
"""
return cls.__requestDB.getDigest( requestID )
types_getRequestStatus = [ ( IntType, LongType ) ]
@classmethod
def export_getRequestStatus( cls, requestID ):
""" get request status given its id """
status = cls.__requestDB.getRequestStatus( requestID )
if not status["OK"]:
gLogger.error( "getRequestStatus: %s" % status["Message"] )
return status
types_getRequestFileStatus = [ [ IntType, LongType ], list( StringTypes ) + [ListType] ]
@classmethod
def export_getRequestFileStatus( cls, requestID, lfnList ):
""" get request file status for a given LFNs list and requestID """
if type( lfnList ) == str:
lfnList = [lfnList]
res = cls.__requestDB.getRequestFileStatus( requestID, lfnList )
if not res["OK"]:
gLogger.error( "getRequestFileStatus: %s" % res["Message"] )
return res
# types_getRequestName = [ ( IntType, LongType ) ]
# @classmethod
# def export_getRequestName( cls, requestID ):
# """ get request name for a given requestID """
# requestName = cls.__requestDB.getRequestName( requestID )
# if not requestName["OK"]:
# gLogger.error( "getRequestName: %s" % requestName["Message"] )
# return requestName
types_getRequestInfo = [ [ IntType, LongType ] ]
@classmethod
def export_getRequestInfo( cls, requestID ):
""" get request info for a given requestID """
requestInfo = cls.__requestDB.getRequestInfo( requestID )
if not requestInfo["OK"]:
gLogger.error( "getRequestInfo: %s" % requestInfo["Message"] )
return requestInfo
|
marcelovilaca/DIRAC
|
RequestManagementSystem/Service/ReqManagerHandler.py
|
Python
|
gpl-3.0
| 12,159
|
[
"DIRAC"
] |
49dc6feb433cce415840ea15c347c19ddda484ddc574e2658f4cb12ed84a7329
|
import glob
import os
import shutil
from pathlib import Path
from django.conf import settings
from django.core.management import BaseCommand, CommandError
from django.db import transaction
from ncdjango.models import Service, Variable
from netCDF4 import Dataset
from pyproj import Proj
from trefoil.netcdf.describe import describe
from trefoil.netcdf.variable import SpatialCoordinateVariables
from trefoil.render.renderers.stretched import StretchedRenderer
from trefoil.utilities.color import Color
SERVICE_DIR = getattr(settings, 'NC_SERVICE_DATA_ROOT', 'data/ncdjango/services')
class Command(BaseCommand):
help = (
'Publish a NetCDF dataset as a map service (use `populate_climate_data` for publishing climate variable ' +
'datasets)'
)
def add_arguments(self, parser):
parser.add_argument('datasets', nargs='+', type=str)
parser.add_argument('-d', '--directory', default=None, type=str)
parser.add_argument('--overwrite', action='store_true', dest='overwrite')
def handle(self, datasets, directory, overwrite, *args, **options):
old_files = []
for dataset in datasets:
filename = os.path.basename(dataset)
name = os.path.splitext(filename)[0]
if directory is not None:
filename = '{}/{}'.format(directory.strip('/'), filename)
name = '{}/{}'.format(directory.strip('/'), name)
with transaction.atomic():
existing = Service.objects.filter(name__iexact=name)
if existing.exists():
if overwrite:
old_files.append(os.path.join(SERVICE_DIR, existing.get().data_path))
existing.delete()
else:
raise CommandError("A service named '{}' already exists".format(name))
with Dataset(dataset, 'r') as ds:
variables = []
x_dimension = None
y_dimension = None
projection = None
desc = describe(ds)
for variable, variable_info in desc['variables'].items():
if 'spatial_grid' in variable_info:
variables.append(variable)
spatial_grid = variable_info['spatial_grid']
x_dimension = spatial_grid['x_dimension']
y_dimension = spatial_grid['y_dimension']
projection = Proj(variable_info['proj4'])
if not variables:
raise CommandError('No usable variables found')
coords = SpatialCoordinateVariables.from_dataset(
ds, x_dimension, y_dimension, projection=projection
)
service = Service.objects.create(
name=name,
data_path=filename,
projection=coords.projection,
full_extent=coords.bbox,
initial_extent=coords.bbox
)
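# Note: variable_info below is whatever the describe() loop above left bound (its last entry),
# so the same min/max stretch range is applied to every published variable.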
for variable in variables:
Variable.objects.create(
service=service,
index=0,
variable=variable,
projection=projection,
x_dimension=x_dimension,
y_dimension=y_dimension,
name=variable,
renderer=StretchedRenderer(
[(variable_info['min'], Color(0, 0, 0)), (variable_info['max'], Color(255, 255, 255))]
),
full_extent=coords.bbox
)
print('Added {}...'.format(name))
for path in old_files:
if os.path.exists(path):
os.remove(path)
for dataset in datasets:
target_dir = Path(SERVICE_DIR) / (directory or '')
if not os.path.exists(target_dir):
os.makedirs(target_dir)
shutil.copy(dataset, target_dir)
|
consbio/seedsource-core
|
seedsource_core/django/seedsource/management/commands/publish_netcdf.py
|
Python
|
bsd-3-clause
| 4,185
|
[
"NetCDF"
] |
d8933d7145d1e4f7f69ae6c139d83f9695daa5f447bc826198ca568d440a25a1
|
"""
Migration script to add a 'tool_version' column to the hda/ldda tables.
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
from galaxy.model.custom_types import *
metadata = MetaData()
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print __doc__
metadata.reflect()
try:
hda_table = Table( "history_dataset_association", metadata, autoload=True )
c = Column( "tool_version", TEXT )
c.create( hda_table )
assert c is hda_table.c.tool_version
ldda_table = Table( "library_dataset_dataset_association", metadata, autoload=True )
c = Column( "tool_version", TEXT )
c.create( ldda_table )
assert c is ldda_table.c.tool_version
except Exception, e:
print "Adding the tool_version column to the hda/ldda tables failed: ", str( e )
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
try:
hda_table = Table( "history_dataset_association", metadata, autoload=True )
hda_table.c.tool_version.drop()
ldda_table = Table( "library_dataset_dataset_association", metadata, autoload=True )
ldda_table.c.tool_version.drop()
except Exception, e:
print "Dropping the tool_version column from hda/ldda table failed: ", str( e )
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/model/migrate/versions/0081_add_tool_version_to_hda_ldda.py
|
Python
|
gpl-3.0
| 1,370
|
[
"Galaxy"
] |
32aa62d285e5b2d8d5d704339e3b9a1079e500f56ae9d7d28d8edd18d37e5d03
|
import functools
def resolve_internal(tree):
""" Given a tree, replace any nodes with an 'href' attribute
with the node with a corresponding 'id'. """
# get all the nodes with an id attribute
id_map = {}
id_visitor = functools.partial(get_id_nodes, id_map)
tree.visit(id_visitor)
# get all the nodes with an href attribute
href_nodes = []
href_visitor = functools.partial(get_href_nodes, href_nodes)
tree.visit(href_visitor)
# replace each node in the href map with a node in the id map
for href_node in href_nodes:
key = href_node.attributes["href"].strip('#')
# grab the id node associated with the href node
id_node = id_map.get(key, None)
if id_node is None:
continue
# since this node is used by-reference, we'll remove it from where it's defined
if id_node.parent is not None:
id_node.parent.remove_child(id_node)
id_node.parent = None
new_node = id_node.clone()
# replace the href_node with the id_node
href_node.parent.replace_child(href_node, new_node)
new_node.parent = href_node.parent
href_node.parent = None
def get_id_nodes(memory, node):
""" If the node has an 'id' attribute, record it here. """
node_id = node.attributes.get('id', None)
if node_id is not None:
memory[node_id] = node
def get_href_nodes(memory, node):
node_href = node.attributes.get('href', None)
if node_href is not None:
memory.append(node)
|
annegentle/wadl2rst
|
wadl2rst/transformations/resolve_internal.py
|
Python
|
apache-2.0
| 1,553
|
[
"VisIt"
] |
d874f4640a4f8e4a2021ab87e656b15910491f07b96528d5745adf50dc0855b5
|
# coding: utf-8
from __future__ import unicode_literals
import hmac
import hashlib
import base64
from .common import InfoExtractor
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
js_to_json,
mimetype2ext,
parse_iso8601,
remove_start,
)
class NYTimesBaseIE(InfoExtractor):
_SECRET = b'pX(2MbU2);4N{7J8)>YwKRJ+/pQ3JkiU2Q^V>mFYv6g6gYvt6v'
def _extract_video_from_id(self, video_id):
# Authorization generation algorithm is reverse engineered from `signer` in
# http://graphics8.nytimes.com/video/vhs/vhs-2.x.min.js
path = '/svc/video/api/v3/video/' + video_id
hm = hmac.new(self._SECRET, (path + ':vhs').encode(), hashlib.sha512).hexdigest()
video_data = self._download_json('http://www.nytimes.com' + path, video_id, 'Downloading video JSON', headers={
'Authorization': 'NYTV ' + base64.b64encode(hm.encode()).decode(),
'X-NYTV': 'vhs',
}, fatal=False)
if not video_data:
video_data = self._download_json(
'http://www.nytimes.com/svc/video/api/v2/video/' + video_id,
video_id, 'Downloading video JSON')
title = video_data['headline']
def get_file_size(file_size):
if isinstance(file_size, int):
return file_size
elif isinstance(file_size, dict):
return int(file_size.get('value', 0))
else:
return None
urls = []
formats = []
for video in video_data.get('renditions', []):
video_url = video.get('url')
format_id = video.get('type')
if not video_url or format_id == 'thumbs' or video_url in urls:
continue
urls.append(video_url)
ext = mimetype2ext(video.get('mimetype')) or determine_ext(video_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
video_url, video_id, 'mp4', 'm3u8_native',
m3u8_id=format_id or 'hls', fatal=False))
elif ext == 'mpd':
continue
# formats.extend(self._extract_mpd_formats(
# video_url, video_id, format_id or 'dash', fatal=False))
else:
formats.append({
'url': video_url,
'format_id': format_id,
'vcodec': video.get('videoencoding') or video.get('video_codec'),
'width': int_or_none(video.get('width')),
'height': int_or_none(video.get('height')),
'filesize': get_file_size(video.get('file_size') or video.get('fileSize')),
'tbr': int_or_none(video.get('bitrate'), 1000),
'ext': ext,
})
self._sort_formats(formats)
thumbnails = []
for image in video_data.get('images', []):
image_url = image.get('url')
if not image_url:
continue
thumbnails.append({
'url': 'http://www.nytimes.com/' + image_url,
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
})
publication_date = video_data.get('publication_date')
timestamp = parse_iso8601(publication_date[:-8]) if publication_date else None
return {
'id': video_id,
'title': title,
'description': video_data.get('summary'),
'timestamp': timestamp,
'uploader': video_data.get('byline'),
'duration': float_or_none(video_data.get('duration'), 1000),
'formats': formats,
'thumbnails': thumbnails,
}
class NYTimesIE(NYTimesBaseIE):
_VALID_URL = r'https?://(?:(?:www\.)?nytimes\.com/video/(?:[^/]+/)+?|graphics8\.nytimes\.com/bcvideo/\d+(?:\.\d+)?/iframe/embed\.html\?videoId=)(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.nytimes.com/video/opinion/100000002847155/verbatim-what-is-a-photocopier.html?playlistId=100000001150263',
'md5': 'd665342765db043f7e225cff19df0f2d',
'info_dict': {
'id': '100000002847155',
'ext': 'mov',
'title': 'Verbatim: What Is a Photocopier?',
'description': 'md5:93603dada88ddbda9395632fdc5da260',
'timestamp': 1398631707,
'upload_date': '20140427',
'uploader': 'Brett Weiner',
'duration': 419,
}
}, {
'url': 'http://www.nytimes.com/video/travel/100000003550828/36-hours-in-dubai.html',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
return self._extract_video_from_id(video_id)
class NYTimesArticleIE(NYTimesBaseIE):
_VALID_URL = r'https?://(?:www\.)?nytimes\.com/(.(?<!video))*?/(?:[^/]+/)*(?P<id>[^.]+)(?:\.html)?'
_TESTS = [{
'url': 'http://www.nytimes.com/2015/04/14/business/owner-of-gravity-payments-a-credit-card-processor-is-setting-a-new-minimum-wage-70000-a-year.html?_r=0',
'md5': 'e2076d58b4da18e6a001d53fd56db3c9',
'info_dict': {
'id': '100000003628438',
'ext': 'mov',
'title': 'New Minimum Wage: $70,000 a Year',
'description': 'Dan Price, C.E.O. of Gravity Payments, surprised his 120-person staff by announcing that he planned over the next three years to raise the salary of every employee to $70,000 a year.',
'timestamp': 1429033037,
'upload_date': '20150414',
'uploader': 'Matthew Williams',
}
}, {
'url': 'http://www.nytimes.com/2016/10/14/podcasts/revelations-from-the-final-weeks.html',
'md5': 'e0d52040cafb07662acf3c9132db3575',
'info_dict': {
'id': '100000004709062',
'title': 'The Run-Up: ‘He Was Like an Octopus’',
'ext': 'mp3',
'description': 'md5:fb5c6b93b12efc51649b4847fe066ee4',
'series': 'The Run-Up',
'episode': '‘He Was Like an Octopus’',
'episode_number': 20,
'duration': 2130,
}
}, {
'url': 'http://www.nytimes.com/2016/10/16/books/review/inside-the-new-york-times-book-review-the-rise-of-hitler.html',
'info_dict': {
'id': '100000004709479',
'title': 'The Rise of Hitler',
'ext': 'mp3',
'description': 'md5:bce877fd9e3444990cb141875fab0028',
'creator': 'Pamela Paul',
'duration': 3475,
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.nytimes.com/news/minute/2014/03/17/times-minute-whats-next-in-crimea/?_php=true&_type=blogs&_php=true&_type=blogs&_r=1',
'only_matching': True,
}]
def _extract_podcast_from_json(self, json, page_id, webpage):
podcast_audio = self._parse_json(
json, page_id, transform_source=js_to_json)
audio_data = podcast_audio['data']
track = audio_data['track']
episode_title = track['title']
video_url = track['source']
description = track.get('description') or self._html_search_meta(
['og:description', 'twitter:description'], webpage)
podcast_title = audio_data.get('podcast', {}).get('title')
title = ('%s: %s' % (podcast_title, episode_title)
if podcast_title else episode_title)
episode = audio_data.get('podcast', {}).get('episode') or ''
episode_number = int_or_none(self._search_regex(
r'[Ee]pisode\s+(\d+)', episode, 'episode number', default=None))
return {
'id': remove_start(podcast_audio.get('target'), 'FT') or page_id,
'url': video_url,
'title': title,
'description': description,
'creator': track.get('credit'),
'series': podcast_title,
'episode': episode_title,
'episode_number': episode_number,
'duration': int_or_none(track.get('duration')),
}
def _real_extract(self, url):
page_id = self._match_id(url)
webpage = self._download_webpage(url, page_id)
video_id = self._search_regex(
r'data-videoid=["\'](\d+)', webpage, 'video id',
default=None, fatal=False)
if video_id is not None:
return self._extract_video_from_id(video_id)
podcast_data = self._search_regex(
(r'NYTD\.FlexTypes\.push\s*\(\s*({.+?})\s*\)\s*;\s*</script',
r'NYTD\.FlexTypes\.push\s*\(\s*({.+})\s*\)\s*;'),
webpage, 'podcast data')
return self._extract_podcast_from_json(podcast_data, page_id, webpage)
|
Tithen-Firion/youtube-dl
|
youtube_dl/extractor/nytimes.py
|
Python
|
unlicense
| 8,836
|
[
"Octopus"
] |
00a78c6338c0ba0b12a4edb587e05f264492b47e1f0248aafe42796efd78e603
|
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
# In this test I compare the results obtained from an R run against H2O-3 runs on a much larger dataset,
# in order to exercise the multi-chunk operation.
def test_HGLM_R():
tot=1e-6
h2o_data = h2o.import_file(path=pyunit_utils.locate("smalldata/glm_test/HGLM_5KRows_100Z.csv"),
col_types=["enum", "enum", "enum", "enum", "numeric", "numeric", "numeric",
"numeric"])
y = "response"
x = ["enum1","enum2","enum3","num1","num2","num3"]
z = 0
h2o_glm = H2OGeneralizedLinearEstimator(HGLM=True, family="gaussian", rand_family=["gaussian"], random_columns=[z],
calc_like=True)
h2o_glm.train(x=x, y=y, training_frame=h2o_data)
modelMetrics = h2o_glm.training_model_metrics()
rmodelMetrics = {"hlik":-23643.3076231, "caic":47019.7968491, "pvh":-23491.5738429, "pbvh": -23490.2982034,
"dfrefe":4953.0, "varfix":703.86912057}
metricsNames = ["hlik", "caic", "pvh", "pbvh", "dfrefe", "varfix"]
for kNames in metricsNames:
assert abs(rmodelMetrics[kNames]-modelMetrics[kNames])<tot,"for {2}, Expected from R: {0}, actual from H2O-3: " \
"{1}".format(rmodelMetrics[kNames], modelMetrics[kNames], kNames)
if __name__ == "__main__":
pyunit_utils.standalone_test(test_HGLM_R)
else:
test_HGLM_R()
|
h2oai/h2o-3
|
h2o-py/tests/testdir_algos/glm/pyunit_PUBDEV_6876_HGLM_compare_R_large.py
|
Python
|
apache-2.0
| 1,607
|
[
"Gaussian"
] |
b9a1549ca17327c7775f097018165f180c9591a8d2cefa2dbd745616b420197f
|
import numpy as np
import cv2
def readFile(file, dx, dy):
rdata = np.fromfile(file, dtype='float32')
#rdata[rdata > 280] = 330
# For global tracking
#rdata[rdata > 220] = 330
#rdata[rdata <= 221] = 100
rdata -= 75
rdata.shape = (dy,dx)
data = rdata.astype(np.uint8)
return data
def readFileTrmm(file, dx, dy):
rdata = np.fromfile(file, dtype='float32')
#rdata[rdata > 280] = 330
# For global tracking
#rdata[rdata > 220] = 330
#rdata[rdata <= 221] = 100
rdata = rdata / 100 * 255
rdata.shape = (dy,dx)
data = rdata.astype(np.uint8)
return data
def writeBinaryFile(file, flow, nx, ny):
size = nx * ny * 2
shape = (size)
rowFlow = flow.reshape(shape).astype(np.float32)
rowFlow.tofile(file)
print 'wrote file:', file
def draw_flow(img, flow, step=16):
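# Overlay the optical flow field on the image: one green line segment and a dot every `step` pixels.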
h, w = img.shape[:2]
y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1)
fx, fy = flow[y,x].T
lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
lines = np.int32(lines + 0.5)
vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv2.polylines(vis, lines, 0, (0, 255, 0))
for (x1, y1), (x2, y2) in lines:
cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
return vis
def writeVectorField(file, flow, t):
f = open(file, 'w')
f.write('# vtk DataFile Version 2.0\n')
f.write('Sample rectilinear grid\n')
f.write('ASCII\n')
f.write('DATASET STRUCTURED_POINTS\n')
nx = len(flow[0])
ny = len(flow)
f.write('DIMENSIONS ' + str(nx) + ' ' + str(ny) + ' 1\n')
f.write('ASPECT_RATIO 1 1 1\n')
f.write('ORIGIN 0 0 0\n')
f.write('POINT_DATA ' + str(nx*ny) + ' \n')
f.write('SCALARS irtemp float\n')
f.write('LOOKUP_TABLE default \n')
for i in range(0,ny):
for j in range(0,nx):
f.write(str(t[i][j])+ '\n')
f.write('VECTORS vectors float\n')
for i in range(0,ny):
for j in range(0,nx):
f.write(str(flow[i][j][0]) + ' ' + str(flow[i][j][1]) + ' 0\n')
print 'Wrote flow: ' + file
def writeSingleVTK(file, flow, t):
f = open(file, 'w')
nx = len(flow[0])
ny = len(flow)
f.write(str(nx) + ' ' + str(ny) + ' \n')
for i in range(0,ny):
for j in range(0,nx):
f.write(str(t[i][j]) + ' ' + str(flow[i][j][0]) + ' ' + str(flow[i][j][1]) + ' 1\n')
print 'Wrote flow: ' + file
|
harishd10/cloud-track
|
python/fileutils.py
|
Python
|
apache-2.0
| 2,322
|
[
"VTK"
] |
66a111097646a91ce63ebf28d20cc6a298414fa048ebfdcc479501f72581dd39
|
# GromacsWrapper: utilities.py
# Copyright (c) 2009 Oliver Beckstein <[email protected]>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
"""
:mod:`gromacs.utilities` -- Helper functions and classes
========================================================
The module defines some convenience functions and classes that are
used in other modules; they do *not* make use of :mod:`gromacs.tools`
or :mod:`gromacs.cbook` and can be safely imported at any time.
Classes
-------
:class:`FileUtils` provides functions related to filename handling. It
can be used as a base or mixin class. The :class:`gromacs.analysis.Simulation`
class is derived from it.
.. autoclass:: FileUtils
:members:
.. autoclass:: AttributeDict
.. autoclass:: Timedelta
Functions
---------
Some additional convenience functions that deal with files and
directories:
.. function:: openany(directory[,mode='r'])
Context manager to open a compressed (bzip2, gzip) or plain file
(uses :func:`anyopen`).
.. autofunction:: anyopen
.. autofunction:: realpath
.. function:: in_dir(directory[,create=True])
Context manager to execute a code block in a directory.
* The *directory* is created if it does not exist (unless
*create* = ``False`` is set)
* At the end or after an exception code always returns to
the directory that was the current directory before entering
the block.
.. autofunction:: find_first
.. autofunction:: withextsep
.. autofunction:: which
Functions that improve list processing and which do *not* treat
strings as lists:
.. autofunction:: iterable
.. autofunction:: asiterable
.. autofunction:: firstof
Functions that help handling Gromacs files:
.. autofunction:: unlink_f
.. autofunction:: unlink_gmx
.. autofunction:: unlink_gmx_backups
.. autofunction:: number_pdbs
Functions that make working with matplotlib_ easier:
.. _matplotlib: http://matplotlib.sourceforge.net/
.. autofunction:: activate_subplot
.. autofunction:: remove_legend
Miscellaneous functions:
.. autofunction:: convert_aa_code
.. autofunction:: autoconvert
Data
----
.. autodata:: amino_acid_codes
"""
from __future__ import absolute_import, with_statement, division
__docformat__ = "restructuredtext en"
import os
import glob
import fnmatch
import re
import warnings
import errno
import subprocess
from contextlib import contextmanager
import bz2, gzip
import datetime
import numpy
from six import string_types
import logging
logger = logging.getLogger('gromacs.utilities')
from .exceptions import AutoCorrectionWarning
class AttributeDict(dict):
"""A dictionary with pythonic access to keys as attributes --- useful for interactive work."""
def __getattribute__(self, x):
try:
return super(AttributeDict,self).__getattribute__(x)
except AttributeError:
return self[x]
def __setattr__(self, name, value):
try:
super(AttributeDict, self).__setitem__(name, value)
except KeyError:
super(AttributeDict, self).__setattr__(name, value)
def __getstate__(self):
return self
def __setstate__(self, state):
self.update(state)
def autoconvert(s):
"""Convert input to a numerical type if possible.
1. A non-string object is returned as it is
2. Try conversion to int, float, str.
"""
if type(s) is not str:
return s
for converter in int, float, str: # try them in increasing order of lenience
try:
s = [converter(i) for i in s.split()]
if len(s) == 1:
return s[0]
else:
return numpy.array(s)
except (ValueError, AttributeError):
pass
raise ValueError("Failed to autoconvert {0!r}".format(s))
@contextmanager
def openany(datasource, mode='r', **kwargs):
"""Open the datasource and close it when the context exits.
:Arguments:
*datasource*
a stream or a filename
*mode*
``'r'`` opens for reading, ``'w'`` for writing ['r']
*kwargs*
additional keyword arguments that are passed through to the
actual handler; if these are not appropriate then an
exception will be raised by the handler
"""
stream, filename = anyopen(datasource, mode=mode, **kwargs)
try:
yield stream
finally:
stream.close()
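# Illustrative use of openany(), reading a possibly compressed file transparently:
#
#   with openany("md.log.bz2") as datafile:
#       for line in datafile:
#           do_something(line)   # do_something() is a placeholder, not defined in this module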
def anyopen(datasource, mode='r', **kwargs):
"""Open datasource (gzipped, bzipped, uncompressed) and return a stream.
:Arguments:
*datasource*
a stream or a filename
*mode*
``'r'`` opens for reading, ``'w'`` for writing ['r']
*kwargs*
additional keyword arguments that are passed through to the
actual handler; if these are not appropriate then an
exception will be raised by the handler
"""
handlers = {'bz2': bz2.BZ2File, 'gz': gzip.open, '': open}
if mode.startswith('r'):
if hasattr(datasource,'next') or hasattr(datasource,'readline'):
stream = datasource
try:
filename = '({0})'.format(stream.name) # maybe that does not always work?
except AttributeError:
filename = str(type(stream))
else:
stream = None
filename = datasource
for ext in ('bz2', 'gz', ''): # file == '' should be last
openfunc = handlers[ext]
stream = _get_stream(datasource, openfunc, mode=mode, **kwargs)
if stream is not None:
break
if stream is None:
raise IOError("Cannot open {filename!r} in mode={mode!r}.".format(**vars()))
elif mode.startswith('w'):
if hasattr(datasource, 'write'):
stream = datasource
try:
filename = '({0})'.format(stream.name) # maybe that does not always work?
except AttributeError:
filename = str(type(stream))
else:
stream = None
filename = datasource
name, ext = os.path.splitext(filename)
if ext.startswith(os.path.extsep):
ext = ext[1:]
if ext not in ('bz2', 'gz'):
ext = '' # anything else but bz2 or gz is just a normal file
openfunc = handlers[ext]
stream = openfunc(datasource, mode=mode, **kwargs)
if stream is None:
raise IOError("Cannot open {filename!r} in mode={mode!r} with type {ext!r}.".format(**vars()))
else:
raise NotImplementedError("Sorry, mode={mode!r} is not implemented for {datasource!r}".format(**vars()))
return stream, filename
def _get_stream(filename, openfunction=open, mode='r'):
try:
stream = openfunction(filename, mode=mode)
except IOError:
return None
try:
stream.readline()
stream.close()
stream = openfunction(filename,'r')
except IOError:
stream.close()
stream = None
return stream
# TODO: make it work for non-default charge state amino acids.
#: translation table for 1-letter codes --> 3-letter codes
#: .. Note: This does not work for HISB and non-default charge state aa!
amino_acid_codes = {'A':'ALA', 'C':'CYS', 'D':'ASP', 'E':'GLU',
'F':'PHE', 'G':'GLY', 'H':'HIS', 'I':'ILE',
'K':'LYS', 'L':'LEU', 'M':'MET', 'N':'ASN',
'P':'PRO', 'Q':'GLN', 'R':'ARG', 'S':'SER',
'T':'THR', 'V':'VAL', 'W':'TRP', 'Y':'TYR'}
inverse_aa_codes = {three: one for one,three in amino_acid_codes.items()}
def convert_aa_code(x):
"""Converts between 3-letter and 1-letter amino acid codes."""
if len(x) == 1:
return amino_acid_codes[x.upper()]
elif len(x) == 3:
return inverse_aa_codes[x.upper()]
else:
raise ValueError("Can only convert 1-letter or 3-letter amino acid codes, "
"not %r" % x)
@contextmanager
def in_dir(directory, create=True):
"""Context manager to execute a code block in a directory.
* The directory is created if it does not exist (unless
create=False is set)
* At the end or after an exception code always returns to
the directory that was the current directory before entering
the block.
"""
startdir = os.getcwd()
try:
try:
os.chdir(directory)
logger.debug("Working in {directory!r}...".format(**vars()))
except OSError as err:
if create and err.errno == errno.ENOENT:
os.makedirs(directory)
os.chdir(directory)
logger.info("Working in {directory!r} (newly created)...".format(**vars()))
else:
logger.exception("Failed to start working in {directory!r}.".format(**vars()))
raise
yield os.getcwd()
finally:
os.chdir(startdir)
def realpath(*args):
"""Join all args and return the real path, rooted at /.
Expands ``~`` and environment variables such as :envvar:`$HOME`.
Returns ``None`` if any of the args is none.
"""
if None in args:
return None
return os.path.realpath(
os.path.expandvars(os.path.expanduser(os.path.join(*args))))
def find_first(filename, suffices=None):
"""Find first *filename* with a suffix from *suffices*.
:Arguments:
*filename*
base filename; this file name is checked first
*suffices*
list of suffices that are tried in turn on the root of *filename*; can contain the
ext separator (:data:`os.path.extsep`) or not
:Returns: The first match or ``None``.
"""
# struct is not reliable as it depends on qscript so now we just try everything...
root,extension = os.path.splitext(filename)
if suffices is None:
suffices = []
else:
suffices = withextsep(suffices)
extensions = [extension] + suffices # native name is first
for ext in extensions:
fn = root + ext
if os.path.exists(fn):
return fn
return None
def withextsep(extensions):
"""Return list in which each element is guaranteed to start with :data:`os.path.extsep`."""
def dottify(x):
if x.startswith(os.path.extsep):
return x
return os.path.extsep + x
return [dottify(x) for x in asiterable(extensions)]
def find_files(directory, pattern):
"""Find files recursively under *directory*, matching *pattern* (generator).
*pattern* is a UNIX-style glob pattern as used by :func:`fnmatch.fnmatch`.
Recipe by Bruno Oliveira from
http://stackoverflow.com/questions/2186525/use-a-glob-to-find-files-recursively-in-python
"""
for root, dirs, files in os.walk(directory):
for basename in files:
if fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
def which(program):
"""Determine full path of executable *program* on :envvar:`PATH`.
(Jay at http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python)
.. versionadded:: 0.5.1
"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
real_program = realpath(program)
if is_exe(real_program):
return real_program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
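# Illustrative use: which("gmx") returns the absolute path of the first matching
# executable found on $PATH, or None if there is none.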
class FileUtils(object):
"""Mixin class to provide additional file-related capabilities."""
#: Default extension for files read/written by this class.
default_extension = None
def _init_filename(self, filename=None, ext=None):
"""Initialize the current filename :attr:`FileUtils.real_filename` of the object.
Bit of a hack.
- The first invocation must have ``filename != None``; this will set a
default filename with suffix :attr:`FileUtils.default_extension`
unless another one was supplied.
- Subsequent invocations either change the filename accordingly or
ensure that the default filename is set with the proper suffix.
"""
extension = ext or self.default_extension
filename = self.filename(filename, ext=extension, use_my_ext=True, set_default=True)
#: Current full path of the object for reading and writing I/O.
self.real_filename = os.path.realpath(filename)
def filename(self,filename=None,ext=None,set_default=False,use_my_ext=False):
"""Supply a file name for the class object.
Typical uses::
fn = filename() ---> <default_filename>
fn = filename('name.ext') ---> 'name'
fn = filename(ext='pickle') ---> <default_filename>'.pickle'
fn = filename('name.inp','pdf') --> 'name.pdf'
fn = filename('foo.pdf',ext='png',use_my_ext=True) --> 'foo.pdf'
The returned filename is stripped of the extension
(``use_my_ext=False``) and if provided, another extension is
appended. Chooses a default if no filename is given.
Raises a ``ValueError`` exception if no default file name is known.
If ``set_default=True`` then the default filename is also set.
``use_my_ext=True`` lets the suffix of a provided filename take
priority over a default extension ``ext``.
.. versionchanged:: 0.3.1
An empty string as *ext* = "" will suppress appending an extension.
"""
if filename is None:
if not hasattr(self,'_filename'):
self._filename = None # add attribute to class
if self._filename:
filename = self._filename
else:
raise ValueError("A file name is required because no default file name was defined.")
my_ext = None
else:
filename, my_ext = os.path.splitext(filename)
if set_default: # replaces existing default file name
self._filename = filename
if my_ext and use_my_ext:
ext = my_ext
if ext is not None:
if ext.startswith(os.extsep):
ext = ext[1:] # strip a dot to avoid annoying mistakes
if ext != "":
filename = filename + os.extsep + ext
return filename
def check_file_exists(self, filename, resolve='exception', force=None):
"""If a file exists then continue with the action specified in ``resolve``.
``resolve`` must be one of
"ignore"
always return ``False``
"indicate"
return ``True`` if it exists
"warn"
indicate and issue a :exc:`UserWarning`
"exception"
raise :exc:`IOError` if it exists
Alternatively, set *force* for the following behaviour (which
ignores *resolve*):
``True``
same as *resolve* = "ignore" (will allow overwriting of files)
``False``
same as *resolve* = "exception" (will prevent overwriting of files)
``None``
ignored, do whatever *resolve* says
"""
def _warn(x):
msg = "File {0!r} already exists.".format(x)
logger.warn(msg)
warnings.warn(msg)
return True
def _raise(x):
msg = "File {0!r} already exists.".format(x)
logger.error(msg)
raise IOError(errno.EEXIST, x, msg)
solutions = {'ignore': lambda x: False, # file exists, but we pretend that it doesn't
'indicate': lambda x: True, # yes, file exists
'warn': _warn,
'warning': _warn,
'exception': _raise,
'raise': _raise,
}
if force is True:
resolve = 'ignore'
elif force is False:
resolve = 'exception'
if not os.path.isfile(filename):
return False
else:
return solutions[resolve](filename)
def infix_filename(self, name, default, infix, ext=None):
"""Unless *name* is provided, insert *infix* before the extension *ext* of *default*."""
if name is None:
p, oldext = os.path.splitext(default)
if ext is None:
ext = oldext
if ext.startswith(os.extsep):
ext = ext[1:]
name = self.filename(p+infix, ext=ext)
return name
def __repr__(self):
fmt = "{0!s}(filename=%r)".format(self.__class__.__name__)
try:
fn = self.filename()
except ValueError:
fn = None
return fmt % fn
def iterable(obj):
"""Returns ``True`` if *obj* can be iterated over and is *not* a string."""
if isinstance(obj, string_types):
return False # avoid iterating over characters of a string
if hasattr(obj, 'next'):
return True # any iterator will do
try:
len(obj) # anything else that might work
except TypeError:
return False
return True
def asiterable(obj):
"""Returns obj so that it can be iterated over; a string is *not* treated as iterable"""
if not iterable(obj):
obj = [obj]
return obj
def firstof(obj):
"""Returns the first entry of a sequence or the obj.
Treats strings as single objects.
"""
return asiterable(obj)[0]
# In utilities so that it can be safely used in tools, cbook, ...
def unlink_f(path):
"""Unlink path but do not complain if file does not exist."""
try:
os.unlink(path)
except OSError as err:
if err.errno != errno.ENOENT:
raise
def unlink_gmx(*args):
"""Unlink (remove) Gromacs file(s) and all corresponding backups."""
for path in args:
unlink_f(path)
unlink_gmx_backups(*args)
def unlink_gmx_backups(*args):
"""Unlink (rm) all backup files corresponding to the listed files."""
for path in args:
dirname, filename = os.path.split(path)
fbaks = glob.glob(os.path.join(dirname, '#'+filename+'.*#'))
for bak in fbaks:
unlink_f(bak)
def mkdir_p(path):
"""Create a directory *path* with subdirs but do not complain if it exists.
This is like GNU ``mkdir -p path``.
"""
try:
os.makedirs(path)
except OSError as err:
if err.errno != errno.EEXIST:
raise
def cat(f=None, o=None):
"""Concatenate files *f*=[...] and write to *o*"""
# need f, o to be compatible with trjcat and eneconv
if f is None or o is None:
return
target = o
infiles = asiterable(f)
logger.debug("cat {0!s} > {1!s} ".format(" ".join(infiles), target))
with open(target, 'w') as out:
rc = subprocess.call(['cat'] + infiles, stdout=out)
if rc != 0:
msg = "failed with return code {0:d}: cat {1!r} > {2!r} ".format(rc, " ".join(infiles), target)
logger.exception(msg)
raise OSError(errno.EIO, msg, target)
# helpers for matplotlib
def activate_subplot(numPlot):
"""Make subplot *numPlot* active on the canvas.
Use this if a simple ``subplot(numRows, numCols, numPlot)``
overwrites the subplot instead of activating it.
"""
# see http://www.mail-archive.com/[email protected]/msg07156.html
from pylab import gcf, axes
numPlot -= 1 # index is 0-based, plots are 1-based
return axes(gcf().get_axes()[numPlot])
def remove_legend(ax=None):
"""Remove legend for axes or gca.
See http://osdir.com/ml/python.matplotlib.general/2005-07/msg00285.html
"""
from pylab import gca, draw
if ax is None:
ax = gca()
ax.legend_ = None
draw()
# time functions
class Timedelta(datetime.timedelta):
"""Extension of :class:`datetime.timedelta`.
Provides attributes ddays, dhours, dminutes, dseconds to measure
the delta in normal time units.
ashours gives the total time in fractional hours.
"""
@property
def dhours(self):
"""Hours component of the timedelta."""
return self.seconds // 3600
@property
def dminutes(self):
"""Minutes component of the timedelta."""
return self.seconds // 60 - 60*self.dhours
@property
def dseconds(self):
"""Seconds component of the timedelta."""
return self.seconds - 3600*self.dhours - 60*self.dminutes
@property
def ashours(self):
"""Timedelta in (fractional) hours."""
return 24*self.days + self.seconds / 3600.0
def strftime(self, fmt="%d:%H:%M:%S"):
"""Primitive string formatter.
The only directives understood are the following:
============ ==========================
Directive meaning
============ ==========================
%d day as integer
%H hour [00-23]
%h hours including days
%M minute as integer [00-59]
%S second as integer [00-59]
============ ==========================
"""
substitutions = {
"%d": str(self.days),
"%H": "{0:02d}".format(self.dhours),
"%h": str(24*self.days + self.dhours),
"%M": "{0:02d}".format(self.dminutes),
"%S": "{0:02d}".format(self.dseconds),
}
s = fmt
for search, replacement in substitutions.items():
s = s.replace(search, replacement)
return s
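# Illustrative use: Timedelta(days=1, hours=3, minutes=20).strftime() gives "1:03:20:00",
# and Timedelta(hours=27).ashours gives 27.0.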
NUMBERED_PDB = re.compile(r"(?P<PREFIX>.*\D)(?P<NUMBER>\d+)\.(?P<SUFFIX>pdb)")
def number_pdbs(*args, **kwargs):
"""Rename pdbs x1.pdb ... x345.pdb --> x0001.pdb ... x0345.pdb
:Arguments:
- *args*: filenames or glob patterns (such as "pdb/md*.pdb")
- *format*: format string including keyword *num* ["%(num)04d"]
"""
format = kwargs.pop('format', "%(num)04d")
name_format = "%(prefix)s" + format +".%(suffix)s"
filenames = []
map(filenames.append, map(glob.glob, args)) # concatenate all filename lists
filenames = filenames[0] # ... ugly
for f in filenames:
m = NUMBERED_PDB.search(f)
if m is None:
continue
num = int(m.group('NUMBER'))
prefix = m.group('PREFIX')
suffix = m.group('SUFFIX')
newname = name_format % vars()
logger.info("Renaming {f!r} --> {newname!r}".format(**vars()))
try:
os.rename(f, newname)
except OSError:
logger.exception("renaming failed")
|
PicoCentauri/GromacsWrapper
|
gromacs/utilities.py
|
Python
|
gpl-3.0
| 22,926
|
[
"Gromacs"
] |
3be933cecee02ebc870dfb2b936305182909ae710d1395ef3c15c8f50fd67f4e
|
"""mBuild conversion utilities."""
import numpy as np
def RB_to_OPLS(c0, c1, c2, c3, c4, c5):
"""Convert Ryckaert-Bellemans type dihedrals to OPLS type.
Parameters
----------
c0, c1, c2, c3, c4, c5 : Ryckaert-Belleman coefficients (in kcal/mol)
Returns
-------
opls_coeffs : np.array, shape=(4,)
Array containing the OPLS dihedrals coeffs f1, f2, f3, and f4
(in kcal/mol)
"""
f1 = (-1.5 * c3) - (2 * c1)
f2 = c0 + c1 + c3
f3 = -0.5 * c3
f4 = -0.25 * c4
return np.array([f1, f2, f3, f4])
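# Illustrative use (made-up Ryckaert-Bellemans coefficients, in kcal/mol):
#   f1, f2, f3, f4 = RB_to_OPLS(0.0, 1.0, -0.5, 0.25, 0.0, 0.0)
# Note that c2 and c5 do not enter the returned OPLS coefficients in this implementation.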
def RB_to_CHARMM(c0, c1, c2, c3, c4, c5):
r"""Convert Ryckaert-Bellemans (RB) type dihedrals to CHARMM type.
.. math::
RB_torsions &= c0 + c1*cos(psi) + c2*cos(psi)^2 + c3*cos(psi)^3 + \\
&= c4*cos(psi)^4 + c5*cos(5*psi)^5
where :math:`psi = t - pi = t - 180 degrees`
.. math::
CHARMM_torsions &= K0 * (1 + cos(n0*t - d0)) + \\
&= K1 * (1 + cos(n1*t - d1)) + \\
&= K2 * (1 + cos(n2*t - d2)) + \\
&= K3 * (1 + cos(n3*t - d3)) + \\
&= K4 * (1 + cos(n4*t - d4)) + \\
&= K5 * (1 + cos(n5*t - d5))
CHARMM_torsions &= K0 +
&= K1 * (1 + cos(n1*t - d1)) + \\
&= K2 * (1 + cos(n2*t - d2)) + \\
&= K3 * (1 + cos(n3*t - d3)) + \\
&= K4 * (1 + cos(n4*t - d4)) + \\
&= K5 * (1 + cos(n5*t - d5))
Parameters
----------
c0, c1, c2, c3, c4, c5 : Ryckaert-Belleman coefficients (in kcal/mol)
n0 = 0
n1 = 1
n2 = 2
n3 = 3
n4 = 4
n5 = 5
d0 = 90
d1 = 180
d2 = 0
d3 = 180
d4 = 0
d5 = 180
Returns
-------
CHARMM_dihedral coeffs : np.matrix, shape=(6,3)
Array containing the CHARMM dihedral coeffs (in kcal/mol):
[[K0, n0, d0],
[K1, n1, d1],
[K2, n2, d2],
[K3, n3, d3],
[K4, n4, d4],
[K5, n5, d5]]
"""
# see below or the long version is,
# K0 = (c0 + c2 / 2 + 3 / 8 * c4) - K1 - K2 - K3 - K4 - K5
K0 = c0 - c1 - c3 - (c4 / 4) - c5
K1 = c1 + (3 / 4) * c3 + (5 / 8) * c5
K2 = (1 / 2) * c2 + (1 / 2) * c4
K3 = (1 / 4) * c3 + (5 / 16) * c5
K4 = (1 / 8) * c4
K5 = (1 / 16) * c5
n0 = 0
n1 = 1
n2 = 2
n3 = 3
n4 = 4
n5 = 5
d0 = 90
d1 = 180
d2 = 0
d3 = 180
d4 = 0
d5 = 180
return np.array(
[
[K0, n0, d0],
[K1, n1, d1],
[K2, n2, d2],
[K3, n3, d3],
[K4, n4, d4],
[K5, n5, d5],
]
)
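# Worked example (sketch, same arbitrary RB coefficients as in the RB_to_OPLS
# example above); only the first column (the K values) is shown:
#   >>> RB_to_CHARMM(1.0, 2.0, 0.0, 4.0, 8.0, 0.0)[:, 0]
#   array([-7.,  5.,  4.,  1.,  1.,  0.])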
def base10_to_base62_alph_num(base10_no):
"""Convert base-10 integer to base-62 alphanumeric system.
This function provides a utility to write pdb/psf files such that it can
add many more than 9999 atoms and 999 residues.
Parameters
----------
base10_no: int
The integer to convert to base-62 alphanumeric system
Returns
-------
str
The converted base-62 system string
See Also
--------
mbuild.conversion._to_base: Helper function to perform a base-n conversion
"""
return _to_base(base10_no, base=62)
def base10_to_base52_alph(base10_no):
"""Convert base-10 integer to base-52 alphabetic system.
This function provides a utility to write pdb/psf files such that it can
add more atom types within the 3- or 4-character fields of pdb and psf files.
Parameters
----------
base10_no: int
The integer to convert to base-52 alphabetic system
Returns
-------
str
The converted base-52 system string
See Also
--------
mbuild.conversion._to_base: Helper function to perform a base-n conversion
"""
return _to_base(number=base10_no, base=52)
def base10_to_base26_alph(base10_no):
"""Convert base-10 integer to base-26 alphabetic system.
This function provides a utility to write pdb/psf files such that it can
add many more than 9999 atoms and 999 residues.
Parameters
----------
base10_no: int
The integer to convert to base-26 alphabetic system
Returns
-------
str
The converted base-26 system string
See Also
--------
mbuild.conversion._to_base: Helper function to perform a base-n conversion
"""
return _to_base(base10_no, base=26)
def base10_to_base16_alph_num(base10_no):
"""Convert base-10 integer to base-16 hexadecimal system.
This function provides a utility to write pdb/psf files such that it can
add many more than 9999 atoms and 999 residues.
Parameters
----------
base10_no: int
The integer to convert to base-16 hexadecimal system
Returns
-------
str
The converted base-16 system string
See Also
--------
mbuild.conversion._to_base: Helper function to perform a base-n conversion
"""
return hex(int(base10_no))[2:]
# Helpers to convert base
def _to_base(number, base=62):
"""Convert a base-10 number into base-n alpha-num."""
start_values = {62: "0", 52: "A", 26: "A"}
if base not in start_values:
raise ValueError(
f"Base-{base} system is not supported. Supported bases are: "
f"{list(start_values.keys())}"
)
num = 1
number = int(number)
remainder = _digit_to_alpha_num((number % base), base)
base_n_values = str(remainder)
power = 1
while num != 0:
num = int(number / base ** power)
if num == base:
base_n_values = start_values[base] + base_n_values
elif num != 0 and num > base:
base_n_values = (
str(_digit_to_alpha_num(int(num % base), base)) + base_n_values
)
elif (num != 0) and (num < base):
base_n_values = (
str(_digit_to_alpha_num(int(num), base)) + base_n_values
)
power += 1
return base_n_values
def _digit_to_alpha_num(digit, base=52):
"""Convert digit to base-n."""
base_values = {
26: {j: chr(j + 65) for j in range(0, 26)},
52: {j: chr(j + 65) if j < 26 else chr(j + 71) for j in range(0, 52)},
62: {j: chr(j + 55) if j < 36 else chr(j + 61) for j in range(10, 62)},
}
if base not in base_values:
raise ValueError(
f"Base-{base} system is not supported. Supported bases are: "
f"{list(base_values.keys())}"
)
return base_values[base].get(digit, digit)
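# Worked examples (sketch, following the digit mappings above):
#   >>> base10_to_base62_alph_num(61), base10_to_base62_alph_num(62)
#   ('z', '10')
#   >>> base10_to_base52_alph(0), base10_to_base52_alph(26)
#   ('A', 'a')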
|
iModels/mbuild
|
mbuild/utils/conversion.py
|
Python
|
mit
| 6,621
|
[
"CHARMM"
] |
0635a4694bd9189a9ff29cd9ab7ced13a6b38490e51d3376ef662b67cbcf684c
|
# -*- coding: utf-8 -*-
# This script creates a transparent red dice to further explore the capabilities
# of Mayavi.
#
# To run (in a Python shell):
# run red_dice.py
#
# Created January 2015 by F.P.A.Vogt for the ANITA astroinformatics summer
# school 2015.
# Published as supplementary material in Vogt, Owen et al., ApJ (2015).
#
# Questions, comments : [email protected]
#
# If you find this code useful for your research, please cite the following
# article accordingly:
#
# Vogt, Owen et al., Advanced Data Visualization in Astrophysics:
# the X3D Pathway, ApJ (2015).
#
# Copyright (C) 2015 Frédéric P.A. Vogt, Chris I. Owen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------------------------
# Import the required packages
from mayavi import mlab
# Define the dice elements
xs = [0]
ys = [0]
zs = [0]
px = [0,
-0.25,-0.25,-0.25,0.25, 0.25,0.25,
-0.5, -0.5,-0.5, -0.5,-0.5,
0.5, 0.5,
0,-0.25,0.25,
-0.25, -0.25, 0.25, 0.25]
py = [0,
-0.25, 0, 0.25,-0.25, 0, 0.25,
0,-0.25,0.25, -0.25, 0.25,
-0.25,0.25,
-0.5, -0.5, -0.5,
0.5, 0.5, 0.5, 0.5]
pz = [-0.5,
0.5,0.5, 0.5, 0.5, 0.5,0.5,
0, -0.25, -0.25, 0.25, 0.25,
0.25, -0.25,
0,-0.25, 0.25,
-0.25,0.25, -0.25, 0.25]
pc = [0,
6,6,6,6,6,6,
5,5,5,5,5,
2,2,
3,3,3,
4,4,4,4,]
# Create a mayavi window
mlab.close(2)
mlab.figure(2,size=(500,500))
# Add some inner spheres with transparency and the cube
mlab.points3d(xs,ys,zs, scale_factor=0.25,color=(1,0.5,0), mode= 'sphere',
opacity=1)
mlab.points3d(xs,ys,zs, scale_factor=0.5,color=(1,1,1), mode= 'sphere',
opacity=0.5)
mlab.points3d(xs,ys,zs, scale_factor=1,scale_mode='none', color=(0.7,0,0),
mode='cube', opacity=0.5)
# A dark outline for the look
mlab.outline(color=(0,0,0),line_width = 2.0)
# The different cube faces this time with some colors
mlab.points3d(px,py,pz, pc, scale_factor=0.2, scale_mode='none',
colormap="bone",mode='sphere')
# And the associated colorbar
mlab.colorbar(orientation="vertical",nb_labels=7)
# Finally add some text.
# This can be done via either mlab.text() or mlab.text3d(). We prefer the former
# function, as it results in a "text" instance in the associated X3D file,
# which allows, e.g., modifying the text itself at the X3D level. By comparison,
# mlab.text3d() creates a full 3-D structure which cannot be modified later on.
mlab.text(0,0,"This is a dice",z=1)
# Export the model to X3D and WRL
mlab.savefig('./red_dice.x3d')
mlab.savefig('./red_dice.png')
mlab.show()
|
fpavogt/x3d-pathway
|
fits_to_x3d/red_dice/red_dice.py
|
Python
|
gpl-3.0
| 3,293
|
[
"Mayavi"
] |
d7d01dfc8d773536cca7bd5fd2724fc7d77f9b2abc27b77d9ca818ab58cbfb3b
|
"""
The ``sdm`` module contains functions to fit single diode models.
Function names should follow the pattern "fit_" + name of model + "_" +
fitting method.
"""
import numpy as np
import scipy.constants
from scipy import optimize
from scipy.special import lambertw
from scipy.misc import derivative
from pvlib.pvsystem import calcparams_pvsyst, singlediode, v_from_i
from pvlib.singlediode import bishop88_mpp
from pvlib.ivtools.utils import rectify_iv_curve, _numdiff
from pvlib.ivtools.sde import _fit_sandia_cocontent
def fit_cec_sam(celltype, v_mp, i_mp, v_oc, i_sc, alpha_sc, beta_voc,
gamma_pmp, cells_in_series, temp_ref=25):
"""
Estimates parameters for the CEC single diode model (SDM) using the SAM
SDK.
Parameters
----------
celltype : str
Value is one of 'monoSi', 'multiSi', 'polySi', 'cis', 'cigs', 'cdte',
'amorphous'
v_mp : float
Voltage at maximum power point [V]
i_mp : float
Current at maximum power point [A]
v_oc : float
Open circuit voltage [V]
i_sc : float
Short circuit current [A]
alpha_sc : float
Temperature coefficient of short circuit current [A/C]
beta_voc : float
Temperature coefficient of open circuit voltage [V/C]
gamma_pmp : float
Temperature coefficient of power at the maximum power point [%/C]
cells_in_series : int
Number of cells in series
temp_ref : float, default 25
Reference temperature condition [C]
Returns
-------
I_L_ref : float
The light-generated current (or photocurrent) at reference
conditions [A]
I_o_ref : float
The dark or diode reverse saturation current at reference
conditions [A]
R_s : float
The series resistance at reference conditions, in ohms.
R_sh_ref : float
The shunt resistance at reference conditions, in ohms.
a_ref : float
The product of the usual diode ideality factor ``n`` (unitless),
number of cells in series ``Ns``, and cell thermal voltage at
reference conditions [V]
Adjust : float
The adjustment to the temperature coefficient for short circuit
current, in percent.
Raises
------
ImportError if NREL-PySAM is not installed.
RuntimeError if parameter extraction is not successful.
Notes
-----
The CEC model and estimation method are described in [1]_.
Inputs ``v_mp``, ``i_mp``, ``v_oc`` and ``i_sc`` are assumed to be from a
single IV curve at constant irradiance and cell temperature. Irradiance is
not explicitly used by the fitting procedure. The irradiance level at which
the input IV curve is determined and the specified cell temperature
``temp_ref`` are the reference conditions for the output parameters
``I_L_ref``, ``I_o_ref``, ``R_s``, ``R_sh_ref``, ``a_ref`` and ``Adjust``.
References
----------
.. [1] A. Dobos, "An Improved Coefficient Calculator for the California
Energy Commission 6 Parameter Photovoltaic Module Model", Journal of
Solar Energy Engineering, vol 134, 2012.
"""
try:
from PySAM import PySSC
except ImportError:
raise ImportError("Requires NREL's PySAM package at "
"https://pypi.org/project/NREL-PySAM/.")
datadict = {'tech_model': '6parsolve', 'financial_model': None,
'celltype': celltype, 'Vmp': v_mp,
'Imp': i_mp, 'Voc': v_oc, 'Isc': i_sc, 'alpha_isc': alpha_sc,
'beta_voc': beta_voc, 'gamma_pmp': gamma_pmp,
'Nser': cells_in_series, 'Tref': temp_ref}
result = PySSC.ssc_sim_from_dict(datadict)
if result['cmod_success'] == 1:
return tuple([result[k] for k in ['Il', 'Io', 'Rs', 'Rsh', 'a',
'Adj']])
else:
raise RuntimeError('Parameter estimation failed')
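# Illustrative call (sketch; the datasheet numbers below are made up and the
# optional NREL-PySAM dependency must be installed):
#   >>> I_L_ref, I_o_ref, R_s, R_sh_ref, a_ref, Adjust = fit_cec_sam(
#   ...     celltype='monoSi', v_mp=31.0, i_mp=8.7, v_oc=38.3, i_sc=9.4,
#   ...     alpha_sc=0.005, beta_voc=-0.11, gamma_pmp=-0.37,
#   ...     cells_in_series=60)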
def fit_desoto(v_mp, i_mp, v_oc, i_sc, alpha_sc, beta_voc, cells_in_series,
EgRef=1.121, dEgdT=-0.0002677, temp_ref=25, irrad_ref=1000,
root_kwargs={}):
"""
Calculates the parameters for the De Soto single diode model.
This procedure (described in [1]_) has the advantage of
using common specifications given by manufacturers in the
datasheets of PV modules.
The solution is found using the scipy.optimize.root() function,
with the corresponding default solver method 'hybr'.
No restriction is put on the fit variables, i.e. series
or shunt resistance could go negative. If that happens,
carefully check the inputs and their units; alpha_sc and beta_voc are
often given in %/K in manufacturers' datasheets but should be given
in A/K and V/K here.
The parameters returned by this function can be used by
:py:func:`pvlib.pvsystem.calcparams_desoto` to calculate the values at
different irradiance and cell temperature.
Parameters
----------
v_mp: float
Module voltage at the maximum-power point at reference conditions [V].
i_mp: float
Module current at the maximum-power point at reference conditions [A].
v_oc: float
Open-circuit voltage at reference conditions [V].
i_sc: float
Short-circuit current at reference conditions [A].
alpha_sc: float
The short-circuit current (i_sc) temperature coefficient of the
module [A/K].
beta_voc: float
The open-circuit voltage (v_oc) temperature coefficient of the
module [V/K].
cells_in_series: integer
Number of cells in the module.
EgRef: float, default 1.121 eV - value for silicon
Energy of bandgap of semi-conductor used [eV]
dEgdT: float, default -0.0002677 - value for silicon
Variation of bandgap according to temperature [eV/K]
temp_ref: float, default 25
Reference temperature condition [C]
irrad_ref: float, default 1000
Reference irradiance condition [W/m2]
root_kwargs: dictionary, default {}
Dictionary of arguments to pass onto scipy.optimize.root()
Returns
-------
dict with the following elements:
I_L_ref: float
Light-generated current at reference conditions [A]
I_o_ref: float
Diode saturation current at reference conditions [A]
R_s: float
Series resistance [ohm]
R_sh_ref: float
Shunt resistance at reference conditions [ohm].
a_ref: float
Modified ideality factor at reference conditions.
The product of the usual diode ideality factor (n, unitless),
number of cells in series (Ns), and cell thermal voltage at
specified effective irradiance and cell temperature.
alpha_sc: float
The short-circuit current (i_sc) temperature coefficient of the
module [A/K].
EgRef: float
Energy of bandgap of semi-conductor used [eV]
dEgdT: float
Variation of bandgap according to temperature [eV/K]
irrad_ref: float
Reference irradiance condition [W/m2]
temp_ref: float
Reference temperature condition [C]
scipy.optimize.OptimizeResult
Optimization result of scipy.optimize.root().
See scipy.optimize.OptimizeResult for more details.
References
----------
.. [1] W. De Soto et al., "Improvement and validation of a model for
photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
2006.
"""
# Constants
k = scipy.constants.value('Boltzmann constant in eV/K')
Tref = temp_ref + 273.15 # [K]
# initial guesses of variables for computing convergence:
# Values are taken from [2], p753
Rsh_0 = 100.0
a_0 = 1.5*k*Tref*cells_in_series
IL_0 = i_sc
Io_0 = i_sc * np.exp(-v_oc/a_0)
Rs_0 = (a_0*np.log1p((IL_0-i_mp)/Io_0) - v_mp)/i_mp
# params_i : initial values vector
params_i = np.array([IL_0, Io_0, Rs_0, Rsh_0, a_0])
# specs of module
specs = (i_sc, v_oc, i_mp, v_mp, beta_voc, alpha_sc, EgRef, dEgdT,
Tref, k)
# computing with system of equations described in [1]
optimize_result = optimize.root(_system_of_equations_desoto, x0=params_i,
args=(specs,), **root_kwargs)
if optimize_result.success:
sdm_params = optimize_result.x
else:
raise RuntimeError(
'Parameter estimation failed:\n' + optimize_result.message)
# results
return ({'I_L_ref': sdm_params[0],
'I_o_ref': sdm_params[1],
'R_s': sdm_params[2],
'R_sh_ref': sdm_params[3],
'a_ref': sdm_params[4],
'alpha_sc': alpha_sc,
'EgRef': EgRef,
'dEgdT': dEgdT,
'irrad_ref': irrad_ref,
'temp_ref': temp_ref},
optimize_result)
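# Illustrative call (sketch; the datasheet numbers below are made up):
#   >>> params, res = fit_desoto(v_mp=31.0, i_mp=8.7, v_oc=38.3, i_sc=9.4,
#   ...                          alpha_sc=0.005, beta_voc=-0.11,
#   ...                          cells_in_series=60)
#   >>> params['a_ref']  # modified ideality factor at reference conditions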
def _system_of_equations_desoto(params, specs):
"""Evaluates the systems of equations used to solve for the single
diode equation parameters. Function designed to be used by
scipy.optimize.root in fit_desoto.
Parameters
----------
params: ndarray
Array with parameters of the De Soto single diode model. Must be
given in the following order: IL, Io, Rs, Rsh, a
specs: tuple
Specifications of pv module given by manufacturer. Must be given
in the following order: Isc, Voc, Imp, Vmp, beta_oc, alpha_sc, EgRef,
dEgdT, Tref, k
Returns
-------
value of the system of equations to solve with scipy.optimize.root().
"""
# six input known variables
Isc, Voc, Imp, Vmp, beta_oc, alpha_sc, EgRef, dEgdT, Tref, k = specs
# five parameters vector to find
IL, Io, Rs, Rsh, a = params
# five equation vector
y = [0, 0, 0, 0, 0]
# 1st equation - short-circuit - eq(3) in [1]
y[0] = Isc - IL + Io * np.expm1(Isc * Rs / a) + Isc * Rs / Rsh
# 2nd equation - open-circuit Tref - eq(4) in [1]
y[1] = -IL + Io * np.expm1(Voc / a) + Voc / Rsh
# 3rd equation - Imp & Vmp - eq(5) in [1]
y[2] = Imp - IL + Io * np.expm1((Vmp + Imp * Rs) / a) \
+ (Vmp + Imp * Rs) / Rsh
# 4th equation - derivative of power at Pmp equals 0 - eq 23.2.6 in [2]
# caution: eq(6) in [1] has a sign error
y[3] = Imp \
- Vmp * ((Io / a) * np.exp((Vmp + Imp * Rs) / a) + 1.0 / Rsh) \
/ (1.0 + (Io * Rs / a) * np.exp((Vmp + Imp * Rs) / a) + Rs / Rsh)
# 5th equation - open-circuit T2 - eq (4) at temperature T2 in [1]
T2 = Tref + 2
Voc2 = (T2 - Tref) * beta_oc + Voc # eq (7) in [1]
a2 = a * T2 / Tref # eq (8) in [1]
IL2 = IL + alpha_sc * (T2 - Tref) # eq (11) in [1]
Eg2 = EgRef * (1 + dEgdT * (T2 - Tref)) # eq (10) in [1]
Io2 = Io * (T2 / Tref)**3 * np.exp(1 / k * (EgRef/Tref - Eg2/T2)) # eq (9)
y[4] = -IL2 + Io2 * np.expm1(Voc2 / a2) + Voc2 / Rsh # eq (4) at T2
return y
def fit_pvsyst_sandia(ivcurves, specs, const=None, maxiter=5, eps1=1.e-3):
"""
Estimate parameters for the PVsyst module performance model.
Parameters
----------
ivcurves : dict
i : array
One array element for each IV curve. The jth element is itself an
array of current for jth IV curve (same length as v[j]) [A]
v : array
One array element for each IV curve. The jth element is itself an
array of voltage for jth IV curve (same length as i[j]) [V]
ee : array
effective irradiance for each IV curve, i.e., POA broadband
irradiance adjusted by solar spectrum modifier [W / m^2]
tc : array
cell temperature for each IV curve [C]
i_sc : array
short circuit current for each IV curve [A]
v_oc : array
open circuit voltage for each IV curve [V]
i_mp : array
current at max power point for each IV curve [A]
v_mp : array
voltage at max power point for each IV curve [V]
specs : dict
cells_in_series : int
number of cells in series
alpha_sc : float
temperature coefficient of isc [A/C]
const : dict
E0 : float
effective irradiance at STC, default 1000 [W/m^2]
T0 : float
cell temperature at STC, default 25 [C]
k : float
1.38066E-23 J/K (Boltzmann's constant)
q : float
1.60218E-19 Coulomb (elementary charge)
maxiter : int, default 5
input that sets the maximum number of iterations for the parameter
updating part of the algorithm.
eps1: float, default 1e-3
Tolerance for the IV curve fitting. The parameter updating stops when
absolute values of the percent change in mean, max and standard
deviation of Imp, Vmp and Pmp between iterations are all less than
eps1, or when the number of iterations exceeds maxiter.
Returns
-------
dict
I_L_ref : float
light current at STC [A]
I_o_ref : float
dark current at STC [A]
EgRef : float
effective band gap at STC [eV]
R_s : float
series resistance at STC [ohm]
R_sh_ref : float
shunt resistance at STC [ohm]
R_sh_0 : float
shunt resistance at zero irradiance [ohm]
R_sh_exp : float
exponential factor defining decrease in shunt resistance with
increasing effective irradiance
gamma_ref : float
diode (ideality) factor at STC [unitless]
mu_gamma : float
temperature coefficient for diode (ideality) factor [1/K]
cells_in_series : int
number of cells in series
iph : array
light current for each IV curve [A]
io : array
dark current for each IV curve [A]
rs : array
series resistance for each IV curve [ohm]
rsh : array
shunt resistance for each IV curve [ohm]
u : array
boolean for each IV curve indicating that the parameter values
are deemed reasonable by the private function ``_filter_params``
Notes
-----
The PVsyst module performance model is described in [1]_, [2]_, and [3]_.
The fitting method is documented in [4]_, [5]_, and [6]_.
Ported from PVLib Matlab [7]_.
References
----------
.. [1] K. Sauer, T. Roessler, C. W. Hansen, Modeling the Irradiance and
Temperature Dependence of Photovoltaic Modules in PVsyst, IEEE Journal
of Photovoltaics v5(1), January 2015.
.. [2] A. Mermoud, PV Modules modeling, Presentation at the 2nd PV
Performance Modeling Workshop, Santa Clara, CA, May 2013
.. [3] A. Mermoud, T. Lejeuene, Performance Assessment of a Simulation
Model for PV modules of any available technology, 25th European
Photovoltaic Solar Energy Conference, Valencia, Spain, Sept. 2010
.. [4] C. Hansen, Estimating Parameters for the PVsyst Version 6
Photovoltaic Module Performance Model, Sandia National Laboratories
Report SAND2015-8598
.. [5] C. Hansen, Parameter Estimation for Single Diode Models of
Photovoltaic Modules, Sandia National Laboratories Report SAND2015-2065
.. [6] C. Hansen, Estimation of Parameters for Single Diode Models using
Measured IV Curves, Proc. of the 39th IEEE PVSC, June 2013.
.. [7] PVLib MATLAB https://github.com/sandialabs/MATLAB_PV_LIB
"""
if const is None:
const = {'E0': 1000.0, 'T0': 25.0, 'k': 1.38066e-23, 'q': 1.60218e-19}
ee = ivcurves['ee']
tc = ivcurves['tc']
tck = tc + 273.15
isc = ivcurves['i_sc']
voc = ivcurves['v_oc']
imp = ivcurves['i_mp']
vmp = ivcurves['v_mp']
# Cell Thermal Voltage
vth = const['k'] / const['q'] * tck
n = len(ivcurves['v_oc'])
# Initial estimate of Rsh used to obtain the diode factor gamma0 and diode
# temperature coefficient mu_gamma. Rsh is estimated using the co-content
# integral method.
rsh = np.ones(n)
for j in range(n):
voltage, current = rectify_iv_curve(ivcurves['v'][j], ivcurves['i'][j])
# initial estimate of Rsh, from integral over voltage regression
# [5] Step 3a; [6] Step 3a
_, _, _, rsh[j], _ = _fit_sandia_cocontent(
voltage, current, vth[j] * specs['cells_in_series'])
gamma_ref, mu_gamma = _fit_pvsyst_sandia_gamma(voc, isc, rsh, vth, tck,
specs, const)
badgamma = np.isnan(gamma_ref) or np.isnan(mu_gamma) \
or not np.isreal(gamma_ref) or not np.isreal(mu_gamma)
if badgamma:
raise RuntimeError(
"Failed to estimate the diode (ideality) factor parameter;"
" aborting parameter estimation.")
gamma = gamma_ref + mu_gamma * (tc - const['T0'])
nnsvth = gamma * (vth * specs['cells_in_series'])
# For each IV curve, sequentially determine initial values for Io, Rs,
# and Iph [5] Step 3a; [6] Step 3
iph, io, rs, u = _initial_iv_params(ivcurves, ee, voc, isc, rsh,
nnsvth)
# Update values for each IV curve to converge at vmp, imp, voc and isc
iph, io, rs, rsh, u = _update_iv_params(voc, isc, vmp, imp, ee,
iph, io, rs, rsh, nnsvth, u,
maxiter, eps1)
# get single diode models from converged values for each IV curve
pvsyst = _extract_sdm_params(ee, tc, iph, io, rs, rsh, gamma, u,
specs, const, model='pvsyst')
# Add parameters estimated in this function
pvsyst['gamma_ref'] = gamma_ref
pvsyst['mu_gamma'] = mu_gamma
pvsyst['cells_in_series'] = specs['cells_in_series']
return pvsyst
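# Illustrative input layout for fit_pvsyst_sandia (sketch; array contents are
# placeholders): each ivcurves entry holds one value or array per measured IV
# curve, e.g. for two curves
#   ivcurves = {'v': [v1, v2], 'i': [i1, i2],
#               'ee': np.array([1000., 800.]), 'tc': np.array([25., 40.]),
#               'i_sc': np.array([9.4, 7.5]), 'v_oc': np.array([38.3, 37.9]),
#               'i_mp': np.array([8.7, 7.0]), 'v_mp': np.array([31.0, 30.6])}
#   specs = {'cells_in_series': 60, 'alpha_sc': 0.005}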
def fit_desoto_sandia(ivcurves, specs, const=None, maxiter=5, eps1=1.e-3):
"""
Estimate parameters for the De Soto module performance model.
Parameters
----------
ivcurves : dict
i : array
One array element for each IV curve. The jth element is itself an
array of current for jth IV curve (same length as v[j]) [A]
v : array
One array element for each IV curve. The jth element is itself an
array of voltage for jth IV curve (same length as i[j]) [V]
ee : array
effective irradiance for each IV curve, i.e., POA broadband
irradiance adjusted by solar spectrum modifier [W / m^2]
tc : array
cell temperature for each IV curve [C]
i_sc : array
short circuit current for each IV curve [A]
v_oc : array
open circuit voltage for each IV curve [V]
i_mp : array
current at max power point for each IV curve [A]
v_mp : array
voltage at max power point for each IV curve [V]
specs : dict
cells_in_series : int
number of cells in series
alpha_sc : float
temperature coefficient of Isc [A/C]
beta_voc : float
temperature coefficient of Voc [V/C]
const : dict
E0 : float
effective irradiance at STC, default 1000 [W/m^2]
T0 : float
cell temperature at STC, default 25 [C]
k : float
1.38066E-23 J/K (Boltzmann's constant)
q : float
1.60218E-19 Coulomb (elementary charge)
maxiter : int, default 5
input that sets the maximum number of iterations for the parameter
updating part of the algorithm.
eps1: float, default 1e-3
Tolerance for the IV curve fitting. The parameter updating stops when
absolute values of the percent change in mean, max and standard
deviation of Imp, Vmp and Pmp between iterations are all less than
eps1, or when the number of iterations exceeds maxiter.
Returns
-------
dict
I_L_ref : float
light current at STC [A]
I_o_ref : float
dark current at STC [A]
EgRef : float
effective band gap at STC [eV]
R_s : float
series resistance at STC [ohm]
R_sh_ref : float
shunt resistance at STC [ohm]
cells_in_series : int
number of cells in series
iph : array
light current for each IV curve [A]
io : array
dark current for each IV curve [A]
rs : array
series resistance for each IV curve [ohm]
rsh : array
shunt resistance for each IV curve [ohm]
u : array
boolean for each IV curve indicating that the parameter values
are deemed reasonable by the private function ``_filter_params``
Notes
-----
The De Soto module performance model is described in [1]_. The fitting
method is documented in [2]_, [3]_. Ported from PVLib Matlab [4]_.
References
----------
.. [1] W. De Soto et al., "Improvement and validation of a model for
photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
2006.
.. [2] C. Hansen, Parameter Estimation for Single Diode Models of
Photovoltaic Modules, Sandia National Laboratories Report SAND2015-2065
.. [3] C. Hansen, Estimation of Parameters for Single Diode Models using
Measured IV Curves, Proc. of the 39th IEEE PVSC, June 2013.
.. [4] PVLib MATLAB https://github.com/sandialabs/MATLAB_PV_LIB
"""
if const is None:
const = {'E0': 1000.0, 'T0': 25.0, 'k': 1.38066e-23, 'q': 1.60218e-19}
ee = ivcurves['ee']
tc = ivcurves['tc']
tck = tc + 273.15
isc = ivcurves['i_sc']
voc = ivcurves['v_oc']
imp = ivcurves['i_mp']
vmp = ivcurves['v_mp']
# Cell Thermal Voltage
vth = const['k'] / const['q'] * tck
n = len(voc)
# Initial estimate of Rsh used to obtain the diode factor gamma0 and diode
# temperature coefficient mu_gamma. Rsh is estimated using the co-content
# integral method.
rsh = np.ones(n)
for j in range(n):
voltage, current = rectify_iv_curve(ivcurves['v'][j], ivcurves['i'][j])
# initial estimate of Rsh, from integral over voltage regression
# [5] Step 3a; [6] Step 3a
_, _, _, rsh[j], _ = _fit_sandia_cocontent(
voltage, current, vth[j] * specs['cells_in_series'])
n0 = _fit_desoto_sandia_diode(ee, voc, vth, tc, specs, const)
bad_n = np.isnan(n0) or not np.isreal(n0)
if bad_n:
raise RuntimeError(
"Failed to estimate the diode (ideality) factor parameter;"
" aborting parameter estimation.")
nnsvth = n0 * specs['cells_in_series'] * vth
# For each IV curve, sequentially determine initial values for Io, Rs,
# and Iph [5] Step 3a; [6] Step 3
iph, io, rs, u = _initial_iv_params(ivcurves, ee, voc, isc, rsh,
nnsvth)
# Update values for each IV curve to converge at vmp, imp, voc and isc
iph, io, rs, rsh, u = _update_iv_params(voc, isc, vmp, imp, ee,
iph, io, rs, rsh, nnsvth, u,
maxiter, eps1)
# get single diode models from converged values for each IV curve
desoto = _extract_sdm_params(ee, tc, iph, io, rs, rsh, n0, u,
specs, const, model='desoto')
# Add parameters estimated in this function
desoto['a_ref'] = n0 * specs['cells_in_series'] * const['k'] / \
const['q'] * (const['T0'] + 273.15)
desoto['cells_in_series'] = specs['cells_in_series']
return desoto
def _fit_pvsyst_sandia_gamma(voc, isc, rsh, vth, tck, specs, const):
# Estimate the diode factor gamma from Isc-Voc data. Method incorporates
# temperature dependence by means of the equation for Io
y = np.log(isc - voc / rsh) - 3. * np.log(tck / (const['T0'] + 273.15))
x1 = const['q'] / const['k'] * (1. / (const['T0'] + 273.15) - 1. / tck)
x2 = voc / (vth * specs['cells_in_series'])
uu = np.logical_or.reduce((np.isnan(y), np.isnan(x1), np.isnan(x2)))
x = np.vstack((np.ones(len(x1[~uu])), x1[~uu], -x1[~uu] *
(tck[~uu] - (const['T0'] + 273.15)), x2[~uu],
-x2[~uu] * (tck[~uu] - (const['T0'] + 273.15)))).T
alpha = np.linalg.lstsq(x, y[~uu], rcond=None)[0]
gamma_ref = 1. / alpha[3]
mu_gamma = alpha[4] / alpha[3] ** 2
return gamma_ref, mu_gamma
def _fit_desoto_sandia_diode(ee, voc, vth, tc, specs, const):
# estimates the diode factor for the De Soto model.
# Helper function for fit_desoto_sandia
try:
import statsmodels.api as sm
except ImportError:
raise ImportError(
'Parameter extraction using Sandia method requires statsmodels')
x = specs['cells_in_series'] * vth * np.log(ee / const['E0'])
y = voc - specs['beta_voc'] * (tc - const['T0'])
new_x = sm.add_constant(x)
res = sm.RLM(y, new_x).fit()
return res.params[1]
def _initial_iv_params(ivcurves, ee, voc, isc, rsh, nnsvth):
# sets initial values for iph, io, rs and quality filter u.
# Helper function for fit_<model>_sandia.
n = len(ivcurves['v_oc'])
io = np.ones(n)
iph = np.ones(n)
rs = np.ones(n)
for j in range(n):
if rsh[j] > 0:
volt, curr = rectify_iv_curve(ivcurves['v'][j],
ivcurves['i'][j])
# Initial estimate of Io, evaluate the single diode model at
# voc and approximate Iph + Io = Isc [5] Step 3a; [6] Step 3b
io[j] = (isc[j] - voc[j] / rsh[j]) * np.exp(-voc[j] /
nnsvth[j])
# initial estimate of rs from dI/dV near Voc
# [5] Step 3a; [6] Step 3c
[didv, d2id2v] = _numdiff(volt, curr)
t3 = volt > .5 * voc[j]
t4 = volt < .9 * voc[j]
tmp = -rsh[j] * didv - 1.
with np.errstate(invalid="ignore"): # expect nan in didv
v = np.logical_and.reduce(np.array([t3, t4, ~np.isnan(tmp),
np.greater(tmp, 0)]))
if np.any(v):
vtrs = (nnsvth[j] / isc[j] * (
np.log(tmp[v] * nnsvth[j] / (rsh[j] * io[j]))
- volt[v] / nnsvth[j]))
rs[j] = np.mean(vtrs[vtrs > 0], axis=0)
else:
rs[j] = 0.
# Initial estimate of Iph, evaluate the single diode model at
# Isc [5] Step 3a; [6] Step 3d
iph[j] = isc[j] + io[j] * np.expm1(isc[j] / nnsvth[j]) \
+ isc[j] * rs[j] / rsh[j]
else:
io[j] = np.nan
rs[j] = np.nan
iph[j] = np.nan
# Filter IV curves for good initial values
# [5] Step 3b
u = _filter_params(ee, isc, io, rs, rsh)
# [5] Step 3c
# Refine Io to match Voc
io[u] = _update_io(voc[u], iph[u], io[u], rs[u], rsh[u], nnsvth[u])
# parameters [6], Step 3c
# Calculate Iph to be consistent with Isc and current values of other
iph = isc + io * np.expm1(rs * isc / nnsvth) + isc * rs / rsh
return iph, io, rs, u
def _update_iv_params(voc, isc, vmp, imp, ee, iph, io, rs, rsh, nnsvth, u,
maxiter, eps1):
# Refine Rsh, Rs, Io and Iph in that order.
# Helper function for fit_<model>_sandia.
counter = 1. # counter variable for parameter updating while loop,
# counts iterations
prevconvergeparams = {}
prevconvergeparams['state'] = 0.0
not_converged = np.array([True])
while not_converged.any() and counter <= maxiter:
# update rsh to match max power point using a fixed point method.
rsh[u] = _update_rsh_fixed_pt(vmp[u], imp[u], iph[u], io[u], rs[u],
rsh[u], nnsvth[u])
# Calculate Rs to be consistent with Rsh and maximum power point
_, phi = _calc_theta_phi_exact(vmp[u], imp[u], iph[u], io[u],
rs[u], rsh[u], nnsvth[u])
rs[u] = (iph[u] + io[u] - imp[u]) * rsh[u] / imp[u] - \
nnsvth[u] * phi / imp[u] - vmp[u] / imp[u]
# Update filter for good parameters
u = _filter_params(ee, isc, io, rs, rsh)
# Update value for io to match voc
io[u] = _update_io(voc[u], iph[u], io[u], rs[u], rsh[u], nnsvth[u])
# Calculate Iph to be consistent with Isc and other parameters
iph = isc + io * np.expm1(rs * isc / nnsvth) + isc * rs / rsh
# update filter for good parameters
u = _filter_params(ee, isc, io, rs, rsh)
# compute the IV curve from the current parameter values
result = singlediode(iph[u], io[u], rs[u], rsh[u], nnsvth[u])
# check convergence criteria
# [5] Step 3d
convergeparams = _check_converge(
prevconvergeparams, result, vmp[u], imp[u], counter)
prevconvergeparams = convergeparams
counter += 1.
t5 = prevconvergeparams['vmperrmeanchange'] >= eps1
t6 = prevconvergeparams['imperrmeanchange'] >= eps1
t7 = prevconvergeparams['pmperrmeanchange'] >= eps1
t8 = prevconvergeparams['vmperrstdchange'] >= eps1
t9 = prevconvergeparams['imperrstdchange'] >= eps1
t10 = prevconvergeparams['pmperrstdchange'] >= eps1
t11 = prevconvergeparams['vmperrabsmaxchange'] >= eps1
t12 = prevconvergeparams['imperrabsmaxchange'] >= eps1
t13 = prevconvergeparams['pmperrabsmaxchange'] >= eps1
not_converged = np.logical_or.reduce(np.array([t5, t6, t7, t8, t9,
t10, t11, t12, t13]))
return iph, io, rs, rsh, u
def _extract_sdm_params(ee, tc, iph, io, rs, rsh, n, u, specs, const,
model):
# Get single diode model parameters from five parameters iph, io, rs, rsh
# and n vs. effective irradiance and temperature
try:
import statsmodels.api as sm
except ImportError:
raise ImportError(
'Parameter extraction using Sandia method requires statsmodels')
tck = tc + 273.15
tok = const['T0'] + 273.15  # convert T0 to K
params = {}
if model == 'pvsyst':
# Estimate I_o_ref and EgRef
x_for_io = const['q'] / const['k'] * (1. / tok - 1. / tck[u]) / n[u]
# Estimate R_sh_0, R_sh_ref and R_sh_exp
# Initial guesses. R_sh_0 is value at ee=0.
nans = np.isnan(rsh)
if any(ee < 400):
grsh0 = np.mean(rsh[np.logical_and(~nans, ee < 400)])
else:
grsh0 = np.max(rsh)
# Rsh_ref is value at Ee = 1000
if any(ee > 400):
grshref = np.mean(rsh[np.logical_and(~nans, ee > 400)])
else:
grshref = np.min(rsh)
# PVsyst default for Rshexp is 5.5
R_sh_exp = 5.5
# Find parameters for Rsh equation
def fun_rsh(x, rshexp, ee, e0, rsh):
tf = np.log10(_rsh_pvsyst(x, R_sh_exp, ee, e0)) - np.log10(rsh)
return tf
x0 = np.array([grsh0, grshref])
beta = optimize.least_squares(
fun_rsh, x0, args=(R_sh_exp, ee[u], const['E0'], rsh[u]),
bounds=np.array([[1., 1.], [1.e7, 1.e6]]), verbose=2)
# Extract PVsyst parameter values
R_sh_0 = beta.x[0]
R_sh_ref = beta.x[1]
# parameters unique to PVsyst
params['R_sh_0'] = R_sh_0
params['R_sh_exp'] = R_sh_exp
elif model == 'desoto':
dEgdT = 0.0002677
x_for_io = const['q'] / const['k'] * (
1. / tok - 1. / tck[u] + dEgdT * (tc[u] - const['T0']) / tck[u])
# Estimate R_sh_ref
nans = np.isnan(rsh)
x = const['E0'] / ee[np.logical_and.reduce((u, ee > 400, ~nans))]
y = rsh[np.logical_and.reduce((u, ee > 400, ~nans))]
new_x = sm.add_constant(x)
beta = sm.RLM(y, new_x).fit()
R_sh_ref = beta.params[1]
params['dEgdT'] = dEgdT
# Estimate I_o_ref and EgRef
y = np.log(io[u]) - 3. * np.log(tck[u] / tok)
new_x = sm.add_constant(x_for_io)
res = sm.RLM(y, new_x).fit()
beta = res.params
I_o_ref = np.exp(beta[0])
EgRef = beta[1]
# Estimate I_L_ref
x = tc[u] - const['T0']
y = iph[u] * (const['E0'] / ee[u])
# average over non-NaN values of Y and X
nans = np.isnan(y - specs['alpha_sc'] * x)
I_L_ref = np.mean(y[~nans] - specs['alpha_sc'] * x[~nans])
# Estimate R_s
nans = np.isnan(rs)
R_s = np.mean(rs[np.logical_and.reduce((u, ee > 400, ~nans))])
params['I_L_ref'] = I_L_ref
params['I_o_ref'] = I_o_ref
params['EgRef'] = EgRef
params['R_sh_ref'] = R_sh_ref
params['R_s'] = R_s
# save values for each IV curve
params['iph'] = iph
params['io'] = io
params['rsh'] = rsh
params['rs'] = rs
params['u'] = u
return params
def _update_io(voc, iph, io, rs, rsh, nnsvth):
"""
Adjusts Io to match Voc using other parameter values.
Helper function for fit_pvsyst_sandia, fit_desoto_sandia
Description
-----------
Io is updated iteratively 10 times or until successive
values are less than 0.000001 % different. The updating is similar to
Newton's method.
Parameters
----------
voc: a numpy array of length N of values for Voc (V)
iph: a numpy array of length N of values for light current IL (A)
io: a numpy array of length N of initial values for Io (A)
rs: a numpy array of length N of values for the series resistance (ohm)
rsh: a numpy array of length N of values for the shunt resistance (ohm)
nnsvth: a numpy array of length N of values for the diode factor x thermal
voltage for the module, equal to Ns (number of cells in series) x
Vth (thermal voltage per cell).
Returns
-------
new_io - a numpy array of length N of updated values for io
References
----------
.. [1] PVLib MATLAB https://github.com/sandialabs/MATLAB_PV_LIB
.. [2] C. Hansen, Parameter Estimation for Single Diode Models of
Photovoltaic Modules, Sandia National Laboratories Report SAND2015-2065
.. [3] C. Hansen, Estimation of Parameters for Single Diode Models using
Measured IV Curves, Proc. of the 39th IEEE PVSC, June 2013.
"""
eps = 1e-6
niter = 10
k = 1
maxerr = 1
tio = io # Current Estimate of Io
while maxerr > eps and k < niter:
# Predict Voc
pvoc = v_from_i(rsh, rs, nnsvth, 0., tio, iph)
# Difference in Voc
dvoc = pvoc - voc
# Update Io
with np.errstate(invalid="ignore", divide="ignore"):
new_io = tio * (1. + (2. * dvoc) / (2. * nnsvth - dvoc))
# Calculate Maximum Percent Difference
maxerr = np.max(np.abs(new_io - tio) / tio) * 100.
tio = new_io
k += 1.
return new_io
def _rsh_pvsyst(x, rshexp, g, go):
# computes rsh for PVsyst model where the parameters are in vector x
# x[0] = Rsh0
# x[1] = Rshref
rsho = x[0]
rshref = x[1]
rshb = np.maximum(
(rshref - rsho * np.exp(-rshexp)) / (1. - np.exp(-rshexp)), 0.)
rsh = rshb + (rsho - rshb) * np.exp(-rshexp * g / go)
return rsh
def _filter_params(ee, isc, io, rs, rsh):
# Function _filter_params identifies bad parameter sets. A bad set contains
# Nan, non-positive or imaginary values for parameters; Rs > Rsh; or data
# where effective irradiance Ee differs by more than 5% from a linear fit
# to Isc vs. Ee
badrsh = np.logical_or(rsh < 0., np.isnan(rsh))
negrs = rs < 0.
badrs = np.logical_or(rs > rsh, np.isnan(rs))
imagrs = ~(np.isreal(rs))
badio = np.logical_or(np.logical_or(~(np.isreal(rs)), io <= 0),
np.isnan(io))
goodr = np.logical_and(~badrsh, ~imagrs)
goodr = np.logical_and(goodr, ~negrs)
goodr = np.logical_and(goodr, ~badrs)
goodr = np.logical_and(goodr, ~badio)
matrix = np.vstack((ee / 1000., np.zeros(len(ee)))).T
eff = np.linalg.lstsq(matrix, isc, rcond=None)[0][0]
pisc = eff * ee / 1000
pisc_error = np.abs(pisc - isc) / isc
# check for departure from linear relation between Isc and Ee
badiph = pisc_error > .05
u = np.logical_and(goodr, ~badiph)
return u
def _check_converge(prevparams, result, vmp, imp, i):
"""
Function _check_converge computes convergence metrics for all IV curves.
Helper function for fit_pvsyst_sandia, fit_desoto_sandia
Parameters
----------
prevparams: Convergence Parameters from the previous Iteration (used to
determine Percent Change in values between iterations)
result: performance parameters of the (predicted) single diode fitting,
which includes Voc, Vmp, Imp, Pmp and Isc
vmp: measured values for each IV curve
imp: measured values for each IV curve
i: Index of current iteration in cec_parameter_estimation
Returns
-------
convergeparam: dict containing the following for Imp, Vmp and Pmp:
- maximum percent difference between measured and modeled values
- minimum percent difference between measured and modeled values
- maximum absolute percent difference between measured and modeled
values
- mean percent difference between measured and modeled values
- standard deviation of percent difference between measured and modeled
values
- absolute difference for previous and current values of maximum
absolute percent difference (measured vs. modeled)
- absolute difference for previous and current values of mean percent
difference (measured vs. modeled)
- absolute difference for previous and current values of standard
deviation of percent difference (measured vs. modeled)
"""
convergeparam = {}
imperror = (result['i_mp'] - imp) / imp * 100.
vmperror = (result['v_mp'] - vmp) / vmp * 100.
pmperror = (result['p_mp'] - (imp * vmp)) / (imp * vmp) * 100.
convergeparam['imperrmax'] = max(imperror) # max of the error in Imp
convergeparam['imperrmin'] = min(imperror) # min of the error in Imp
# max of the absolute error in Imp
convergeparam['imperrabsmax'] = max(abs(imperror))
# mean of the error in Imp
convergeparam['imperrmean'] = np.mean(imperror, axis=0)
# std of the error in Imp
convergeparam['imperrstd'] = np.std(imperror, axis=0, ddof=1)
convergeparam['vmperrmax'] = max(vmperror) # max of the error in Vmp
convergeparam['vmperrmin'] = min(vmperror) # min of the error in Vmp
# max of the absolute error in Vmp
convergeparam['vmperrabsmax'] = max(abs(vmperror))
# mean of the error in Vmp
convergeparam['vmperrmean'] = np.mean(vmperror, axis=0)
# std of the error in Vmp
convergeparam['vmperrstd'] = np.std(vmperror, axis=0, ddof=1)
convergeparam['pmperrmax'] = max(pmperror) # max of the error in Pmp
convergeparam['pmperrmin'] = min(pmperror) # min of the error in Pmp
# max of the abs err. in Pmp
convergeparam['pmperrabsmax'] = max(abs(pmperror))
# mean error in Pmp
convergeparam['pmperrmean'] = np.mean(pmperror, axis=0)
# std error Pmp
convergeparam['pmperrstd'] = np.std(pmperror, axis=0, ddof=1)
if prevparams['state'] != 0.0:
convergeparam['imperrstdchange'] = np.abs(
convergeparam['imperrstd'] / prevparams['imperrstd'] - 1.)
convergeparam['vmperrstdchange'] = np.abs(
convergeparam['vmperrstd'] / prevparams['vmperrstd'] - 1.)
convergeparam['pmperrstdchange'] = np.abs(
convergeparam['pmperrstd'] / prevparams['pmperrstd'] - 1.)
convergeparam['imperrmeanchange'] = np.abs(
convergeparam['imperrmean'] / prevparams['imperrmean'] - 1.)
convergeparam['vmperrmeanchange'] = np.abs(
convergeparam['vmperrmean'] / prevparams['vmperrmean'] - 1.)
convergeparam['pmperrmeanchange'] = np.abs(
convergeparam['pmperrmean'] / prevparams['pmperrmean'] - 1.)
convergeparam['imperrabsmaxchange'] = np.abs(
convergeparam['imperrabsmax'] / prevparams['imperrabsmax'] - 1.)
convergeparam['vmperrabsmaxchange'] = np.abs(
convergeparam['vmperrabsmax'] / prevparams['vmperrabsmax'] - 1.)
convergeparam['pmperrabsmaxchange'] = np.abs(
convergeparam['pmperrabsmax'] / prevparams['pmperrabsmax'] - 1.)
convergeparam['state'] = 1.0
else:
convergeparam['imperrstdchange'] = float("Inf")
convergeparam['vmperrstdchange'] = float("Inf")
convergeparam['pmperrstdchange'] = float("Inf")
convergeparam['imperrmeanchange'] = float("Inf")
convergeparam['vmperrmeanchange'] = float("Inf")
convergeparam['pmperrmeanchange'] = float("Inf")
convergeparam['imperrabsmaxchange'] = float("Inf")
convergeparam['vmperrabsmaxchange'] = float("Inf")
convergeparam['pmperrabsmaxchange'] = float("Inf")
convergeparam['state'] = 1.
return convergeparam
def _update_rsh_fixed_pt(vmp, imp, iph, io, rs, rsh, nnsvth):
"""
Adjust Rsh to match Vmp using other parameter values
Helper function for fit_pvsyst_sandia, fit_desoto_sandia
Description
-----------
Rsh is updated iteratively using a fixed point expression
obtained from combining Vmp = Vmp(Imp) (using the analytic solution to the
single diode equation) and dP / dI = 0 at Imp. 500 iterations are performed
because convergence can be very slow.
Parameters
----------
vmp: a numpy array of length N of values for Vmp (V)
imp: a numpy array of length N of values for Imp (A)
iph: a numpy array of length N of values for light current IL (A)
io: a numpy array of length N of values for Io (A)
rs: a numpy array of length N of values for series resistance (ohm)
rsh: a numpy array of length N of initial values for shunt resistance (ohm)
nnsvth: a numpy array length N of values for the diode factor x thermal
voltage for the module, equal to Ns (number of cells in series) x
Vth (thermal voltage per cell).
Returns
-------
numpy array of length N of updated values for Rsh
References
----------
.. [1] PVLib for MATLAB https://github.com/sandialabs/MATLAB_PV_LIB
.. [2] C. Hansen, Parameter Estimation for Single Diode Models of
Photovoltaic Modules, Sandia National Laboratories Report SAND2015-2065
"""
niter = 500
x1 = rsh
for i in range(niter):
_, z = _calc_theta_phi_exact(vmp, imp, iph, io, rs, x1, nnsvth)
with np.errstate(divide="ignore"):
next_x1 = (1 + z) / z * ((iph + io) * x1 / imp - nnsvth * z / imp
- 2 * vmp / imp)
x1 = next_x1
return x1
def _calc_theta_phi_exact(vmp, imp, iph, io, rs, rsh, nnsvth):
"""
_calc_theta_phi_exact computes Lambert W values appearing in the analytic
solutions to the single diode equation for the max power point.
Helper function for fit_pvsyst_sandia
Parameters
----------
vmp: a numpy array of length N of values for Vmp (V)
imp: a numpy array of length N of values for Imp (A)
iph: a numpy array of length N of values for the light current IL (A)
io: a numpy array of length N of values for Io (A)
rs: a numpy array of length N of values for the series resistance (ohm)
rsh: a numpy array of length N of values for the shunt resistance (ohm)
nnsvth: a numpy array of length N of values for the diode factor x
thermal voltage for the module, equal to Ns
(number of cells in series) x Vth
(thermal voltage per cell).
Returns
-------
theta: a numpy array of values for the Lambert W function for solving
I = I(V)
phi: a numpy array of values for the Lambert W function for solving
V = V(I)
Notes
-----
_calc_theta_phi_exact calculates values for the Lambert W function which
are used in the analytic solutions for the single diode equation at the
maximum power point. For V=V(I),
phi = W(Io*Rsh/n*Vth * exp((IL + Io - Imp)*Rsh/n*Vth)). For I=I(V),
theta = W(Rs*Io/n*Vth *
Rsh/ (Rsh+Rs) * exp(Rsh/ (Rsh+Rs)*((Rs(IL+Io) + V)/n*Vth))
References
----------
.. [1] PVL MATLAB 2065 https://github.com/sandialabs/MATLAB_PV_LIB
.. [2] C. Hansen, Parameter Estimation for Single Diode Models of
Photovoltaic Modules, Sandia National Laboratories Report SAND2015-2065
.. [3] A. Jain, A. Kapoor, "Exact analytical solutions of the parameters of
real solar cells using Lambert W-function", Solar Energy Materials and
Solar Cells, 81 (2004) 269-277.
"""
# handle singleton inputs
vmp = np.asarray(vmp)
imp = np.asarray(imp)
iph = np.asarray(iph)
io = np.asarray(io)
rs = np.asarray(rs)
rsh = np.asarray(rsh)
nnsvth = np.asarray(nnsvth)
# Argument for Lambert W function involved in V = V(I) [2] Eq. 12; [3]
# Eq. 3
with np.errstate(over="ignore", divide="ignore", invalid="ignore"):
argw = np.where(
nnsvth == 0,
np.nan,
rsh * io / nnsvth * np.exp(rsh * (iph + io - imp) / nnsvth))
phi = np.where(argw > 0, lambertw(argw).real, np.nan)
# NaN where argw overflows. Switch to log space to evaluate
u = np.isinf(argw)
if np.any(u):
logargw = (
np.log(rsh[u]) + np.log(io[u]) - np.log(nnsvth[u])
+ rsh[u] * (iph[u] + io[u] - imp[u]) / nnsvth[u])
# Three iterations of Newton-Raphson method to solve w+log(w)=logargW.
# The initial guess is w=logargW. Where direct evaluation (above)
# results in NaN from overflow, 3 iterations of Newton's method gives
# approximately 8 digits of precision.
x = logargw
for i in range(3):
x *= ((1. - np.log(x) + logargw) / (1. + x))
phi[u] = x
phi = np.transpose(phi)
# Argument for Lambert W function involved in I = I(V) [2] Eq. 11; [3]
# Eq. 2
with np.errstate(over="ignore", divide="ignore", invalid="ignore"):
argw = np.where(
nnsvth == 0,
np.nan,
rsh / (rsh + rs) * rs * io / nnsvth * np.exp(
rsh / (rsh + rs) * (rs * (iph + io) + vmp) / nnsvth))
theta = np.where(argw > 0, lambertw(argw).real, np.nan)
# NaN where argw overflows. Switch to log space to evaluate
u = np.isinf(argw)
if np.any(u):
with np.errstate(divide="ignore"):
logargw = (
np.log(rsh[u]) - np.log(rsh[u] + rs[u]) + np.log(rs[u])
+ np.log(io[u]) - np.log(nnsvth[u])
+ (rsh[u] / (rsh[u] + rs[u]))
* (rs[u] * (iph[u] + io[u]) + vmp[u]) / nnsvth[u])
# Three iterations of Newton-Raphson method to solve w+log(w)=logargW.
# The initial guess is w=logargW. Where direct evaluation (above)
# results in NaN from overflow, 3 iterations of Newton's method gives
# approximately 8 digits of precision.
x = logargw
for i in range(3):
x *= ((1. - np.log(x) + logargw) / (1. + x))
theta[u] = x
theta = np.transpose(theta)
return theta, phi
def pvsyst_temperature_coeff(alpha_sc, gamma_ref, mu_gamma, I_L_ref, I_o_ref,
R_sh_ref, R_sh_0, R_s, cells_in_series,
R_sh_exp=5.5, EgRef=1.121, irrad_ref=1000,
temp_ref=25):
r"""
Calculates the temperature coefficient of power for a pvsyst single
diode model.
The temperature coefficient is determined as the numerical derivative
:math:`\frac{dP}{dT}` at the maximum power point at reference conditions
[1]_.
Parameters
----------
alpha_sc : float
The short-circuit current temperature coefficient of the module. [A/C]
gamma_ref : float
The diode ideality factor. [unitless]
mu_gamma : float
The temperature coefficient for the diode ideality factor. [1/K]
I_L_ref : float
The light-generated current (or photocurrent) at reference conditions.
[A]
I_o_ref : float
The dark or diode reverse saturation current at reference conditions.
[A]
R_sh_ref : float
The shunt resistance at reference conditions. [ohm]
R_sh_0 : float
The shunt resistance at zero irradiance conditions. [ohm]
R_s : float
The series resistance at reference conditions. [ohm]
cells_in_series : int
The number of cells connected in series.
R_sh_exp : float, default 5.5
The exponent in the equation for shunt resistance. [unitless]
EgRef : float, default 1.121
The energy bandgap of the module's cells at reference temperature.
Default of 1.121 eV is for crystalline silicon. Must be positive. [eV]
irrad_ref : float, default 1000
Reference irradiance. [W/m^2].
temp_ref : float, default 25
Reference cell temperature. [C]
Returns
-------
gamma_pdc : float
Temperature coefficient of power at maximum power point at reference
conditions. [1/C]
References
----------
.. [1] K. Sauer, T. Roessler, C. W. Hansen, Modeling the Irradiance and
Temperature Dependence of Photovoltaic Modules in PVsyst, IEEE Journal
of Photovoltaics v5(1), January 2015.
"""
def maxp(temp_cell, irrad_ref, alpha_sc, gamma_ref, mu_gamma, I_L_ref,
I_o_ref, R_sh_ref, R_sh_0, R_s, cells_in_series, R_sh_exp, EgRef,
temp_ref):
params = calcparams_pvsyst(
irrad_ref, temp_cell, alpha_sc, gamma_ref, mu_gamma, I_L_ref,
I_o_ref, R_sh_ref, R_sh_0, R_s, cells_in_series, R_sh_exp, EgRef,
irrad_ref, temp_ref)
res = bishop88_mpp(*params)
return res[2]
args = (irrad_ref, alpha_sc, gamma_ref, mu_gamma, I_L_ref,
I_o_ref, R_sh_ref, R_sh_0, R_s, cells_in_series, R_sh_exp, EgRef,
temp_ref)
pmp = maxp(temp_ref, *args)
gamma_pdc = derivative(maxp, temp_ref, args=args)
return gamma_pdc / pmp
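# Illustrative call (sketch; the single diode parameters below are made up and
# would normally come from fit_pvsyst_sandia or a module database):
#   >>> gamma_pdc = pvsyst_temperature_coeff(
#   ...     alpha_sc=0.005, gamma_ref=1.1, mu_gamma=-0.0003, I_L_ref=9.4,
#   ...     I_o_ref=1e-10, R_sh_ref=300., R_sh_0=1000., R_s=0.4,
#   ...     cells_in_series=60)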
|
pvlib/pvlib-python
|
pvlib/ivtools/sdm.py
|
Python
|
bsd-3-clause
| 50,560
|
[
"pysam"
] |
ab361b5ea5c4234e62276e500c834817db9ab465cae7b5092c201a9aaad5779b
|
# see https://github.com/pypa/sampleproject
"""Installation script for ProPhyle.
To include binaries into the package, run make and set the system variable
PROPHYLE_PACKBIN to a non-zero value, e.g.,
PROPHYLE_PACKBIN=1 python3 setup.py install
"""
import glob
import os
import setuptools
import sys
if sys.version_info < (3, 4):
sys.exit('Minimum supported Python version is 3.4')
bwa_dir = 'prophyle/prophyle_index/bwa'
if len(glob.glob(os.path.join(bwa_dir, "*.c"))) == 0 or len(glob.glob(os.path.join(bwa_dir, "*.h"))) == 0:
sys.exit("BWA submodule is missing. Run 'make submodules' to download it.")
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the README file
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
# Get the current version
exec(open("prophyle/version.py").read())
try:
packbin = os.environ['PROPHYLE_PACKBIN']
except KeyError:
packbin = False
if packbin:
print("Adding executables and *.o files to the package", file=sys.stderr)
prophyle_files = [
'Makefile',
'*.py',
'prophyle_assembler/*.cpp',
'prophyle_assembler/*.h',
'prophyle_assembler/Makefile',
'prophyle_index/*.c',
'prophyle_index/*.h',
'prophyle_index/Makefile',
'prophyle_index/bwa/*.c',
'prophyle_index/bwa/*.h',
'prophyle_index/bwa/Makefile',
'prophyle_assignment/*.cpp',
'prophyle_assignment/*.c',
'prophyle_assignment/*.h',
'prophyle_assignment/Makefile',
'trees/*.nw',
] + (
[
'prophyle_index/prophyle_index',
'prophyle_index/*.o',
'prophyle_assembler/prophyle_assembler',
'prophyle_assembler/*.o',
'prophyle_assignment/prophyle_assignment',
'prophyle_assignment/*.o',
'prophyle_index/bwa/bwa',
'prophyle_index/bwa/*.o',
] if packbin else []
)
setuptools.setup(
name='prophyle',
version=VERSION,
description='ProPhyle metagenomic classifier',
long_description=long_description,
#
url='https://github.com/prophyle/prophyle',
download_url="https://github.com/prophyle/prophyle/releases",
#
author='Karel Brinda, Kamil Salikhov, Simone Pignotti, Gregory Kucherov',
author_email=
'[email protected], [email protected], [email protected], [email protected]',
license='MIT',
#
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 3 :: Only',
'Operating System :: Unix',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
#
keywords='metagenomics classification NGS',
#
packages=["prophyle"],
#
package_data={'prophyle': prophyle_files},
#
entry_points={
'console_scripts': [
'prophyle = prophyle.prophyle:main',
'prophyle_analyze.py = prophyle.prophyle_analyze:main',
'prophyle_assignment.py = prophyle.prophyle_assignment:main',
'prophyle_ncbi_tree.py = prophyle.prophyle_ncbi_tree:main',
'prophyle_otu_table.py = prophyle.prophyle_otu_table:main',
'prophyle_paired_end.py = prophyle.prophyle_paired_end:main',
'prophyle_plot_tree.py = prophyle.prophyle_plot_tree:main',
'prophyle_propagation_makefile.py = prophyle.prophyle_propagation_makefile:main',
'prophyle_propagation_postprocessing.py = prophyle.prophyle_propagation_postprocessing:main',
'prophyle_propagation_preprocessing.py = prophyle.prophyle_propagation_preprocessing:main',
'prophyle_split_allseq.py = prophyle.prophyle_split_allseq:main',
],
},
#
install_requires=[
'ete3',
'wheel',
'bitarray',
'psutil',
'pysam',
'scipy',
'six',
],
#
)
|
karel-brinda/prophyle
|
setup.py
|
Python
|
mit
| 3,999
|
[
"BWA",
"pysam"
] |
72bc35b03ac3441ff5717ca677122e7cd588b09864ea18068c1cf1e05bbc0d4c
|
#!/usr/bin/env python
#
# Copyright (C) 2017 - Massachusetts Institute of Technology (MIT)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Functions related to simulating observed spectra based on calculated theoretical spectra.
For 0.8, we need a full-scale conversion of all lists into dicts:
instead of normalized_xxx, let's have a dict with pressure_layers as keys and the relevant data as values.
Takes in a simulated theoretical spectrum and adds observational effects.
"""
import os
import sys
import numpy as np
import time
from scipy import interpolate, stats
DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(DIR, '../..'))
#import matplotlib.pyplot as plt
#from matplotlib.ticker import MultipleLocator, FormatStrFormatter
#ml = MultipleLocator(10)
import SEAS_Utils as utils
import SEAS_Aux.cross_section.hapi as hp
import SEAS_Main.observation_effects.noise as noise
class OS_Simulator():
def __init__(self, user_input):
self.user_input = user_input
def add_noise(self, bin_mean_I, noise_type="gaussian"):
error_scale = utils.to_float(self.user_input["Observation_Effects"]["Noise"]["error_scale"])
data_length = len(bin_mean_I)
if noise_type == "gaussian":
Noise = noise.Gaussian_Noise(error_scale, data_length)
noise_added = Noise.get_noise()
elif noise_type == "poisson":
Noise = noise.Poisson_Noise(error_scale, data_length)
noise_added = Noise.get_noise()
bin_mean_error_I = bin_mean_I*(1.0+noise_added)
bin_mean_error_bar = error_scale*bin_mean_error_I[1]
return bin_mean_error_I, bin_mean_error_bar
def add_background_stars(self):
pass
def calculate_convolve(self, nu, trans, record=False):
#if self.user_input["Observation"]["Convolve"] == "true":
amount = utils.to_float(self.user_input["Observation_Effects"]["Convolve"]["convolve_amount"])
nu,Transit_Signal,i1,i2,slit = hp.convolveSpectrum(nu,trans,SlitFunction=hp.SLIT_RECTANGULAR,Resolution=amount,AF_wing=20.0)
if record:
return nu,Transit_Signal,max(Transit_Signal)
return nu,Transit_Signal
def calculate_bin(self, x, signal):
Bins = utils.to_float(self.user_input["Observation_Effects"]["Bin"]["bin_number"])
Method = self.user_input["Observation_Effects"]["Bin"]["method"]
bin_mean_I, bin_edges, binnumber = stats.binned_statistic(x, signal, statistic=Method, bins=Bins)
bin_width = (bin_edges[1] - bin_edges[0])
bin_centers = bin_edges[1:] - bin_width/2
return bin_centers, bin_mean_I
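# Sketch of what the binning above does: with Bins=3 and Method="mean",
# x = [1, 2, 3, 4, 5, 6] and signal = [10, 20, 30, 40, 50, 60] fall into three
# equal-width bins, giving bin means [15., 35., 55.] and the corresponding
# bin centers.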
def spectra_to_magnitude(self):
"""
convert simulated spectra to magnitude
This will need a simulated stellar spectra (a black body curve)?
"""
pass
def number_of_photon(self):
"""
number of photons expected per bin
"""
pass
def telescope_response_function(self):
pass
def telescope_jitter(self):
pass
|
azariven/BioSig_SEAS
|
SEAS_Main/simulation/observed_spectra_simulator.py
|
Python
|
gpl-3.0
| 3,855
|
[
"Gaussian"
] |
d2e911dabb0f9ecb3486f230d31a0adbe8efae6b009f66f07f51c4e9df9dcaed
|
"""
Nonnegative CP decomposition by Hierarchical alternating least squares (HALS).
Author: N. Benjamin Erichson <[email protected]>
"""
import numpy as np
import numba
from tensortools.operations import unfold, khatri_rao
from tensortools.tensors import KTensor
from tensortools.optimize import FitResult, optim_utils
def ncp_hals(
X, rank, mask=None, random_state=None, init='rand',
skip_modes=[], negative_modes=[], **options):
"""
    Fits a nonnegative CP decomposition using the Hierarchical Alternating
    Least Squares (HALS) method.
Parameters
----------
X : (I_1, ..., I_N) array_like
A real array with nonnegative entries and ``X.ndim >= 3``.
rank : integer
The `rank` sets the number of components to be computed.
mask : (I_1, ..., I_N) array_like
Binary tensor, same shape as X, specifying censored or missing data values
at locations where (mask == 0) and observed data where (mask == 1).
random_state : integer, RandomState instance or None, optional (default ``None``)
If integer, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by np.random.
init : str, or KTensor, optional (default ``'rand'``).
Specifies initial guess for KTensor factor matrices.
If ``'randn'``, Gaussian random numbers are used to initialize.
If ``'rand'``, uniform random numbers are used to initialize.
If KTensor instance, a copy is made to initialize the optimization.
skip_modes : iterable, optional (default ``[]``).
Specifies modes of the tensor that are not fit. This can be
used to fix certain factor matrices that have been previously
fit.
negative_modes : iterable, optional (default ``[]``).
Specifies modes of the tensor whose factors are not constrained
to be nonnegative.
options : dict, specifying fitting options.
tol : float, optional (default ``tol=1E-5``)
Stopping tolerance for reconstruction error.
max_iter : integer, optional (default ``max_iter = 500``)
Maximum number of iterations to perform before exiting.
min_iter : integer, optional (default ``min_iter = 1``)
Minimum number of iterations to perform before exiting.
max_time : integer, optional (default ``max_time = np.inf``)
Maximum computational time before exiting.
verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
Display progress.
Returns
-------
result : FitResult instance
Object which holds the fitted results. It provides the factor matrices
in form of a KTensor, ``result.factors``.
Notes
-----
    This implementation uses the Hierarchical Alternating Least Squares (HALS) method.
References
----------
Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
Examples
--------
"""
# Mask missing elements.
if mask is not None:
X = np.copy(X)
X[~mask] = np.mean(X[mask])
# Check inputs.
optim_utils._check_cpd_inputs(X, rank)
# Initialize problem.
U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
result = FitResult(U, 'NCP_HALS', **options)
    # Store the norm of the data tensor (used to normalize the residual).
normX = np.linalg.norm(X)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Iterate the HALS algorithm until convergence or maxiter is reached
# i) compute the N gram matrices and multiply
# ii) Compute Khatri-Rao product
# iii) Update component U_1, U_2, ... U_N
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
while result.still_optimizing:
for n in range(X.ndim):
# Skip modes that are specified as fixed.
if n in skip_modes:
continue
# Select all components, but U_n
components = [U[j] for j in range(X.ndim) if j != n]
# i) compute the N-1 gram matrices
grams = np.prod([arr.T @ arr for arr in components], axis=0)
# ii) Compute Khatri-Rao product
kr = khatri_rao(components)
Xmkr = unfold(X, n).dot(kr)
# iii) Update component U_n
_hals_update(U[n], grams, Xmkr, n not in negative_modes)
# iv) Update masked elements.
if mask is not None:
pred = U.full()
X[~mask] = pred[~mask]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update the optimization result, checks for convergence.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if mask is None:
# Determine mode that was fit last.
n = np.setdiff1d(np.arange(X.ndim), skip_modes).max()
# Add contribution of last fit factors to gram matrix.
grams *= U[n].T @ U[n]
residsq = np.sum(grams) - 2 * np.sum(U[n] * Xmkr) + (normX ** 2)
result.update(np.sqrt(residsq) / normX)
else:
result.update(np.linalg.norm(X - pred) / normX)
# end optimization loop, return result.
return result.finalize()
@numba.jit(nopython=True)
def _hals_update(factors, grams, Xmkr, nonneg):
dim = factors.shape[0]
rank = factors.shape[1]
indices = np.arange(rank)
# Handle special case of rank-1 model.
if rank == 1:
if nonneg:
factors[:] = np.maximum(0.0, Xmkr / grams[0, 0])
else:
factors[:] = Xmkr / grams[0, 0]
# Do a few inner iterations.
else:
for itr in range(3):
for p in range(rank):
idx = (indices != p)
Cp = factors[:, idx] @ grams[idx][:, p]
r = (Xmkr[:, p] - Cp) / np.maximum(grams[p, p], 1e-6)
if nonneg:
factors[:, p] = np.maximum(r, 0.0)
else:
factors[:, p] = r
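# Minimal usage sketch (illustrative only, appended for clarity): fit a rank-3
# model to a small random nonnegative tensor and inspect the fitted factors.
if __name__ == "__main__":
    X_demo = np.random.rand(10, 11, 12)   # synthetic nonnegative data
    res = ncp_hals(X_demo, rank=3)
    print(res.factors)                    # KTensor holding the factor matrices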
|
ahwillia/tensortools
|
tensortools/optimize/ncp_hals.py
|
Python
|
mit
| 6,382
|
[
"Gaussian"
] |
9bbfde57a299480707642cab6473549b274a0a1401a574a10b916d24ef2f5ae6
|
# Orca
#
# Copyright 2005-2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom script for gnome-mud."""
from .script import Script
|
ruibarreira/linuxtrail
|
usr/lib/python3/dist-packages/orca/scripts/apps/gnome-mud/__init__.py
|
Python
|
gpl-3.0
| 846
|
[
"ORCA"
] |
3d558ec7af108531bca3aa2f773c59c039b61bb0303ef854b9de5a6dc19e2e37
|
# Copyright (C) 2004-2008 Paul Cochrane
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
## @file test_renderer.py
import unittest
import sys,os
from string import *
here = os.getcwd() + '/../../'
sys.path.append(here)
import pyvisi # this should import all of the pyvisi stuff needed
"""
Class and functions for testing the Renderer class
"""
class TestRenderer(unittest.TestCase):
"""
The main test class
"""
def testRendererExactlyTwoArgs(self):
"""
Tests that the Renderer object is instantiated only with two arguments
"""
# test just the one argument
self.assertRaises(TypeError, \
pyvisi.Renderer.__init__, 'one')
# test three arguments
self.assertRaises(TypeError, \
pyvisi.Renderer.__init__, 'one', 'two', 'three')
def testRendererNameType(self):
"""
Tests the type of the renderer name; should be alphanumeric
"""
ren = pyvisi.Renderer('vtk')
renName = ren.rendererName
self.assert_(renName.isalnum(),\
msg='Renderer() argument is not alphanumeric')
def testRendererReturn(self):
"""
Tests that a Renderer() object is returned
"""
ren = pyvisi.Renderer('vtk')
classStr = ren.__class__.__name__
self.assertEqual('Renderer', classStr)
if __name__ == '__main__':
unittest.main()
# vim: expandtab shiftwidth=4:
|
paultcochrane/pyvisi
|
pyvisi/renderers/vtk/tests/test_renderer.py
|
Python
|
gpl-2.0
| 2,108
|
[
"VTK"
] |
baae460217037150dd2786ead635bf01309772dfe26918f6abb0782cd116ce66
|
#!/usr/bin/env python
# \author: Bruno Combal, IOC/UNESCO, EC/FP7 GEOWOW
# \date: July 2013
# to run the script with the correct version of uvcdat:
# source /usr/local/uvcdat/1.2.0/bin/setup_cdat.sh
# version 1: regrid lat/lon, for all t and z
# version 2: add z interpolation, for all t, lat, lon
import cdms2
import numpy
import sys
import os.path
# _____________________________
def exitWM(message='Error. Exit 1', ExitCode=1):
print message
sys.exit(ExitCode)
# _____________________________
def makeGrid():
xstart=0
xend=360
xstep=0.5
ystart=-85
yend=85
ystep=0.5
lon_bnds=[]
lon=[]
for ii in numpy.arange(xstart, xend, xstep):
lon_bnds.append( [ii, ii+xstep] )
lon.append(ii+0.5*xstep)
lon_bnds=numpy.array(lon_bnds)
lon=numpy.array(lon)
lat_bnds=[]
lat=[]
for ii in numpy.arange(ystart, yend, ystep):
lat_bnds.append([ii, ii+ystep])
lat.append(ii+0.5*ystep)
lat_bnds=numpy.array(lat_bnds)
lat=numpy.array(lat)
latAxis = cdms2.createAxis(lat, lat_bnds)
latAxis.designateLatitude(True)
latAxis.units='degrees_north'
latAxis.long_name='Latitude'
latAxis.id='latitude'
lonAxis = cdms2.createAxis(lon, lon_bnds)
lonAxis.designateLongitude(True, 360.0)
lonAxis.units='degrees_east'
lonAxis.id='longitude'
lonAxis.long_name='Longitude'
lvl_bnds=numpy.array([[0,10], [10, 20], [20,30], [30,40], [40,50], [50,60], [60,70], [70,80], [80,90], [90,100], [100, 125], [125, 150], [150,175], [175,200], [200,250],[250,300],[300,400],[400,500], [500,600], [600,700], [700,800]])
lvl = [ 0.5*(lvls[0] + lvls[1]) for lvls in lvl_bnds ]
# lvl = numpy.zeros(len(lvl_bnds))
# for ii in range(len(lvl_bnds)): lvl[ii]=0.5*(lvl_bnds[ii,0]+lvl_bnds[ii,1])
return((cdms2.createGenericGrid(latAxis, lonAxis, lat_bnds, lon_bnds), latAxis, lonAxis, lat_bnds, lon_bnds, lvl_bnds, lvl))
# _____________________________
# interpolates a series of values
# the output has the size of zNew
def do_zInterp(zProfile, zOrg, zNew, nodata):
# z profiles have constant size, but final values may be set to nodata (1.e20): they should not be considered
thisProfile = [zz for zz in zProfile if zz < nodata]
# the result has the dimensions of zNew
tmp = numpy.interp(zNew[0:len(thisProfile)], zOrg[0:len(thisProfile)], thisProfile, right=nodata)
final = numpy.zeros(len(zNew))+nodata
    final[0:len(tmp)] = tmp[:]
return final
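# Illustrative example (assuming nodata = 1.e20 marks missing bottom levels):
#   do_zInterp([10., 12., 1.e20], [5., 15., 25.], [0., 10., 20.], 1.e20)
#   -> approximately array([10., 11., 1.e20]); only the valid part of the
#      profile is interpolated, the remaining levels stay at nodata.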
# _____________________________
# interpolates cube t, z, lat, lon along z
# assumes dimensions are t, z, lat, lon
# cubeIn: 4D dataset
# zOrg: the input data z levels
# zNew: the requested zlevels
def do_hyperInterp(cubeIn, zOrg, zNew, nodata):
thisShape=cubeIn.shape
cubeOut = numpy.zeros( (thisShape[0], len(zNew), thisShape[2], thisShape[3] ) )+nodata
for itime in range(0, thisShape[0]):
print itime
for ilat in range(0, thisShape[2]):
for ilon in range(0, thisShape[3]):
if cubeIn[itime, 0, ilat, ilon] < nodata:
tmp = do_zInterp( numpy.ravel(cubeIn[itime, :, ilat, ilon]), zOrg, zNew, nodata)
cubeOut[itime, :, ilat, ilon] = tmp[:]
return cubeOut
# _____________________________
def do_regrid(infileName, variable, outfileName, netcdfType=4):
nodata = 1.e20
if netcdfType==4:
cdms2.setNetcdfShuffleFlag(1)
cdms2.setNetcdfDeflateFlag(1)
cdms2.setNetcdfDeflateLevelFlag(3)
elif netcdfType==3:
cdms2.setNetcdfShuffleFlag(0)
cdms2.setNetcdfDeflateFlag(0)
cdms2.setNetcdfDeflateLevel(0)
else:
exitWM('Unknown netcdf type {0}. Exit 2.'.format(netcdfType),2)
infile = cdms2.open(infileName)
unitsVar = infile[variable].units
(referenceGrid, latAxis, lonAxis, latBounds, lonBounds, lvl_bounds, lvl) = makeGrid()
regridded = infile[variable][:].regrid(referenceGrid)
outvar = cdms2.createVariable(regridded, typecode='f',
id=variable, fill_value=nodata,
grid=referenceGrid, copyaxes=1,
attributes=dict(long_name='regridded {0}'.format(variable), units=unitsVar))
#final = do_hyperInterp(regridded, infile[variable].getLevel()[:], lvl, nodata)
#outvar = cdms2.createVariable(final, typecode='f', id=variable, fill_value=nodata, attributes=dict(long_name='regridded {0}'.format(variable), units=unitsVar) )
#gridBis = regridded.subSlice(longitude=0).crossSectionRegrid(lvl, latAxis, method="linear")
#zregrid = tmpvar.crossSectionRegrid(lvl)
#outvar.setAxisList((latAxis, lonAxis))
if os.path.exists(outfileName): os.remove(outfileName)
outfile=cdms2.open(outfileName, 'w')
outfile.write(outvar)
outfile.history='Created with '+__file__.encode('utf8')
outfile.close()
infile.close()
# _____________________________
if __name__=="__main__":
infile=None #input file: full path
variable='thetao'
netcdfType=4
outfile=None #output file: full path
# parse input parameters
# to do: optional grid description
ii=1
while ii < len(sys.argv):
arg=sys.argv[ii]
if arg=='-o':
ii=ii+1
outfile = sys.argv[ii]
elif arg=='-v':
ii = ii + 1
variable=sys.argv[ii]
else:
infile = sys.argv[ii]
ii = ii+1
# check input parameters
if infile is None:
exitWM('Input file is not defined. Exit 3.', 3)
if outfile is None:
        exitWM('Output file is not defined, use option -o. Exit 4.', 4)
if not os.path.isfile(infile):
# infile does not exist or is not a file
exitWM('Could not find input file {0}. Exit 1.'.format(infile), 1)
if not os.path.isdir(os.path.dirname(outfile)):
# outfile directory does not exist or is not a directory
exitWM('Directory {0} does not exist to store output file {1}'.format(os.path.dirname(outfile), os.path.basename(outfile)))
do_regrid(infile, variable, outfile, netcdfType)
|
IOC-CODE/esgf_ensemble_mean
|
regrid_thetao/bin/regrid_thetao.py
|
Python
|
gpl-2.0
| 6,151
|
[
"NetCDF"
] |
7c896766b45628ffb765999a2bd94f837ed72504e935c654a95ae9b63a8f4f69
|
import pyparsing as pp
LPAR = pp.Suppress('(')
RPAR = pp.Suppress(')')
LBRACK = pp.Suppress('[')
RBRACK = pp.Suppress(']')
LBRACE = pp.Suppress('{')
RBRACE = pp.Suppress('}')
COMMA = pp.Suppress(',')
LT = pp.Suppress('<')
GT = pp.Suppress('>')
TITLE = pp.Keyword('TITLE')
UNITS = pp.Keyword('UNITS')
PARAMETER = pp.Keyword('PARAMETER')
COMMENT = pp.Keyword('COMMENT')
ASSIGNED = pp.Keyword('ASSIGNED')
NEURON = pp.Keyword('NEURON')
BREAKPOINT = pp.Keyword('BREAKPOINT')
STATE = pp.Keyword('STATE')
FUNCTION = pp.Keyword('FUNCTION')
PROCEDURE = pp.Keyword('PROCEDURE')
INITIAL = pp.Keyword('INITIAL')
DERIVATIVE = pp.Keyword('DERIVATIVE')
LOCAL = pp.Keyword('LOCAL')
UNITSON = pp.Keyword('UNITSON')
UNITSOFF = pp.Keyword('UNITSOFF')
THREADSAFE = pp.Keyword('THREADSAFE')
FLOAT = pp.Regex(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?')
INT = pp.Word(pp.nums)
ID = pp.Word(pp.alphas, pp.alphanums+'_') # TODO: allowed ids?
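if __name__ == '__main__':
    # Quick illustrative check of a few terminals (assumes the classic
    # pyparsing parseString API).
    print(FLOAT.parseString('-1.5e-3'))   # -> ['-1.5e-3']
    print(INT.parseString('42'))          # -> ['42']
    print(ID.parseString('gna_bar'))      # -> ['gna_bar']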
|
borismarin/nmodl-parse
|
nmodl/terminals.py
|
Python
|
gpl-3.0
| 922
|
[
"NEURON"
] |
c729a260a94d45ab2d1ce18e6501d9e79afb3924bcf5617de1cadc2bd7aa2adf
|
#!/usr/bin/env python
#
#
# computes analytical solution for a double-couple source
# according to Aki & Richards, eq. 4.32 and 4.33
#
#
from __future__ import print_function
import sys
from numpy import sqrt,fabs,sin,cos,arccos,arctan2,pi,zeros
from math import erf
##############################################################################
## parameters
# source-receiver position
xs = 0.0
ys = 0.0
zs = 0.0
# receiver
xr = 6000.0
yr = 1000.0
zr = 4000.0
# (optional) file name prefix for receiver station comparison
station_prefix = "DB.Z1.FX" # e.g., "DB.Z5.FX" -> DB.Z5.FXX.semd,DB.Z5.FXY.semd,DB.Z5.FXZ.semd
station_ending = ".semd"
# medium parameter
rho = 2300.0
beta = 1500.0
alpha = 2800.0
# source time function
hdur = 2.0 / 1.628 # hdur/gauss (according to SPECFEM definition)
dt = 0.001 # time step size
M0 = 1.e16 # double-couple scalar moment (N-m)
# trace start/end time
t0 = -4.0 # SPECFEM simulation times
t1 = (14000-1) * dt + t0 # 14000 time steps
##############################################################################
def comp_source_time_function(t,hdur):
# quasi Heaviside, small Gaussian moment-rate tensor with hdur
# (according to SPECFEM definition)
val = 0.5 * (1.0 + erf(t/hdur))
return val
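# For reference: comp_source_time_function(0.0, hdur) = 0.5 since erf(0) = 0,
# and the value approaches 1 for t >> hdur, i.e. a smoothed Heaviside step.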
def comp_source_time_function_diff(t,hdur,dt):
# derivative of the source time function
val_diff = (comp_source_time_function(t+dt,hdur)-comp_source_time_function(t-dt,hdur)) / (2.0*dt)
return val_diff
def comp_source_time_function_conv(t,hdur,dt,r,alpha,beta):
# convolution of the source time function between times r/alpha and r/beta
nmin = int((r/alpha)/dt)
nmax = int((r/beta)/dt)
val_conv = 0.0
for i in range(nmin, nmax+1):
time_temp = i * dt
val_conv += comp_source_time_function(t-time_temp,hdur) * time_temp * dt
return val_conv
def analytical_solution():
global xs,ys,zs
global xr,yr,zr
global station_prefix,station_ending
global rho,alpha,beta
global hdur,dt,M0
global t0,t1
# spherical coordinates
r = sqrt((xs-xr)**2 + (ys-yr)**2 + (zs-zr)**2)
theta = arccos((zr-zs)/r)
phi = arctan2((yr-ys),(xr-xs))
# user output
print('Aki & Richards - double-couple solution')
print('position:')
print(' xr/yr/zr = ',xr, yr, zr)
print(' r/theta/phi = ',r, theta, phi)
print('')
print(' back-rotated x/y/z = ',r*sin(theta)*cos(phi),r*sin(theta)*sin(phi),r*cos(theta))
print('')
print('hdur:',hdur)
print('')
# trace start/end time
if t1 > 0.0 and t1 > t0:
tmin = t0
tmax = t1
else:
# default: around arrivals +/- 5 * hdur
tmin = r/alpha - 5.0 * hdur
tmax = r/beta + 5.0 * hdur
ntmin = int(tmin/dt)
ntmax = int(tmax/dt)
print('trace length:')
print(' tmin / ntmin :',tmin, ntmin)
print(' tmax / ntmax :',tmax, ntmax)
print('')
#compute factors in the AKI & RICHARDS solution
cn = 1.0/(4.0 * pi * rho * r**4) * M0
cip = 1.0/(4.0 * pi * rho * alpha**2 * r**2) * M0
cis = 1.0/(4.0 * pi * rho * beta**2 * r**2) * M0
cfp = 1.0/(4.0 * pi * rho * alpha**3 * r) * M0
cfs = 1.0/(4.0 * pi * rho * beta**3 * r) * M0
# Aki & Richards, eqs. (4.33)
# components index: 0 == r, 1 == theta, 2 == phi
an = zeros(3)
an[0] = 9.0 * sin(2.0*theta) * cos(phi)
an[1] = -6.0 * cos(2.0*theta) * cos(phi)
an[2] = 6.0 * cos(theta) * sin(phi)
aip = zeros(3)
aip[0] = 4.0*sin(2.0*theta)*cos(phi)
aip[1] = -2.0*cos(2.0*theta)*cos(phi)
aip[2] = 2.0*cos(theta)*sin(phi)
ais = zeros(3)
ais[0] = -3.0*sin(2.0*theta)*cos(phi)
ais[1] = 3.0*cos(2.0*theta)*cos(phi)
ais[2] = -3.0*cos(theta)*sin(phi)
afp = zeros(3)
afp[0] = sin(2.0*theta)*cos(phi)
afp[1] = 0.0
afp[2] = 0.0
afs = zeros(3)
afs[0] = 0.0
afs[1] = cos(2.0*theta)*cos(phi)
afs[2] = -cos(theta)*sin(phi)
usph = zeros(3)
ucar = zeros(3)
tmp_nf = zeros(3)
tmp_if = zeros(3)
tmp_ff = zeros(3)
# displacement
if len(station_prefix) > 0:
name1 = station_prefix + "X" + station_ending
name2 = station_prefix + "Y" + station_ending
name3 = station_prefix + "Z" + station_ending
else:
# default naming
name1 = "Ux.dat"
name2 = "Uy.dat"
name3 = "Uz.dat"
print('trace names:')
print(' ',name1,name2,name3)
print('')
# file output
f1 = open(name1,'w')
f2 = open(name2,'w')
f3 = open(name3,'w')
# format: #time #u_cartesian #u_spherical
info = "# Aki and Richards - double-couple solution eq.(4.32) and (4.33)\n"
info += "#\n"
info += "# homogeneous elastic medium: rho/vp/vs = {} / {} / {}\n".format(rho,alpha,beta)
info += "# receiver station : x/y/z = {} / {} / {}\n".format(xr,yr,zr)
info += "# : r/theta/phi = {} / {} / {}\n".format(r,theta,phi)
info += "#\n"
comp1 = "# solution component : cartesian X / spherical R\n"
comp2 = "# solution component : cartesian Y / spherical THETA\n"
comp3 = "# solution component : cartesian Z / spherical PHI\n"
format = "#\n"
format += "# format:\n"
format += "#time \t#u_cartesian \t#u_spherical\n"
f1.write(info + comp1 + format)
f2.write(info + comp2 + format)
f3.write(info + comp3 + format)
# source time functions
stf1 = open('stf_step_time_source','w')
stf2 = open('stf_diff_step_time_source','w')
stf3 = open('stf_conv_step_time_source','w')
# format: #time #stf
stf1.write('#time #stf\n')
stf2.write('#time #stf_diff\n')
stf3.write('#time #stf_conv\n')
print('')
print('writing wavefield solution...')
for it in range(ntmin, ntmax+1):
time = it * dt
# source time functions
stf_p = comp_source_time_function(time-r/alpha,hdur)
stf_s = comp_source_time_function(time-r/beta,hdur)
stf_p_diff = comp_source_time_function_diff(time-r/alpha,hdur,dt)
stf_s_diff = comp_source_time_function_diff(time-r/beta,hdur,dt)
stf_conv = comp_source_time_function_conv(time,hdur,dt,r,alpha,beta)
stf1.write("{} \t{} \t{}\n".format(time, stf_p, stf_s))
stf2.write("{} \t{} \t{}\n".format(time, stf_p_diff, stf_s_diff))
stf3.write("{} \t{}\n".format(time, stf_conv))
# wavefield terms
# components (r,theta,phi)
for i in range(3):
# near-field
tmp_nf[i] = cn * an[i] * stf_conv
# intermediate-field
tmp_if[i] = cip * aip[i] * stf_p + cis * ais[i] * stf_s
# far-field
tmp_ff[i] = cfp * afp[i] * stf_p_diff + cfs * afs[i] * stf_s_diff
# spherical solution
usph[i] = tmp_nf[i] + tmp_if[i] + tmp_ff[i]
## displacement vector in spherical coordinates d = v_r * unit_r + v_t * unit_theta + v_p * unit_phi
#v_r = usph(1)
#v_t = usph(2)
#v_p = usph(3)
## rotation to cartesian system
## see: https://en.wikipedia.org/wiki/Vector_fields_in_cylindrical_and_spherical_coordinates
## displacement vector in cartesian coordinates d = v_x * unit_x + v_y * unit_y + v_z * unit_z
#v_x = v_r * sin(theta)*cos(phi) + v_t * cos(theta)*cos(phi) + v_p * (-sin(phi))
#v_y = v_r * sin(theta)*sin(phi) + v_t * cos(theta)*sin(phi) + v_p * cos(phi)
#v_z = v_r * cos(theta) + v_t * (-sin(theta)) !+ v_p * 0.0
## coordinate system convention:
# SPECFEM:
# the x axis points East
# the y axis points North
# the z axis points up
#
# Aki & Richards:
# the x axis points North
# the y axis points East
# the z axis points down
#
# results for:
# unit_r = (sin(theta) cos(phi), sin(theta) sin(phi), cos(theta) )
# unit_theta = (cos(theta) cos(phi), cos(theta) sin(phi), - sin(theta))
# unit_phi = (-sin(phi) , cos(phi) , 0 )
#
# slip in (x1,x2) plane along x1
#
## Aki & Richards convention:
#v_x = v_r * sin(theta)*cos(phi) + v_t * cos(theta)*cos(phi) + v_p * (-sin(phi))
#v_y = v_r * sin(theta)*sin(phi) + v_t * cos(theta)*sin(phi) + v_p * cos(phi)
#v_z = v_r * cos(theta) + v_t * (-sin(theta)) !+ v_p * 0.0
#
## conversion to SPECFEM?
#
# STATIONS position:
# #name #network #y #x #(ignored) # z(Par_file: USE_SOURCES_RECEIVERS_Z = .true.)
# X1 DB 7000.0 8000.0 0.0 4000.0
#
# CMTSOLUTION:
# r -> z, theta -> -y, phi -> x
#
# Mxx = Mpp
# Myy = Mtt
# Mzz = Mrr
# Myz = -Mrt
# Mxz = Mrp
# Mxy = -Mtp
#
# the setup in Aki & Richards corresponds to component Mrp being non-zero
        # CMTSOLUTIONs use dyne-cm, whereas here M0 is in N-m -> conversion factor M_(dyne-cm) = 10**7 M_(N-m)
#
## convert vector field in r/theta/phi to cartesian coordinate system x/y/z
vec_r = usph[0]
vec_t = usph[1]
vec_p = usph[2]
# u(r,theta,phi) -> u(x,y,z)
ucar[0] = vec_r * sin(theta)*cos(phi) + vec_t * cos(theta)*cos(phi) - vec_p * sin(phi)
ucar[1] = vec_r * sin(theta)*sin(phi) + vec_t * cos(theta)*sin(phi) + vec_p * cos(phi)
ucar[2] = vec_r * cos(theta) - vec_t * sin(theta)
# format: #time #u_cartesian #u_spherical
f1.write("{} \t{} \t{} \t{} {} {}\n".format(time,ucar[0],usph[0],tmp_nf[0],tmp_if[0],tmp_ff[0])) # #t #x #r
f2.write("{} \t{} \t{} \t{} {} {}\n".format(time,ucar[1],usph[1],tmp_nf[1],tmp_if[1],tmp_ff[1])) # #t #y #theta
f3.write("{} \t{} \t{} \t{} {} {}\n".format(time,ucar[2],usph[2],tmp_nf[2],tmp_if[2],tmp_ff[2])) # #t #z #phi
f1.close()
f2.close()
f3.close()
stf1.close()
stf2.close()
stf3.close()
print('')
print('written to: ')
print(' ',name1)
print(' ',name2)
print(' ',name3)
print('')
print('done')
def usage():
print('usage: ./analytical_solution.py xr yr zr station_prefix')
print(' with')
print(' xr,yr,zr - receiver position in m (e.g., 8000.0 1000.0 4000.0)')
print(' station_prefix - station name prefix (e.g., DB.Z1.FX to compare with SPECFEM)')
print('')
if __name__ == '__main__':
# gets (optional) arguments
if len(sys.argv) > 1:
if len(sys.argv) != 5:
usage()
sys.exit(1)
# receiver position
xr = float(sys.argv[1])
yr = float(sys.argv[2])
zr = float(sys.argv[3])
# station prefix
station_prefix = sys.argv[4]
# solution
analytical_solution()
|
geodynamics/specfem3d
|
EXAMPLES/small_elastic_analytic_solution/analytical_solution_Aki.py
|
Python
|
gpl-3.0
| 10,970
|
[
"Gaussian"
] |
e7c268453971ba678c342d12b71e643b900ec7010669dcb7df27dd92d813f97e
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-wms-job-status
# Author : Stuart Paterson
########################################################################
"""
Retrieve status of the given DIRAC job
"""
__RCSID__ = "$Id$"
import os
import DIRAC
from DIRAC import exit as DIRACExit
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.Time import toString, date, day
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'\nUsage:\n',
' %s [option|cfgfile] ... JobID ...' % Script.scriptName,
'\nArguments:\n',
' JobID: DIRAC Job ID' ] ) )
Script.registerSwitch( "f:", "File=", "Get status for jobs with IDs from the file" )
Script.registerSwitch( "g:", "JobGroup=", "Get status for jobs in the given group" )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
from DIRAC.Interfaces.API.Dirac import Dirac, parseArguments
dirac = Dirac()
exitCode = 0
jobs = []
for key, value in Script.getUnprocessedSwitches():
if key.lower() in ( 'f', 'file' ):
if os.path.exists( value ):
jFile = open( value )
jobs += jFile.read().split()
jFile.close()
elif key.lower() in ( 'g', 'jobgroup' ):
jobDate = toString( date() - 30 * day )
# Choose jobs no more than 30 days old
result = dirac.selectJobs( jobGroup = value, date = jobDate )
if not result['OK']:
print "Error:", result['Message']
DIRACExit( -1 )
jobs += result['Value']
if len( args ) < 1 and not jobs:
Script.showHelp()
if len( args ) > 0:
jobs += parseArguments( args )
result = dirac.getJobStatus( jobs )
if result['OK']:
for job in result['Value']:
print 'JobID=' + str( job ),
for status in result['Value'][job].items():
print '%s=%s;' % status,
print
else:
exitCode = 2
print "ERROR: %s" % result['Message']
DIRAC.exit( exitCode )
|
andresailer/DIRAC
|
Interfaces/scripts/dirac-wms-job-status.py
|
Python
|
gpl-3.0
| 2,067
|
[
"DIRAC"
] |
ddc90a618da8e615a23bd1a498ed665a5124abb5e163fdb8e308a666ec77cbf1
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Spglib(CMakePackage):
"""C library for finding and handling crystal symmetries."""
homepage = "https://atztogo.github.io/spglib/"
url = "https://github.com/atztogo/spglib/archive/v1.10.3.tar.gz"
patch('fix_cmake_install.patch', when='@:1.10.3')
# patch by Krishnendu Ghosh
patch('fix_cpp.patch', when='@:1.10.3')
version('1.16.1', sha256='e90682239e4ef63b492fa4e44f7dbcde2e2fe2e688579d96b01f2730dfdf5b2e')
version('1.16.0', sha256='969311a2942fef77ee79ac9faab089b68e256f21713194969197e7f2bdb14772')
version('1.15.1', sha256='b6dc2c8adcc7d0edee7a076e765c28b2941b2aeba590d213a0b4893c8af0c026')
version('1.15.0', sha256='2e217beff6840a22ab2149598eb2f40616b2eeb9e155edab52761d3301412f98')
version('1.14.1', sha256='9803b0648d9c2d99377f3e1c4cecf712320488403cd674192ec5cbe956bb3c78')
version('1.14.0', sha256='0a5e518c3dc221386d2219cbd260d08b032b0d2a31bccc32e1a8cb7874e7e9e9')
version('1.13.0', sha256='ed72ae7bdd129487c45ff7bebb8e2ac03074657060e068b015e7741c0271e16b')
version('1.12.2', sha256='d92f5e4fa0f54cc0abd0209b81c4d5c647dae9d25b774c2296f44b8558b17976')
version('1.12.1', sha256='1765e68982425de6d30029d50d200f20425b8ed1deff52b8e73a4a1457ac9ab6')
version('1.12.0', sha256='79361ef230b4fd55d5eb7521c23430acc3f11ab527125dc324ffb315783ebdfa')
version('1.11.2.1', sha256='f6795523a04871e012e7f5f5ab97b249fa36657b73cdc9b4ea53ef023cfcaac4')
version('1.11.2', sha256='aae61218dd0cca1fda245d4ad906c2eed5e8d30e28b575d74eab9a6be26bbd5d')
version('1.11.1.2', sha256='d99dab24accd269df65c01febd05cb5dd1094a89d7279f8390871f0432df2b56')
version('1.11.1', sha256='3b5a859f3fe2c9b096fc0754ffbd9341c568bc8003d2eeb74c958c1cacb480f5')
version('1.11.0', sha256='e4befe27473a69b7982597760d6838cc48d9ef7b624a439436a17f5487f78f51')
version('1.10.4', sha256='6a15a324a821ad9d3e615e120d9c5e704e284d8eb1f076aa21741a23fbcf08df')
version('1.10.3', sha256='43776b5fb220b746d53c1aa39d0230f304687ec05984671392bccaf850d9d696')
version('1.10.2', sha256='5907d0d29563689146512ef24aa8960d9475c5de326501f277bb58b3de21b07d')
version('1.10.1', sha256='8ed979cda82f6d440567197ec191bffcb82ee83c5bfe8a484c5a008dd00273f0')
version('1.10.0', sha256='117fff308731784bea2ddaf3d076f0ecbf3981b31ea1c1bfd5ce4f057a5325b1')
@property
def libs(self):
return find_libraries('libsymspg', root=self.prefix,
shared=True, recursive=True)
|
LLNL/spack
|
var/spack/repos/builtin/packages/spglib/package.py
|
Python
|
lgpl-2.1
| 2,706
|
[
"CRYSTAL"
] |
c396c32f9d286bbffa592da817518005db3960374ec59e28c4c8047d0df92afd
|
# Copyright 2003-2008 by Leighton Pritchard. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Contact: Leighton Pritchard, Scottish Crop Research Institute,
# Invergowrie, Dundee, Scotland, DD2 5DA, UK
# [email protected]
################################################################################
""" Colors module
Provides:
o ColorTranslator - class to convert tuples of integers and floats into
colors.Color objects
For drawing capabilities, this module uses reportlab to define colors:
http://www.reportlab.com
"""
from __future__ import print_function
import sys
# Add path to Bio
sys.path.append('../../..')
from Bio._py3k import basestring
# ReportLab imports
from reportlab.lib import colors
class ColorTranslator(object):
""" Class providing methods for translating representations of color into
"""
def __init__(self, filename=None):
""" __init__(self, filename)
o filename Location of a file containing colorscheme
information
Optional parameters set the color scheme
"""
self._artemis_colorscheme = {0: (colors.Color(1, 1, 1,), "pathogenicity, adaptation, chaperones"),
1: (colors.Color(0.39, 0.39, 0.39), "energy metabolism"),
2: (colors.Color(1, 0, 0), "information transfer"),
3: (colors.Color(0, 1, 0), "surface"),
4: (colors.Color(0, 0, 1), "stable RNA"),
5: (colors.Color(0, 1, 1), "degradation of large molecules"),
6: (colors.Color(1, 0, 1), "degradation of small molecules"),
7: (colors.Color(1, 1, 0), "central/intermediary/miscellaneous metabolism"),
8: (colors.Color(0.60, 0.98, 0.60), "unknown"),
9: (colors.Color(0.53, 0.81, 0.98), "regulators"),
10: (colors.Color(1, 0.65, 0), "conserved hypotheticals"),
11: (colors.Color(0.78, 0.59, 0.39), "pseudogenes and partial genes"),
12: (colors.Color(1, 0.78, 0.78), "phage/IS elements"),
13: (colors.Color(0.70, 0.70, 0.70), "some miscellaneous information"),
14: (colors.Color(0, 0, 0), ""),
15: (colors.Color(1, 0.25, 0.25), "secondary metabolism"),
16: (colors.Color(1, 0.5, 0.5), ""),
17: (colors.Color(1, 0.75, 0.75), "")
} # Hardwired Artemis color scheme
self._colorscheme = {}
if filename is not None:
self.read_colorscheme(filename) # Imported color scheme
else:
self._colorscheme = self._artemis_colorscheme
def translate(self, color=None, colour=None):
""" translate(self, color)
o color Color defined as an int, a tuple of three ints 0->255
or a tuple of three floats 0 -> 1, or a string giving
one of the named colors defined by ReportLab, or a
ReportLab color object (returned as is).
(This argument is overridden by a backwards compatible
argument with UK spelling, colour).
Returns a colors.Color object, determined semi-intelligently
depending on the input values
"""
# Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
if color is None:
raise ValueError("Passed color (or colour) must be a valid color type")
elif isinstance(color, int):
color = self.scheme_color(color)
elif isinstance(color, colors.Color):
return color
elif isinstance(color, basestring):
# Assume its a named reportlab color like "red".
color = colors.toColor(color)
elif isinstance(color, tuple) and isinstance(color[0], float):
color = self.float1_color(color)
elif isinstance(color, tuple) and isinstance(color[0], int):
color = self.int255_color(color)
return color
def read_colorscheme(self, filename):
""" read_colorscheme(self, filename)
o filename The location of a file defining colors in tab-separated
format plaintext as:
INT \t RED \t GREEN \t BLUE \t Comment
Where RED, GREEN and BLUE are intensities in the range
0 -> 255
e.g.
2 \t 255 \t 0 \t 0 \t Red: Information transfer
Reads information from a file containing color information and
stores it internally
"""
        with open(filename, 'r') as handle:
            lines = handle.readlines()
for line in lines:
data = line.strip().split('\t')
try:
label = int(data[0])
red, green, blue = int(data[1]), int(data[2]), int(data[3])
if len(data) > 4:
comment = data[4]
else:
comment = ""
self._colorscheme[label] = (self.int255_color((red, green, blue)),
comment)
except:
raise ValueError("Expected INT \t INT \t INT \t INT \t string input")
def get_artemis_colorscheme(self):
""" get_artemis_colorscheme(self)
Return the Artemis color scheme as a dictionary
"""
return self._artemis_colorscheme
def artemis_color(self, value):
""" artemis_color(self, value)
o value An int representing a functional class in the Artemis
color scheme (see www.sanger.ac.uk for a description),
or a string from a GenBank feature annotation for the
color which may be dot delimited (in which case the
first value is used).
Takes an int representing a functional class in the Artemis color
scheme, and returns the appropriate colors.Color object
"""
try:
value = int(value)
except ValueError:
if value.count('.'): # dot-delimited
                value = int(value.split('.', 1)[0])  # Use only first integer
else:
raise
if value in self._artemis_colorscheme:
return self._artemis_colorscheme[value][0]
else:
raise ValueError("Artemis color out of range: %d" % value)
def get_colorscheme(self):
""" get_colorscheme(self)
Return the user-defined color scheme as a dictionary
"""
return self._colorscheme
def scheme_color(self, value):
""" scheme_color(self, value)
o value An int representing a single color in the user-defined
color scheme
Takes an int representing a user-defined color and returns the
appropriate colors.Color object
"""
if value in self._colorscheme:
return self._colorscheme[value][0]
else:
raise ValueError("Scheme color out of range: %d" % value)
def int255_color(self, values):
""" int255_color(self, values)
o values A tuple of (red, green, blue) intensities as
integers in the range 0->255
Takes a tuple of (red, green, blue) intensity values in the range
0 -> 255 and returns an appropriate colors.Color object
"""
red, green, blue = values
factor = 1/255.
red, green, blue = red * factor, green * factor, blue * factor
return colors.Color(red, green, blue)
def float1_color(self, values):
""" float1_color(self, values)
o values A tuple of (red, green, blue) intensities as floats
in the range 0 -> 1
Takes a tuple of (red, green, blue) intensity values in the range
0 -> 1 and returns an appropriate colors.Color object
"""
red, green, blue = values
return colors.Color(red, green, blue)
################################################################################
# RUN AS SCRIPT
################################################################################
if __name__ == '__main__':
# Test code
gdct = ColorTranslator()
print(gdct.float1_color((0.5, 0.5, 0.5)))
print(gdct.int255_color((1, 75, 240)))
print(gdct.artemis_color(7))
print(gdct.scheme_color(2))
print(gdct.translate((0.5, 0.5, 0.5)))
print(gdct.translate((1, 75, 240)))
print(gdct.translate(7))
print(gdct.translate(2))
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/Graphics/GenomeDiagram/_Colors.py
|
Python
|
gpl-2.0
| 9,061
|
[
"Biopython"
] |
46f802e4be66746286db853c4bbe827808b15dda48d1b08148ad3606520a36b1
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
import yaml
import unittest
import sys
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.abinit.tasks import ParalConf
from pymatgen.io.abinit.qadapters import *
from pymatgen.io.abinit.qadapters import QueueAdapter, SlurmAdapter
from pymatgen.io.abinit import qutils as qu
class ParseTimestr(PymatgenTest):
def test_slurm_parse_timestr(self):
days, hours, minutes, secs = 24*60*60, 60*60, 60, 1
aequal = self.assertEqual
slurm_parse_timestr = qu.slurm_parse_timestr
# "days-hours",
aequal(slurm_parse_timestr("2-1"), 2*days + hours)
# "days-hours:minutes",
aequal(slurm_parse_timestr("2-1:1"), 2*days + hours + minutes)
# "days-hours:minutes:seconds".
aequal(slurm_parse_timestr("3-4:2:20"), 3*days + 4*hours + 2*minutes + 20*secs)
# "minutes",
aequal(slurm_parse_timestr("10"), 10*minutes)
# "minutes:seconds",
aequal(slurm_parse_timestr("3:20"), 3*minutes + 20*secs)
# "hours:minutes:seconds",
aequal(slurm_parse_timestr("3:2:5"), 3*hours + 2*minutes + 5*secs)
@unittest.skipIf(sys.platform.startswith("win"), "Skipping for Windows")
class QadapterTest(PymatgenTest):
QDICT = yaml.load("""\
priority: 1
queue:
qtype: slurm
qname: Oban
limits:
timelimit: 2:00
min_cores: 1
max_cores: 24
#condition: {"$eq": {omp_threads: 2}}
job:
modules:
- intel/compilerpro/13.0.1.117
- fftw3/intel/3.3
shell_env:
PATH: /home/user/tmp_intel13/src/98_main/:/home/user//NAPS/intel13/bin:$PATH
mpi_runner: mpirun
hardware:
num_nodes: 3
sockets_per_node: 2
cores_per_socket: 4
mem_per_node: 8 Gb
""")
def test_base(self):
"""unit tests for Qadapter subclasses. A more complete coverage would require integration testing."""
self.maxDiff = None
aequal, atrue, afalse = self.assertEqual, self.assertTrue, self.assertFalse
sub_classes = QueueAdapter.__subclasses__()
# Test if we can instantiate the concrete classes with the abc protocol.
for subc in sub_classes:
print("subclass: ", subc)
# Create the adapter subclass.
self.QDICT["queue"]["qtype"] = subc.QTYPE
qad = make_qadapter(**self.QDICT)
print(qad)
hw = qad.hw
giga = 1024
# Test the programmatic interface used to change job parameters.
aequal(qad.num_launches, 0)
afalse(qad.has_omp)
atrue(qad.has_mpi)
qad.set_mpi_procs(2)
aequal(qad.mpi_procs, 2)
atrue(qad.pure_mpi)
afalse(qad.pure_omp)
afalse(qad.hybrid_mpi_omp)
aequal(qad.mem_per_proc, giga)
qad.set_mem_per_proc(2 * giga)
aequal(qad.mem_per_proc, 2 * giga)
aequal(qad.timelimit, 120)
# Enable OMP
qad.set_omp_threads(2)
aequal(qad.omp_threads, 2)
atrue(qad.has_omp)
afalse(qad.pure_mpi)
afalse(qad.pure_omp)
atrue(qad.hybrid_mpi_omp)
atrue(qad.hw.can_use_omp_threads(hw.sockets_per_node * hw.cores_per_socket))
afalse(qad.hw.can_use_omp_threads(hw.sockets_per_node * hw.cores_per_socket + 1))
# Test the creation of the script
script = qad.get_script_str("job.sh", "/launch/dir", "executable", "qout_path", "qerr_path",
stdin="STDIN", stdout="STDOUT", stderr="STDERR")
# Test whether qad can be serialized with Pickle.
deserialized_qads = self.serialize_with_pickle(qad, test_eq=False)
for new_qad in deserialized_qads:
new_script = new_qad.get_script_str("job.sh", "/launch/dir", "executable", "qout_path", "qerr_path",
stdin="STDIN", stdout="STDOUT", stderr="STDERR")
aequal(new_script, script)
# Test can_run and distribute
# The hardware has num_nodes=3, sockets_per_node=2, cores_per_socket=4, mem_per_node="8 Gb"
afalse(qad.can_run_pconf(ParalConf(mpi_ncpus=hw.num_cores+1, omp_ncpus=1, mem_per_cpu=0.1)))
afalse(qad.can_run_pconf(ParalConf(mpi_ncpus=4, omp_ncpus=9, mem_per_cpu=0.1)))
afalse(qad.can_run_pconf(ParalConf(mpi_ncpus=4, omp_ncpus=1, mem_per_cpu=10 * giga)))
d = qad.distribute(mpi_procs=4, omp_threads=1, mem_per_proc=giga)
assert d.num_nodes == 1 and d.mpi_per_node == 4 and d.exact
d = qad.distribute(mpi_procs=16, omp_threads=1, mem_per_proc=giga)
assert d.num_nodes == 2 and d.mpi_per_node == 8 and d.exact
# not enough memory per node but can distribute.
d = qad.distribute(mpi_procs=8, omp_threads=1, mem_per_proc=2 * giga)
assert d.num_nodes == 2 and d.mpi_per_node == 4 and not d.exact
# mem_per_proc > mem_per_node!
with self.assertRaises(qad.Error):
d = qad.distribute(mpi_procs=9, omp_threads=1, mem_per_proc=10 * giga)
# TODO
# not commensurate with node
#d = qad.distribute(mpi_procs=9, omp_threads=1, mem_per_proc=giga)
#assert d.num_nodes == 3 and d.mpi_per_node == 3 and not d.exact
with self.assertRaises(qad.Error):
qad.set_mpi_procs(25)
qad.validate()
with self.assertRaises(qad.Error):
qad.set_mpi_procs(100)
qad.validate()
with self.assertRaises(qad.Error):
qad.set_omp_threads(10)
qad.validate()
with self.assertRaises(qad.Error):
qad.set_mem_per_proc(9 * giga)
qad.validate()
# Test if one can register a customized class.
class MyAdapter(SlurmAdapter):
QTYPE = "myslurm"
SlurmAdapter.register(MyAdapter)
assert issubclass(MyAdapter, QueueAdapter)
self.QDICT["queue"]["qtype"] = "myslurm"
qad = make_qadapter(**self.QDICT)
assert isinstance(qad, MyAdapter)
@unittest.skipIf(sys.platform.startswith("win"), "Skipping for Windows")
class ShellAdapterTest(PymatgenTest):
"""Test suite for Shell adapter."""
QDICT = yaml.load("""\
priority: 1
queue:
qname: localhost
qtype: shell
job:
mpi_runner: /home/local/bin/mpirun
pre_run:
- "source ~/env1.sh"
limits:
timelimit: 10:00
min_cores: 1
max_cores: 1
hardware:
num_nodes: 1
sockets_per_node: 1
cores_per_socket: 1
mem_per_node: 4 Gb
""")
def test_methods(self):
qad = make_qadapter(**self.QDICT)
print(qad)
print(qad.mpi_runner)
assert qad.QTYPE == "shell" and qad.has_mpi and not qad.has_omp
assert (qad.mpi_procs, qad.omp_threads) == (1, 1)
assert qad.priority == 1 and qad.num_launches == 0 and qad.last_launch is None
qad.set_omp_threads(1)
assert qad.has_omp
s = qad.get_script_str("job_name", "/launch_dir", "executable", "qout_path", "qerr_path",
stdin="stdin", stdout="stdout", stderr="stderr")
self.assertMultiLineEqual(s, """\
#!/bin/bash
cd /launch_dir
# OpenMp Environment
export OMP_NUM_THREADS=1
# Commands before execution
source ~/env1.sh
/home/local/bin/mpirun -n 1 executable < stdin > stdout 2> stderr
""")
@unittest.skipIf(sys.platform.startswith("win"), "Skipping for Windows")
class SlurmAdapterTest(PymatgenTest):
"""Test suite for Slurm adapter."""
QDICT = yaml.load("""\
priority: 5
queue:
qtype: slurm
qname: Oban
qparams:
account: user_account
mail_user: [email protected]
limits:
timelimit: 10:00
min_cores: 3
max_cores: 16
job:
mpi_runner: mpirun
# pre_run is a string in verbatim mode (note |)
setup:
- echo ${SLURM_JOB_NODELIST}
- ulimit -s unlimited
modules:
- intel/compilerpro/13.0.1.117
- fftw3/intel/3.3
shell_env:
PATH: /home/user/bin:$PATH
hardware:
# Mandatory
num_nodes: 2
sockets_per_node: 2
cores_per_socket: 4
mem_per_node: 8 Gb
""")
def test_methods(self):
self.maxDiff = None
qad = make_qadapter(**self.QDICT)
print(qad)
print(qad.mpi_runner)
assert qad.QTYPE == "slurm"
assert qad.has_mpi and not qad.has_omp
assert (qad.mpi_procs, qad.omp_threads) == (3, 1)
assert qad.priority == 5 and qad.num_launches == 0 and qad.last_launch is None
qad.set_mpi_procs(4)
s = qad.get_script_str("job_name", "/launch_dir", "executable", "qout_path", "qerr_path",
stdin="stdin", stdout="stdout", stderr="stderr")
print(s)
self.assertMultiLineEqual(s, """\
#!/bin/bash
#SBATCH --partition=Oban
#SBATCH --job-name=job_name
#SBATCH --ntasks=4
#SBATCH --mem-per-cpu=1024
#SBATCH --time=0-0:10:0
#SBATCH --account=user_account
#SBATCH [email protected]
#SBATCH --output=qout_path
#SBATCH --error=qerr_path
cd /launch_dir
# Setup section
echo ${SLURM_JOB_NODELIST}
ulimit -s unlimited
# Load Modules
module purge
module load intel/compilerpro/13.0.1.117 2>> mods.err
module load fftw3/intel/3.3 2>> mods.err
# OpenMp Environment
export OMP_NUM_THREADS=1
# Shell Environment
export PATH=/home/user/bin:$PATH
mpirun -n 4 executable < stdin > stdout 2> stderr
""")
#assert 0
#qad.set_omp_threads(1)
#assert qad.has_omp
@unittest.skipIf(sys.platform.startswith("win"), "Skipping for Windows")
class PbsProadapterTest(PymatgenTest):
"""Test suite for PbsPro adapter."""
QDICT = yaml.load("""\
priority: 1
queue:
qtype: pbspro
qname: fat
qparams:
group_list: naps
limits:
timelimit: 0:0:10
min_cores: 3
max_cores: 200
job:
mpi_runner: mpirun
hardware:
num_nodes: 100
sockets_per_node: 2
cores_per_socket: 4
mem_per_node: 8 Gb""")
QDICT_SHARED = yaml.load("""\
priority: 1
queue:
qtype: pbspro
qname: fat_shared
qnodes: shared
qparams:
group_list: naps
limits:
timelimit: 0:0:10
min_cores: 3
max_cores: 200
min_mem_per_proc: 2000
master_mem_overhead: 1000
job:
mpi_runner: mpirun
hardware:
num_nodes: 100
sockets_per_node: 2
cores_per_socket: 12
mem_per_node: 48000 Mb""")
QDICT_EXCLUSIVE = yaml.load("""\
priority: 1
queue:
qtype: pbspro
qname: fat_exclusive
qnodes: exclusive
qparams:
group_list: naps
limits:
timelimit: 0:0:10
min_cores: 3
max_cores: 200
min_mem_per_proc: 2000
master_mem_overhead: 1000
job:
mpi_runner: mpirun
hardware:
num_nodes: 100
sockets_per_node: 2
cores_per_socket: 12
mem_per_node: 48000 Mb""")
def test_methods(self):
self.maxDiff = None
aequal = self.assertEqual
qad = make_qadapter(**self.QDICT)
self.assertMSONable(qad)
print(qad)
print(qad.mpi_runner)
assert qad.QTYPE == "pbspro" and qad.has_mpi and not qad.has_omp
assert (qad.mpi_procs, qad.omp_threads) == (3, 1)
assert qad.priority == 1 and qad.num_launches == 0 and qad.last_launch is None
#qad.set_mpi_procs(4)
s = qad.get_script_str("job_name", "/launch_dir", "executable", "qout_path", "qerr_path",
stdin="stdin", stdout="stdout", stderr="stderr")
print(s)
self.assertMultiLineEqual(s, """\
#!/bin/bash
#PBS -q fat
#PBS -N job_name
#PBS -l select=3:ncpus=1:vmem=1024mb:mpiprocs=1
#PBS -l pvmem=1024mb
#PBS -l walltime=0:0:10
#PBS -W group_list=naps
#PBS -o qout_path
#PBS -e qerr_path
cd /launch_dir
# OpenMp Environment
export OMP_NUM_THREADS=1
mpirun -n 3 executable < stdin > stdout 2> stderr
""")
mem = 1024
qad.set_mem_per_proc(mem)
print(qad)
qad.set_mpi_procs(4)
s, params = qad.get_select(ret_dict=True)
# IN_CORE PURE MPI: MPI: 4, OMP: 1
aequal(params,
{'ncpus': 1, 'chunks': 4, 'mpiprocs': 1, "vmem": mem})
qad.set_omp_threads(2)
s, params = qad.get_select(ret_dict=True)
# HYBRID MPI-OPENMP run, perfectly divisible among nodes: MPI: 4, OMP: 2
aequal(params,
{'vmem': mem, 'ncpus': 2, 'chunks': 4, 'ompthreads': 2, 'mpiprocs': 1})
qad.set_mpi_procs(12)
s, params = qad.get_select(ret_dict=True)
# HYBRID MPI-OPENMP run, perfectly divisible among nodes: MPI: 12, OMP: 2
aequal(params,
{'vmem': mem, 'ncpus': 2, 'chunks': 12, 'ompthreads': 2, 'mpiprocs': 1})
qad.set_omp_threads(5)
qad.set_mpi_procs(3)
s, params = qad.get_select(ret_dict=True)
# HYBRID MPI-OPENMP, NOT commensurate with nodes: MPI: 3, OMP: 5
aequal(params,
{'vmem': mem, 'ncpus': 5, 'chunks': 3, 'ompthreads': 5, 'mpiprocs': 1})
# Testing the handling of master memory overhead
# Shared mode (the nodes might be shared amongst different jobs from different users)
qad_shared = make_qadapter(**self.QDICT_SHARED)
aequal(qad_shared.hw.mem_per_node, 48000)
qad_shared.set_mpi_procs(15)
qad_shared.set_mem_per_proc(6000)
aequal(qad_shared.get_select(), '1:ncpus=1:vmem=7000mb:mpiprocs=1+'
'14:ncpus=1:vmem=6000mb:mpiprocs=1')
qad_shared.set_mpi_procs(64)
qad_shared.set_mem_per_proc(3500)
qad_shared.set_master_mem_overhead(4000)
self.assertMSONable(qad_shared)
aequal(qad_shared.get_select(), '1:ncpus=1:vmem=7500mb:mpiprocs=1+'
'63:ncpus=1:vmem=3500mb:mpiprocs=1')
# Exclusive mode (the nodes are attributed exclusively to a given user)
qad_exclusive = make_qadapter(**self.QDICT_EXCLUSIVE)
aequal(qad_exclusive.hw.mem_per_node, 48000)
qad_exclusive.set_mpi_procs(47)
qad_exclusive.set_mem_per_proc(2000)
qad_exclusive.set_master_mem_overhead(1)
self.assertMSONable(qad_exclusive)
aequal(qad_exclusive.get_select(), '1:ncpus=23:vmem=48000mb:mpiprocs=23+'
'1:ncpus=24:vmem=48000mb:mpiprocs=24')
qad_exclusive.set_mpi_procs(48)
aequal(qad_exclusive.get_select(), '1:ncpus=1:vmem=48000mb:mpiprocs=1+'
'1:ncpus=24:vmem=48000mb:mpiprocs=24+'
'1:ncpus=23:vmem=48000mb:mpiprocs=23')
qad_exclusive.set_mpi_procs(50)
aequal(qad_exclusive.get_select(), '1:ncpus=2:vmem=48000mb:mpiprocs=2+'
'2:ncpus=24:vmem=48000mb:mpiprocs=24')
if __name__ == '__main__':
import unittest
unittest.main()
|
xhqu1981/pymatgen
|
pymatgen/io/abinit/tests/test_qadapters.py
|
Python
|
mit
| 15,154
|
[
"ABINIT",
"pymatgen"
] |
6d0c17a2391ff3e1175ab5e025d8b6f6cc12d62baca4da8b7bb87d79d78b3283
|
'''This example shows how to use importance sampling and how to
adapt the proposal density using the pmc algorithm.
'''
import numpy as np
import pypmc
# define the target; i.e., the function you want to sample from.
# In this case, it is a bimodal Gaussian
#
# Note that the target function "log_target" returns the log of the
# target function.
component_weights = np.array([0.3, 0.7])
mean0 = np.array ([ 5.0 , 0.01 ])
covariance0 = np.array([[ 0.01 , 0.003 ],
[ 0.003, 0.0025]])
inv_covariance0 = np.linalg.inv(covariance0)
mean1 = np.array ([-4.0 , 1.0 ])
covariance1 = np.array([[ 0.1 , 0. ],
[ 0. , 0.02 ]])
inv_covariance1 = np.linalg.inv(covariance1)
component_means = [mean0, mean1]
component_covariances = [covariance0, covariance1]
target_mixture = pypmc.density.mixture.create_gaussian_mixture(component_means, component_covariances, component_weights)
log_target = target_mixture.evaluate
# define the initial proposal density
# In this case a three-component Gaussian mixture is used;
# the initial covariances are set to the unit matrix
# and the initial component weights are set equal.
initial_prop_means = []
initial_prop_means.append( np.array([ 4.0, 0.0]) )
initial_prop_means.append( np.array([-5.0, 0.0]) )
initial_prop_means.append( np.array([ 0.0, 0.0]) )
initial_prop_covariance = np.eye(2)
initial_prop_components = []
for i in range(3):
initial_prop_components.append(pypmc.density.gauss.Gauss(initial_prop_means[i], initial_prop_covariance))
initial_proposal = pypmc.density.mixture.MixtureDensity(initial_prop_components)
# define an ImportanceSampler object
sampler = pypmc.sampler.importance_sampling.ImportanceSampler(log_target, initial_proposal)
# draw 10,000 samples adapting the proposal every 1,000 samples
# hereby save the generating proposal component for each sample which is
# returned by sampler.run
# Note: With too few samples components may die out, and one mode might be lost.
generating_components = []
for i in range(10):
print("\rstep", i, "...\n\t", end='')
# draw 1,000 samples and save the generating component
generating_components.append(sampler.run(10**3, trace_sort=True))
# get a reference to the weights and samples that have just been generated
samples = sampler.samples[:]
weights = sampler.weights[:][:,0]
    # update the proposal using the pmc algorithm in the Rao-Blackwellized form (rb=True)
pypmc.mix_adapt.pmc.gaussian_pmc(samples, sampler.proposal, weights, generating_components[-1],
mincount=20, rb=True, copy=False)
print("\rsampling finished")
print( '-----------------')
print('\n')
# print information about the adapted proposal
print('initial component weights:', initial_proposal.weights)
print('final component weights:', sampler.proposal.weights)
print('target component weights:', component_weights)
print()
for k, m in enumerate([mean0, mean1, None]):
print('initial mean of component %i:' %k, initial_proposal.components[k].mu)
print('final mean of component %i:' %k, sampler.proposal.components[k].mu)
print('target mean of component %i:' %k, m)
print()
print()
for k, c in enumerate([covariance0, covariance1, None]):
print('initial covariance of component %i:\n' %k, initial_proposal.components[k].sigma, sep='')
print()
print('final covariance of component %i:\n' %k, sampler.proposal.components[k].sigma, sep='')
print()
print('target covariance of component %i:\n' %k, c, sep='')
print('\n')
# plot results
try:
import matplotlib.pyplot as plt
except ImportError:
print('For plotting "matplotlib" needs to be installed')
exit(1)
def set_axlimits():
plt.xlim(-6.0, +6.000)
plt.ylim(-0.2, +1.401)
plt.subplot(221)
plt.title('target mixture')
pypmc.tools.plot_mixture(target_mixture, cmap='jet')
set_axlimits()
plt.subplot(222)
plt.title('pmc fit')
pypmc.tools.plot_mixture(sampler.proposal, cmap='nipy_spectral', cutoff=0.01)
set_axlimits()
plt.subplot(223)
plt.title('target mixture and pmc fit')
pypmc.tools.plot_mixture(target_mixture, cmap='jet')
pypmc.tools.plot_mixture(sampler.proposal, cmap='nipy_spectral', cutoff=0.01)
set_axlimits()
plt.subplot(224)
plt.title('weighted samples')
plt.hist2d(sampler.samples[-1][:,0], sampler.samples[-1][:,1], weights=sampler.weights[-1][:,0], cmap='gray_r', bins=200)
set_axlimits()
plt.tight_layout()
plt.show()
|
fredRos/pypmc
|
examples/pmc.py
|
Python
|
gpl-2.0
| 4,459
|
[
"Gaussian"
] |
147bea1a87fcf67b4b65c13e30c95516ba59d17d94471c009d8833c058517e13
|
#==============================================================================
#
# Program: ParaView
# Module: numeric.py
#
# Copyright (c) Kitware, Inc.
# All rights reserved.
# See Copyright.txt or http://www.paraview.org/HTML/Copyright.html for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
#==============================================================================
r"""
This module provides functions to convert vtk data arrays to NumPy arrays.
"""
__num_py_available__ = False
try:
import numpy
__num_py_available__ = True
except ImportError:
    raise ImportError('NumPy module "numpy" is not accessible. Please make sure '
                      'that NumPy is installed correctly.')
# These types are returned by GetDataType to indicate data type.
VTK_VOID = 0
VTK_BIT = 1
VTK_CHAR = 2
VTK_UNSIGNED_CHAR = 3
VTK_SHORT = 4
VTK_UNSIGNED_SHORT = 5
VTK_INT = 6
VTK_UNSIGNED_INT = 7
VTK_LONG = 8
VTK_UNSIGNED_LONG = 9
VTK_FLOAT =10
VTK_DOUBLE =11
VTK_ID_TYPE =12
__typeDict = { VTK_CHAR:numpy.int8,
VTK_UNSIGNED_CHAR:numpy.uint8,
VTK_SHORT:numpy.int16,
               VTK_UNSIGNED_SHORT:numpy.uint16,
VTK_INT:numpy.int32,
VTK_FLOAT:numpy.float32,
VTK_DOUBLE:numpy.float64 }
def fromvtkarray(vtkarray):
"""This function takes a vtkDataArray of any type and converts it to a
NumPy array of appropriate type and dimensions."""
    global __typeDict
global __num_py_available__
if not __num_py_available__:
raise "NumPy module is not available."
#create a numpy array of the correct type.
vtktype = vtkarray.GetDataType()
    if vtktype not in __typeDict:
        raise TypeError("Cannot convert data arrays of the type %s"
                        % vtkarray.GetDataTypeAsString())
# size = num_comps * num_tuples
# imArray = numpy.empty((size,), type)
# vtkarray.ExportToVoidPointer(imArray)
type = __typeDict[vtktype]
pyarray = numpy.frombuffer(vtkarray, dtype=type)
# re-shape the array to current number of rows and columns.
num_tuples = vtkarray.GetNumberOfTuples()
num_comps = vtkarray.GetNumberOfComponents()
pyarray = numpy.reshape(pyarray, (num_tuples, num_comps))
return pyarray
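# Illustrative usage (not executed here; requires a VTK data array built with
# the standard VTK Python API):
#   import vtk
#   arr = vtk.vtkFloatArray()
#   arr.SetNumberOfComponents(3)
#   arr.InsertNextTuple3(1.0, 2.0, 3.0)
#   fromvtkarray(arr)   # -> numpy array of shape (1, 3), dtype float32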
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/Wrapping/Python/paraview/numeric.py
|
Python
|
gpl-3.0
| 2,496
|
[
"ParaView",
"VTK"
] |
3665cd08ce9b00db63f05300ddc21e83b8c890282d77b1f22cc8789e473cc1f0
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base classes for probability distributions."""
import abc
import collections
import contextlib
import functools
import inspect
import logging
import types
import decorator
import six
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions import kullback_leibler
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import auto_composite_tensor
from tensorflow_probability.python.internal import batch_shape_lib
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import name_util
from tensorflow_probability.python.internal import nest_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import slicing
from tensorflow_probability.python.internal import tensorshape_util
# Symbol import needed to avoid BUILD-dependency cycle
from tensorflow_probability.python.math.generic import log1mexp
from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.util import tf_inspect # pylint: disable=g-direct-tensorflow-import
__all__ = [
'Distribution',
]
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = {
'batch_shape': '_batch_shape',
'batch_shape_tensor': '_batch_shape_tensor',
'cdf': '_cdf',
'covariance': '_covariance',
'cross_entropy': '_cross_entropy',
'entropy': '_entropy',
'event_shape': '_event_shape',
'event_shape_tensor': '_event_shape_tensor',
'experimental_default_event_space_bijector': (
'_default_event_space_bijector'),
'experimental_sample_and_log_prob': '_sample_and_log_prob',
'kl_divergence': '_kl_divergence',
'log_cdf': '_log_cdf',
'log_prob': '_log_prob',
'log_survival_function': '_log_survival_function',
'mean': '_mean',
'mode': '_mode',
'prob': '_prob',
'sample': '_sample_n',
'stddev': '_stddev',
'survival_function': '_survival_function',
'variance': '_variance',
}
_ALWAYS_COPY_PUBLIC_METHOD_WRAPPERS = ['kl_divergence', 'cross_entropy']
UNSET_VALUE = object()
JAX_MODE = False # Overwritten by rewrite script.
@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(tf.Module):
"""Abstract base class needed for resolving subclass hierarchy."""
pass
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError('fn is not callable: {}'.format(fn))
# The blessed way to copy a function. copy.deepcopy fails to create a
# non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
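# A minimal sketch of how `_copy_fn` is typically used (hypothetical names,
# illustration only): the copy carries its own `__doc__`, so mutating it does
# not affect the original function object.
#
#   copied = _copy_fn(SomeBase.log_prob)
#   copied.__doc__ = (copied.__doc__ or '') + '\n\nExtra notes.'
#   assert copied is not SomeBase.log_prob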
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the 'Args:' section."""
old_str = old_str or ''
old_str_lines = old_str.split('\n')
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = '\n'.join(' %s' % line for line in append_str.split('\n'))
# Step 1: Find mention of 'Args':
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == 'args:']
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ('\n'.join(old_str_lines[:final_args_ix])
+ '\n\n' + append_str + '\n\n'
+ '\n'.join(old_str_lines[final_args_ix:]))
else:
return old_str + '\n\n' + append_str
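# Sketch of the insertion behavior above (hypothetical strings, illustration
# only): the appended text is indented and placed just before the final
# 'Args:' line.
#
#   _update_docstring('Computes X.\nArgs:\n  y: input.', 'Extra notes.')
#   # -> 'Computes X.\n\n  Extra notes.\n\nArgs:\n  y: input.'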
def _remove_dict_keys_with_value(dict_, val):
"""Removes `dict` keys which have have `self` as value."""
return {k: v for k, v in dict_.items() if v is not val}
def _set_sample_static_shape_for_tensor(x,
event_shape,
batch_shape,
sample_shape):
"""Helper to `_set_sample_static_shape`; sets shape info for a `Tensor`."""
sample_shape = tf.TensorShape(tf.get_static_value(sample_shape))
ndims = tensorshape_util.rank(x.shape)
sample_ndims = tensorshape_util.rank(sample_shape)
batch_ndims = tensorshape_util.rank(batch_shape)
event_ndims = tensorshape_util.rank(event_shape)
# Infer rank(x).
if (ndims is None and
sample_ndims is not None and
batch_ndims is not None and
event_ndims is not None):
ndims = sample_ndims + batch_ndims + event_ndims
tensorshape_util.set_shape(x, [None] * ndims)
# Infer sample shape.
if ndims is not None and sample_ndims is not None:
shape = tensorshape_util.concatenate(sample_shape,
[None] * (ndims - sample_ndims))
tensorshape_util.set_shape(x, shape)
# Infer event shape.
if ndims is not None and event_ndims is not None:
shape = tf.TensorShape(
[None]*(ndims - event_ndims)).concatenate(event_shape)
tensorshape_util.set_shape(x, shape)
# Infer batch shape.
if batch_ndims is not None:
if ndims is not None:
if sample_ndims is None and event_ndims is not None:
sample_ndims = ndims - batch_ndims - event_ndims
elif event_ndims is None and sample_ndims is not None:
event_ndims = ndims - batch_ndims - sample_ndims
if sample_ndims is not None and event_ndims is not None:
shape = tf.TensorShape([None]*sample_ndims).concatenate(
batch_shape).concatenate([None]*event_ndims)
tensorshape_util.set_shape(x, shape)
return x
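# Sketch (illustration only): with statically known shapes the helper annotates
# `x` with the full `sample + batch + event` shape.
#
#   x = tf.zeros([5, 2, 3, 4])
#   x = _set_sample_static_shape_for_tensor(
#       x, event_shape=tf.TensorShape([4]), batch_shape=tf.TensorShape([2, 3]),
#       sample_shape=tf.constant([5]))
#   # x.shape is now statically [5, 2, 3, 4]; unknown dims stay `None`.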
class _DistributionMeta(abc.ABCMeta):
"""Helper metaclass for tfp.Distribution."""
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Distribution class.
The main purpose of this method is to properly propagate docstrings
from private Distribution methods, like `_log_prob`, into their
public wrappers as inherited by the Distribution base class
(e.g. `log_prob`).
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
the new class is derived via multiple inheritance and the first
parent class is not a subclass of `BaseDistribution`.
AttributeError: If `Distribution` does not implement e.g. `log_prob`.
ValueError: If a `Distribution` public method lacks a docstring.
"""
if not baseclasses: # Nothing to be done for Distribution
raise TypeError('Expected non-empty baseclass. Does Distribution '
'not subclass _BaseDistribution?')
which_base = [
base for base in baseclasses
if base == _BaseDistribution or issubclass(base, Distribution)]
base = which_base[0] if which_base else None
if base is None or base == _BaseDistribution:
# Nothing to be done for Distribution or unrelated subclass.
return super(_DistributionMeta, mcs).__new__(
mcs, classname, baseclasses, attrs)
if not issubclass(base, Distribution):
raise TypeError('First parent class declared for {} must be '
'Distribution, but saw "{}"'.format(
classname, base.__name__))
for attr, special_attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS.items():
if attr in attrs:
# The method is being overridden, do not update its docstring.
continue
class_attr_value = attrs.get(attr, None)
base_attr_value = getattr(base, attr, None)
if not base_attr_value:
raise AttributeError(
'Internal error: expected base class "{}" to '
'implement method "{}"'.format(base.__name__, attr))
class_special_attr_value = attrs.get(special_attr, None)
class_special_attr_docstring = (
None if class_special_attr_value is None else
tf_inspect.getdoc(class_special_attr_value))
if (class_special_attr_docstring or
attr in _ALWAYS_COPY_PUBLIC_METHOD_WRAPPERS):
class_attr_value = _copy_fn(base_attr_value)
attrs[attr] = class_attr_value
if not class_special_attr_docstring:
# No docstring to append.
continue
class_attr_docstring = tf_inspect.getdoc(base_attr_value)
if class_attr_docstring is None:
raise ValueError(
'Expected base class fn to contain a docstring: {}.{}'.format(
base.__name__, attr))
class_attr_value.__doc__ = _update_docstring(
class_attr_value.__doc__,
'Additional documentation from `{}`:\n\n{}'.format(
classname, class_special_attr_docstring))
# Now we'll intercept the default __init__ if it exists.
default_init = attrs.get('__init__', None)
if default_init is None:
    # The class has no __init__ because it's abstract. (And we won't add one.)
return super(_DistributionMeta, mcs).__new__(
mcs, classname, baseclasses, attrs)
# Warn when a subclass inherits `_parameter_properties` from its parent
# (this is unsafe, since the subclass will in general have different
# parameters). Exceptions are:
# - Subclasses that don't define their own `__init__` (handled above by
# the short-circuit when `default_init is None`).
# - Subclasses that define a passthrough `__init__(self, *args, **kwargs)`.
# pylint: disable=protected-access
init_argspec = tf_inspect.getfullargspec(default_init)
if ('_parameter_properties' not in attrs
# Passthrough exception: may only take `self` and at least one of
# `*args` and `**kwargs`.
and (len(init_argspec.args) > 1
or not (init_argspec.varargs or init_argspec.varkw))):
@functools.wraps(base._parameter_properties)
def wrapped_properties(*args, **kwargs): # pylint: disable=missing-docstring
"""Wrapper to warn if `parameter_properties` is inherited."""
properties = base._parameter_properties(*args, **kwargs)
# Warn *after* calling the base method, so that we don't bother warning
# if it just raised NotImplementedError anyway.
logging.warning("""
Distribution subclass %s inherits `_parameter_properties` from its parent (%s)
while also redefining `__init__`. The inherited annotations cover the following
parameters: %s. It is likely that these do not match the subclass parameters.
This may lead to errors when computing batch shapes, slicing into batch
dimensions, calling `.copy()`, flattening the distribution as a CompositeTensor
(e.g., when it is passed or returned from a `tf.function`), and possibly other
cases. The recommended pattern for distribution subclasses is to define a new
`_parameter_properties` method with the subclass parameters, and to store the
corresponding parameter values as `self._parameters` in `__init__`, after
calling the superclass constructor:
```
class MySubclass(tfd.SomeDistribution):
def __init__(self, param_a, param_b):
parameters = dict(locals())
# ... do subclass initialization ...
super(MySubclass, self).__init__(**base_class_params)
# Ensure that the subclass (not base class) parameters are stored.
self._parameters = parameters
def _parameter_properties(self, dtype, num_classes=None):
return dict(
# Annotations may optionally specify properties, such as `event_ndims`,
# `default_constraining_bijector_fn`, `specifies_shape`, etc.; see
# the `ParameterProperties` documentation for details.
param_a=tfp.util.ParameterProperties(),
param_b=tfp.util.ParameterProperties())
```
""", classname, base.__name__, str(properties.keys()))
return properties
attrs['_parameter_properties'] = wrapped_properties
# For a comparison of different methods for wrapping functions, see:
# https://hynek.me/articles/decorators/
@decorator.decorator
def wrapped_init(wrapped, self_, *args, **kwargs):
"""A 'top-level `__init__`' which is always called."""
# We can't use `wrapped` because it results in a self reference which
# confounds `tf.function`.
del wrapped
# Note: if we ever want to have things set in `self` before `__init__` is
# called, here is the place to do it.
self_._parameters = None
default_init(self_, *args, **kwargs)
# Note: if we ever want to override things set in `self` by subclass
# `__init__`, here is the place to do it.
if self_._parameters is None:
# We prefer subclasses will set `parameters = dict(locals())` because
# this has nearly zero overhead. However, failing to do this, we will
# resolve the input arguments dynamically and only when needed.
dummy_self = tuple()
self_._parameters = self_._no_dependency(lambda: ( # pylint: disable=g-long-lambda
_remove_dict_keys_with_value(
inspect.getcallargs(default_init, dummy_self, *args, **kwargs),
dummy_self)))
elif hasattr(self_._parameters, 'pop'):
self_._parameters = self_._no_dependency(
_remove_dict_keys_with_value(self_._parameters, self_))
# pylint: enable=protected-access
attrs['__init__'] = wrapped_init(default_init) # pylint: disable=no-value-for-parameter,assignment-from-no-return
return super(_DistributionMeta, mcs).__new__(
mcs, classname, baseclasses, attrs)
def __init__(cls, name, bases, dct):
super(_DistributionMeta, cls).__init__(name, bases, dct)
if not JAX_MODE:
return
def flatten(dist):
param_names = set(dist._composite_tensor_nonshape_params) # pylint: disable=protected-access
components = {param_name: getattr(
dist, param_name, value) for param_name, value
in dist.parameters.items() if param_name in param_names}
metadata = {param_name: value for param_name, value
in dist.parameters.items() if param_name not in param_names}
if components:
keys, values = zip(*sorted(components.items()))
else:
keys, values = (), ()
# Mimics the logic in `tfp.experimental.composite_tensor` where we
# aggressively try to convert arguments into Tensors.
def _maybe_convert_to_tensor(value, name):
try:
value = tf.convert_to_tensor(value, name=name)
except (ValueError, TypeError, AssertionError):
pass
return value
values = tuple([_maybe_convert_to_tensor(value, name) for value, name,
in zip(values, keys)])
return values, (keys, metadata)
def unflatten(info, xs):
keys, metadata = info
parameters = dict(list(zip(keys, xs)), **metadata)
return cls(**parameters)
from jax import tree_util # pylint: disable=g-import-not-at-top
tree_util.register_pytree_node(cls, flatten, unflatten)
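# Sketch of the registered flatten/unflatten round trip (JAX mode only;
# `tfd.Normal` stands in for any concrete subclass, illustration only):
#
#   import jax
#   leaves, treedef = jax.tree_util.tree_flatten(tfd.Normal(0., 1.))
#   dist = jax.tree_util.tree_unflatten(treedef, leaves)  # rebuilt via cls(**params)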
@six.add_metaclass(_DistributionMeta)
class Distribution(_BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
(e.g., mean, variance) of random variables (e.g, Bernoulli, Gaussian).
#### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name='...'`. For example, to enable `log_prob(value,
name='log_prob')` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@distribution_util.AppendDocstring('Some other details.')
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
docstring. This is implemented as a simple decorator to avoid python
linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
TFP methods generally assume that Distribution subclasses implement at least
the following methods:
- `_sample_n`.
- `_log_prob` or `_prob`.
- `_event_shape` and `_event_shape_tensor`.
- `_parameter_properties` OR `_batch_shape` and `_batch_shape_tensor`.
Batch shape methods can be automatically derived from `parameter_properties`
in most cases, so it's usually not necessary to implement them directly.
Exceptions include Distributions that accept non-Tensor parameters (for
example, a distribution parameterized by a callable), or that have nonstandard
batch semantics (for example, `BatchReshape`).
Some functionality may depend on implementing additional methods. It is common
for Distribution subclasses to implement:
- Relevant statistics, such as `_mean`, `_mode`, `_variance` and/or `_stddev`.
- At least one of `_log_cdf`, `_cdf`, `_survival_function`, or
`_log_survival_function`.
- `_quantile`.
- `_entropy`.
- `_default_event_space_bijector`.
- `_parameter_properties` (to support automatic batch shape derivation,
batch slicing and other features).
- `_sample_and_log_prob`,
- `_maximum_likelihood_parameters`.
Note that subclasses of existing Distributions that redefine `__init__` do
*not* automatically inherit
`_parameter_properties` annotations from their parent: the subclass must
explicitly implement its own `_parameter_properties` method to support the
features, such as batch slicing, that this enables.
#### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
  The shapes of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
`log_prob` reflect this broadcasting, as does the return value of `sample`.
`sample_n_shape = [n] + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample(n)`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.event_shape
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape_tensor()
# Sampling returns a sample per distribution. `samples` has shape
# [5, 2, 2], which is [n] + batch_shape + event_shape, where n=5,
# batch_shape=[2, 2], and event_shape=[].
samples = u.sample(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
# `cum_prob` has shape [2, 2] as the `value` argument was broadcasted to the
# shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
# `cum_prob`'s shape is [2, 2], one per distribution. No broadcasting
# occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
#### Shapes
There are three important concepts associated with TensorFlow Distributions
shapes:
- Event shape describes the shape of a single draw from the distribution;
it may be dependent across dimensions. For scalar distributions, the event
shape is `[]`. For a 5-dimensional MultivariateNormal, the event shape is
`[5]`.
- Batch shape describes independent, not identically distributed draws, aka a
"collection" or "bunch" of distributions.
- Sample shape describes independent, identically distributed draws of batches
from the distribution family.
The event shape and the batch shape are properties of a Distribution object,
whereas the sample shape is associated with a specific call to `sample` or
`log_prob`.
For detailed usage examples of TensorFlow Distributions shapes, see
[this tutorial](
https://github.com/tensorflow/probability/blob/main/tensorflow_probability/examples/jupyter_notebooks/Understanding_TensorFlow_Distributions_Shapes.ipynb)
#### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
positive real numbers `concentration1` and `concentration0`, and does not have
  a well-defined mode if `concentration1 < 1` or `concentration0 < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=False)
mode = dist.mode()
# Will return NaN for batch members with either a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior
mode = dist.mode()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
dist = distributions.beta(negative_a, b, allow_nan_stats=True)
dist.mean()
```
"""
def __init__(self,
dtype,
reparameterization_type,
validate_args,
allow_nan_stats,
parameters=None,
graph_parents=None,
name=None):
"""Constructs the `Distribution`.
**This is a private method for subclass use.**
Args:
dtype: The type of the event samples. `None` implies no type-enforcement.
reparameterization_type: Instance of `ReparameterizationType`.
If `tfd.FULLY_REPARAMETERIZED`, then samples from the distribution are
fully reparameterized, and straight-through gradients are supported.
If `tfd.NOT_REPARAMETERIZED`, then samples from the distribution are not
fully reparameterized, and straight-through gradients are either
partially unsupported or are not supported at all.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
parameters: Python `dict` of parameters used to instantiate this
`Distribution`.
graph_parents: Python `list` of graph prerequisites of this
`Distribution`.
name: Python `str` name prefixed to Ops created by this class. Default:
subclass name.
Raises:
ValueError: if any member of graph_parents is `None` or not a `Tensor`.
"""
if not name:
name = type(self).__name__
name = name_util.camel_to_lower_snake(name)
constructor_name_scope = name_util.get_name_scope_name(name)
# Extract the (locally unique) name from the scope.
name = (constructor_name_scope.split('/')[-2]
if '/' in constructor_name_scope
else name)
name = name_util.strip_invalid_chars(name)
super(Distribution, self).__init__(name=name)
self._constructor_name_scope = constructor_name_scope
self._name = name
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not tf.is_tensor(t):
raise ValueError('Graph parent item %d is not a Tensor; %s.' % (i, t))
self._dtype = self._no_dependency(dtype)
self._reparameterization_type = reparameterization_type
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._parameters = self._no_dependency(parameters)
self._parameters_sanitized = False
self._graph_parents = graph_parents
self._defer_all_assertions = (
auto_composite_tensor.is_deferred_assertion_context())
if not self._defer_all_assertions:
self._initial_parameter_control_dependencies = tuple(
d for d in self._parameter_control_dependencies(is_init=True)
if d is not None)
else:
self._initial_parameter_control_dependencies = ()
if self._initial_parameter_control_dependencies:
self._initial_parameter_control_dependencies = (
tf.group(*self._initial_parameter_control_dependencies),)
@property
def _composite_tensor_params(self):
"""A tuple describing which parameters are expected to be tensors.
CompositeTensor requires us to partition dynamic (tensor) parts from static
(metadata) parts like 'validate_args'. This collects the keys of parameters
which are expected to be tensors.
"""
return (self._composite_tensor_nonshape_params +
self._composite_tensor_shape_params)
@property
def _composite_tensor_nonshape_params(self):
"""A tuple describing which parameters are non-shape-related tensors.
Flattening in JAX involves many of the same considerations with regards to
identifying tensor arguments for the purposes of CompositeTensor, except
that shape-related items will be considered metadata. This property
identifies the keys of parameters that are expected to be tensors, except
those that are shape-related.
"""
return tuple(k for k, v in self.parameter_properties().items()
if not v.specifies_shape)
@property
def _composite_tensor_shape_params(self):
"""A tuple describing which parameters are shape-related tensors.
Flattening in JAX involves many of the same considerations with regards to
identifying tensor arguments for the purposes of CompositeTensor, except
that shape-related items will be considered metadata. This property
identifies the keys of parameters that are expected to be shape-related
tensors, so that they can be collected appropriately in CompositeTensor but
not in JAX applications.
"""
return tuple(k for k, v in self.parameter_properties().items()
if v.specifies_shape)
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
raise NotImplementedError(
        '`_parameter_properties` is not implemented: {}.'.format(cls.__name__))
@classmethod
def parameter_properties(cls, dtype=tf.float32, num_classes=None):
"""Returns a dict mapping constructor arg names to property annotations.
This dict should include an entry for each of the distribution's
`Tensor`-valued constructor arguments.
Distribution subclasses are not required to implement
`_parameter_properties`, so this method may raise `NotImplementedError`.
Providing a `_parameter_properties` implementation enables several advanced
features, including:
- Distribution batch slicing (`sliced_distribution = distribution[i:j]`).
- Automatic inference of `_batch_shape` and
`_batch_shape_tensor`, which must otherwise be computed explicitly.
- Automatic instantiation of the distribution within TFP's internal
property tests.
- Automatic construction of 'trainable' instances of the distribution
using appropriate bijectors to avoid violating parameter constraints.
This enables the distribution family to be used easily as a
surrogate posterior in variational inference.
In the future, parameter property annotations may enable additional
functionality; for example, returning Distribution instances from
`tf.vectorized_map`.
Args:
dtype: Optional float `dtype` to assume for continuous-valued parameters.
Some constraining bijectors require advance knowledge of the dtype
because certain constants (e.g., `tfb.Softplus.low`) must be
instantiated with the same dtype as the values to be transformed.
num_classes: Optional `int` `Tensor` number of classes to assume when
inferring the shape of parameters for categorical-like distributions.
Otherwise ignored.
Returns:
parameter_properties: A
`str -> `tfp.python.internal.parameter_properties.ParameterProperties`
dict mapping constructor argument names to `ParameterProperties`
instances.
Raises:
NotImplementedError: if the distribution class does not implement
`_parameter_properties`.
"""
with tf.name_scope('parameter_properties'):
return cls._parameter_properties(dtype, num_classes=num_classes)
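  # Sketch (illustration only): for a hypothetical location-scale subclass,
  #   tfd.Normal.parameter_properties(dtype=tf.float64)
  #   # -> {'loc': ParameterProperties(...), 'scale': ParameterProperties(...)}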
@classmethod
@deprecation.deprecated('2021-03-01',
'The `param_shapes` method of `tfd.Distribution` is '
'deprecated; use `parameter_properties` instead.')
def param_shapes(cls, sample_shape, name='DistributionParamShapes'):
"""Shapes of parameters given the desired shape of a call to `sample()`.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`.
Subclasses should override class method `_param_shapes`.
Args:
sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
`dict` of parameter name to `Tensor` shapes.
"""
with tf.name_scope(name):
param_shapes = {}
for (param_name, param) in cls.parameter_properties().items():
param_shapes[param_name] = tf.convert_to_tensor(
param.shape_fn(sample_shape), dtype=tf.int32)
return param_shapes
@classmethod
@deprecation.deprecated(
'2021-03-01', 'The `param_static_shapes` method of `tfd.Distribution` is '
'deprecated; use `parameter_properties` instead.')
def param_static_shapes(cls, sample_shape):
"""param_shapes with static (i.e. `TensorShape`) shapes.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`. Assumes that the sample's
shape is known statically.
Subclasses should override class method `_param_shapes` to return
constant-valued tensors when constant values are fed.
Args:
sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
to `sample()`.
Returns:
`dict` of parameter name to `TensorShape`.
Raises:
ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
"""
if isinstance(sample_shape, tf.TensorShape):
if not tensorshape_util.is_fully_defined(sample_shape):
raise ValueError('TensorShape sample_shape must be fully defined')
sample_shape = tensorshape_util.as_list(sample_shape)
params = cls.param_shapes(sample_shape)
static_params = {}
for name, shape in params.items():
static_shape = tf.get_static_value(shape)
if static_shape is None:
raise ValueError(
'sample_shape must be a fully-defined TensorShape or list/tuple')
static_params[name] = tf.TensorShape(static_shape)
return static_params
@property
def name(self):
"""Name prepended to all ops created by this `Distribution`."""
return self._name if hasattr(self, '_name') else None
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `Distribution`."""
return self._dtype if hasattr(self, '_dtype') else None
@property
def parameters(self):
"""Dictionary of parameters used to instantiate this `Distribution`."""
# Remove 'self', '__class__', or other special variables. These can appear
# if the subclass used: `parameters = dict(locals())`.
if (not hasattr(self, '_parameters_sanitized') or
not self._parameters_sanitized):
p = self._parameters() if callable(self._parameters) else self._parameters
self._parameters = self._no_dependency({
k: v for k, v in p.items()
if not k.startswith('__') and v is not self})
self._parameters_sanitized = True
# In some situations, the Distribution metaclass logic defers the evaluation
# of parameters, but at this point we actually want to evaluate the
# parameters.
return dict(
self._parameters() if callable(self._parameters) else self._parameters)
def _params_event_ndims(self):
"""Returns a dict mapping constructor argument names to per-event rank.
The ranks are pulled from `cls.parameter_properties()`; this is a
convenience wrapper.
Returns:
params_event_ndims: Per-event parameter ranks, a `str->int dict`.
"""
try:
properties = type(self).parameter_properties()
except NotImplementedError:
raise NotImplementedError(
'{} does not support batch slicing; must implement '
'_parameter_properties.'.format(type(self)))
params_event_ndims = {}
for (k, param) in properties.items():
ndims = param.instance_event_ndims(self)
if param.is_tensor and ndims is not None:
params_event_ndims[k] = ndims
return params_event_ndims
def __getitem__(self, slices):
"""Slices the batch axes of this distribution, returning a new instance.
```python
b = tfd.Bernoulli(logits=tf.zeros([3, 5, 7, 9]))
b.batch_shape # => [3, 5, 7, 9]
b2 = b[:, tf.newaxis, ..., -2:, 1::2]
b2.batch_shape # => [3, 1, 5, 2, 4]
x = tf.random.normal([5, 3, 2, 2])
cov = tf.matmul(x, x, transpose_b=True)
chol = tf.linalg.cholesky(cov)
loc = tf.random.normal([4, 1, 3, 1])
mvn = tfd.MultivariateNormalTriL(loc, chol)
mvn.batch_shape # => [4, 5, 3]
mvn.event_shape # => [2]
mvn2 = mvn[:, 3:, ..., ::-1, tf.newaxis]
mvn2.batch_shape # => [4, 2, 3, 1]
mvn2.event_shape # => [2]
```
Args:
slices: slices from the [] operator
Returns:
dist: A new `tfd.Distribution` instance with sliced parameters.
"""
return slicing.batch_slice(self, {}, slices)
def __iter__(self):
raise TypeError('{!r} object is not iterable'.format(type(self).__name__))
@property
def reparameterization_type(self):
"""Describes how samples from the distribution are reparameterized.
Currently this is one of the static instances
`tfd.FULLY_REPARAMETERIZED` or `tfd.NOT_REPARAMETERIZED`.
Returns:
An instance of `ReparameterizationType`.
"""
return self._reparameterization_type
@property
def allow_nan_stats(self):
"""Python `bool` describing behavior when a stat is undefined.
Stats return +/- infinity when it makes sense. E.g., the variance of a
Cauchy distribution is infinity. However, sometimes the statistic is
undefined, e.g., if a distribution's pdf does not achieve a maximum within
the support of the distribution, the mode is undefined. If the mean is
undefined, then by definition the variance is undefined. E.g. the mean for
Student's T for df = 1 is undefined (no clear way to say it is either + or -
infinity), so the variance = E[(X - mean)**2] is also undefined.
Returns:
allow_nan_stats: Python `bool`.
"""
return self._allow_nan_stats
@property
def validate_args(self):
"""Python `bool` indicating possibly expensive checks are enabled."""
return self._validate_args
@property
def experimental_shard_axis_names(self):
"""The list or structure of lists of active shard axis names."""
return []
def copy(self, **override_parameters_kwargs):
"""Creates a deep copy of the distribution.
Note: the copy distribution may continue to depend on the original
initialization arguments.
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
distribution: A new instance of `type(self)` initialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
"""
try:
      # We want to track provenance from origin variables, so we use batch_slice
# if this distribution supports slicing. See the comment on
# PROVENANCE_ATTR in batch_slicing.py
return slicing.batch_slice(self, override_parameters_kwargs, Ellipsis)
except NotImplementedError:
pass
parameters = dict(self.parameters, **override_parameters_kwargs)
d = type(self)(**parameters)
# pylint: disable=protected-access
d._parameters = self._no_dependency(parameters)
d._parameters_sanitized = True
# pylint: enable=protected-access
return d
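  # Sketch (illustration only): `copy` re-instantiates `type(self)` from the
  # union of the stored parameters and the overrides, e.g.
  #   n = tfd.Normal(loc=0., scale=1.)
  #   n2 = n.copy(scale=2.)   # same class and loc, new scale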
def _broadcast_parameters_with_batch_shape(self, batch_shape):
"""Broadcasts each parameter's batch shape with the given `batch_shape`.
This is semantically equivalent to wrapping with the `BatchBroadcast`
distribution, but returns a distribution of the same type as the original
    in which all parameter Tensors are reified at the broadcast batch shape.
It can be understood as a pseudo-inverse operation to batch slicing:
```python
dist = tfd.Normal(0., 1.)
# ==> `dist.batch_shape == []`
broadcast_dist = dist._broadcast_parameters_with_batch_shape([3])
# ==> `broadcast_dist.batch_shape == [3]`
# `broadcast_dist.loc.shape == [3]`
# `broadcast_dist.scale.shape == [3]`
sliced_dist = broadcast_dist[0]
# ==> `sliced_dist.batch_shape == []`.
```
Args:
batch_shape: Integer `Tensor` batch shape.
Returns:
broadcast_dist: copy of this distribution in which each parameter's
batch shape is determined by broadcasting its current batch shape with
the given `batch_shape`.
"""
return self.copy(
**batch_shape_lib.broadcast_parameters_with_batch_shape(
self, batch_shape))
def _batch_shape_tensor(self, **parameter_kwargs):
"""Infers batch shape from parameters.
The overall batch shape is inferred by broadcasting the batch shapes of
all parameters,
```python
parameter_batch_shapes = []
for name, properties in self.parameter_properties.items():
parameter = self.parameters[name]
parameter_batch_shapes.append(
base_shape(parameter)[:-properties.instance_event_ndims(parameter)])
```
where a parameter's `base_shape` is its batch shape if it
defines one (e.g., if it is a Distribution, LinearOperator, etc.), and its
Tensor shape otherwise. Parameters with structured batch shape
(in particular, non-autobatched JointDistributions) are not currently
supported.
Args:
**parameter_kwargs: Optional keyword arguments overriding the parameter
values in `self.parameters`. Typically this is used to avoid multiple
Tensor conversions of the same value.
Returns:
batch_shape_tensor: `Tensor` broadcast batch shape of all parameters.
"""
try:
return batch_shape_lib.inferred_batch_shape_tensor(
self, **parameter_kwargs)
except NotImplementedError:
raise NotImplementedError('Cannot compute batch shape of distribution '
'{}: you must implement at least one of '
'`_batch_shape_tensor` or '
'`_parameter_properties`.'.format(self))
def batch_shape_tensor(self, name='batch_shape_tensor'):
"""Shape of a single sample from a single event index as a 1-D `Tensor`.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Args:
name: name to give to the op
Returns:
batch_shape: `Tensor`.
"""
with self._name_and_control_scope(name):
      # Joint distributions may have a structured `batch_shape_tensor` or a
# single `batch_shape_tensor` that applies to all components. (Simple
# distributions always have a single `batch_shape_tensor`.) If the
# distribution's `batch_shape` is an instance of `tf.TensorShape`, we
# infer that `batch_shape_tensor` is not structured.
shallow_structure = (None if isinstance(self.batch_shape, tf.TensorShape)
else self.dtype)
if all([tensorshape_util.is_fully_defined(s)
for s in nest.flatten_up_to(
shallow_structure, self.batch_shape, check_types=False)]):
batch_shape = nest.map_structure_up_to(
shallow_structure,
tensorshape_util.as_list,
self.batch_shape, check_types=False)
else:
batch_shape = self._batch_shape_tensor()
def conversion_fn(s):
return tf.identity(
tf.convert_to_tensor(s, dtype=tf.int32), name='batch_shape')
if JAX_MODE:
conversion_fn = ps.convert_to_shape_tensor
return nest.map_structure_up_to(
shallow_structure,
conversion_fn,
batch_shape, check_types=False)
def _batch_shape(self):
"""Infers static batch shape from parameters.
The overall batch shape is inferred by broadcasting the batch shapes of
all parameters
```python
parameter_batch_shapes = []
for name, properties in self.parameter_properties.items():
parameter = self.parameters[name]
parameter_batch_shapes.append(
base_shape(parameter)[:-properties.instance_event_ndims(parameter)])
```
where a parameter's `base_shape` is its batch shape if it
defines one (e.g., if it is a Distribution, LinearOperator, etc.), and its
Tensor shape otherwise. Distributions with structured batch shape
(in particular, non-autobatched JointDistributions) are not currently
supported.
Returns:
batch_shape: `tf.TensorShape` broadcast batch shape of all parameters; may
be partially defined or unknown.
"""
try:
return batch_shape_lib.inferred_batch_shape(self)
except NotImplementedError:
# If a distribution doesn't implement `_parameter_properties` or its own
# `_batch_shape` method, we can only return the most general shape.
return tf.TensorShape(None)
@property
def batch_shape(self):
"""Shape of a single sample from a single event index as a `TensorShape`.
May be partially defined or unknown.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Returns:
batch_shape: `TensorShape`, possibly unknown.
"""
if not hasattr(self, '__cached_batch_shape'):
# Cache the batch shape so that it's only inferred once. This is safe
# because runtime changes to parameter shapes can only affect
# `batch_shape_tensor`, never `batch_shape`.
batch_shape = self._batch_shape()
# See comment in `batch_shape_tensor()` on structured batch shapes. If
# `_batch_shape()` is a `tf.TensorShape` instance or a flat list/tuple
# that does not contain `tf.TensorShape`s, we infer that it is not
# structured.
if (isinstance(batch_shape, tf.TensorShape)
or all(len(path) == 1 and not isinstance(s, tf.TensorShape)
for path, s in nest.flatten_with_tuple_paths(batch_shape))):
batch_shape = tf.TensorShape(batch_shape)
else:
batch_shape = nest.map_structure_up_to(
self.dtype, tf.TensorShape, batch_shape, check_types=False)
self.__cached_batch_shape = self._no_dependency(batch_shape)
return self.__cached_batch_shape
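  # Sketch (illustration only): the batch shape is the broadcast of the
  # parameters' batch shapes, e.g.
  #   tfd.Normal(loc=tf.zeros([2, 1]), scale=tf.ones([3])).batch_shape  # [2, 3]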
def _event_shape_tensor(self):
raise NotImplementedError(
'event_shape_tensor is not implemented: {}'.format(type(self).__name__))
def event_shape_tensor(self, name='event_shape_tensor'):
"""Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
event_shape: `Tensor`.
"""
with self._name_and_control_scope(name):
if all([tensorshape_util.is_fully_defined(s)
for s in nest.flatten(self.event_shape)]):
event_shape = nest.map_structure_up_to(
self.dtype,
tensorshape_util.as_list,
self.event_shape, check_types=False)
else:
event_shape = self._event_shape_tensor()
def conversion_fn(s):
return tf.identity(
tf.convert_to_tensor(s, dtype=tf.int32), name='event_shape')
if JAX_MODE:
conversion_fn = ps.convert_to_shape_tensor
return nest.map_structure_up_to(
self.dtype,
conversion_fn,
event_shape, check_types=False)
def _event_shape(self):
return None
@property
def event_shape(self):
"""Shape of a single sample from a single batch as a `TensorShape`.
May be partially defined or unknown.
Returns:
event_shape: `TensorShape`, possibly unknown.
"""
return nest.map_structure_up_to(
self.dtype, tf.TensorShape, self._event_shape(), check_types=False)
def is_scalar_event(self, name='is_scalar_event'):
"""Indicates that `event_shape == []`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
is_scalar_event: `bool` scalar `Tensor`.
"""
with self._name_and_control_scope(name):
return tf.convert_to_tensor(
self._is_scalar_helper(self.event_shape, self.event_shape_tensor),
name='is_scalar_event')
def is_scalar_batch(self, name='is_scalar_batch'):
"""Indicates that `batch_shape == []`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
is_scalar_batch: `bool` scalar `Tensor`.
"""
with self._name_and_control_scope(name):
return tf.convert_to_tensor(
self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor),
name='is_scalar_batch')
def _sample_n(self, n, seed=None, **kwargs):
raise NotImplementedError('sample_n is not implemented: {}'.format(
type(self).__name__))
def _call_sample_n(self, sample_shape, seed, **kwargs):
"""Wrapper around _sample_n."""
if JAX_MODE and seed is None:
raise ValueError('Must provide JAX PRNGKey as `dist.sample(seed=.)`')
sample_shape = ps.convert_to_shape_tensor(
ps.cast(sample_shape, tf.int32), name='sample_shape')
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, 'sample_shape')
samples = self._sample_n(
n, seed=seed() if callable(seed) else seed, **kwargs)
samples = tf.nest.map_structure(
lambda x: tf.reshape(x, ps.concat([sample_shape, ps.shape(x)[1:]], 0)),
samples)
return self._set_sample_static_shape(samples, sample_shape)
def sample(self, sample_shape=(), seed=None, name='sample', **kwargs):
"""Generate samples of the specified shape.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
name: name to give to the op.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
"""
with self._name_and_control_scope(name):
return self._call_sample_n(sample_shape, seed, **kwargs)
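  # Sketch (illustration only): sample shapes prepend to the batch shape, e.g.
  #   d = tfd.Normal(loc=tf.zeros([2, 3]), scale=1.)
  #   d.sample(5).shape        # [5, 2, 3] == [n] + batch_shape + event_shape
  #   d.sample([4, 5]).shape   # [4, 5, 2, 3]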
def _call_sample_and_log_prob(self, sample_shape, seed, **kwargs):
"""Wrapper around `_sample_and_log_prob`."""
if hasattr(self, '_sample_and_log_prob'):
sample_shape = ps.convert_to_shape_tensor(
ps.cast(sample_shape, tf.int32), name='sample_shape')
return self._sample_and_log_prob(
distribution_util.expand_to_vector(
sample_shape, tensor_name='sample_shape'),
seed=seed, **kwargs)
# Naive default implementation. This calls private, rather than public,
# methods, to avoid duplicating the name_and_control_scope.
value = self._call_sample_n(sample_shape, seed=seed, **kwargs)
if hasattr(self, '_log_prob'):
log_prob = self._log_prob(value, **kwargs)
elif hasattr(self, '_prob'):
log_prob = tf.math.log(self._prob(value, **kwargs))
else:
raise NotImplementedError('log_prob is not implemented: {}'.format(
type(self).__name__))
return value, log_prob
def experimental_sample_and_log_prob(self, sample_shape=(), seed=None,
name='sample_and_log_prob', **kwargs):
"""Samples from this distribution and returns the log density of the sample.
The default implementation simply calls `sample` and `log_prob`:
```
def _sample_and_log_prob(self, sample_shape, seed, **kwargs):
x = self.sample(sample_shape=sample_shape, seed=seed, **kwargs)
return x, self.log_prob(x, **kwargs)
```
However, some subclasses may provide more efficient and/or numerically
stable implementations.
Args:
sample_shape: integer `Tensor` desired shape of samples to draw.
Default value: `()`.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
Default value: `None`.
name: name to give to the op.
Default value: `'sample_and_log_prob'`.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
samples: a `Tensor`, or structure of `Tensor`s, with prepended dimensions
`sample_shape`.
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_and_control_scope(name):
return self._call_sample_and_log_prob(sample_shape, seed=seed, **kwargs)
def _call_log_prob(self, value, name, **kwargs):
"""Wrapper around _log_prob."""
value = nest_util.cast_structure(value, self.dtype)
value = nest_util.convert_to_nested_tensor(
value, name='value', dtype_hint=self.dtype,
allow_packing=True)
with self._name_and_control_scope(name, value, kwargs):
if hasattr(self, '_log_prob'):
return self._log_prob(value, **kwargs)
if hasattr(self, '_prob'):
return tf.math.log(self._prob(value, **kwargs))
raise NotImplementedError('log_prob is not implemented: {}'.format(
type(self).__name__))
def log_prob(self, value, name='log_prob', **kwargs):
"""Log probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_prob(value, name, **kwargs)
def _call_prob(self, value, name, **kwargs):
"""Wrapper around _prob."""
value = nest_util.cast_structure(value, self.dtype)
value = nest_util.convert_to_nested_tensor(
value, name='value', dtype_hint=self.dtype,
allow_packing=True)
with self._name_and_control_scope(name, value, kwargs):
if hasattr(self, '_prob'):
return self._prob(value, **kwargs)
if hasattr(self, '_log_prob'):
return tf.exp(self._log_prob(value, **kwargs))
raise NotImplementedError('prob is not implemented: {}'.format(
type(self).__name__))
def prob(self, value, name='prob', **kwargs):
"""Probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_prob(value, name, **kwargs)
def _call_unnormalized_log_prob(self, value, name, **kwargs):
"""Wrapper around _unnormalized_log_prob."""
value = nest_util.cast_structure(value, self.dtype)
value = nest_util.convert_to_nested_tensor(
value, name='value', dtype_hint=self.dtype, allow_packing=True)
with self._name_and_control_scope(name, value, kwargs):
if hasattr(self, '_unnormalized_log_prob'):
return self._unnormalized_log_prob(value, **kwargs)
if hasattr(self, '_unnormalized_prob'):
return tf.math.log(self._unnormalized_prob(value, **kwargs))
if hasattr(self, '_log_prob'):
return self._log_prob(value, **kwargs)
if hasattr(self, '_prob'):
return tf.math.log(self._prob(value, **kwargs))
raise NotImplementedError(
'unnormalized_log_prob is not implemented: {}'.format(
type(self).__name__))
def unnormalized_log_prob(self,
value,
name='unnormalized_log_prob',
**kwargs):
"""Potentially unnormalized log probability density/mass function.
This function is similar to `log_prob`, but does not require that the
return value be normalized. (Normalization here refers to the total
integral of probability being one, as it should be by definition for any
probability distribution.) This is useful, for example, for distributions
where the normalization constant is difficult or expensive to compute. By
default, this simply calls `log_prob`.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
unnormalized_log_prob: a `Tensor` of shape
`sample_shape(x) + self.batch_shape` with values of type `self.dtype`.
"""
return self._call_unnormalized_log_prob(value, name, **kwargs)
def _call_log_cdf(self, value, name, **kwargs):
"""Wrapper around _log_cdf."""
value = nest_util.cast_structure(value, self.dtype)
value = nest_util.convert_to_nested_tensor(
value, name='value', dtype_hint=self.dtype,
allow_packing=True)
with self._name_and_control_scope(name, value, kwargs):
if hasattr(self, '_log_cdf'):
return self._log_cdf(value, **kwargs)
if hasattr(self, '_cdf'):
return tf.math.log(self._cdf(value, **kwargs))
raise NotImplementedError('log_cdf is not implemented: {}'.format(
type(self).__name__))
def log_cdf(self, value, name='log_cdf', **kwargs):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_cdf(value, name, **kwargs)
def _call_cdf(self, value, name, **kwargs):
"""Wrapper around _cdf."""
value = nest_util.cast_structure(value, self.dtype)
value = nest_util.convert_to_nested_tensor(
value, name='value', dtype_hint=self.dtype,
allow_packing=True)
with self._name_and_control_scope(name, value, kwargs):
if hasattr(self, '_cdf'):
return self._cdf(value, **kwargs)
if hasattr(self, '_log_cdf'):
return tf.exp(self._log_cdf(value, **kwargs))
raise NotImplementedError('cdf is not implemented: {}'.format(
type(self).__name__))
def cdf(self, value, name='cdf', **kwargs):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
cdf(x) := P[X <= x]
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_cdf(value, name, **kwargs)
def _log_survival_function(self, value, **kwargs):
raise NotImplementedError(
'log_survival_function is not implemented: {}'.format(
type(self).__name__))
def _call_log_survival_function(self, value, name, **kwargs):
"""Wrapper around _log_survival_function."""
value = nest_util.cast_structure(value, self.dtype)
value = nest_util.convert_to_nested_tensor(
value, name='value', dtype_hint=self.dtype,
allow_packing=True)
with self._name_and_control_scope(name, value, kwargs):
try:
return self._log_survival_function(value, **kwargs)
except NotImplementedError:
if hasattr(self, '_log_cdf'):
return log1mexp(self._log_cdf(value, **kwargs))
if hasattr(self, '_cdf'):
return tf.math.log1p(-self._cdf(value, **kwargs))
raise
def log_survival_function(self, value, name='log_survival_function',
**kwargs):
"""Log survival function.
Given random variable `X`, the survival function is defined:
```none
log_survival_function(x) = Log[ P[X > x] ]
= Log[ 1 - P[X <= x] ]
= Log[ 1 - cdf(x) ]
```
Typically, different numerical approximations can be used for the log
survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_log_survival_function(value, name, **kwargs)
def _survival_function(self, value, **kwargs):
raise NotImplementedError('survival_function is not implemented: {}'.format(
type(self).__name__))
def _call_survival_function(self, value, name, **kwargs):
"""Wrapper around _survival_function."""
value = nest_util.cast_structure(value, self.dtype)
value = nest_util.convert_to_nested_tensor(
value, name='value', dtype_hint=self.dtype,
allow_packing=True)
with self._name_and_control_scope(name, value, kwargs):
try:
return self._survival_function(value, **kwargs)
except NotImplementedError:
if hasattr(self, '_log_cdf'):
return -tf.math.expm1(self._log_cdf(value, **kwargs))
if hasattr(self, '_cdf'):
return 1. - self.cdf(value, **kwargs)
raise
def survival_function(self, value, name='survival_function', **kwargs):
"""Survival function.
Given random variable `X`, the survival function is defined:
```none
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_survival_function(value, name, **kwargs)
def _entropy(self, **kwargs):
raise NotImplementedError('entropy is not implemented: {}'.format(
type(self).__name__))
def entropy(self, name='entropy', **kwargs):
"""Shannon entropy in nats."""
with self._name_and_control_scope(name):
return self._entropy(**kwargs)
def _mean(self, **kwargs):
raise NotImplementedError('mean is not implemented: {}'.format(
type(self).__name__))
def mean(self, name='mean', **kwargs):
"""Mean."""
with self._name_and_control_scope(name):
return self._mean(**kwargs)
def _quantile(self, value, **kwargs):
raise NotImplementedError('quantile is not implemented: {}'.format(
type(self).__name__))
def _call_quantile(self, value, name, **kwargs):
with self._name_and_control_scope(name):
dtype = tf.float32 if tf.nest.is_nested(self.dtype) else self.dtype
value = tf.convert_to_tensor(value, name='value', dtype_hint=dtype)
if self.validate_args:
value = distribution_util.with_dependencies([
assert_util.assert_less_equal(value, tf.cast(1, value.dtype),
message='`value` must be <= 1'),
assert_util.assert_greater_equal(value, tf.cast(0, value.dtype),
message='`value` must be >= 0')
], value)
return self._quantile(value, **kwargs)
def quantile(self, value, name='quantile', **kwargs):
"""Quantile function. Aka 'inverse cdf' or 'percent point function'.
Given random variable `X` and `p in [0, 1]`, the `quantile` is:
```none
quantile(p) := x such that P[X <= x] == p
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
quantile: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_quantile(value, name, **kwargs)
def _variance(self, **kwargs):
raise NotImplementedError('variance is not implemented: {}'.format(
type(self).__name__))
def variance(self, name='variance', **kwargs):
"""Variance.
Variance is defined as,
```none
Var = E[(X - E[X])**2]
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `Var.shape = batch_shape + event_shape`.
Args:
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
variance: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_and_control_scope(name):
try:
return self._variance(**kwargs)
except NotImplementedError:
try:
return tf.nest.map_structure(tf.square, self._stddev(**kwargs))
except NotImplementedError:
pass
raise
def _stddev(self, **kwargs):
raise NotImplementedError('stddev is not implemented: {}'.format(
type(self).__name__))
def stddev(self, name='stddev', **kwargs):
"""Standard deviation.
Standard deviation is defined as,
```none
stddev = E[(X - E[X])**2]**0.5
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `stddev.shape = batch_shape + event_shape`.
Args:
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
stddev: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_and_control_scope(name):
try:
return self._stddev(**kwargs)
except NotImplementedError:
try:
return tf.nest.map_structure(tf.sqrt, self._variance(**kwargs))
except NotImplementedError:
pass
raise
def _covariance(self, **kwargs):
raise NotImplementedError('covariance is not implemented: {}'.format(
type(self).__name__))
def covariance(self, name='covariance', **kwargs):
"""Covariance.
Covariance is (possibly) defined only for non-scalar-event distributions.
For example, for a length-`k`, vector-valued distribution, it is calculated
as,
```none
Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])]
```
where `Cov` is a (batch of) `k x k` matrix, `0 <= (i, j) < k`, and `E`
denotes expectation.
Alternatively, for non-vector, multivariate distributions (e.g.,
matrix-valued, Wishart), `Covariance` shall return a (batch of) matrices
under some vectorization of the events, i.e.,
```none
Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above]
```
where `Cov` is a (batch of) `k' x k'` matrices,
`0 <= (i, j) < k' = reduce_prod(event_shape)`, and `Vec` is some function
mapping indices of this distribution's event dimensions to indices of a
length-`k'` vector.
Args:
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
covariance: Floating-point `Tensor` with shape `[B1, ..., Bn, k', k']`
where the first `n` dimensions are batch coordinates and
`k' = reduce_prod(self.event_shape)`.
"""
with self._name_and_control_scope(name):
return self._covariance(**kwargs)
def _mode(self, **kwargs):
raise NotImplementedError('mode is not implemented: {}'.format(
type(self).__name__))
def mode(self, name='mode', **kwargs):
"""Mode."""
with self._name_and_control_scope(name):
return self._mode(**kwargs)
def _cross_entropy(self, other):
return kullback_leibler.cross_entropy(
self, other, allow_nan_stats=self.allow_nan_stats)
def cross_entropy(self, other, name='cross_entropy'):
"""Computes the (Shannon) cross entropy.
Denote this distribution (`self`) by `P` and the `other` distribution by
`Q`. Assuming `P, Q` are absolutely continuous with respect to
one another and permit densities `p(x) dr(x)` and `q(x) dr(x)`, (Shannon)
cross entropy is defined as:
```none
H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)
```
where `F` denotes the support of the random variable `X ~ P`.
Args:
other: `tfp.distributions.Distribution` instance.
name: Python `str` prepended to names of ops created by this function.
Returns:
cross_entropy: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
representing `n` different calculations of (Shannon) cross entropy.
"""
with self._name_and_control_scope(name):
return self._cross_entropy(other)
def _kl_divergence(self, other):
return kullback_leibler.kl_divergence(
self, other, allow_nan_stats=self.allow_nan_stats)
def kl_divergence(self, other, name='kl_divergence'):
"""Computes the Kullback--Leibler divergence.
Denote this distribution (`self`) by `p` and the `other` distribution by
`q`. Assuming `p, q` are absolutely continuous with respect to reference
measure `r`, the KL divergence is defined as:
```none
KL[p, q] = E_p[log(p(X)/q(X))]
= -int_F p(x) log q(x) dr(x) + int_F p(x) log p(x) dr(x)
= H[p, q] - H[p]
```
where `F` denotes the support of the random variable `X ~ p`, `H[., .]`
denotes (Shannon) cross entropy, and `H[.]` denotes (Shannon) entropy.
Args:
other: `tfp.distributions.Distribution` instance.
name: Python `str` prepended to names of ops created by this function.
Returns:
kl_divergence: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
representing `n` different calculations of the Kullback-Leibler
divergence.
"""
# NOTE: We do not enter a `self._name_and_control_scope` here. We rely on
# `tfd.kl_divergence(self, other)` to use `_name_and_control_scope` to apply
# assertions on both Distributions.
#
# Subclasses that override `Distribution.kl_divergence` or `_kl_divergence`
# must ensure that assertions are applied for both `self` and `other`.
return self._kl_divergence(other)
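# Illustrative sketch (assumption, not part of the library): per the docstring,
# KL[p, q] = H[p, q] - H[p], so for two distributions `p` and `q` of the same
# family (and assuming the subclass implements `entropy`) one would expect,
# up to numerics:
#
#   kl = p.kl_divergence(q)
#   # equivalently: p.cross_entropy(q) - p.entropy()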
def _default_event_space_bijector(self, *args, **kwargs):
raise NotImplementedError(
'`_default_event_space_bijector` is not implemented: {}'.format(
type(self).__name__))
def experimental_default_event_space_bijector(self, *args, **kwargs):
"""Bijector mapping the reals (R**n) to the event space of the distribution.
Distributions with continuous support may implement
`_default_event_space_bijector` which returns a subclass of
`tfp.bijectors.Bijector` that maps R**n to the distribution's event space.
For example, the default bijector for the `Beta` distribution
is `tfp.bijectors.Sigmoid()`, which maps the real line to `[0, 1]`, the
support of the `Beta` distribution. The default bijector for the
`CholeskyLKJ` distribution is `tfp.bijectors.CorrelationCholesky`, which
maps R^(k * (k-1) // 2) to the submanifold of k x k lower triangular
matrices with ones along the diagonal.
The purpose of `experimental_default_event_space_bijector` is
to enable gradient descent in an unconstrained space for Variational
Inference and Hamiltonian Monte Carlo methods. Some effort has been made to
choose bijectors such that the tails of the distribution in the
unconstrained space are between Gaussian and Exponential.
For distributions with discrete event space, or for which TFP currently
lacks a suitable bijector, this function returns `None`.
Args:
*args: Passed to implementation `_default_event_space_bijector`.
**kwargs: Passed to implementation `_default_event_space_bijector`.
Returns:
event_space_bijector: `Bijector` instance or `None`.
"""
return self._default_event_space_bijector(*args, **kwargs)
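# Illustrative sketch (assumption, not part of the library): following the
# docstring's Beta example, gradient-based methods can optimize in an
# unconstrained space and map back into the support:
#
#   beta = Beta(2., 3.)                                      # hypothetical usage
#   bij = beta.experimental_default_event_space_bijector()   # e.g. a Sigmoid
#   unconstrained = tf.Variable(0.0)
#   constrained = bij.forward(unconstrained)   # lies in (0, 1), Beta's support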
@classmethod
def experimental_fit(cls, value, sample_ndims=1, validate_args=False,
**init_kwargs):
"""Instantiates a distribution that maximizes the likelihood of `x`.
Args:
value: a `Tensor` valid sample from this distribution family.
sample_ndims: Positive `int` Tensor number of leftmost dimensions of
`value` that index i.i.d. samples.
Default value: `1`.
validate_args: Python `bool`, default `False`. When `True`, distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False`, invalid inputs may silently render incorrect
outputs.
Default value: `False`.
**init_kwargs: Additional keyword arguments passed through to
`cls.__init__`. These take precedence in case of collision with the
fitted parameters; for example,
`tfd.Normal.experimental_fit([1., 1.], scale=20.)` returns a Normal
distribution with `scale=20.` rather than the maximum likelihood
parameter `scale=0.`.
Returns:
maximum_likelihood_instance: instance of `cls` with parameters that
maximize the likelihood of `value`.
"""
with tf.name_scope('experimental_fit'):
value = tf.convert_to_tensor(value, name='value')
sample_ndims_ = tf.get_static_value(sample_ndims)
# Reshape `value` if needed to have a single leftmost sample dimension.
if sample_ndims_ != 1:
assertions = []
if sample_ndims_ is None and validate_args:
assertions += [assert_util.assert_positive(
sample_ndims,
message='`sample_ndims` must be a positive integer.')]
elif sample_ndims_ is not None and sample_ndims_ < 1:
raise ValueError(
'`sample_ndims` must be a positive integer. (saw: `{}`)'.format(
sample_ndims_))
with tf.control_dependencies(assertions):
value_shape = ps.convert_to_shape_tensor(ps.shape(value))
value = tf.reshape(
value, ps.concat([[-1], value_shape[sample_ndims:]], axis=0))
kwargs = cls._maximum_likelihood_parameters(value)
kwargs.update(init_kwargs)
return cls(**kwargs, validate_args=validate_args)
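# Illustrative sketch (assumption, not part of the library): as described in the
# docstring, a subclass that implements `_maximum_likelihood_parameters` could be
# fit directly from data, e.g.
#
#   samples = some_distribution.sample(1000)            # hypothetical data
#   fitted = type(some_distribution).experimental_fit(samples)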
@classmethod
def _maximum_likelihood_parameters(cls, value):
"""Returns a dictionary of parameters that maximize likelihood of `value`.
Following the [`Distribution` contract](
https://github.com/tensorflow/probability/blob/main/discussion/tfp_distributions_contract.md),
this method should be implemented only when the parameter estimate can be
computed efficiently and accurately. Iterative algorithms are permitted if
they are guaranteed to converge within a fixed number of steps (for example,
Newton iterations on a convex objective).
Args:
value: a `Tensor` valid sample from this distribution family, whose
leftmost dimension indexes independent samples.
Returns:
parameters: a dict with `str` keys and `Tensor` values, such that
`cls(**parameters)` gives maximum likelihood to `value` among all
instances of `cls`.
"""
raise NotImplementedError(
'Fitting maximum likelihood parameters is not implemented for this '
'distribution: {}.'.format(cls.__name__))
def experimental_local_measure(self, value, backward_compat=False, **kwargs):
"""Returns a log probability density together with a `TangentSpace`.
A `TangentSpace` allows us to calculate the correct push-forward
density when we apply a transformation to a `Distribution` on
a strict submanifold of R^n (typically via a `Bijector` in the
`TransformedDistribution` subclass). The density correction uses
the basis of the tangent space.
Args:
value: `float` or `double` `Tensor`.
backward_compat: `bool` specifying whether to fall back to returning
`FullSpace` as the tangent space, and representing R^n with the standard
basis.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
log_prob: a `Tensor` representing the log probability density, of shape
`sample_shape(x) + self.batch_shape` with values of type `self.dtype`.
tangent_space: a `TangentSpace` object (by default `FullSpace`)
representing the tangent space to the manifold at `value`.
Raises:
UnspecifiedTangentSpaceError if `backward_compat` is False and
the `_experimental_tangent_space` attribute has not been defined.
"""
log_prob = self.log_prob(value, **kwargs)
tangent_space = None
if hasattr(self, '_experimental_tangent_space'):
tangent_space = self._experimental_tangent_space
elif backward_compat:
# Import here rather than top-level to avoid circular import.
# pylint: disable=g-import-not-at-top
from tensorflow_probability.python.experimental import tangent_spaces
tangent_space = tangent_spaces.FullSpace()
if not tangent_space:
# Import here rather than top-level to avoid circular import.
# pylint: disable=g-import-not-at-top
from tensorflow_probability.python.experimental import tangent_spaces
raise tangent_spaces.UnspecifiedTangentSpaceError
return log_prob, tangent_space
def __str__(self):
if self.batch_shape:
maybe_batch_shape = ', batch_shape=' + _str_tensorshape(self.batch_shape)
else:
maybe_batch_shape = ''
if self.event_shape:
maybe_event_shape = ', event_shape=' + _str_tensorshape(self.event_shape)
else:
maybe_event_shape = ''
if self.dtype is not None:
maybe_dtype = ', dtype=' + _str_dtype(self.dtype)
else:
maybe_dtype = ''
return ('tfp.distributions.{type_name}('
'"{self_name}"'
'{maybe_batch_shape}'
'{maybe_event_shape}'
'{maybe_dtype})'.format(
type_name=type(self).__name__,
self_name=self.name or '<unknown>',
maybe_batch_shape=maybe_batch_shape,
maybe_event_shape=maybe_event_shape,
maybe_dtype=maybe_dtype))
def __repr__(self):
return ('<tfp.distributions.{type_name} '
'\'{self_name}\''
' batch_shape={batch_shape}'
' event_shape={event_shape}'
' dtype={dtype}>'.format(
type_name=type(self).__name__,
self_name=self.name or '<unknown>',
batch_shape=_str_tensorshape(self.batch_shape),
event_shape=_str_tensorshape(self.event_shape),
dtype=_str_dtype(self.dtype)))
@contextlib.contextmanager
def _name_and_control_scope(self, name=None, value=UNSET_VALUE, kwargs=None):
"""Helper function to standardize op scope."""
# Note: we receive `kwargs` and not `**kwargs` to ensure no collisions on
# other args we choose to take in this function.
with name_util.instance_scope(
instance_name=self.name,
constructor_name_scope=self._constructor_name_scope):
with tf.name_scope(name) as name_scope:
deps = []
if self._defer_all_assertions:
deps.extend(self._parameter_control_dependencies(is_init=True))
else:
deps.extend(self._initial_parameter_control_dependencies)
deps.extend(self._parameter_control_dependencies(is_init=False))
if value is not UNSET_VALUE:
deps.extend(self._sample_control_dependencies(
value, **({} if kwargs is None else kwargs)))
if not deps:
yield name_scope
return
# In eager mode, some `assert_util.assert_xyz` calls return None. If a
# Distribution is created in eager mode with `validate_args=True`, then
# used in a `tf.function` context, it can result in errors when
# `tf.convert_to_tensor` is called on the inputs to
# `tf.control_dependencies` below. To avoid these errors, we drop the
# `None`s here.
deps = [x for x in deps if x is not None]
with tf.control_dependencies(deps) as deps_scope:
yield deps_scope
def _expand_sample_shape_to_vector(self, x, name):
"""Helper to `sample` which ensures input is 1D."""
prod = ps.reduce_prod(x)
x = distribution_util.expand_to_vector(x, tensor_name=name)
return x, prod
def _set_sample_static_shape(self, x, sample_shape):
"""Helper to `sample`; sets static shape info."""
batch_shape = self.batch_shape
if (tf.nest.is_nested(self.dtype)
and not tf.nest.is_nested(batch_shape)):
batch_shape = tf.nest.map_structure(
lambda _: batch_shape, self.dtype)
return tf.nest.map_structure(
functools.partial(
_set_sample_static_shape_for_tensor, sample_shape=sample_shape),
x, self.event_shape, batch_shape)
def _is_scalar_helper(self, static_shape, dynamic_shape_fn):
"""Implementation for `is_scalar_batch` and `is_scalar_event`."""
if tensorshape_util.rank(static_shape) is not None:
return tensorshape_util.rank(static_shape) == 0
shape = dynamic_shape_fn()
if tf.compat.dimension_value(shape.shape[0]) is not None:
# If the static_shape is correctly written then we should never execute
# this branch. We keep it just in case there's some unimagined corner
# case.
return tensorshape_util.as_list(shape.shape) == [0]
return tf.equal(tf.shape(shape)[0], 0)
def _parameter_control_dependencies(self, is_init):
"""Returns a list of ops to be executed in members with graph deps.
Typically subclasses override this function to return parameter specific
assertions (eg, positivity of `scale`, etc.).
Args:
is_init: Python `bool` indicating that the call site is `__init__`.
Returns:
dependencies: `list`-like of ops to be executed in member functions with
graph dependencies.
"""
return ()
def _sample_control_dependencies(self, value, **kwargs):
"""Returns a list of ops to be executed to validate distribution samples.
The ops are executed in methods that take distribution samples as an
argument (e.g. `log_prob` and `cdf`). They validate that `value` is
within the support of the distribution. Typically subclasses override this
function to return assertions specific to the distribution (e.g. samples
from `Beta` must be between `0` and `1`). By convention, finite bounds of
the support are considered valid samples, since `sample` may output values
that are numerically equivalent to the bounds.
Args:
value: `float` or `double` `Tensor`.
**kwargs: Additional keyword args.
Returns:
assertions: `list`-like of ops to be executed in member functions that
take distribution samples as input.
"""
return ()
class _AutoCompositeTensorDistributionMeta(_DistributionMeta):
"""Metaclass for `AutoCompositeTensorBijector`."""
def __new__(mcs, classname, baseclasses, attrs): # pylint: disable=bad-mcs-classmethod-argument
"""Give subclasses their own type_spec, not an inherited one."""
cls = super(_AutoCompositeTensorDistributionMeta, mcs).__new__( # pylint: disable=too-many-function-args
mcs, classname, baseclasses, attrs)
if 'tensorflow_probability.python.distributions' in cls.__module__:
module_name = 'tfp.distributions'
elif ('tensorflow_probability.python.experimental.distributions'
in cls.__module__):
module_name = 'tfp.experimental.distributions'
else:
module_name = cls.__module__
return auto_composite_tensor.auto_composite_tensor(
cls,
omit_kwargs=('parameters',),
non_identifying_kwargs=('name',),
module_name=module_name)
class AutoCompositeTensorDistribution(
Distribution, auto_composite_tensor.AutoCompositeTensor,
metaclass=_AutoCompositeTensorDistributionMeta):
r"""Base for `CompositeTensor` bijectors with auto-generated `TypeSpec`s.
`CompositeTensor` objects are able to pass in and out of `tf.function` and
`tf.while_loop`, or serve as part of the signature of a TF saved model.
`Distribution` subclasses that follow the contract of
`tfp.experimental.auto_composite_tensor` may be defined as `CompositeTensor`s
by inheriting from `AutoCompositeTensorDistribution`:
```python
class MyDistribution(tfd.AutoCompositeTensorDistribution):
# The remainder of the subclass implementation is unchanged.
```
"""
pass
class _PrettyDict(dict):
"""`dict` with stable `repr`, `str`."""
def __str__(self):
pairs = (': '.join([str(k), str(v)]) for k, v in sorted(self.items()))
return '{' + ', '.join(pairs) + '}'
def __repr__(self):
pairs = (': '.join([repr(k), repr(v)]) for k, v in sorted(self.items()))
return '{' + ', '.join(pairs) + '}'
def _recursively_replace_dict_for_pretty_dict(x):
"""Recursively replace `dict`s with `_PrettyDict`."""
# We use "PrettyDict" because collections.OrderedDict repr/str has the word
# "OrderedDict" in it. We only want to print "OrderedDict" if in fact the
# input really is an OrderedDict.
if isinstance(x, dict):
return _PrettyDict({
k: _recursively_replace_dict_for_pretty_dict(v)
for k, v in x.items()})
if (isinstance(x, collections.abc.Sequence) and
not isinstance(x, six.string_types)):
args = (_recursively_replace_dict_for_pretty_dict(x_) for x_ in x)
is_named_tuple = (isinstance(x, tuple) and
hasattr(x, '_asdict') and
hasattr(x, '_fields'))
return type(x)(*args) if is_named_tuple else type(x)(args)
if isinstance(x, collections.abc.Mapping):
return type(x)(**{k: _recursively_replace_dict_for_pretty_dict(v)
for k, v in x.items()})
return x
def _str_tensorshape(x):
def _str(s):
if tensorshape_util.rank(s) is None:
return '?'
return str(tensorshape_util.as_list(s)).replace('None', '?')
# Because Python2 `dict`s are unordered, we must replace them with
# `PrettyDict`s so __str__, __repr__ are deterministic.
x = _recursively_replace_dict_for_pretty_dict(x)
return str(tf.nest.map_structure(_str, x)).replace('\'', '')
def _str_dtype(x):
def _str(s):
if s is None:
return '?'
return dtype_util.name(s)
# Because Python2 `dict`s are unordered, we must replace them with
# `PrettyDict`s so __str__, __repr__ are deterministic.
x = _recursively_replace_dict_for_pretty_dict(x)
return str(tf.nest.map_structure(_str, x)).replace('\'', '')
|
tensorflow/probability
|
tensorflow_probability/python/distributions/distribution.py
|
Python
|
apache-2.0
| 86,727
|
[
"Gaussian"
] |
9884f926c507ebc9ad0349aaa70470ae6ad640d3d187cd05f01a3d9714ac749b
|
#!/usr/bin/python3
''' XYZ2VTK.py - Takes in an XYZ file containing coordinates
and vectors then converts them into VTK structured grids
(snapshots) for use in ParaView.
'''
import argparse
import os
from os import path
parser = argparse.ArgumentParser()
parser.add_argument(
"-n",
help="Number of frames to dump",
type=int,
default=0
)
parser.add_argument(
"-k",
"--skip",
help="Dump every 'n'th frame",
type=int,
default=1
)
parser.add_argument(
"-t",
"--titles",
help="Titles of each data column",
type=str,
default=""
)
parser.add_argument(
"--vector",
help="Column positions of vector data",
type=int,
nargs=3
)
parser.add_argument(
"-s",
"--scalars",
help="Column positions of scalar data",
type=int,
nargs="+"
)
parser.add_argument(
"input",
help="Input file name",
type=str
)
parser.add_argument(
"-o",
"--output",
help="Output folder name/file prefix",
type=str
)
# Parse arguments
args = parser.parse_args()
# Definitions
frame = 0 # current frame
n = args.skip # Dump every "nth" frame
titles = args.titles.split()
nbegin = 0 # Are we at the beginning of a new frame?
fmax = args.n # Maximum number of frames to dump
dirout = path.splitext(args.input)[0] # Folder name
prefix = "frame" # File prefix
vector = args.vector # Vector to write
scalars = args.scalars # Scalars to write
# Set output directory if needed
if args.output is not None:
dirout = args.output
# Calculate required number of titles
ntit = 0
if vector is not None:
ntit += len(vector) // 3
if scalars is not None:
ntit += len(scalars)
if len(titles) != int(ntit):
print("Invalid number of titles. Must specify {0}".format(int(ntit)))
exit()
# Check that the output directory exists.
# If not, make it.
if not path.exists(dirout):
os.makedirs(dirout)
# Containers for frame data.
pdata = [] # Positional data
vdata = [] # Vector data
sdata = [] # Scalar data
species = [] # Species data
# Open input file
with open(args.input, 'r') as f:
for line in f:
# If split is 1 we are at a new frame.
if len(line.split()) == 1:
nbegin = 1
fname = path.join(dirout, "{0}{num:04d}.vtk".format(prefix, num=frame))
# Write frame data if available
if len(pdata):
with open(fname, "w") as fout:
fout.write("# vtk DataFile Version 2.0\n")
fout.write("VTK from XYZ2VTK\n")
fout.write("ASCII\n")
fout.write("DATASET STRUCTURED_GRID\n")
fout.write("DIMENSIONS 1 {0} 1\n".format(len(pdata)))
fout.write("POINTS {0} float\n".format(len(pdata)))
for pline in pdata:
fout.write("{0} {1} {2}\n".format(pline[0], pline[1], pline[2]))
# If there's vector data.
if len(vdata):
fout.write("POINT_DATA {0}\n".format(len(vdata)))
fout.write("VECTORS {0} float\n".format(titles[0]))
for vline in vdata:
fout.write("{0} {1} {2}\n".format(vline[0], vline[1], vline[2]))
# Add species data
fout.write("SCALARS species float\n")
fout.write("LOOKUP_TABLE default\n")
for sline in sdata:
fout.write("{0}\n".format(sline[0]))
# Clear data lists.
pdata = []
vdata = []
sdata = []
# Increment frame counter.
frame += 1
continue
# If we are at the beginning of a frame, skip
# this line since it holds the box vectors.
if nbegin == 1:
nbegin = 0
continue
# If we are not at a frame we're interested in,
# pass.
if n and (frame - 1) % n != 0:
continue
# If we maxed out the number of frames, quit
if fmax and frame > fmax:
break
# Just in case we check that we're not at frame 0.
if not nbegin and frame != 0:
data = line.split()
# Append position data to array
pdata.append(list(map(float, data[1:4])))
# Append vector data to array
if vector is not None:
vlist = []
vlist.append(float(data[vector[0]]))
vlist.append(float(data[vector[1]]))
vlist.append(float(data[vector[2]]))
vdata.append(vlist)
# Append species data to array
if data[0] not in species:
species.append(data[0])
slist = []
slist.append(species.index(data[0]))
sdata.append(slist)
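# Illustrative invocation (assumption; file names are hypothetical): convert an
# XYZ trajectory whose columns 4-6 hold a velocity vector into per-frame VTK
# structured grids under ./traj/:
#
#   python3 XYZ2VTK.py traj.xyz --vector 4 5 6 -t velocity -o traj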
|
hsidky/MolSimScripts
|
XYZ2VTK.py
|
Python
|
mit
| 4,111
|
[
"ParaView",
"VTK"
] |
e73f396f6cdd41817b5a956977f264e5dcf50f1b03fb3d06158b7bcf33c6fd76
|
import time
from itertools import chain
import pysam
import numpy as np
from mitty.lib.bedfile import read_bed
from mitty.lib.sanitizeseq import sanitize
import logging
logger = logging.getLogger(__name__)
SEED_MAX = (1 << 32) - 1 # Used for seeding rng
def main(fp_out, fasta_fname, sample_name, bed_fname, seed, p_het, models):
"""
:param fp_out: output file pointer
:param fasta_fname:
:param sample_name:
:param bed_fname:
:param seed:
:param p_het:
:param models:
:return:
"""
logger.debug('Starting variant simulation ...')
t0 = time.time()
v_cnt_tot = 0
fp_out.write(generate_header(fasta_fname, sample_name))
fasta = pysam.FastaFile(fasta_fname)
rng = np.random.RandomState(seed=seed)
for region in read_bed(bed_fname):
t1 = time.time()
v_cnt = write_out_variants(
fp_out,
region,
[
model_dispatch[model[0]](
rng, region=region, seq=fasta.fetch(reference=region[0], start=region[1], end=region[2]),
p=model[1],
p_het=p_het,
min_size=model[2], max_size=model[3])
for model in models
])
t2 = time.time()
logger.debug('Wrote {} variants in region {} in {:0.2f}s'.format(v_cnt, region, t2 - t1))
v_cnt_tot += v_cnt
t2 = time.time()
logger.debug('Simulated {} variants in {:0.2f}s'.format(v_cnt_tot, t2 - t0))
def generate_header(fasta_fname, sample_name):
fasta = pysam.FastaFile(fasta_fname)
return '\n'.join(
['##fileformat=VCFv4.2'] + \
['##contig=<ID={}, length={}>'.format(i, l) for i, l in zip(fasta.references, fasta.lengths)] + \
['##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype string">',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t{}'.format(sample_name)]) + '\n'
def place_poisson_seq(rng, p, seq):
"""Given a random number generator, a probability and an end point, generate poisson distributed events. Skip bases
that are 'N'. For short end_p this may, by chance, generate fewer locations that normal"""
if p == 0.0:
return np.array([], dtype='i4')
l = len(seq)
return np.array(
[loc for loc in rng.geometric(p=p, size=int(l * p * 1.2)).cumsum()
if loc < l and seq[loc] != 'N'], dtype='i4')
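# Illustrative sketch (assumption, not part of the module): the geometric
# inter-arrival draws above approximate a Poisson process along the sequence,
# e.g.
#
#   rng = np.random.RandomState(42)
#   locs = place_poisson_seq(rng, p=0.01, seq='ACGT' * 1000)
#   # `locs` holds roughly p * len(seq) positions, none of which falls on an 'N'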
def genotype(p_het, rng, l):
"""
:param rng:
:param l:
:return: gt - 0 = 1|0
1 = 0|1
2 = 1|1
"""
r1 = rng.rand(l) # For the het/hom determination
r2 = rng.rand(l) # For hets, for determining which copy
gt = np.zeros(l, dtype=int)
gt[r1 > p_het] = 2
gt[(r1 <= p_het) & (r2 < 0.5)] = 1
return gt
def snp_model(rng, region, seq, p, p_het, **args):
"""Places SNPs"""
base_sub = {
'A': 'CTG',
'C': 'ATG',
'T': 'ACG',
'G': 'ACT'
}
pos = place_poisson_seq(rng, p, seq)
ref = [seq[x] for x in pos]
alt = [base_sub[r][i] for r, i in zip(ref, rng.randint(0, 3, size=pos.shape[0]))]
gt = genotype(p_het, rng, pos.shape[0])
return [pos + region[1] + 1, ref, alt, gt]
t_mat = {
'A': [0.32654629, 0.17292732, 0.24524503, 0.25528135],
'T': [0.3489394, 0.25942695, 0.04942584, 0.3422078],
'G': [0.28778188, 0.21087004, 0.25963262, 0.24171546],
'C': [0.21644706, 0.20588717, 0.24978216, 0.32788362]
}
cum_t_mat = {
k: np.cumsum(v)
for k, v in t_mat.items()
}
def markov_chain(ref, rng, l):
"""
:param ref:
:param rng:
:param l:
:return:
"""
dna = 'ACTG'
alt = [ref] * (l + 1)
for n, r in enumerate(rng.rand(l)):
v = cum_t_mat[alt[n]]
for m in range(4):
if r <= v[m]:
alt[n + 1] = dna[m]
break
else:
alt[n + 1] = dna[3]
return ''.join(alt)
def ins_model(rng, region, seq, p, p_het, min_size, max_size):
"""Insertions uniformly spanning minimum and maximum lengths. Sequences are generated using a Markov chain
generator"""
pos = place_poisson_seq(rng, p, seq)
ref = [seq[x] for x in pos]
alt = [markov_chain(r, rng, l) for r, l in zip(ref, rng.randint(min_size, max_size, size=pos.shape[0]))]
gt = genotype(p_het, rng, pos.shape[0])
return [pos + region[1] + 1, ref, alt, gt]
def del_model(rng, region, seq, p, p_het, min_size, max_size):
"""Deletions uniformly spanning minimum and maximum lengths"""
pos = place_poisson_seq(rng, p, seq)
ref = [seq[x:x + l + 1] for x, l in zip(pos, rng.randint(min_size, max_size, size=pos.shape[0]))]
alt = [seq[x] for x in pos]
gt = genotype(p_het, rng, pos.shape[0])
return [pos + region[1] + 1, ref, alt, gt]
def copy_ins_model(rng, region, seq, p, p_het, min_size, max_size):
"""The `CINS` model works just like the `INS` model except the insertion sequences, instead of being
novel DNA sequences created with a Markov chain generator, are exact copies of random parts of
the input reference genome. This creates insertions that are more challenging to align to and
assemble, especially when their lengths start to exceed the template size of the sequencing
technology used.
"""
seq = sanitize(seq)
pos = place_poisson_seq(rng, p, seq)
ref = [seq[x] for x in pos]
alt = [copied_insertion(r, rng, seq, l) for r, l in zip(ref, rng.randint(min_size, max_size, size=pos.shape[0]))]
gt = genotype(p_het, rng, pos.shape[0])
try:
pos, ref, alt, gt = zip(*filter_none_alt(pos + region[1] + 1, ref, alt, gt))
except ValueError:
pos, ref, alt, gt = [[], [], [], []]
return pos, ref, alt, gt
def copied_insertion(ref, rng, seq, l):
if len(seq) <= l:
return None
n0 = rng.randint(len(seq) - l)
n1 = n0 + l
if 'N' in seq[n0:n1]:
return None
return ref + seq[n0:n1]
def filter_none_alt(pos, ref, alt, gt):
for p, r, a, g in zip(pos, ref, alt, gt):
if a is not None:
yield p, r, a, g
model_dispatch = {
'SNP': snp_model,
'INS': ins_model,
'CINS': copy_ins_model,
'DEL': del_model
}
def write_out_variants(fp_out, region, v_l):
"""Given a list of list of variants (as returned from the variant model functions) concatenate them and then
write them out in position order
:param fp_out:
:param region:
:param v_l:
:return:
"""
gt_str = ['0|1', '1|0', '1|1']
contig = region[0]
pos = list(chain(*(v[0] for v in v_l)))
ref = list(chain(*(v[1] for v in v_l)))
alt = list(chain(*(v[2] for v in v_l)))
gt = list(chain(*(v[3] for v in v_l)))
for idx in np.argsort(np.array(pos)): # Sort by position
#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tGT\n
fp_out.write('{}\t{}\t.\t{}\t{}\t100\tPASS\t.\tGT\t{}\n'.format(contig, pos[idx], ref[idx], alt[idx], gt_str[gt[idx]]))
return len(pos)
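# Illustrative sketch (assumption; paths and rates are hypothetical): each entry
# of `models` is dispatched as (name, p, min_size, max_size), so a driver might
# call `main` roughly like:
#
#   with open('sim.vcf', 'w') as fp_out:
#       main(fp_out, 'ref.fa', 'S1', 'regions.bed', seed=7, p_het=0.6,
#            models=[('SNP', 1e-3, None, None), ('DEL', 1e-4, 2, 10)])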
|
sbg/Mitty
|
mitty/simulation/genome/simulatevariants.py
|
Python
|
apache-2.0
| 6,543
|
[
"pysam"
] |
3c1ee038f2ce6be77909985bd682ec317c69ce3f092eb9b715559e789a1c822f
|
#
# Copyright (c) 2017 Brian J. Kidney
# Copyright (c) 2017 Jonathan Anderson
# All rights reserved.
#
# This software was developed by BAE Systems, the University of Cambridge
# Computer Laboratory, and Memorial University under DARPA/AFRL contract
# FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent Computing
# (TC) research program.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cdg.query
class FilterError(Exception):
"""Raised when an error is encountered filtering a graph.
Attributes:
filter_spec User-provided filter specification
message Explanation
"""
def __init__(self, filter_spec, message):
self.filter_spec = filter_spec
self.message = message
def apply(filter_spec, graph):
'''
Apply a filter like calls-to:read,write or flows-from:main to a graph.
'''
tokens = filter_spec.split(':')
name = tokens[0]
args = tokens[1].split(',')
depth_limit = int(tokens[2]) if len(tokens) > 2 else None
def get_neighbours(select_fn, **annotations):
return cdg.query.transitive_neighbours(graph, args, select_fn,
annotations, depth_limit)
if name == 'identity':
return graph
elif name == 'exclude':
return exclude(graph, args)
elif name == 'calls-from':
select_fn = lambda node: cdg.query.succ(graph, node, is_call)
nodes = get_neighbours(select_fn, call='root')
description = 'successors'
elif name == 'calls-to':
select_fn = lambda node: cdg.query.pred(graph, node, is_call)
nodes = get_neighbours(select_fn, call='target')
description = 'predecessors'
elif name == 'flows-from':
select_fn = lambda node: cdg.query.succ(graph, node, lambda _: True)
nodes = get_neighbours(select_fn, flow='source')
description = 'successors'
elif name == 'flows-to':
select_fn = lambda node: cdg.query.pred(graph, node, lambda _: True)
nodes = get_neighbours(select_fn, flow='sink')
description = 'predecessors'
else:
raise FilterError(filter_spec, 'Invalid filter')
also_keep = set()
for n in nodes:
node = graph.nodes[n]
if 'parent' in node:
also_keep.add(node['parent'])
print('Keeping %d %s of %d nodes (and %d parents)' % (
len(nodes), description, len(args), len(also_keep)))
nodes = nodes.union(also_keep)
return cdg.hot_patch(graph.subgraph(nodes))
def exclude(graph, to_exclude):
result = graph.full_copy()
for n in to_exclude:
result.remove_node(n)
print('Removed %d nodes, %d edges' % (
len(graph.nodes) - len(result.nodes),
len(graph.edges) - len(result.edges),
))
return result
def is_call(attrs):
return attrs['kind'] == cdg.EdgeKind.Call
def intersection(G, H):
R = G.full_copy()
R.remove_nodes_from(n for n in G if n not in H)
return R
def union(G, H):
R = G.full_copy()
R.add_edges_from(H.edges())
return R
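# Illustrative sketch (assumption, not part of the module): filter specs follow
# the docstring of `apply`, e.g. keep only predecessors of calls to read/write,
# limited to 3 hops, or drop a node outright:
#
#   filtered = apply('calls-to:read,write:3', graph)   # `graph` is a cdg graph
#   trimmed  = apply('exclude:main', graph)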
|
musec/py-cdg
|
cdg/filters.py
|
Python
|
apache-2.0
| 3,585
|
[
"Brian"
] |
0ca7f5e117430c625a5350c9e8a5bb3b4ef3d984ad8a63b21f28214617a6e706
|
import sys
import os
import gzip
import argparse
import numpy as np
from itertools import product, groupby
import pysam
import util
import snptable
import tables
MAX_SEQS_DEFAULT = 64
MAX_SNPS_DEFAULT = 6
class DataFiles(object):
"""Object to hold names and filehandles for all input / output
datafiles"""
def __init__(self, bam_filename, is_sorted, is_paired,
output_dir=None, snp_dir=None,
snp_tab_filename=None, snp_index_filename=None,
haplotype_filename=None, samples=None):
# flag indicating whether reads are paired-end
self.is_paired = is_paired
# prefix for output files
self.prefix = None
# name of input BAM filename
self.bam_filename = bam_filename
# name of sorted input bam_filename
# (new file is created if input file is not
# already sorted)
self.bam_sort_filename = None
# pysam file handle for input BAM
self.input_bam = None
# name of output keep and to.remap BAM files
self.keep_filename = None
self.remap_filename = None
# pysam file handles for output BAM filenames
self.keep_bam = None
self.remap_bam = None
# name of output fastq files
self.fastq_single_filename = None
self.fastq1_filename = None
self.fastq2_filename = None
self.fastq1 = None
self.fastq2 = None
self.fastq_single = None
# name of directory to read SNPs from
self.snp_dir = snp_dir
# paths to HDF5 files to read SNP info from
self.snp_tab_filename = snp_tab_filename
self.snp_index_filename = snp_index_filename
self.haplotype_filename = haplotype_filename
if self.snp_tab_filename:
self.snp_tab_h5 = tables.open_file(snp_tab_filename, "r")
self.snp_index_h5 = tables.open_file(snp_index_filename, "r")
self.hap_h5 = tables.open_file(haplotype_filename, "r")
else:
self.snp_tab_h5 = None
self.snp_index_h5 = None
self.hap_h5 = None
# separate input directory and bam filename
tokens = self.bam_filename.split("/")
bam_dir = "/".join(tokens[:-1])
filename = tokens[-1]
if output_dir is None:
# if no output dir specified, use same directory as input
# bam file
output_dir = bam_dir
else:
if output_dir.endswith("/"):
# strip trailing '/' from output dir name
output_dir = output_dir[:-1]
name_split = filename.split(".")
if len(name_split) > 1:
self.prefix = output_dir + "/" + ".".join(name_split[:-1])
else:
self.prefix = output_dir + "/" + name_split[0]
# create output dir if does not exist
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# TODO: could allow names of output files to be specified
# on command line rather than appending name to prefix
sys.stderr.write("prefix: %s\n" % self.prefix)
if not is_sorted:
util.sort_bam(self.bam_filename, self.prefix)
self.bam_sort_filename = self.prefix + ".sort.bam"
else:
self.bam_sort_filename = self.bam_filename
self.keep_filename = self.prefix + ".keep.bam"
self.remap_filename = self.prefix + ".to.remap.bam"
sys.stderr.write("reading reads from:\n %s\n" %
self.bam_sort_filename)
sys.stderr.write("writing output files to:\n")
if self.is_paired:
self.fastq1_filename = self.prefix + ".remap.fq1.gz"
self.fastq2_filename = self.prefix + ".remap.fq2.gz"
self.fastq1 = gzip.open(self.fastq1_filename, "wt")
self.fastq2 = gzip.open(self.fastq2_filename, "wt")
self.fastq_single_filename = self.prefix + ".remap.single.fq.gz"
self.fastq_single = gzip.open(self.fastq_single_filename, "wt")
sys.stderr.write(" %s\n %s\n %s\n" %
(self.fastq1_filename,
self.fastq2_filename,
self.fastq_single_filename))
else:
self.fastq_single_filename = self.prefix + ".remap.fq.gz"
self.fastq_single = gzip.open(self.fastq_single_filename, "wt")
sys.stderr.write(" %s\n" % (self.fastq_single_filename))
self.input_bam = pysam.Samfile(self.bam_sort_filename, "r")
self.keep_bam = pysam.Samfile(self.keep_filename, "w",
template=self.input_bam)
self.remap_bam = pysam.Samfile(self.remap_filename, "w",
template=self.input_bam)
sys.stderr.write(" %s\n %s\n" % (self.keep_filename,
self.remap_filename))
def close(self):
"""close open filehandles"""
filehandles = [self.keep_bam, self.remap_bam, self.fastq1,
self.fastq2, self.fastq_single,
self.snp_tab_h5, self.snp_index_h5,
self.hap_h5]
for fh in filehandles:
if fh:
fh.close()
class ReadStats(object):
"""Track information about reads and SNPs that they overlap"""
def __init__(self):
# number of read matches to reference allele
self.ref_count = 0
# number of read matches to alternative allele
self.alt_count = 0
# number of reads that overlap SNP but match neither allele
self.other_count = 0
# number of reads discarded because not mapped
self.discard_unmapped = 0
# number of reads discarded because not proper pair
self.discard_improper_pair = 0
# number of reads discarded because mate unmapped
self.discard_mate_unmapped = 0
# paired reads map to different chromosomes
self.discard_different_chromosome = 0
# number of reads discarded because overlap an indel
self.discard_indel = 0
# number of reads discarded because secondary match
self.discard_secondary = 0
# number of chimeric reads discarded
self.discard_supplementary = 0
# number of reads discarded because of too many overlapping SNPs
self.discard_excess_snps = 0
# number of reads discarded because too many allelic combinations
self.discard_excess_reads = 0
# when read pairs share SNP locations but have different alleles there
self.discard_discordant_shared_snp = 0
# reads where we expected to see other pair, but it was missing
# possibly due to read-pairs with different names
self.discard_missing_pair = 0
# number of single reads kept
self.keep_single = 0
# number of read pairs kept
self.keep_pair = 0
# number of single reads that need remapping
self.remap_single = 0
# number of read pairs kept
self.remap_pair = 0
def write(self, file_handle):
sys.stderr.write("DISCARD reads:\n"
" unmapped: %d\n"
" mate unmapped: %d\n"
" improper pair: %d\n"
" different chromosome: %d\n"
" indel: %d\n"
" secondary alignment: %d\n"
" supplementary alignment: %d\n"
" excess overlapping snps: %d\n"
" excess allelic combinations: %d\n"
" read pairs with discordant shared SNPs: %d\n"
" missing pairs (e.g. mismatched read names): %d\n"
"KEEP reads:\n"
" single-end: %d\n"
" pairs: %d\n"
"REMAP reads:\n"
" single-end: %d\n"
" pairs: %d\n" %
(self.discard_unmapped,
self.discard_mate_unmapped,
self.discard_improper_pair,
self.discard_different_chromosome,
self.discard_indel,
self.discard_secondary,
self.discard_supplementary,
self.discard_excess_snps,
self.discard_excess_reads,
self.discard_discordant_shared_snp,
self.discard_missing_pair,
self.keep_single,
self.keep_pair,
self.remap_single,
self.remap_pair))
file_handle.write("read SNP ref matches: %d\n" % self.ref_count)
file_handle.write("read SNP alt matches: %d\n" % self.alt_count)
file_handle.write("read SNP mismatches: %d\n" % self.other_count)
total = self.ref_count + self.alt_count + self.other_count
if total > 0:
mismatch_pct = 100.0 * float(self.other_count) / total
if mismatch_pct > 10.0:
sys.stderr.write("WARNING: many read SNP overlaps do not match "
"either allele (%.1f%%). SNP coordinates "
"in input file may be incorrect.\n" %
mismatch_pct)
def parse_options():
parser = argparse.ArgumentParser(description="Looks for SNPs and indels "
"overlapping reads. If a read overlaps "
"SNPs, alternative versions of the read "
"containing different alleles are created "
"and written to files for remapping. "
"Reads that do not overlap SNPs or indels "
"are written to a 'keep' BAM file."
"Reads that overlap indels are presently "
"discarded.")
parser.add_argument("--is_paired_end", "-p", action='store_true',
dest='is_paired_end',
default=False,
help=("Indicates that reads are paired-end "
"(default is single)."))
parser.add_argument("--is_sorted", "-s", action='store_true',
dest='is_sorted',
default=False,
help=('Indicates that the input BAM file'
' is coordinate-sorted (default '
'is False).'))
parser.add_argument("--max_seqs", type=int, default=MAX_SEQS_DEFAULT,
help="The maximum number of sequences with different "
"allelic combinations to consider remapping "
"(default=%d). Read pairs with more allelic "
"combinations than MAX_SEQs are discarded" %
MAX_SEQS_DEFAULT)
parser.add_argument("--max_snps", type=int, default=MAX_SNPS_DEFAULT,
help="The maximum number of SNPs allowed to overlap "
"a read before discarding the read. Allowing higher "
"numbers will decrease speed and increase memory "
"usage (default=%d)."
% MAX_SNPS_DEFAULT)
parser.add_argument("--output_dir", default=None,
help="Directory to write output files to. If not "
"specified, output files are written to the "
"same directory as the input BAM file.")
parser.add_argument("--snp_dir", action='store',
help="Directory containing SNP text files "
"This directory should contain one file per "
"chromosome named like chr<#>.snps.txt.gz. "
"Each file should contain 3 columns: position "
"RefAllele AltAllele. This option should "
"only be used if --snp_tab, --snp_index, "
"and --haplotype arguments are not used."
" If this argument is provided, all possible "
"allelic combinations are used (rather "
"than set of observed haplotypes).",
default=None)
parser.add_argument("--snp_tab",
help="Path to HDF5 file to read SNP information "
"from. Each row of SNP table contains SNP name "
"(rs_id), position, allele1, allele2.",
metavar="SNP_TABLE_H5_FILE",
default=None)
parser.add_argument("--snp_index",
help="Path to HDF5 file containing SNP index. The "
"SNP index is used to convert the genomic position "
"of a SNP to its corresponding row in the haplotype "
"and snp_tab HDF5 files.",
metavar="SNP_INDEX_H5_FILE",
default=None)
parser.add_argument("--haplotype",
help="Path to HDF5 file to read phased haplotypes "
"from. When generating alternative reads "
"use known haplotypes from this file rather "
"than all possible allelic combinations.",
metavar="HAPLOTYPE_H5_FILE",
default=None)
parser.add_argument("--samples",
help="Use only haplotypes and SNPs that are "
"polymorphic in these samples. "
"SAMPLES can either be a comma-delimited string "
"of sample names or a path to a file with one sample "
"name per line (file is assumed to be whitespace-"
"delimited and first column is assumed to be sample "
"name). Sample names should match those present in the "
"--haplotype file. Samples are ignored if no haplotype "
"file is provided.",
metavar="SAMPLES")
parser.add_argument("bam_filename", action='store',
help="Coordinate-sorted input BAM file "
"containing mapped reads.")
options = parser.parse_args()
if options.snp_dir:
if(options.snp_tab or options.snp_index or options.haplotype):
parser.error("expected --snp_dir OR (--snp_tab, --snp_index and "
"--haplotype) arguments but not both")
else:
if not (options.snp_tab and options.snp_index and options.haplotype):
parser.error("either --snp_dir OR (--snp_tab, "
"--snp_index AND --haplotype) arguments must be "
"provided")
if options.samples and not options.haplotype:
# warn because no way to use samples if haplotype file not specified
sys.stderr.write("WARNING: ignoring --samples argument "
"because --haplotype argument not provided")
return options
def write_read(read, snp_tab, snp_idx, read_pos):
snp_allele1 = [' '] * read.qlen
snp_allele2 = [' '] * read.qlen
for (s_idx, r_idx) in zip(snp_idx, read_pos):
a1 = snp_tab.snp_allele1[s_idx]
a2 = snp_tab.snp_allele2[s_idx]
snp_allele1[r_idx-1] = a1
snp_allele2[r_idx-1] = a2
sys.stderr.write("READ: %s\n" % read.query_sequence)
sys.stderr.write("A1: %s\n" % "".join(snp_allele1))
sys.stderr.write("A2: %s\n" % "".join(snp_allele2))
def count_ref_alt_matches(read, read_stats, snp_tab, snp_idx, read_pos):
ref_alleles = snp_tab.snp_allele1[snp_idx]
alt_alleles = snp_tab.snp_allele2[snp_idx]
for i in range(len(snp_idx)):
ref = ref_alleles[i].decode("utf-8")
alt = alt_alleles[i].decode("utf-8")
if ref == read.query_sequence[read_pos[i]-1]:
# read matches reference allele
read_stats.ref_count += 1
elif alt == read.query_sequence[read_pos[i]-1]:
# read matches non-reference allele
read_stats.alt_count += 1
else:
# read matches neither ref nor other
read_stats.other_count += 1
def get_unique_haplotypes(haplotypes, phasing, snp_idx):
"""
returns list of vectors of unique haplotypes for this set of SNPs
all possible combinations of ref/alt are calculated at unphased sites
"""
haps = haplotypes[snp_idx,:].T
# get phasing of SNPs for each individual as bool array
# True = unphased and False = phased
if phasing is not None:
phasing = np.logical_not(phasing[snp_idx, :].T.astype(bool))
else:
# assume all SNPs are unphased
phasing = np.full((int(haps.shape[0]/2), haps.shape[1]), True)
# if a haplotype has unphased SNPs, generate all possible allelic
# combinations and add each combination as a new haplotype
new_haps = []
# iterate through each individual
for i in range(len(phasing)):
# get the haplotype data for this individual
hap_pair = haps[i*2:(i*2)+2]
# get bool index of cols in hap_pair that contain hets
hets = np.not_equal(hap_pair[0], hap_pair[1])
# get bool index of cols in hap_pair that are both unphased and hets
phase = np.logical_and(phasing[i], hets)
# get all combinations of indices at unphased cols
# then, index into hap_pair with each combination
for j in product([0, 1], repeat=sum(phase)):
# index into hap_pair using all ref genes at genotyped columns
ref = np.repeat(0, hap_pair.shape[1])
np.place(ref, phase, j)
new_haps.append(hap_pair[ref, range(len(ref))])
# index into hap_pair using all alt genes at genotyped columns
alt = np.repeat(1, hap_pair.shape[1])
np.place(alt, phase, j)
new_haps.append(hap_pair[alt, range(len(alt))])
# add new haps to old haps
haps = np.concatenate((haps, new_haps))
# create view of data that joins all elements of column
# into single void datatype
h = np.ascontiguousarray(haps).view(np.dtype((np.void, haps.dtype.itemsize * haps.shape[1])))
# get index of unique columns
_, idx = np.unique(h, return_index=True)
return haps[idx,:]
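# Illustrative sketch (assumption, not part of the pipeline): the void-dtype view
# above is a standard trick for row-wise `np.unique` on a 2D array, e.g.
#
#   a = np.array([[0, 1], [0, 1], [1, 1]], dtype=np.int8)
#   v = np.ascontiguousarray(a).view(np.dtype((np.void, a.dtype.itemsize * a.shape[1])))
#   _, idx = np.unique(v, return_index=True)
#   # a[idx, :] contains the two distinct rows [0, 1] and [1, 1]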
def generate_haplo_reads(read_seq, snp_idx, read_pos, ref_alleles, alt_alleles,
haplo_tab, phase_tab):
"""
read_seq - a string representing the the sequence of the read in question
snp_index - a list of indices of SNPs that this read overlaps
read_pos - a list of positions in read_seq that overlap SNPs
ref_alleles - a np array of reference alleles with
indices corresponding to snp_index
alt_alleles - a np array of alternate alleles with
indices corresponding to snp_index
haplo_tab - a pytables node with haplotypes from haplotype.h5
"""
haps = get_unique_haplotypes(haplo_tab, phase_tab, snp_idx)
# sys.stderr.write("UNIQUE haplotypes: %s\n"
# "read_pos: %s\n"
# % (repr(haps), read_pos))
read_len = len(read_seq)
new_read_list = set()
# loop over haplotypes
for hap in haps:
new_read = []
cur_pos = 1
missing_data = False
# loop over the SNPs to get alleles that make up this haplotype
for i in range(len(hap)):
if read_pos[i] > cur_pos:
# add segment of read
new_read.append(read_seq[cur_pos-1:read_pos[i]-1])
# add segment for appropriate allele
if hap[i] == 0:
# reference allele
new_read.append(ref_alleles[i].decode("utf-8"))
elif hap[i] == 1:
# alternate allele
new_read.append(alt_alleles[i].decode("utf-8"))
else:
# haplotype has unknown genotype so skip it...
missing_data = True
break
cur_pos = read_pos[i] + 1
if read_len >= cur_pos:
# add final segment
new_read.append(read_seq[cur_pos-1:read_len])
if not missing_data:
new_seq = "".join(new_read)
# sanity check: read should be same length as original
if len(new_seq) != read_len:
raise ValueError("Expected read len to be %d but "
"got %d.\n"
"ref_alleles: %s\n"
"alt_alleles: %s\n"
"read_pos: %s\n"
"snp_idx: %s\n"
"haps: %s\n"
% (read_len, len(new_seq),
repr(ref_alleles), repr(alt_alleles),
repr(read_pos), repr(snp_idx),
repr(haps)))
new_read_list.add("".join(new_seq))
return new_read_list
def generate_reads(read_seq, read_pos, ref_alleles, alt_alleles):
"""Generate set of reads with all possible combinations
of alleles (i.e. 2^n combinations where n is the number of snps overlapping
the reads)
"""
reads = [read_seq]
# iterate through all snp locations
for i in range(len(read_pos)):
idx = read_pos[i]-1
# for each read we've already created...
for j in range(len(reads)):
read = reads[j]
# create a new version of this read with both reference
# and alternative versions of the allele at this index
reads.append(
read[:idx] + ref_alleles[i].decode("utf-8") + read[idx+1:]
)
reads.append(
read[:idx] + alt_alleles[i].decode("utf-8") + read[idx+1:]
)
return set(reads)
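# Illustrative sketch (assumption, not part of the pipeline): with n overlapping
# SNPs this yields up to 2**n allelic versions of the read, e.g.
#
#   generate_reads('ACGT', [2], [b'C'], [b'T'])
#   # -> {'ACGT', 'ATGT'}   (position 2 carries either the ref 'C' or the alt 'T')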
def write_fastq(fastq_file, orig_read, new_seqs):
n_seq = len(new_seqs)
i = 1
for new_seq in new_seqs:
# Give each read a new name giving:
# 1 - the original name of the read
# 2 - the coordinate that it should map to
# 3 - the number of the read
# 4 - the total number of reads being remapped
name = "%s.%d.%d.%d" % (orig_read.qname, orig_read.pos+1, i, n_seq)
fastq_file.write("@%s\n%s\n+%s\n%s\n" %
(name, new_seq, name, orig_read.qual))
i += 1
def write_pair_fastq(fastq_file1, fastq_file2, orig_read1, orig_read2,
new_pairs):
n_pair = len(new_pairs)
i = 1
for pair in new_pairs:
# give each fastq record a new name giving:
# 1 - the original name of the read
# 2 - the coordinates the two ends of the pair should map to
# 3 - the number of the read
# 4 - the total number of reads being remapped
pos_str = "%d-%d" % (min(orig_read1.pos+1, orig_read2.pos+1),
max(orig_read1.pos+1, orig_read2.pos+1))
name = "%s.%s.%d.%d" % (orig_read1.qname, pos_str, i, n_pair)
fastq_file1.write("@%s\n%s\n+%s\n%s\n" %
(name, pair[0], name, orig_read1.qual))
rev_seq = util.revcomp(pair[1])
fastq_file2.write("@%s\n%s\n+%s\n%s\n" %
(name, rev_seq, name, orig_read2.qual))
i += 1
def filter_reads(files, max_seqs=MAX_SEQS_DEFAULT, max_snps=MAX_SNPS_DEFAULT,
samples=None):
cur_chrom = None
cur_tid = None
seen_chrom = set([])
snp_tab = snptable.SNPTable()
read_stats = ReadStats()
read_pair_cache = {}
cache_size = 0
read_count = 0
for read in files.input_bam:
read_count += 1
# if (read_count % 100000) == 0:
# sys.stderr.write("\nread_count: %d\n" % read_count)
# sys.stderr.write("cache_size: %d\n" % cache_size)
# TODO: need to change this to use new pysam API calls
# but need to check pysam version for backward compatibility
if read.tid == -1:
# unmapped read
read_stats.discard_unmapped += 1
continue
if (cur_tid is None) or (read.tid != cur_tid):
# this is a new chromosome
cur_chrom = files.input_bam.getrname(read.tid)
if len(read_pair_cache) != 0:
sys.stderr.write("WARNING: failed to find pairs for %d "
"reads on this chromosome\n" %
len(read_pair_cache))
read_stats.discard_missing_pair += len(read_pair_cache)
read_pair_cache = {}
cache_size = 0
read_count = 0
if cur_chrom in seen_chrom:
# sanity check that input bam file is sorted
raise ValueError("expected input BAM file to be sorted "
"but chromosome %s is repeated\n" % cur_chrom)
seen_chrom.add(cur_chrom)
cur_tid = read.tid
sys.stderr.write("starting chromosome %s\n" % cur_chrom)
# use HDF5 files if they are provided, otherwise use text
# files from SNP dir
if files.snp_tab_h5:
sys.stderr.write("reading SNPs from file '%s'\n" %
files.snp_tab_h5.filename)
snp_tab.read_h5(files.snp_tab_h5, files.snp_index_h5,
files.hap_h5, cur_chrom, samples)
else:
snp_filename = "%s/%s.snps.txt.gz" % (files.snp_dir, cur_chrom)
sys.stderr.write("reading SNPs from file '%s'\n" % snp_filename)
snp_tab.read_file(snp_filename)
sys.stderr.write("processing reads\n")
if read.is_secondary:
# this is a secondary alignment (i.e. the read was aligned more than
# once and this alignment has a score <= the best score)
read_stats.discard_secondary += 1
continue
if read.is_supplementary:
# this is a supplementary alignment (i.e. chimeric and not the representative alignment)
read_stats.discard_supplementary += 1
continue
if read.is_paired:
if read.mate_is_unmapped:
# other side of pair not mapped
# we could process as single... but these are not likely
# useful so discard
# process_single_read(read, read_stats, files,
# snp_tab, max_seqs, max_snps)
read_stats.discard_mate_unmapped += 1
elif(read.next_reference_name == cur_chrom or
read.next_reference_name == "="):
# other pair mapped to same chrom
# sys.stderr.write("flag: %s" % read.flag)
if not read.is_proper_pair:
# sys.stderr.write(' => improper\n')
read_stats.discard_improper_pair += 1
continue
# sys.stderr.write(' => proper\n')
if read.qname in read_pair_cache:
# we already saw prev pair, retrieve from cache
read1 = read_pair_cache[read.qname]
read2 = read
del read_pair_cache[read.qname]
cache_size -= 1
if read2.next_reference_start != read1.reference_start:
sys.stderr.write("WARNING: read pair positions "
"do not match for pair %s\n" %
read.qname)
else:
process_paired_read(read1, read2, read_stats,
files, snp_tab, max_seqs,
max_snps)
else:
# we need to wait for next pair
read_pair_cache[read.qname] = read
cache_size += 1
else:
# other side of pair mapped to different
# chromosome, discard this read
read_stats.discard_different_chromosome += 1
else:
process_single_read(read, read_stats, files, snp_tab,
max_seqs, max_snps)
if len(read_pair_cache) != 0:
sys.stderr.write("WARNING: failed to find pairs for %d "
"reads on this chromosome\n" %
len(read_pair_cache))
read_stats.discard_missing_pair += len(read_pair_cache)
read_stats.write(sys.stderr)
def slice_read(read, indices):
"""slice a read by an array of indices"""
return "".join(np.array(list(read))[indices])
def group_reads_by_snps(reads, snp_read_pos):
"""
group the reads by strings containing the combinations of ref/alt alleles
among the reads at the shared_snps. return a list of sets of reads - one
for each group
"""
# group the reads by the snp string and create a list to hold the groups
return [
set(reads) for hap, reads in
groupby(
# note that groupby needs the data to be sorted by the same key func
sorted(reads, key=lambda read: slice_read(read, snp_read_pos)),
key=lambda read: slice_read(read, snp_read_pos)
)
]
def read_pair_combos(old_reads, new_reads, max_seqs, snp_idx, snp_read_pos):
"""
Collects all unique combinations of read pairs. Handles the possibility of
shared SNPs among the pairs (ie doesn't treat them as independent).
Returns False before more than max_seqs pairs are created or None
when the original read pair has discordant alleles at shared SNPs.
Input:
old_reads - a tuple of length 2, containing the pair of original reads
new_reads - a list of two sets, each containing the reads generated
from old_reads for remapping
snp_index - a list of two lists of the indices of SNPs that overlap
with old_reads
snp_read_pos - a list of two lists of the positions in old_reads where
SNPs are located
Output:
unique_pairs - a set of tuples, each representing a unique pair of
new_reads
"""
# get the indices of the shared SNPs in old_reads
for i in range(len(snp_read_pos)):
# get the indices of the SNP indices that are in both reads
idx_idxs = np.nonzero(np.in1d(snp_idx[i], snp_idx[(i+1) % 2]))[0]
# now, use the indices in idx_idxs to get the relevant snp positions
# and convert positions to indices
snp_read_pos[i] = np.array(snp_read_pos[i], dtype=int)[idx_idxs] - 1
# check: are there discordant alleles at the shared SNPs?
# if so, discard these reads
if (
slice_read(old_reads[0], snp_read_pos[0])
!= slice_read(old_reads[1], snp_read_pos[1])
):
return None
# group reads by the alleles they have at shared SNPs
for i in range(len(new_reads)):
new_reads[i] = group_reads_by_snps(
new_reads[i], snp_read_pos[i]
)
unique_pairs = set()
# calculate unique combinations of read pairs only among reads that
# have the same alleles at shared SNPs (ie if they're in the correct group)
for group in range(len(new_reads[0])):
for pair in product(new_reads[0][group], new_reads[1][group]):
if len(unique_pairs) <= max_seqs:
unique_pairs.add(pair)
else:
return False
return unique_pairs
def process_paired_read(read1, read2, read_stats, files,
snp_tab, max_seqs, max_snps):
"""Checks if either end of read pair overlaps SNPs or indels
and writes read pair (or generated read pairs) to appropriate
output files"""
new_reads = []
pair_snp_idx = []
pair_snp_read_pos = []
for read in (read1, read2):
# check if either read overlaps SNPs or indels
# check if read overlaps SNPs or indels
snp_idx, snp_read_pos, \
indel_idx, indel_read_pos = snp_tab.get_overlapping_snps(read)
if len(indel_idx) > 0:
# for now discard this read pair, we want to improve this to handle
# the indel reads appropriately
read_stats.discard_indel += 2
# TODO: add option to handle indels instead of throwing out reads
return
if len(snp_idx) > 0:
ref_alleles = snp_tab.snp_allele1[snp_idx]
alt_alleles = snp_tab.snp_allele2[snp_idx]
count_ref_alt_matches(read, read_stats, snp_tab, snp_idx,
snp_read_pos)
# limit recursion here by discarding reads that
# overlap too many SNPs
if len(snp_read_pos) > max_snps:
read_stats.discard_excess_snps += 1
return
if files.hap_h5:
# generate reads using observed set of haplotypes
read_seqs = generate_haplo_reads(read.query_sequence,
snp_idx,
snp_read_pos,
ref_alleles, alt_alleles,
snp_tab.haplotypes,
snp_tab.phase)
else:
# generate all possible allelic combinations of reads
read_seqs = generate_reads(read.query_sequence, snp_read_pos,
ref_alleles, alt_alleles)
new_reads.append(read_seqs)
pair_snp_idx.append(snp_idx)
pair_snp_read_pos.append(snp_read_pos)
else:
# no SNPs or indels overlap this read
new_reads.append(set())
pair_snp_idx.append([])
pair_snp_read_pos.append([])
if len(new_reads[0]) == 0 and len(new_reads[1]) == 0:
# neither read overlapped SNPs or indels
files.keep_bam.write(read1)
files.keep_bam.write(read2)
read_stats.keep_pair += 1
else:
# add original version of both sides of pair
new_reads[0].add(read1.query_sequence)
new_reads[1].add(read2.query_sequence)
if len(new_reads[0]) + len(new_reads[1]) > max_seqs:
# quit now before generating a lot of read pairs
read_stats.discard_excess_reads += 2
return
# get all unique combinations of read pairs
unique_pairs = read_pair_combos(
(read1.query_sequence, read2.query_sequence), new_reads,
max_seqs, pair_snp_idx, pair_snp_read_pos
)
# if unique_pairs is None or False we should discard these reads
if unique_pairs is None:
read_stats.discard_discordant_shared_snp += 1
return
elif not unique_pairs:
read_stats.discard_excess_reads += 2
return
# remove original read pair, if present
unique_pairs.discard((read1.query_sequence, read2.query_sequence))
# write read pair to fastqs for remapping
write_pair_fastq(files.fastq1, files.fastq2, read1, read2,
unique_pairs)
# Write read to 'remap' BAM for consistency with previous
# implementation of script. Probably not needed and will result in
# BAM that is not coordinate sorted. Possibly remove this...
files.remap_bam.write(read1)
files.remap_bam.write(read2)
read_stats.remap_pair += 1
def process_single_read(read, read_stats, files, snp_tab, max_seqs,
max_snps):
"""Check if a single read overlaps SNPs or indels, and writes
this read (or generated read pairs) to appropriate output files"""
# check if read overlaps SNPs or indels
snp_idx, snp_read_pos, \
indel_idx, indel_read_pos = snp_tab.get_overlapping_snps(read)
if len(indel_idx) > 0:
# for now discard this read, we want to improve this to handle
# the indel reads appropriately
read_stats.discard_indel += 1
# TODO: add option to handle indels instead of throwing out reads
return
if len(snp_idx) > 0:
ref_alleles = snp_tab.snp_allele1[snp_idx]
alt_alleles = snp_tab.snp_allele2[snp_idx]
count_ref_alt_matches(read, read_stats, snp_tab, snp_idx,
snp_read_pos)
# limit recursion here by discarding reads that
# overlap too many SNPs
if len(snp_read_pos) > max_snps:
read_stats.discard_excess_snps += 1
return
if files.hap_h5:
read_seqs = generate_haplo_reads(read.query_sequence, snp_idx,
snp_read_pos,
ref_alleles, alt_alleles,
snp_tab.haplotypes,
snp_tab.phase)
else:
read_seqs = generate_reads(read.query_sequence, snp_read_pos,
ref_alleles, alt_alleles)
# we don't want the read that matches the original
read_seqs.discard(read.query_sequence)
if len(read_seqs) == 0:
# only read generated matches original read,
# so keep original
files.keep_bam.write(read)
read_stats.keep_single += 1
elif len(read_seqs) < max_seqs:
# write read to fastq file for remapping
write_fastq(files.fastq_single, read, read_seqs)
# write read to 'to remap' BAM
# this is probably not necessary with new implementation
# but kept for consistency with previous version of script
files.remap_bam.write(read)
read_stats.remap_single += 1
else:
# discard read
read_stats.discard_excess_reads += 1
return
else:
# no SNPs overlap read, write to keep file
files.keep_bam.write(read)
read_stats.keep_single += 1
def parse_samples(samples_str):
"""Gets list of samples from --samples argument. This may be
a comma-delimited string or a path to a file. If a file is provided
then the first column of the file is assumed to be the sample name"""
if samples_str is None:
return None
# first check if this is a path to a file
if os.path.exists(samples_str) and not os.path.isdir(samples_str):
samples = []
if util.is_gzipped(samples_str):
f = gzip.open(samples_str, "rt")
else:
f = open(samples_str, "rt")
for line in f:
# assume first token in line is sample name
samples.append(line.split()[0])
sys.stderr.write("read %d sample names from file '%s'\n" %
(len(samples), samples_str))
f.close()
else:
# otherwise assume comma-delimited string
if ("/" in samples_str or "\\" in samples_str):
sys.stderr.write("WARNING: --samples argument (%s) "
"does not look like list of sample names "
"(contains '/' or '\\') but is not path to "
"valid file. Assuming it is list of sample "
"names anyway." % samples_str)
samples = samples_str.split(",")
sys.stderr.write("SAMPLES: %s\n"% repr(samples))
return samples
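# Illustrative usage sketch (hypothetical values, not part of the original script);
# assuming the argument is not also a path to an existing file, a comma-delimited
# string is simply split on commas:
#
#     >>> parse_samples("NA12878,NA12891,NA12892")
#     ['NA12878', 'NA12891', 'NA12892']
#     >>> parse_samples(None) is None
#     True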
def main(bam_filenames, is_paired_end=False,
is_sorted=False, max_seqs=MAX_SEQS_DEFAULT,
max_snps=MAX_SNPS_DEFAULT, output_dir=None,
snp_dir=None, snp_tab_filename=None,
snp_index_filename=None,
haplotype_filename=None, samples=None):
files = DataFiles(bam_filenames, is_sorted, is_paired_end,
output_dir=output_dir,
snp_dir=snp_dir,
snp_tab_filename=snp_tab_filename,
snp_index_filename=snp_index_filename,
haplotype_filename=haplotype_filename)
filter_reads(files, max_seqs=max_seqs, max_snps=max_snps,
samples=samples)
files.close()
if __name__ == '__main__':
sys.stderr.write("command line: %s\n" % " ".join(sys.argv))
sys.stderr.write("python version: %s\n" % sys.version)
sys.stderr.write("pysam version: %s\n" % pysam.__version__)
sys.stderr.write("pytables version: %s\n" % tables.__version__)
util.check_pysam_version()
util.check_pytables_version()
util.check_python_version()
options = parse_options()
samples = parse_samples(options.samples)
main(options.bam_filename,
is_paired_end=options.is_paired_end, is_sorted=options.is_sorted,
max_seqs=options.max_seqs, max_snps=options.max_snps,
output_dir=options.output_dir,
snp_dir=options.snp_dir,
snp_tab_filename=options.snp_tab,
snp_index_filename=options.snp_index,
haplotype_filename=options.haplotype,
samples=samples)
|
bmvdgeijn/WASP
|
mapping/find_intersecting_snps.py
|
Python
|
apache-2.0
| 42,682
|
[
"pysam"
] |
566d8bad54d9f0a85bbadc21c1edbe27026061500f9512f341657aced8ec7edb
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import ast
import json
import logging
from collections import OrderedDict
from time import time
from lxml import html
from lxml import etree
from werkzeug import urls
from odoo.tools import pycompat
from odoo import api, models, tools
from odoo.tools.safe_eval import assert_valid_codeobj, _BUILTINS, _SAFE_OPCODES
from odoo.http import request
from odoo.modules.module import get_resource_path
from .qweb import QWeb, Contextifier
from .assetsbundle import AssetsBundle
_logger = logging.getLogger(__name__)
class IrQWeb(models.AbstractModel, QWeb):
""" Base QWeb rendering engine
* to customize ``t-field`` rendering, subclass ``ir.qweb.field`` and
create new models called :samp:`ir.qweb.field.{widget}`
Beware that if you need extensions or alterations which could be
incompatible with other subsystems, you should create a local object
inheriting from ``ir.qweb`` and customize that.
"""
_name = 'ir.qweb'
@api.model
def render(self, id_or_xml_id, values=None, **options):
""" render(id_or_xml_id, values, **options)
Render the template specified by the given name.
:param id_or_xml_id: name or etree (see get_template)
:param dict values: template values to be used for rendering
:param options: used to compile the template (the dict available for the rendering is frozen)
* ``load`` (function) overrides the load method
* ``profile`` (float) profile the rendering (use astor lib) (filter
profile line with time ms >= profile)
"""
for method in dir(self):
if method.startswith('render_'):
_logger.warning("Unused method '%s' is found in ir.qweb." % method)
context = dict(self.env.context, dev_mode='qweb' in tools.config['dev_mode'])
context.update(options)
return super(IrQWeb, self).render(id_or_xml_id, values=values, **context)
def default_values(self):
""" attributes add to the values for each computed template
"""
default = super(IrQWeb, self).default_values()
default.update(request=request, cache_assets=round(time()/180), true=True, false=False) # true and false added for backward compatibility to remove after v10
return default
# assume cache will be invalidated by third party on write to ir.ui.view
def _get_template_cache_keys(self):
""" Return the list of context keys to use for caching ``_get_template``. """
return ['lang', 'inherit_branding', 'editable', 'translatable', 'edit_translations', 'website_id']
# apply ormcache_context decorator unless in dev mode...
@tools.conditional(
'xml' not in tools.config['dev_mode'],
tools.ormcache('id_or_xml_id', 'tuple(options.get(k) for k in self._get_template_cache_keys())'),
)
def compile(self, id_or_xml_id, options):
return super(IrQWeb, self).compile(id_or_xml_id, options=options)
def load(self, name, options):
lang = options.get('lang', 'en_US')
env = self.env
if lang != env.context.get('lang'):
env = env(context=dict(env.context, lang=lang))
template = env['ir.ui.view'].read_template(name)
# QWeb's `read_template` will check if one of the first children of
# what we send to it has a "t-name" attribute having `name` as value
# to consider it has found it. As that will never be the case when working
# with view ids, child views or child primary views, force it here.
def is_child_view(view_name):
view_id = self.env['ir.ui.view'].get_view_id(view_name)
view = self.env['ir.ui.view'].browse(view_id)
return view.inherit_id is not None
if isinstance(name, pycompat.integer_types) or is_child_view(name):
for node in etree.fromstring(template):
if node.get('t-name'):
node.set('t-name', str(name))
return node.getparent()
return None # trigger "template not found" in QWeb
else:
return template
# order
def _directives_eval_order(self):
directives = super(IrQWeb, self)._directives_eval_order()
directives.insert(directives.index('call'), 'lang')
directives.insert(directives.index('field'), 'call-assets')
return directives
# compile directives
def _compile_directive_lang(self, el, options):
lang = el.attrib.pop('t-lang', 'en_US')
if el.get('t-call-options'):
el.set('t-call-options', el.get('t-call-options')[0:-1] + u', "lang": %s}' % lang)
else:
el.set('t-call-options', u'{"lang": %s}' % lang)
return self._compile_node(el, options)
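# Illustrative sketch (hypothetical template, not part of the original module):
# for an element such as <t t-call="some.template" t-lang="user.lang"/> the popped
# t-lang expression is folded into t-call-options, producing
# t-call-options='{"lang": user.lang}'; if t-call-options was already present,
# e.g. '{"foo": 1}', it becomes '{"foo": 1, "lang": user.lang}'.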
def _compile_directive_call_assets(self, el, options):
""" This special 't-call' tag can be used in order to aggregate/minify javascript and css assets"""
if len(el):
raise SyntaxError("t-call-assets cannot contain children nodes")
# self._get_asset(xmlid, options, css=css, js=js, debug=values.get('debug'), async=async, values=values)
return [
self._append(ast.Call(
func=ast.Attribute(
value=ast.Name(id='self', ctx=ast.Load()),
attr='_get_asset',
ctx=ast.Load()
),
args=[
ast.Str(el.get('t-call-assets')),
ast.Name(id='options', ctx=ast.Load()),
],
keywords=[
ast.keyword('css', self._get_attr_bool(el.get('t-css', True))),
ast.keyword('js', self._get_attr_bool(el.get('t-js', True))),
ast.keyword('debug', ast.Call(
func=ast.Attribute(
value=ast.Name(id='values', ctx=ast.Load()),
attr='get',
ctx=ast.Load()
),
args=[ast.Str('debug')],
keywords=[], starargs=None, kwargs=None
)),
ast.keyword('async', self._get_attr_bool(el.get('async', False))),
ast.keyword('values', ast.Name(id='values', ctx=ast.Load())),
],
starargs=None, kwargs=None
))
]
# for backward compatibility to remove after v10
def _compile_widget_options(self, el, directive_type):
field_options = super(IrQWeb, self)._compile_widget_options(el, directive_type)
if ('t-%s-options' % directive_type) in el.attrib:
if tools.config['dev_mode']:
_logger.warning("Use new syntax t-options instead of t-%s-options" % directive_type)
if not field_options:
field_options = el.attrib.pop('t-%s-options' % directive_type)
if field_options and 'monetary' in field_options:
try:
options = "{'widget': 'monetary'"
for k, v in json.loads(field_options).items():
if k in ('display_currency', 'from_currency'):
options = "%s, '%s': %s" % (options, k, v)
else:
options = "%s, '%s': '%s'" % (options, k, v)
options = "%s}" % options
field_options = options
_logger.warning("Use new syntax for '%s' monetary widget t-options (python dict instead of deprecated JSON syntax)." % etree.tostring(el))
except ValueError:
pass
return field_options
# end backward
# method called by computing code
@tools.conditional(
# in non-xml-debug mode we want assets to be cached forever, and the admin can force a cache clear
# by restarting the server after updating the source code (or using the "Clear server cache" in debug tools)
'xml' not in tools.config['dev_mode'],
tools.ormcache_context('xmlid', 'options.get("lang", "en_US")', 'css', 'js', 'debug', 'async', keys=("website_id",)),
)
def _get_asset(self, xmlid, options, css=True, js=True, debug=False, async=False, values=None):
files, remains = self._get_asset_content(xmlid, options)
asset = AssetsBundle(xmlid, files, remains, env=self.env)
return asset.to_html(css=css, js=js, debug=debug, async=async, url_for=(values or {}).get('url_for', lambda url: url))
@tools.ormcache_context('xmlid', 'options.get("lang", "en_US")', keys=("website_id",))
def _get_asset_content(self, xmlid, options):
options = dict(options,
inherit_branding=False, inherit_branding_auto=False,
edit_translations=False, translatable=False,
rendering_bundle=True)
env = self.env(context=options)
# TODO: This helper can be used by any template that wants to embed the backend.
# It is currently necessary because the ir.ui.view bundle inheritance does not
# match the module dependency graph.
def get_modules_order():
if request:
from odoo.addons.web.controllers.main import module_boot
return json.dumps(module_boot())
return '[]'
template = env['ir.qweb'].render(xmlid, {"get_modules_order": get_modules_order})
files = []
remains = []
for el in html.fragments_fromstring(template):
if isinstance(el, pycompat.string_types):
remains.append(pycompat.to_text(el))
elif isinstance(el, html.HtmlElement):
href = el.get('href', '')
src = el.get('src', '')
atype = el.get('type')
media = el.get('media')
can_aggregate = not urls.url_parse(href).netloc and not href.startswith('/web/content')
if el.tag == 'style' or (el.tag == 'link' and el.get('rel') == 'stylesheet' and can_aggregate):
if href.endswith('.sass'):
atype = 'text/sass'
elif href.endswith('.less'):
atype = 'text/less'
if atype not in ('text/less', 'text/sass'):
atype = 'text/css'
path = [segment for segment in href.split('/') if segment]
filename = get_resource_path(*path) if path else None
files.append({'atype': atype, 'url': href, 'filename': filename, 'content': el.text, 'media': media})
elif el.tag == 'script':
atype = 'text/javascript'
path = [segment for segment in src.split('/') if segment]
filename = get_resource_path(*path) if path else None
files.append({'atype': atype, 'url': src, 'filename': filename, 'content': el.text, 'media': media})
else:
remains.append(html.tostring(el, encoding='unicode'))
else:
try:
remains.append(html.tostring(el, encoding='unicode'))
except Exception:
# not yet implemented: re-raise as NotImplementedError
raise NotImplementedError
return (files, remains)
def _get_field(self, record, field_name, expression, tagName, field_options, options, values):
field = record._fields[field_name]
field_options['tagName'] = tagName
field_options['expression'] = expression
field_options['type'] = field_options.get('widget', field.type)
inherit_branding = options.get('inherit_branding', options.get('inherit_branding_auto') and record.check_access_rights('write', False))
field_options['inherit_branding'] = inherit_branding
translate = options.get('edit_translations') and options.get('translatable') and field.translate
field_options['translate'] = translate
# field converter
model = 'ir.qweb.field.' + field_options['type']
converter = self.env[model] if model in self.env else self.env['ir.qweb.field']
# get content
content = converter.record_to_html(record, field_name, field_options)
attributes = converter.attributes(record, field_name, field_options, values)
return (attributes, content, inherit_branding or translate)
def _get_widget(self, value, expression, tagName, field_options, options, values):
field_options['type'] = field_options['widget']
field_options['tagName'] = tagName
field_options['expression'] = expression
# field converter
model = 'ir.qweb.field.' + field_options['type']
converter = self.env[model] if model in self.env else self.env['ir.qweb.field']
# get content
content = converter.value_to_html(value, field_options)
attributes = OrderedDict()
attributes['data-oe-type'] = field_options['type']
attributes['data-oe-expression'] = field_options['expression']
return (attributes, content, None)
# compile expression add safe_eval
def _compile_expr(self, expr):
""" Compiles a purported Python expression to ast, verifies that it's safe
(according to safe_eval's semantics) and alter its variable references to
access values data instead
"""
# the string must be stripped, otherwise whitespace added before the start for
# formatting purposes is going to break parse/compile
st = ast.parse(expr.strip(), mode='eval')
assert_valid_codeobj(
_SAFE_OPCODES,
compile(st, '<>', 'eval'), # could be expr, but eval *should* be fine
expr
)
# ast.Expression().body -> expr
return Contextifier(_BUILTINS).visit(st).body
def _get_attr_bool(self, attr, default=False):
if attr:
if attr is True:
return ast.Name(id='True', ctx=ast.Load())
attr = attr.lower()
if attr in ('false', '0'):
return ast.Name(id='False', ctx=ast.Load())
elif attr in ('true', '1'):
return ast.Name(id='True', ctx=ast.Load())
return ast.Name(id=str(attr if attr is False else default), ctx=ast.Load())
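# Illustrative sketch of _get_attr_bool (hypothetical calls, not part of the
# original module): the attribute value is mapped to an ast.Name node.
#
#     self._get_attr_bool('1')                 # -> ast.Name(id='True')
#     self._get_attr_bool('false')             # -> ast.Name(id='False')
#     self._get_attr_bool(None)                # -> ast.Name(id='False')  (default)
#     self._get_attr_bool(None, default=True)  # -> ast.Name(id='True')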
|
Aravinthu/odoo
|
odoo/addons/base/ir/ir_qweb/ir_qweb.py
|
Python
|
agpl-3.0
| 14,416
|
[
"VisIt"
] |
db24df4a85d98fd3836dc85f1b2c3c57cb3fe217d952d8d143818468d06108c4
|
#!/usr/bin/env python
__author__ = 'waroquiers'
import unittest
from pymatgen.analysis.chemenv.utils.func_utils import CSMFiniteRatioFunction
from pymatgen.analysis.chemenv.utils.func_utils import CSMInfiniteRatioFunction
from pymatgen.analysis.chemenv.utils.func_utils import DeltaCSMRatioFunction
import numpy as np
class FuncUtilsTest(unittest.TestCase):
def test_CSMFiniteRatioFunction(self):
max_csm = 8.0
alpha = 1.0
csm_finite_ratio = CSMFiniteRatioFunction(function='power2_decreasing_exp',
options_dict={'max_csm': max_csm,
'alpha': alpha})
self.assertEqual(csm_finite_ratio.evaluate(0.0), 1.0)
self.assertEqual(csm_finite_ratio.evaluate(2.0), 0.43807544047766522)
self.assertEqual(csm_finite_ratio.evaluate(4.0), 0.15163266492815836)
self.assertEqual(csm_finite_ratio.evaluate(8.0), 0.0)
self.assertEqual(csm_finite_ratio.evaluate(9.0), 0.0)
max_csm = 8.0
alpha = 2.0
csm_finite_ratio = CSMFiniteRatioFunction(function='power2_decreasing_exp',
options_dict={'max_csm': max_csm,
'alpha': alpha})
self.assertEqual(csm_finite_ratio.evaluate(0.0), 1.0)
self.assertEqual(csm_finite_ratio.evaluate(4.0), 0.091969860292860584)
self.assertEqual(csm_finite_ratio.evaluate(8.0), 0.0)
self.assertEqual(csm_finite_ratio.evaluate(9.0), 0.0)
max_csm = 4.0
alpha = 1.0
csm_finite_ratio = CSMFiniteRatioFunction(function='power2_decreasing_exp',
options_dict={'max_csm': max_csm,
'alpha': alpha})
self.assertEqual(csm_finite_ratio.evaluate(0.0), 1.0)
self.assertEqual(csm_finite_ratio.evaluate(1.0), 0.43807544047766522)
self.assertEqual(csm_finite_ratio.evaluate(2.0), 0.15163266492815836)
self.assertEqual(csm_finite_ratio.evaluate(4.0), 0.0)
self.assertEqual(csm_finite_ratio.evaluate(4.5), 0.0)
self.assertRaises(ValueError, CSMFiniteRatioFunction,
function='powern_decreasing',
options_dict={'max_csm': max_csm,
'nn': 2})
def test_CSMInfiniteRatioFunction(self):
max_csm = 8.0
self.assertRaises(ValueError, CSMInfiniteRatioFunction,
function='power2_inverse_decreasing',
options_dict={'max_csm': max_csm,
'nn': 2})
self.assertRaises(ValueError, CSMInfiniteRatioFunction,
function='power2_tangent_decreasing',
options_dict={'max_csm': max_csm})
csm_infinite_ratio = CSMInfiniteRatioFunction(function='power2_inverse_decreasing',
options_dict={'max_csm': max_csm})
# csm_infinite_ratio = CSMInfiniteRatioFunction(function='power2_inverse_decreasing')
self.assertEqual(csm_infinite_ratio.evaluate(0.0), np.inf)
self.assertEqual(csm_infinite_ratio.evaluate(2.0), 2.25)
self.assertEqual(csm_infinite_ratio.evaluate(4.0), 0.5)
self.assertEqual(csm_infinite_ratio.evaluate(8.0), 0.0)
self.assertEqual(csm_infinite_ratio.evaluate(9.0), 0.0)
csm_infinite_ratio = CSMInfiniteRatioFunction(function='power2_inverse_power2_decreasing',
options_dict={'max_csm': max_csm})
self.assertEqual(csm_infinite_ratio.evaluate(0.0), np.inf)
self.assertEqual(csm_infinite_ratio.evaluate(2.0), 9.0)
self.assertEqual(csm_infinite_ratio.evaluate(4.0), 1.0)
self.assertEqual(csm_infinite_ratio.evaluate(8.0), 0.0)
self.assertEqual(csm_infinite_ratio.evaluate(9.0), 0.0)
max_csm = 12.0
csm_infinite_ratio = CSMInfiniteRatioFunction(function='power2_inverse_power2_decreasing',
options_dict={'max_csm': max_csm})
self.assertEqual(csm_infinite_ratio.evaluate(0.0), np.inf)
self.assertEqual(csm_infinite_ratio.evaluate(3.0), 9.0)
self.assertEqual(csm_infinite_ratio.evaluate(6.0), 1.0)
self.assertEqual(csm_infinite_ratio.evaluate(12.0), 0.0)
self.assertEqual(csm_infinite_ratio.evaluate(20.0), 0.0)
def test_DeltaCSMRatioFunction(self):
self.assertRaises(ValueError, DeltaCSMRatioFunction,
function='smoothstep',
options_dict={})
self.assertRaises(ValueError, DeltaCSMRatioFunction,
function='smootherstep',
options_dict={})
self.assertRaises(ValueError, DeltaCSMRatioFunction,
function='smootherstep',
options_dict={'delta_csm_min': 1.0})
self.assertRaises(ValueError, DeltaCSMRatioFunction,
function='smootherstep',
options_dict={'delta_csm_max': 1.0})
delta_csm_ratio_function = DeltaCSMRatioFunction(function='smootherstep',
options_dict={'delta_csm_min': 1.0, 'delta_csm_max': 4.0})
self.assertEqual(delta_csm_ratio_function.evaluate(0.0), 0.0)
self.assertEqual(delta_csm_ratio_function.evaluate(1.0), 0.0)
self.assertEqual(delta_csm_ratio_function.evaluate(2.5), 0.5)
self.assertEqual(delta_csm_ratio_function.evaluate(4.0), 1.0)
self.assertEqual(delta_csm_ratio_function.evaluate(5.0), 1.0)
delta_csm_ratio_function = DeltaCSMRatioFunction(function='smootherstep',
options_dict={'delta_csm_min': 2.0, 'delta_csm_max': 8.0})
self.assertEqual(delta_csm_ratio_function.evaluate(0.0), 0.0)
self.assertEqual(delta_csm_ratio_function.evaluate(2.0), 0.0)
self.assertEqual(delta_csm_ratio_function.evaluate(5.0), 0.5)
self.assertEqual(delta_csm_ratio_function.evaluate(8.0), 1.0)
self.assertEqual(delta_csm_ratio_function.evaluate(12.0), 1.0)
if __name__ == "__main__":
unittest.main()
|
mbkumar/pymatgen
|
pymatgen/analysis/chemenv/utils/tests/test_func_utils.py
|
Python
|
mit
| 6,455
|
[
"pymatgen"
] |
985f5ed6502fe12f4a80ac9fea117afe54e4bc8d7c677b679212b50e4a4f8651
|
# Copyright (C) 2017,2018(1H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++ -> Powered by HeSpaDDA algorithm developed by [email protected]
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
**************************************************
loadbal - HeSpaDDA load balancing python functions
**************************************************
* `qbicity(box_size,rc,skin)`:
It's a function to check the system size cubicity, with a tolerance given by the rc+skin
`dLnorm` - (Lx,Ly,Lz)/Lmax
* `changeIndex(dN,ima,imi)`:
It's a function that sorts the nodeGrid, according to the index of the maximum size of the
box (ima) and its corresponding minimum (imi)
* `nodeGridSizeCheck(node_gridX,node_gridY,node_gridZ)`:
It's a function that verifies whether it is worthwhile to take care of special DD for
inhomogeneous systems; otherwise the homogeneous DD is triggered and returned as x, y, z flags
* `halfDecomp(adrCenter1D,rc_skin,eh_size,halfCores1D,cellsX,ratioMS,sizeX,idealGas)`:
It's a function that decomposes half of the box in one dimension (1D), assuming symmetry
in the initial simulation box. Even with irregularities in the DD it will find the half-Box-DD
* `addHsymmetry(halfNeilListX,eh_size,rc_skin,node_gridX,cellsX,ratioMS,sizeX,idealGas)`:
It's a function that uses the previously decomposed half-box (from halfDecomp) and unfolds it to
match the whole neighbors list. If it does not match the whole neighbors list, due to any
asymmetry in the number of cores or the number of cells, it will be redistributed region by
region, considering the whole simulation box size.
* `adaptNeiList(neiListxin)`:
It's a function which adapts the number of cells that go into each core into the data structure of
left and right cell lists for example for 4 cores and 8 cells [3,4,5,8] to [0,3,4,5,8]
* `reDistCellsHom(node_gridX,sizeX,rc_skin)`:
It's a function which distributes the cells into nodes as if they were homogeneous. It also
applies to inhomogeneous systems whenever there are fewer than 2 cores per direction: X, Y or Z.
* `reDistCells(halfNeilListX,cellsX,eh_size,rc_skin,node_gridX,ratioMS,sizeX,idealGas)`:
It's a function which matches the proportion of cells to the cores on a dual-resolution-region
basis
* `redistDeltaRandomly(wholeNeiListX,deltaCells,totNodesEH=0,biased=0)`:
It's a function which distributes the remaining DELTA cells into nodes semi-randomly. By default
the bias applies to the CG-region and it assumes there cannot be more than 3 extra cells to
redistribute, because this is the total number of regions in the simulation box `|CG|EH|CG|` (by
default the CG bias is to the left; this could be updated in the dynamic load balancing case!)
* `findNodesMS(node_gridX,totCellsEH,totCellsCG,ratioMS,sizeX,eh_size,idealGas,procsWEH=1.)`:
It's a function which normalizes the number of cells to go to the EH and CG regions and finds
the ideal corresponding number of nodes EH and CG
"""
from random import randint
__author__ = 'Dr. Horacio V Guzman'
__email__ = 'horacio.v.g at gmail dot com'
__version__ = '1.0'
__all__ = [
'qbicity', 'changeIndex', 'nodeGridSizeCheck',
'halfDecomp', 'addHsymmetry', 'adaptNeiList',
'reDistCellsHom', 'reDistCells', 'redistDeltaRandomly',
'findNodesMS'
]
# This function verifies if the simulation box is of cubic dimensions
def qbicity(box_size, rc, skin, cellDomTol=2.0):
rc_skin = rc + skin
box_aux = [box_size[0], box_size[1], box_size[2]]
indMax = box_aux.index(max(box_aux))
indMin = box_aux.index(min(box_aux))
if (box_size[indMax] - box_size[indMin]) < (cellDomTol * rc_skin):
qFlag = bool(1)
else:
qFlag = bool(0)
return qFlag
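# Illustrative sketch (hypothetical values, not part of the original module):
# the box counts as cubic when the largest and smallest edges differ by less
# than cellDomTol * (rc + skin).
#
#     >>> qbicity([20., 20., 22.], 2.5, 0.3)   # 2.0 < 2.0 * 2.8
#     True
#     >>> qbicity([20., 20., 40.], 2.5, 0.3)   # 20.0 >= 2.0 * 2.8
#     False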
# This function sorts the nodeGrid, according to the index of the maximum size of the box (indMax) and its corresponding minimum (indMin)
def changeIndex(dN, indMax, indMin):
aux = [0, 0, 0]
ndN = [0, 0, 0]
dIndMax = dN.index(max(dN))
dIndMin = dN.index(min(dN))
ndN[indMax] = dN[dIndMax]
ndN[indMin] = dN[dIndMin]
listInd = range(3)
if indMax > indMin:
listInd.pop(indMax)
listInd.pop(indMin)
else:
listInd.pop(indMin)
listInd.pop(indMax)
aux = dN[:]
if dIndMax > dIndMin:
aux.pop(dIndMax)
aux.pop(dIndMin)
else:
aux.pop(dIndMin)
aux.pop(dIndMax)
ndN[listInd[0]] = aux[0]
return ndN
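# Illustrative sketch (hypothetical values, not part of the original module):
# the largest entry of dN is moved to the axis with the largest box edge (indMax)
# and the smallest entry to the axis with the smallest edge (indMin).
#
#     >>> changeIndex([4, 2, 3], 1, 2)
#     [3, 4, 2]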
# This function verifies whether it is worthwhile to take care of special DD for inhomogeneous systems; otherwise the homogeneous DD is triggered and returned as x, y, z flags
def nodeGridSizeCheck(node_gridX, node_gridY, node_gridZ):
flx = 0
fly = 0
flz = 0
if node_gridX < 3:
flx = node_gridX
else:
flx = 0
if node_gridY < 3:
fly = node_gridY
else:
fly = 0
if node_gridZ < 3:
flz = node_gridZ
else:
flz = 0
return flx, fly, flz
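# Illustrative sketch (hypothetical values, not part of the original module):
# directions with fewer than 3 cores report their core count, the others report 0.
#
#     >>> nodeGridSizeCheck(2, 4, 1)
#     (2, 0, 1)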
# This function decomposes half of the box in one dimension (1D), assuming symmetry in the initial simulation box. Even with irregularities in the box dimension and cell-size relation it will find the half-Box-DD
def halfDecomp(adrCenter1D, rc_skin, eh_size, halfCores1D, cellsX, ratioMS, sizeX, idealGas):
# this value is only in case the Ideal Gas will in reality improve any calculation or communication (i.e. Improve notoriously the sims parallelization, which is not the case yet)
pLoadIG = 1
cellSizes = []
usedCores = 0
totCellsEH = round(2. * eh_size / rc_skin - 0.5)
totCellsCG = cellsX - totCellsEH
totNodesCG, totNodesEH = findNodesMS(halfCores1D * 2, totCellsEH, totCellsCG, ratioMS, sizeX, eh_size, idealGas)
for i in xrange(halfCores1D):
if idealGas:
if i == 0:
# 2do: SuperCell stuff
cellSizes.append(round((adrCenter1D - eh_size) / rc_skin - 0.5) + pLoadIG)
usedCores = 1 # For Ideal Gas purposes only 1 core covers the low-resolution region
else:
[cellSizes.append(round((eh_size) / rc_skin / (halfCores1D - usedCores) - 0.5)) for i in xrange(usedCores, halfCores1D)] # 2do: SuperCell stuff
deltaCells = round((eh_size) / rc_skin - 0.5) - round((eh_size) / rc_skin / (halfCores1D - usedCores) - 0.5) * (halfCores1D - usedCores) # 2do: SuperCell stuff
# Both applies a benefit to the usedCores=1 in the sense that will have one cell less loaded to CG-region. But also a penalty to the usedCores=1 that will have to manage any cells non distributed before
cellSizes[usedCores] = cellSizes[usedCores] + deltaCells - pLoadIG
return cellSizes
else: # Applies to all other systems besides the Ideal Gas
if totNodesCG / 2. >= 2.:
[cellSizes.append(round((adrCenter1D - eh_size) / rc_skin / totNodesCG / 2. - 0.5)) for j in xrange(int(totNodesCG / 2.))] # 2do: SuperCell stuff
else:
# 2do: SuperCell stuff
cellSizes.append(round((adrCenter1D - eh_size) / rc_skin - 0.5))
if totNodesEH / 2. >= 2.:
[cellSizes.append(round((eh_size) / rc_skin / (totNodesEH / 2.) - 0.5)) for i in xrange(int(totNodesEH / 2.))] # 2do: SuperCell stuff
else:
# 2do: SuperCell stuff
cellSizes.append(round((eh_size) / rc_skin - 0.5))
return cellSizes
return cellSizes
# This function uses the previously decomposed half-box (from halfDecomp) and unfolds it to match the whole neighbors list. If it does not match the whole neighbors list, due to any asymmetry in the number of cores or the number of cells, it will be redistributed region by region, considering the whole simulation box size.
def addHsymmetry(halfNeilListX, eh_size, rc_skin, node_gridX, cellsX, ratioMS, sizeX, idealGas):
wholeNeilListX = []
aux = halfNeilListX[:]
# unfolds halfDecomp and attempts to match the whole neighbor list.
aux.reverse()
# here half neighbor list X turns into whole neighbor list
halfNeilListX.extend(aux)
aux2 = 0
if len(halfNeilListX) < node_gridX:
if (node_gridX - len(halfNeilListX)) == 1: # Verifies if number of cores are even
aux2 = halfNeilListX[len(halfNeilListX) - 1]
halfNeilListX.append(round(halfNeilListX[len(halfNeilListX) - 1] / 2. - 0.5))
halfNeilListX[len(halfNeilListX) - 2] = aux2 - halfNeilListX[len(halfNeilListX) - 1]
if sum(halfNeilListX) != cellsX:
wholeNeilListX = reDistCells(halfNeilListX, cellsX, eh_size, rc_skin, node_gridX, ratioMS, sizeX, idealGas)
else:
if any([v == 0 for v in halfNeilListX]): # Recently added 138, 139 and 140
wholeNeilListX = reDistCells(halfNeilListX, cellsX, eh_size, rc_skin, node_gridX, ratioMS, sizeX, idealGas)
else:
print "HeSpaDDA message: addHsymmetry all tests passed although not all cells were used"
wholeNeilListX = halfNeilListX[:]
else:
print "HeSpaDDA message: The distributed cores are not matching the available ones (++ reDistCells())"
wholeNeilListX = reDistCells(halfNeilListX, cellsX, eh_size, rc_skin, node_gridX, ratioMS, sizeX, idealGas)
# To be determined if additional reDitsCells should be called!
elif len(halfNeilListX) == node_gridX and aux2 == 0:
if sum(halfNeilListX) != cellsX:
print "HeSpaDDA message: The distributed cells are not matching the available ones"
wholeNeilListX = reDistCells(halfNeilListX, cellsX, eh_size, rc_skin, node_gridX, ratioMS, sizeX, idealGas)
else:
# Recently added 152, 153 and 154
if any([v == 0 for v in halfNeilListX]):
wholeNeilListX = reDistCells(halfNeilListX, cellsX, eh_size, rc_skin, node_gridX, ratioMS, sizeX, idealGas)
else:
print "HeSpaDDA message: addHsymmetry all tests passed although not all cells are used"
wholeNeilListX = halfNeilListX[:]
else:
print "HeSpaDDA message: The distributed cores are not matching the available ones", halfNeilListX
halfNeilListX[len(halfNeilListX) - 2] = halfNeilListX[len(halfNeilListX) - 1] + halfNeilListX[len(halfNeilListX) - 2]
halfNeilListX.pop(len(halfNeilListX) - 1)
print "HeSpaDDA message: During DD a core has been reduced"
wholeNeilListX = reDistCells(halfNeilListX, cellsX, eh_size, rc_skin, node_gridX, ratioMS, sizeX, idealGas)
return wholeNeilListX
# This function adapts the number of cells that go into each core into the data structure of left and right cell lists, for example for 4 cores and 8 cells [3,4,5,8] to [0,3,4,5,8]
def adaptNeiList(neiListxin):
neiListx = []
neiListx.append(0)
[neiListx.append(neiListxin[i] + neiListx[i]) for i in xrange(len(neiListxin) - 1)]
neiListx.append(neiListxin[len(neiListxin) - 1] + neiListx[len(neiListx) - 1])
print "HeSpaDDA message: Your Cells Neighbor Lists is:", neiListx
return neiListx
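# Illustrative sketch (hypothetical values, not part of the original module):
# the per-core cell counts are turned into cumulative cell boundaries.
#
#     >>> adaptNeiList([2, 3, 3])   # also prints the HeSpaDDA message
#     [0, 2, 5, 8]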
# This function distributes the cells into nodes as if they were homogeneous. It also applies to inhomogeneous systems whenever there are fewer than 2 cores per direction: X, Y or Z.
def reDistCellsHom(node_gridX, sizeX, rc_skin):
wholeNeiListX = []
cellsX = round(sizeX / rc_skin - 0.5)
if node_gridX % 2 == 0 and cellsX % 2 == 0:
[wholeNeiListX.append(cellsX / node_gridX) for i in xrange(node_gridX)]
elif node_gridX % 2 != 0 and cellsX % 2 != 0:
[wholeNeiListX.append(round((cellsX) / node_gridX - 0.5)) for i in xrange(node_gridX)]
if int(cellsX - sum(wholeNeiListX)) != 0:
# passing Delta as cellsX-sum(wholeNeiListX)
wholeNeiListX = redistDeltaRandomly(wholeNeiListX, cellsX - sum(wholeNeiListX), 0)
else:
print "HeSpaDDA message: PASS appears...here, take a look at this value Px/Cx", round((cellsX) / node_gridX - 0.5)
pass
else:
if node_gridX % 2 == 0 and cellsX % 2 != 0:
[wholeNeiListX.append((cellsX - 1) / node_gridX) for i in xrange(node_gridX)]
# Punishing the last one
wholeNeiListX[node_gridX - 1] = wholeNeiListX[node_gridX - 1] + 1
elif cellsX % 2 == 0 and node_gridX % 2 != 0:
[wholeNeiListX.append(round((cellsX) / node_gridX - 0.5)) for i in xrange(node_gridX)]
wholeNeiListX = redistDeltaRandomly(wholeNeiListX, cellsX - sum(wholeNeiListX), 0)
return wholeNeiListX
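# Illustrative sketch (hypothetical values, not part of the original module):
# with 4 nodes and a box of size 16.4 (rc_skin = 1.0) there are 16 cells, both
# counts are even and the cells are split evenly across the nodes.
#
#     >>> reDistCellsHom(4, 16.4, 1.0)
#     [4.0, 4.0, 4.0, 4.0]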
# This function matches the proportion of cells to the cores on a dual-resolution-region basis
def reDistCells(halfNeilListX, cellsX, eh_size, rc_skin, node_gridX, ratioMS, sizeX, idealGas):
#global preFactCen
preFactCen = 1.0
print "HeSpaDDA message: Cells redistribution will improve whenever the Cells1D are at least twice as big as Nodes1D!"
wholeNeiListX = []
totCellsEH = round(2. * eh_size / rc_skin - 0.5)
totCellsCG = cellsX - totCellsEH
totNodesCG, totNodesEH = findNodesMS(node_gridX, totCellsEH, totCellsCG, ratioMS, sizeX, eh_size, idealGas)
print "HeSpaDDA message: Cores in Both LR and HR, are:", totNodesCG, totNodesEH
if idealGas: # This represents the Ideal Gas (IG)!!! (OJO)
wholeNeiListX_EH = []
wholeNeiListX_CG = []
wholeNeiListX = []
# High Resolution region
if totNodesEH % 2 == 0 and totCellsEH % 2 == 0:
[wholeNeiListX_EH.append(totCellsEH / totNodesEH) for i in xrange(totNodesEH)]
print "HeSpaDDA message IG: HR region: P and C are EVEN, given by:"
print wholeNeiListX_EH
elif totNodesEH % 2 != 0 and totCellsEH % 2 != 0:
[wholeNeiListX_EH.append(round(totCellsEH / totNodesEH - 0.5)) for i in xrange(totNodesEH)]
if int(totCellsEH - sum(wholeNeiListX_EH)) != 0:
wholeNeiListX_EH[0:totNodesEH] = redistDeltaRandomly(wholeNeiListX_EH[0:totNodesEH], totCellsEH - sum(wholeNeiListX_EH[0:totNodesEH]), 0)
else:
print "HeSpaDDA message IG: HR region: PASS appears...here, take a look at this value Px/Cx", round(totCellsEH / totNodesEH - 0.5)
pass
else:
if totNodesEH % 2 == 0 and totCellsEH % 2 != 0:
[wholeNeiListX_EH.append((totCellsEH - 1) / totNodesEH) for i in xrange(totNodesEH)]
wholeNeiListX_EH[totNodesEH - 1] = wholeNeiListX_EH[totNodesEH - 1] + 1
print "HeSpaDDA message IG: HR region: P and noC are EVEN"
elif totCellsEH % 2 == 0 and totNodesEH % 2 != 0:
[wholeNeiListX_EH.append(round((totCellsEH) / totNodesEH - 0.5)) for i in xrange(totNodesEH)]
# passing Delta cells to be redistributed semi-randomly (after prioritizying the EH-region, additional cells should go to the CG-region).
wholeNeiListX_EH[0:totNodesEH] = redistDeltaRandomly(wholeNeiListX_EH[0:totNodesEH], totCellsEH - sum(wholeNeiListX_EH[0:totNodesEH]), 0)
print "HeSpaDDA message IG: HR region: noP and C are EVEN"
#@@@ Low Resolution region
if totNodesCG % 2 == 0 and totCellsCG % 2 == 0:
[wholeNeiListX_CG.append(totCellsCG / totNodesCG) for i in xrange(totNodesCG)]
print "HeSpaDDA message IG: LR region: P and C are EVEN, given by:"
print wholeNeiListX_CG
elif totNodesCG % 2 != 0 and totCellsCG % 2 != 0:
[wholeNeiListX_CG.append(round(totCellsCG / totNodesCG - 0.5)) for i in xrange(totNodesCG)]
if int(totCellsCG - sum(wholeNeiListX_CG)) != 0:
wholeNeiListX_CG[0:totNodesCG] = redistDeltaRandomly(wholeNeiListX_CG[0:totNodesCG], totCellsCG - sum(wholeNeiListX_CG[0:totNodesCG]), 0)
else:
print "HeSpaDDA message IG: LR region: PASS appears...here, take a look at this value Px/Cx", round(totCellsCG / totNodesCG - 0.5)
pass
else:
if totNodesCG % 2 == 0 and totCellsCG % 2 != 0:
[wholeNeiListX_CG.append((totCellsCG - 1) / totNodesCG) for i in xrange(totNodesCG)]
wholeNeiListX_CG[totNodesCG - 1] = wholeNeiListX_CG[totNodesCG - 1] + 1
print "HeSpaDDA message IG: LR region: P and noC are EVEN"
print wholeNeiListX_CG
elif totCellsCG % 2 == 0 and totNodesCG % 2 != 0:
[wholeNeiListX_CG.append(round((totCellsCG) / totNodesCG - 0.5)) for i in xrange(totNodesCG)]
# passing Delta cells to be redistributed semi-randomly (after prioritizying the EH-region, additional cells may come to the CG-region).
wholeNeiListX_CG[0:totNodesCG] = redistDeltaRandomly(wholeNeiListX_CG[0:totNodesCG], totCellsCG - sum(wholeNeiListX_CG[0:totNodesCG]), 0)
print "HeSpaDDA message IG: LR region: noP and C are EVEN"
# Index of the middle LR region begin of HR
indCG1 = int((len(wholeNeiListX_CG)) / 2)
print "HeSpaDDA message indexing: The CG first subregion index is:", indCG1
# Index of the start of the second LR region end of HR
indEH1 = indCG1 + int(totNodesEH)
# Ensembling the array of Cells Neighbors list
wholeNeiListX.extend(wholeNeiListX_CG[0:indCG1])
wholeNeiListX.extend(wholeNeiListX_EH)
wholeNeiListX.extend(wholeNeiListX_CG[indCG1:len(wholeNeiListX_CG)])
#@ not Ideal Gas
else:
if (totNodesCG + totNodesEH) > node_gridX:
# minimum number of cores(nodes=cores) for the CG region version 1.0
if totNodesCG > 2:
# Assign exceeding number of cores to LR
totNodesCG = totNodesCG + \
((totNodesCG + totNodesEH) - node_gridX)
totNodesEH = node_gridX - totNodesCG
else:
totNodesCG = 2 # At least use 2 core for the CG region
totNodesEH = node_gridX - totNodesCG
else:
print "HeSpaDDA message indexing: Nodes CG and Nodes EH are respectively,", totNodesCG, totNodesEH
if node_gridX % 2 == 0 and cellsX % 2 == 0:
wholeNeiListX_EH = [0] * (node_gridX)
wholeNeiListX_CG = [0] * (node_gridX)
wholeNeiListX = [0] * (node_gridX)
if totNodesCG % 2 == 0:
indCG1 = int(totNodesCG / 2)
indEH1 = indCG1 + int(totNodesEH)
au1 = range(int(indCG1))
# au1 contains the index of CG cells in the whole neighbor list
au1.extend(range(indEH1, indEH1 + indCG1))
# internal parameter which in case of dynamic LB, if more weight of DD goes to the center by defaults (EH -region Flag =1) and hence DD focus on any CG-region (default value decomposes cells to CG). preFactCen=ratioMS per default(OLD)
centralFlagEH = 1
# new stuff TO BE CHECKED
if centralFlagEH > 0: # H's WL as well #NHEW
for i in xrange(int(ratioMS), int(cellsX + 1)):
tempWNL = [0] * (node_gridX)
ratioMS2t = round(1. * (cellsX / (1. * pow(i, 1. / 3.))) - 0.5)
print "HeSpaDDA message indexing: the Ratio MS2Cellslot 'cells weight' in the CG region is:", ratioMS2t
for j in au1: # This loop goes over the CG-regions
tempWNL[j] = round(ratioMS2t * totCellsCG / totNodesCG - 0.5)
totCellsEHtemp = cellsX - sum(tempWNL)
if totCellsEHtemp < totNodesEH and totCellsEHtemp > 0:
print "HeSpaDDA message indexing: Error with pass Factor MS-2-Cells, no worries HeSpaDDA will find another for you!", totCellsEHtemp
else:
# i was not the cubic root... yet
preFactCen = pow(i, 1. / 3.)
# if int(totCellsEHtemp)-1==int(totNodesEH):
# totCellsCG=totCellsCG+1
print "HeSpaDDA message indexing: The rescaling preFactor for the Cells distribution is...an optimized value, like this:", preFactCen
break
break # the first value found for preFactCen takes you out of the for
# Now redistributing the cells to nodes with a proper dimension factor as a f(dimensions,ratioMS, coresEH, coresCG)
for i in au1:
numRegBox = 3. # 3. it is a region based parameter |CG|EH|CG| =3 fixed param, number of regions in the box
if cellsX > numRegBox * pow(ratioMS, 1. / 3.) and totNodesEH < cellsX - (pow(ratioMS, 1. / 3.) * totCellsCG):
wholeNeiListX_CG[i] = round(pow(ratioMS, 1. / 3.) * totCellsCG / totNodesCG - 0.5)
print "HeSpaDDA message LR: cells dist No IG if cells fit in 3 subregions..."
else:
ratioMS2 = round(pow(1. * (cellsX / (1. * preFactCen)), 1. / 3.) - 0.5)
volRatioX = (sizeX - 2. * eh_size) / sizeX # Check eh_size or 2*eh_size
wholeNeiListX_CG[i] = round(ratioMS2 * volRatioX * totCellsCG / totNodesCG - 0.5)
print "HeSpaDDA message LR: cells dist No IG if cells fit volRatio used..."
totCellsEH = cellsX - sum(wholeNeiListX_CG)
print "HeSpaDDA message LR: wholeNeiListX_CG is, ", wholeNeiListX_CG
# Now the redist in the EH-region occurs | NHEW-> We still need to check if the cells are enough for the EH cores
if totNodesEH % 2 == 0 and totCellsEH >= totNodesEH:
for i in xrange(indCG1, indEH1):
wholeNeiListX_EH[i] = round(1.0 * totCellsEH / totNodesEH - 0.5)
elif totNodesEH % 2 != 0 and totCellsEH % 2 == 0:
for i in xrange(indCG1, indEH1):
wholeNeiListX_EH[i] = round(1.0 * (totCellsEH - 1) / totNodesEH - 0.5)
# Punishing the last Node with an additional cell
wholeNeiListX_EH[indEH1 - 1] = wholeNeiListX_EH[indEH1 - 1] + 1
# print "Whole lists are as:",wholeNeiListX_EH,wholeNeiListX_CG
for k in xrange(node_gridX):
wholeNeiListX[k] = wholeNeiListX_EH[k] + wholeNeiListX_CG[k] # Superposing both Arrays
else: # TO BE IMPROVED (not fulfilling all nodes)!!! not EVEN number of nodes!
indCG1 = int(totNodesCG / 2.0) # gives 1
indEH1 = indCG1 + int(totNodesEH) # gives 6
au2 = range(int(indCG1)) # 1
# no assuming odd totNodesCG, before (indEH1,indEH1+indCG1))
au2.extend(range(indEH1, indEH1 + indCG1 + 1))
print "HeSpaDDA message: Cells CG wrong", totCellsCG
if int(totCellsCG) % int(totNodesCG) == 0: # NHEW
for i in au2:
wholeNeiListX_CG[i] = round(1.0 * (totCellsCG) / totNodesCG - 0.5)
else:
for i in au2:
wholeNeiListX_CG[i] = round(1.0 * (totCellsCG - 1) / totNodesCG - 0.5)
# Punishing the last Node with an additional cell NHEW (got rid of -1 in the index)
wholeNeiListX_CG[indEH1 + indCG1] = wholeNeiListX_CG[indEH1 + indCG1] + 1
if totNodesEH % 2 == 0:
for i in xrange(indCG1, indEH1):
wholeNeiListX_EH[i] = round(1.0 * totCellsEH / totNodesEH - 0.5)
else:
for i in xrange(indCG1, indEH1):
wholeNeiListX_EH[i] = round(1.0 * (totCellsEH - 1) / totNodesEH - 0.5)
# Punishing the last Node with an additional cell
wholeNeiListX_EH[indEH1 - 1] = wholeNeiListX_EH[indEH1 - 1] + 1
for k in xrange(node_gridX):
wholeNeiListX[k] = wholeNeiListX_EH[k] + wholeNeiListX_CG[k]
else: # TO BE IMPROVED
if node_gridX % 2 == 0:
[wholeNeiListX.append(round((cellsX - 1) / node_gridX - 0.5)) for i in xrange(node_gridX)] # Homogeneously distributed
# Punishing the last Node with an additional cell
wholeNeiListX[node_gridX - 1] = wholeNeiListX[node_gridX - 1] + 1
elif cellsX % 2 == 0:
[wholeNeiListX.append(round((cellsX) / node_gridX - 0.5)) for i in xrange(node_gridX)]
wholeNeiListX = redistDeltaRandomly(wholeNeiListX, cellsX - sum(wholeNeiListX), totNodesEH, cellsX - sum(wholeNeiListX) - 1)
# print "My Redist WholeNeiList is TODOs !:",wholeNeiListX
return wholeNeiListX
# This function distributes the remaining DELTA cells into nodes semi-randomly. By default the bias applies to the CG-region and it assumes there cannot be more than 3 extra cells to redistribute, because this is the total number of regions in the simulation box |CG|EH|CG| (by default the CG bias is to the left; this could be updated in the dynamic load balancing case).
def redistDeltaRandomly(wholeNeiListX, deltaCells, totNodesEH=0, biased=0):
flagBiased = 0
wholeNeiListXcopy = wholeNeiListX[:]
index = len(wholeNeiListX) - 1
indexOut = [0] * int(deltaCells)
print "HeSpaDDA message: This are the deltaCells", deltaCells
if deltaCells > 0.5:
indexOut[-1] = 3 # initialization value for the index of the nodes that will get more cells, so that the random number generator is never punishing the same node with more cells
else:
indexOut = [0]
return wholeNeiListXcopy
if totNodesEH == 0:
for p in xrange(0, int(deltaCells)):
aux2 = randint(0, index)
while aux2 == indexOut[p - 1]:
aux2 = randint(0, index)
indexOut[p] = aux2
for i in indexOut:
wholeNeiListXcopy[i] = wholeNeiListX[i] + 1
else:
for p in xrange(0, int(deltaCells)):
index = len(wholeNeiListX) - 1
if biased > 0 and biased < 3: # Left biased!
# Left CG region | * | | |
aux2 = randint(0, index - totNodesEH - 1)
nIndMin = 0
if biased > 1 or flagBiased == 1:
nIndMax = index - totNodesEH
flagBiased = 1
else:
nIndMax = index - totNodesEH - 1
biased = biased - 1
else:
# Right CG region | | | * |
aux2 = randint(totNodesEH + 1, index)
nIndMin = totNodesEH + 1
nIndMax = index
while aux2 == indexOut[p - 1]:
aux2 = randint(nIndMin, nIndMax)
indexOut[p] = aux2
for i in indexOut:
wholeNeiListXcopy[i] = wholeNeiListX[i] + 1
return wholeNeiListXcopy
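# Illustrative sketch (hypothetical values, not part of the original module):
# two delta cells are handed to two distinct, randomly chosen nodes, so the
# exact output varies between runs, e.g.
#
#     >>> redistDeltaRandomly([3, 3, 3, 3], 2)
#     [4, 3, 4, 3]        # one possible outcome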
# This function normalizes the number of cells to go to the EH and CG regions and finds the ideal corresponding number of nodes EH and CG
def findNodesMS(node_gridX, totCellsEH, totCellsCG, ratioMS, sizeX, eh_size, idealGas, procsWEH=1.):
fRatioEH = pow(ratioMS, 1. / 3.) * (2.0 * eh_size / (1.0 * (sizeX) + 2.0 * eh_size * (pow(ratioMS, 1. / 3.) - 1.))) # Seems to be wo Bu!
# pow(ratioMS,1./3.)*(1.0*totCellsEH/(1.0*(totCellsCG+totCellsEH)+totCellsEH*(pow(ratioMS,1./3.)-1.)))
# fRatioCG=(1./1.)*(1.0*totCellsCG/(1.0*(totCellsCG+totCellsEH)))
if idealGas:
if (node_gridX - 2.) <= totCellsEH and node_gridX > totCellsEH:
# 2do: Could be tuned for every case! SuperCell!
totNodesEH = round(totCellsEH / procsWEH - 0.5)
totNodesCG = node_gridX - totNodesEH
elif node_gridX > totCellsEH + 2:
totNodesEH = totCellsEH # 2do: Could be tuned for every case! SuperCell!
totNodesCG = node_gridX - totNodesEH
else:
totNodesEH = node_gridX - 2 # 2do: Could be tuned for every case! SuperCell!
totNodesCG = node_gridX - totNodesEH
if totNodesEH < 1 and node_gridX > 0:
print "HeSpaDDA message: You are using the minimum amount of cores!!!"
totNodesEH = 1
totNodesCG = 1
else:
print "HeSpaDDA message: Are you sure you need to use a Domain Decomposition? Verify that you are not trying to run this simulation on a single core"
else: # Applies to all other systems besides the Ideal Gas
if node_gridX <= (totCellsEH + totCellsCG):
totNodesEH = round(fRatioEH * node_gridX)
print "HeSpaDDA message: According to the theory of HV Guzman article P_{HR} is :", totNodesEH
totNodesCG = node_gridX - totNodesEH
if (totNodesEH + totNodesCG) != node_gridX:
# If there are more nodes than cells in EH=> redistribute nodes to EH and CG
if totNodesEH > (totCellsEH):
diffNodesCells = totNodesEH - totCellsEH
# Add some cores to the CG nodes
if diffNodesCells <= (totCellsCG - totNodesCG):
# more weight in terms of cores in the LR region
totNodesCG = totNodesCG + diffNodesCells
totNodesEH = totNodesEH - diffNodesCells
else:
print "HeSpaDDA message: You seem to have more Cores than Cells! Hint(H): reduce the Nr. of Cores or use cherrypickTotalProcs function!"
# If there are more nodes than cells in LR=> redistribute nodes to EH and CG
elif totNodesCG > (totCellsCG):
diffNodesCells = totNodesCG - totCellsCG
if diffNodesCells <= (totCellsEH - totNodesEH):
totNodesCG = totNodesCG - diffNodesCells
# more weight in terms of cores in the HR region
totNodesEH = totNodesEH + diffNodesCells
if totNodesEH > totCellsEH:
print "HeSpaDDA message: Reduce the number of Processors to be used or try with cherrypickTotalProcs function!"
else:
print "HeSpaDDA message: You seem to have more Cores than Cells! Hint(H): reduce the Nr. of Cores"
else: # Everything seems to be fine, now look the size of NodesCG
if totNodesCG < 2: # Verify if the CG Nodes could built at least 2 CG regions, one left and one right according to its geometry
if totNodesCG == 1:
totNodesEH = totNodesEH - 1
totNodesCG = 2
else:
totNodesEH = totNodesEH - 2
totNodesCG = 2
else:
pass
else:
print "HeSpaDDA message: You seem to have more Cores than Cells! Hint(H): reduce the Nr. of Cores"
return totNodesCG, totNodesEH
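# Illustrative sketch (hypothetical values, not part of the original module):
# for the ideal-gas branch with 8 cores, 4 EH cells and 12 CG cells there are
# more cores than EH cells + 2, so every EH cell gets its own core and the
# remaining cores go to the CG region.
#
#     >>> findNodesMS(8, 4., 12., 8, 30., 5., True)
#     (4.0, 4.0)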
|
MrTheodor/espressopp
|
src/tools/loadbal.py
|
Python
|
gpl-3.0
| 32,052
|
[
"ESPResSo"
] |
f55a52af8e0df9af7bbdf78a76c1cdd8b569e9d18252303a81493df07e50f5c9
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import unittest
import logging
from MooseDocs import common, base, tree
from MooseDocs.test import MooseDocsTestCase
from MooseDocs.extensions import core, command, floats, devel
logging.basicConfig()
class TestExample(MooseDocsTestCase):
EXTENSIONS = [core, command, floats, devel]
def testNoFloatAST(self):
ast = self.tokenize('!devel example\ntest')
self.assertToken(ast(0), 'Example', size=2)
self.assertToken(ast(0,0), 'Code', size=0, content='test')
self.assertToken(ast(0,1), 'Paragraph', size=1)
self.assertToken(ast(0,1,0), 'Word', size=0, content='test')
def testFloatAST(self):
ast = self.tokenize('!devel example id=foo caption=bar\ntest')
self.assertToken(ast(0), 'Float', size=2)
self.assertToken(ast(0,0), 'FloatCaption', size=1, prefix='Example', key='foo')
self.assertToken(ast(0,0,0), 'Word', size=0, content='bar')
self.assertToken(ast(0,1), 'Example', size=2)
self.assertToken(ast(0,1,0), 'Code', size=0, content='test')
self.assertToken(ast(0,1,1), 'Paragraph', size=1)
self.assertToken(ast(0,1,1,0), 'Word', size=0, content='test')
class TestSettings(MooseDocsTestCase):
EXTENSIONS = [core, command, floats, devel]
def testNoFloatAST(self):
ast = self.tokenize('!devel settings module=MooseDocs.extensions.devel object=SettingsCommand')
self.assertToken(ast(0), 'Table', size=2)
self.assertToken(ast(0,0), 'TableHead', size=1)
self.assertToken(ast(0,1), 'TableBody', size=7)
def testFloatAST(self):
ast = self.tokenize('!devel settings id=foo module=MooseDocs.extensions.devel object=SettingsCommand')
self.assertToken(ast(0), 'TableFloat', size=2)
self.assertToken(ast(0,1), 'Table', size=2)
self.assertToken(ast(0,1,0), 'TableHead', size=1)
self.assertToken(ast(0,1,1), 'TableBody', size=7)
def testErrors(self):
ast = self.tokenize('!devel settings object=SettingsCommand')
self.assertToken(ast(0), 'ErrorToken', message="The 'module' setting is required.")
ast = self.tokenize('!devel settings module=MooseDocs.extensions.devel')
self.assertToken(ast(0), 'ErrorToken', message="The 'object' setting is required.")
ast = self.tokenize('!devel settings module=wrong object=SettingsCommand')
self.assertToken(ast(0), 'ErrorToken', message="Unable to load the 'wrong' module.")
ast = self.tokenize('!devel settings module=MooseDocs.extensions.devel object=wrong')
self.assertToken(ast(0), 'ErrorToken', message="Unable to load the 'wrong' attribute from the 'MooseDocs.extensions.devel' module.")
class TestRenderExample(MooseDocsTestCase):
EXTENSIONS = [core, command, floats, devel]
def testHTML(self):
ast = self.tokenize('!devel example\ntest')
res = self.render(ast, renderer=base.HTMLRenderer())
self.assertHTMLTag(res, 'body', size=2)
self.assertHTMLTag(res(0), 'pre', size=1, class_='moose-pre')
self.assertHTMLTag(res(0,0), 'code', size=1, string='test')
self.assertHTMLTag(res(1), 'p', string='test')
def testMaterialize(self):
ast = self.tokenize('!devel example\ntest')
res = self.render(ast, renderer=base.MaterializeRenderer())
self.assertHTMLTag(res(0), 'div', size=3, class_='moose-devel-example')
self.assertHTMLTag(res(0,0), 'ul', size=2, class_='tabs')
self.assertHTMLTag(res(0,0,0), 'li', size=1, class_='tab')
self.assertHTMLTag(res(0,0,0,0), 'a', string='Markdown')
self.assertHTMLTag(res(0,0,1), 'li', size=1, class_='tab')
self.assertHTMLTag(res(0,0,1,0), 'a', string='HTML')
self.assertHTMLTag(res(0,1), 'div', size=1, class_='moose-devel-example-code')
self.assertHTMLTag(res(0,1,0), 'pre', string='test')
self.assertHTMLTag(res(0,2), 'div', size=1, class_='moose-devel-example-html')
self.assertHTMLTag(res(0,2,0), 'p', string='test')
def testLatex(self):
ast = self.tokenize('!devel example\ntest')
res = self.render(ast, renderer=base.LatexRenderer())
self.assertLatex(res(0), 'Environment', 'example')
self.assertLatex(res(0,0), 'Environment', 'verbatim', string='test')
self.assertLatex(res(0,1), 'Command', 'tcblower')
self.assertLatex(res(0,2), 'Command', 'par')
self.assertLatexString(res(0,3), content='test')
if __name__ == '__main__':
unittest.main(verbosity=2)
|
harterj/moose
|
python/MooseDocs/test/extensions/test_devel.py
|
Python
|
lgpl-2.1
| 4,858
|
[
"MOOSE"
] |
430c5300e613b3c03936a2b4f3cd79a4bc31b855a0544de50d787b0a5156c6e8
|
#!/usr/bin/env python
# encoding: utf-8
import os
import sys
import time
import pprint
import pytest
import signal
import threading
import Queue
import traceback
from urlparse import urlsplit, parse_qsl, urlunsplit, urljoin
from urllib import urlencode
from bs4 import BeautifulSoup
from testlib import web, var_dir
from testlib import CMKWebSession
class InvalidUrl(Exception):
pass
class Url(object):
def __init__(self, url, orig_url=None, referer_url=None):
self.url = url
self.orig_url = orig_url
self.referer_url = referer_url
def __hash__(self):
return hash(self.url)
# Strip host and site prefix
def neutral_url(self):
return "check_mk/" + self.url.split("/check_mk/", 1)[1]
# Strip proto and host
def url_without_host(self):
parsed = list(urlsplit(self.url))
parsed[0] = None
parsed[1] = None
return urlunsplit(parsed)
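# Illustrative sketch (hypothetical URL, not part of the original test):
#
#     u = Url("http://myhost:5000/mysite/check_mk/view.py?view_name=host")
#     u.neutral_url()        # 'check_mk/view.py?view_name=host'
#     u.url_without_host()   # '/mysite/check_mk/view.py?view_name=host'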
class Worker(threading.Thread):
def __init__(self, num, crawler):
super(Worker, self).__init__()
self.name = "worker-%d" % num
self.crawler = crawler
self.daemon = True
self.terminate = False
self.idle = True
self.client = CMKWebSession(self.crawler.site)
self.client.login()
self.client.set_language("en")
def run(self):
while not self.terminate:
try:
while not self.terminate:
url = self.crawler.todo.get(block=False)
self.idle = False
try:
self.visit_url(url)
except Exception, e:
self.error(url, "Failed to visit: %s\n%s" %
(e, traceback.format_exc()))
self.crawler.todo.task_done()
except Queue.Empty:
self.idle = True
time.sleep(0.5)
def stop(self):
self.terminate = True
def visit_url(self, url):
if url.url in self.crawler.visited:
print("Already visited: %s" % url.url)
return
self.crawler.visited.append(url.url)
#print("%s - Visiting #%d (todo %d): %s" %
# (self.name, len(self.crawler.visited), self.crawler.todo.qsize(), url.url))
started = time.time()
try:
#print "FETCH", url.url_without_host()
response = self.client.get(url.url_without_host())
except AssertionError, e:
if "This view can only be used in mobile mode" in "%s" % e:
print "Skipping mobile mode view checking"
return
else:
raise
duration = time.time() - started
self.update_stats(url, duration, len(response.content))
content_type = response.headers.get('content-type')
#print self.name, content_type, len(response.text)
if content_type.startswith("text/html"):
self.check_response(url, response)
elif content_type.startswith("text/plain"):
pass # no specific test
elif content_type.startswith("text/csv"):
pass # no specific test
elif content_type in [ "image/png", "image/gif" ]:
pass # no specific test
elif content_type in [ "application/pdf" ]:
pass # no specific test
elif content_type in [ "application/x-rpm", "application/x-deb", "application/x-debian-package",
"application/x-gzip", "application/x-msdos-program", "application/x-msi",
"application/x-tgz", "application/x-redhat-package-manager",
"application/x-pkg",
"text/x-chdr", "text/x-c++src", "text/x-sh", ]:
pass # no specific test
else:
self.error(url, "Unknown content type: %s" % (content_type))
return
def update_stats(self, url, duration, content_size):
stats = self.crawler.stats.setdefault(url.neutral_url(), {
"first_duration" : duration,
"first_content_size" : content_size,
})
avg_duration = (duration + stats.get("avg_duration", duration)) / 2.0
avg_content_size = (content_size + stats.get("avg_content_size", content_size)) / 2.0
stats.update({
"orig_url" : url.orig_url,
"referer_url" : url.referer_url,
"num_visited" : stats.get("num_visited", 0) + 1,
"last_duration" : duration,
"last_content_size" : content_size,
"avg_duration" : avg_duration,
"avg_content_size" : avg_content_size,
})
def error(self, url, s):
s = "[%s - found on %s] %s" % (url.url, url.referer_url, s)
self.crawler.error(s)
def check_response(self, url, response):
soup = BeautifulSoup(response.text, "lxml")
# The referenced resources (images, stylesheets, javascript files) are checked by
        # the generic web client handler. This method only needs to handle the crawling itself.
self.check_content(url, response, soup)
self.check_links(url, soup)
self.check_frames(url, soup)
self.check_iframes(url, soup)
def check_content(self, url, response, soup):
ignore_texts = [
"This view can only be used in mobile mode.",
]
for element in soup.select("div.error"):
inner_html = "%s" % element
skip = False
for ignore_text in ignore_texts:
if ignore_text in inner_html:
skip = True
break
if not skip:
self.error(url, "Found error: %s" % (element))
def check_frames(self, url, soup):
self.check_referenced(url, soup, "frame", "src")
def check_iframes(self, url, soup):
self.check_referenced(url, soup, "iframe", "src")
def check_links(self, url, soup):
self.check_referenced(url, soup, "a", "href")
def check_referenced(self, referer_url, soup, tag, attr):
elements = soup.find_all(tag)
for element in elements:
orig_url = element.get(attr)
url = self.normalize_url(self.crawler.site.internal_url, orig_url)
if url is None:
continue
try:
self.verify_is_valid_url(url)
except InvalidUrl, e:
#print self.name, "skip invalid", url, e
self.crawler.skipped.add(url)
continue
# Ensure that this url has not been crawled yet
crawl_it = False
with self.crawler.handled_lock:
if url not in self.crawler.handled:
crawl_it = True
self.crawler.handled.add(url)
if crawl_it:
#file("/tmp/todo", "a").write("%s (%s)\n" % (url, referer_url.url))
self.crawler.todo.put(Url(url, orig_url=orig_url, referer_url=referer_url.url))
def verify_is_valid_url(self, url):
parsed = urlsplit(url)
if parsed.scheme != "http":
raise InvalidUrl("invalid scheme: %r" % (parsed,))
# skip external urls
if url.startswith("http://") and not url.startswith(self.crawler.site.internal_url):
raise InvalidUrl("Skipping external URL: %s" % url)
# skip non check_mk urls
if not parsed.path.startswith("/%s/check_mk" % self.crawler.site.id) \
or "../pnp4nagios/" in parsed.path \
or "../nagvis/" in parsed.path \
or "../nagios/" in parsed.path:
raise InvalidUrl("Skipping non Check_MK URL: %s %s" % (url, parsed))
# skip current url with link to index
if "index.py?start_url=" in url:
raise InvalidUrl("Skipping link to index with current URL: %s" % url)
if "logout.py" in url:
raise InvalidUrl("Skipping logout URL: %s" % url)
if "_transid=" in url:
raise InvalidUrl("Skipping action URL: %s" % url)
if "selection=" in url:
raise InvalidUrl("Skipping selection URL: %s" % url)
# Don't follow filled in filter form views
if "view.py" in url and "filled_in=filter" in url:
raise InvalidUrl("Skipping filled in filter URL: %s" % url)
# Don't follow the view editor
if "edit_view.py" in url:
raise InvalidUrl("Skipping view editor URL: %s" % url)
# Skip agent download files
if parsed.path.startswith("/%s/check_mk/agents/" % self.crawler.site.id):
raise InvalidUrl("Skipping agent download file: %s" % url)
def normalize_url(self, base_url, url):
url = urljoin(base_url, url.rstrip("#"))
parsed = list(urlsplit(url))
parsed[3] = urlencode(sorted(parse_qsl(parsed[3], keep_blank_values=True)))
return urlunsplit(parsed)
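# Queue.Queue backed by a set: putting the same URL twice has no effect and the
# order in which pending URLs are handed out is unspecified.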
class SetQueue(Queue.Queue):
def _init(self, maxsize):
self.queue = set()
def _put(self, item):
self.queue.add(item)
def _get(self):
return self.queue.pop()
class TestCrawler(object):
@pytest.mark.type("gui_crawl")
def test_crawl(self, site):
self.stats = {}
self.todo = SetQueue()
self.started = time.time()
self.visited = []
self.skipped = set()
        # Contains all URLs that have already been seen and handled in some way.
        # Roughly the union of self.todo and self.visited, but self.todo holds Url() objects.
self.handled = set()
self.handled_lock = threading.Lock()
self.errors = []
self.site = site
self.num_workers = 10
self.load_stats()
self.todo.put(Url(site.internal_url))
self.handled.add(site.internal_url)
self.crawl()
self.report()
def stats_file(self):
return var_dir() + "/crawl.stats"
def report_file(self):
return var_dir() + "/crawl.report"
def web_log_file(self):
return var_dir() + "/crawl-web.log"
def apache_error_log_file(self):
return var_dir() + "/crawl-apache_error_log.log"
def load_stats(self):
try:
self.stats = eval(file(self.stats_file()).read())
except IOError, e:
if e.errno == 2:
pass # Not existing files are OK
else:
raise
def save_stats(self):
if not os.path.exists(var_dir()):
os.makedirs(var_dir())
file(self.stats_file()+".tmp", "w").write(pprint.pformat(self.stats) + "\n")
os.rename(self.stats_file()+".tmp", self.stats_file())
def update_total_stats(self, finished):
stats = self.stats.setdefault("_TOTAL_", {})
stats["last_num_visited"] = len(self.visited)
stats["last_duration"] = time.time() - self.started
stats["last_errors"] = self.errors
stats["last_finished"] = finished
if finished:
if stats.get("last_finished_num_visited", 0) > 0:
perc = float(stats["last_num_visited"]) * 100 / stats["last_finished_num_visited"]
if perc < 80.0:
self.error("Finished and walked %d URLs, previous run walked %d URLs. That "
"is %0.2f %% of the previous run. Something seems to be wrong."
% (stats["last_num_visited"], stats["last_finished_num_visited"],
perc))
stats["last_finished_num_visited"] = stats["last_num_visited"]
stats["last_finished_duration"] = stats["last_duration"]
stats["last_finished_errors"] = stats["last_errors"]
def report(self):
with file(self.report_file()+".tmp", "w") as f:
f.write("Skipped URLs:\n")
for skipped_url in sorted(self.skipped):
f.write(" %s\n" % skipped_url)
f.write("\n")
f.write("Visited URLs:\n")
for visited_url in self.visited:
f.write(" %s\n" % visited_url)
f.write("\n")
if self.errors:
f.write("Crawled %d URLs in %d seconds. Failures:\n%s\n" %
(len(self.visited), time.time() - self.started, "\n".join(self.errors)))
# Copy the previous file for analysis
#if os.path.exists(self.report_file()):
# open(self.report_file()+".old", "w").write(open(self.report_file()).read())
os.rename(self.report_file()+".tmp", self.report_file())
if self.errors:
for site_path, test_path in [
("var/log/web.log", self.web_log_file()),
("var/log/apache/error_log", self.apache_error_log_file()),
]:
if self.site.file_exists(site_path):
open(test_path+".tmp", "w").write(self.site.read_file(site_path))
os.rename(test_path+".tmp", test_path)
pytest.fail("Crawled %d URLs in %d seconds. Failures:\n%s" %
(len(self.visited), time.time() - self.started, "\n".join(self.errors)))
def error(self, msg):
print(msg)
self.errors.append(msg)
def crawl(self):
finished = False
workers = []
try:
for i in range(self.num_workers):
t = Worker(i, self)
t.start()
workers.append(t)
start = time.time()
last_tick, last_num_visited = time.time(), 0
while True:
now = time.time()
duration = max(now - start, 1)
num_visited = len(self.visited)
num_idle = len([ w for w in workers if w.idle ])
rate_runtime = num_visited / duration
if now > last_tick and num_visited > last_num_visited:
rate_tick = (num_visited - last_num_visited) / (now - last_tick)
else:
rate_tick = 0
last_tick = now
last_num_visited = num_visited
print("Workers: %d (Idle: %d), Rate: %0.2f/s (1sec: %0.2f/s), Duration: %d sec, "
"Visited: %s, Todo: %d" %
(self.num_workers, num_idle, rate_runtime, rate_tick,
duration, num_visited, self.todo.qsize()))
if self.todo.qsize() == 0 and all([ w.idle for w in workers ]):
break
else:
time.sleep(1)
finished = True
except KeyboardInterrupt:
for t in workers:
t.stop()
print "Waiting for workers to finish..."
finally:
self.update_total_stats(finished)
self.save_stats()
|
huiyiqun/check_mk
|
tests/integration/web/test_crawl.py
|
Python
|
gpl-2.0
| 15,021
|
[
"VisIt"
] |
88ef05ab00919d801d3b996b19e9a2a8f028c5e4faf8f3764b4e9945d9cb200c
|
# -*- coding: utf-8 -*-
from collections import OrderedDict, Counter
import csv
from datetime import datetime, timedelta
import hashlib
import json
import os
import platform
import plistlib
import re
import shutil
import sqlite3
from subprocess import call
import sys
import time
import urllib
import uuid
import webbrowser
from flask import Flask, render_template, request, flash, url_for, redirect, Response, send_from_directory
import pyesedb
import vss
from werkzeug.utils import secure_filename
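# Typical flow (a sketch of how the Flask routes below use this class): create a
# BrowsingHistory(), call load(browser_name=...) or load(file_path=..., browser_name=...)
# to import a copy of the history into a temporary sqlite database, then query it via
# visits(), entries(), select_domains() or search_terms(), and write a CSV with export().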
class BrowsingHistory(object):
"""A class to load, modify and export a *copy* of the web browsing history.
Supported browsers:
- Google Chrome
- Firefox
- Safari
- Internet Explorer (>= 10) / Edge
"""
def __init__(self):
self.os = platform.system()
self.os_release = platform.release()
self.os_full = ' '.join([self.os, self.os_release])
self._upload_dir = self._root_path('uploads')
self._file_path = os.path.split(os.path.realpath(__file__))[0]
self._browser_name = None
self._browser_specs = None
self._db = None
self._min_date = self._min_date(num_days=60)
self.date_range = None
self.num_domains = None
self.ready = self._state()
self._domains = None
self._keywords = None
self._entries = None
def _handle_platform(self):
"""Helper function to handle platform name.
:return str: Platform name
"""
if self.os in ['Linux', 'Darwin']:
return self.os
elif self.os == 'Windows':
pat = re.compile(r'Windows\s\d{1,2}')
match = re.findall(pat, self.os_full)
if match:
return match[0]
def _build_path(self, _os):
"""Helper function to build and check the path to the browsing history.
        :param _os: Operating system
:return str or boolean: path or False
"""
if (self._browser_name == 'IE11' and self.os in ['Linux', 'Darwin']) or (self._browser_name == 'Safari' and self.os in ['Linux', 'Windows']):
return False
else:
user = os.getlogin()
for p in self._browser_specs['path'][_os]:
if self._browser_name == 'Firefox':
# checking for profile name
pat = re.compile(r'\w+.default([\w\-\_\.]+)?')
if os.path.isdir(p.format(user)):
for item in os.listdir(p.format(user)):
if re.findall(pat, item):
profile_name = item
path = os.path.join(p.format(user), profile_name, self._browser_specs['file_name'][0])
if os.path.isfile(path):
return path
else:
continue
else:
if os.path.isdir(p.format(user)):
for f in self._browser_specs['file_name']:
path = os.path.join(p.format(user), f)
if os.path.isfile(path):
return path
else:
continue
return False
def _create_db(self):
"""Creates an empty temporary sqlite database in the 'tmp' directory.
:return: Database path or None
"""
try:
conn = sqlite3.connect('tmp/browsing_history.db')
cur = conn.cursor()
with open(self._root_path('data/schema.sql'), 'r') as sql_script:
queries = sql_script.read()
cur.executescript(queries)
conn.commit()
except BaseException:
return None
finally:
cur.close()
conn.close()
return 'tmp/browsing_history.db'
def _unique_id_generator(self):
"""Helper function to generate unique identifiers.
:return: integer value
"""
unique_id = 1
while True:
yield unique_id
unique_id += 1
def _extract_history_plist(self, pl):
"""Extracts Safari browsing history (History.plist) file.
:param pl: File path (string)
:return: Two lists of tuples
"""
visits = []
urls = []
g = self._unique_id_generator()
with open(pl, 'rb') as f:
d = plistlib.load(f)
for item in d['WebHistoryDates']:
date = self._convert_timestamp(float(item['lastVisitedDate']), browser_name=self._browser_name, tz='utc')
# Filter by url and minimum date
if self._is_url(item['']) and date >= self._min_date:
last_visit_date = date
visit_date = last_visit_date
url = item['']
title = item['title']
visit_count = item['visitCount']
if 'redirectURLs' in item.keys():
redirect_urls = ' '.join(item['redirectURLs'])
else:
redirect_urls = None
url_id = next(g)
_id = url_id
urls.append((_id, last_visit_date, redirect_urls, title, url, visit_count))
visits.append((url_id, visit_date))
else:
continue
return urls, visits
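    # WebCacheVxx.dat is locked by the operating system while it is in use, so a
    # Volume Shadow Copy is created first and the copy is read instead.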
def _copy_webcachev01_dat(self, file_path):
'''
Creates a shadow copy of WebCacheVxx.dat and copies it into the upload folder.
:param file_path: The file path of WebCacheVxx.dat
:return: Boolean value
'''
# Adapted from sblosser's example code:
# https://github.com/sblosser/pyshadowcopy
# on 2017-07-31
# Create a set that contains the LOCAL disks you want to shadow
drv = file_path[0]
local_drives = set()
local_drives.add(drv)
# Initialize the Shadow Copies
try:
sc = vss.ShadowCopy(local_drives)
# An open and locked file we want to read
locked_file = file_path
shadow_path = sc.shadow_path(locked_file)
try:
shutil.copy(shadow_path, self._root_path('uploads'))
except BaseException as e:
print(e)
sc.delete()
return False
finally:
sc.delete()
return True
except BaseException:
return False
def _extract_webcachev01_dat(self, file_path):
"""Extracts data from WebCacheVxx.dat.
:param str file_path: The file path of WebCacheVxx.dat
:return lists urls, visits: Two lists of tuples
"""
# Adapted from Jon Glass' blog:
# http://jon.glass/blog/attempts-to-parse-webcachev01-dat/
# on 2017-07-31
if self._copy_webcachev01_dat(file_path):
file_name = os.path.split(file_path)[1]
elif 'WebCacheV01.dat' in os.listdir(self._upload_dir):
file_name = 'WebCacheV01.dat'
elif 'WebCacheV24.dat' in os.listdir(self._upload_dir):
            file_name = 'WebCacheV24.dat'
else:
return False
visits = []
urls = {}
pat = re.compile(r'(?<=@)http[\w:\_\-/.]+')
esedb_file = pyesedb.file()
try:
with open(os.path.join(self._root_path('uploads'), file_name), "rb") as f:
esedb_file.open_file_object(f)
containers_table = esedb_file.get_table_by_name("Containers")
g = self._unique_id_generator()
for i in range(0, containers_table.get_number_of_records()):
if containers_table.get_record(i).get_value_data_as_string(8) == 'History':
container_id = containers_table.get_record(i).get_value_data_as_integer(0)
history_table = esedb_file.get_table_by_name("Container_" + str(container_id))
for j in range(0, history_table.get_number_of_records()):
if history_table.get_record(j).is_long_value(17):
url = history_table.get_record(j).get_value_data_as_long_value(17).get_data_as_string()
else:
url = history_table.get_record(j).get_value_data_as_string(17)
date = self._convert_timestamp(history_table.get_record(j).get_value_data_as_integer(13), browser_name=self._browser_name, tz='utc')
# Filter by url and minimum date
if re.findall(pat, url) and date >= self._min_date:
url = re.findall(pat, url)[0]
if url not in urls.keys():
unique_id = next(g)
urls[url] = {}
urls[url]['unique_id'] = unique_id
urls[url]['access_count'] = history_table.get_record(j).get_value_data_as_integer(8)
urls[url]['redirect_urls'] = history_table.get_record(j).get_value_data_as_string(22)
entry_id = history_table.get_record(j).get_value_data_as_integer(0)
accessed_time = date
unique_entry_id = int(str(container_id) + str(unique_id))
visits.append((accessed_time, unique_entry_id, urls[url]['unique_id']))
else:
access_count = history_table.get_record(j).get_value_data_as_integer(8)
if access_count > 0:
urls[url]['access_count'] += access_count
else:
continue
esedb_file.close()
urls = [(value['access_count'], value['redirect_urls'], value['unique_id'], key) for key, value in
urls.items()]
return urls, visits
except PermissionError:
return False
def _import_data(self, file_path=None):
"""Imports data from a file into the database.
:param file_path: The file path of the browsing history database file (e.g., sqlite database file or a plist property list file).
:return: boolean value
"""
if file_path:
file_path = file_path
else:
file_path = self._build_path(self._handle_platform())
if file_path:
db_tables = tuple(self._browser_specs['tables'].keys())
translate = self._load_json(self._root_path('data'), 'table_names')
conn = False
if self._browser_name == 'Safari':
file_name = os.path.split(file_path)[1]
if file_name == 'History.db':
safari8_tables = self._browser_specs['tables_s8']
db_tables = tuple(safari8_tables.keys())
if os.path.split(file_path)[0] != self._upload_dir:
try:
shutil.copy(file_path, self._root_path('uploads'))
except shutil.Error as e:
print(e)
return False
file_path = os.path.join(self._root_path('uploads'), file_name)
try:
conn = sqlite3.connect(file_path)
except sqlite3.OperationalError as e:
print(e)
return False
else:
urls, visits = self._extract_history_plist(file_path)
elif self._browser_name == 'IE11':
try:
urls, visits = self._extract_webcachev01_dat(file_path)
except TypeError:
return False
elif self._browser_name == 'Chrome':
if os.path.split(file_path)[0] != self._upload_dir:
try:
shutil.copy(file_path, self._root_path('uploads'))
except shutil.Error as e:
print(e)
return False
file_path = os.path.join(self._root_path('uploads'), self._browser_specs['file_name'][0])
try:
conn = sqlite3.connect(file_path)
except sqlite3.OperationalError as e:
print(e)
return False
elif self._browser_name == 'Firefox':
try:
conn = sqlite3.connect(file_path)
except sqlite3.OperationalError as e:
print(e)
return False
new_db = sqlite3.connect(self._db)
new_db_cur = new_db.cursor()
for table in db_tables:
if conn and self._browser_name == 'Safari':
od = OrderedDict(sorted(safari8_tables[table].items(), key=lambda t: t[0]))
else:
od = OrderedDict(sorted(self._browser_specs['tables'][table].items(), key=lambda t: t[0]))
if conn:
conn.create_function('REGEXP', 2, self._regexp)
c = conn.cursor()
if translate[table] == 'visits':
if self._browser_name == 'Chrome':
q = "SELECT {0} FROM visits WHERE ((visits.visit_time/1000000)-11644473600) >= {1};".format(', '.join(od.keys()), self._min_date)
elif self._browser_name == 'Firefox':
q = "SELECT {0} FROM moz_historyvisits WHERE (visit_date/1000000) >= {1};".format(', '.join(od.keys()), self._min_date)
elif self._browser_name == 'Safari':
q = "SELECT {0} FROM history_visits WHERE (history_visits.visit_time + 978307200) >= {1};".format(', '.join(od.keys()), self._min_date)
else:
raise ValueError("Browser name {0} doesn't match.".format(self._browser_name))
else:
if self._browser_name == 'Chrome':
q = "SELECT {0} FROM urls, visits WHERE urls.id = visits.url AND ((visits.visit_time/1000000)-11644473600) >= {1} AND NOT REGEXP('^file:', urls.url);".format(', '.join(od.keys()), self._min_date)
elif self._browser_name == 'Firefox':
q = "SELECT {0} FROM moz_places, moz_historyvisits WHERE moz_places.id = moz_historyvisits.place_id AND (moz_historyvisits.visit_date/1000000) >= {1} AND NOT REGEXP('^file:///', moz_places.url);".format(', '.join(od.keys()), self._min_date)
elif self._browser_name == 'Safari':
q = "SELECT {0} FROM history_items, history_visits WHERE history_items.id = history_visits.history_item AND (history_visits.visit_time + 978307200) >= {1} AND NOT REGEXP('^file:', history_items.url);".format(', '.join(od.keys()), self._min_date)
else:
raise ValueError("Browser name {0} doesn't match.".format(self._browser_name))
rq = c.execute(q)
r = rq.fetchall()
else:
if translate[table] == 'visits':
r = visits
else:
r = urls
# Insert data into new database
try:
if conn and self._browser_name == 'Safari':
placeholders = ', '.join(['?' for x in range(len(safari8_tables[table].values()))])
else:
placeholders = ', '.join(['?' for x in range(len(self._browser_specs['tables'][table].values()))])
query = 'INSERT OR IGNORE INTO {0} ({1}) VALUES ({2});'.format(translate[table], ', '.join(od.values()),
placeholders)
new_db_cur.executemany(query, r)
new_db.commit()
except sqlite3.OperationalError as e:
print('sqlite3.OperationalError: ', e)
return False
if conn:
c.close()
conn.close()
new_db_cur.close()
new_db.close()
return True
else:
return False
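    # Registered with sqlite via conn.create_function('REGEXP', 2, ...) so the
    # import queries above can use REGEXP to filter out file:// URLs.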
def _regexp(self, p, s):
pat = re.compile(p)
if re.match(pat, s):
return True
else:
return False
def _load_json(self, path, name):
"""Helper function to load the json browser spec files.
:param path: Path name
:param name: File name (without file extension)
:return: json object
"""
with open('{0}.json'.format(os.path.join(path, name)), 'r') as file:
return json.load(file)
def _save_json(self, data, path, name):
"""Helper function to write json object to a json file.
:param data: json object
:param path: path name
:param name: file name (without file extension)
:return: nothing
"""
with open('{0}.json'.format(os.path.join(path, name)), 'w') as file:
json.dump(data, fp=file)
def load(self, file_path=None, browser_name=None, min_date=None):
"""Loads the browsing history.
:param str file_path: The file path of the browsing history
:param str browser_name: The browser name
:param min_date: The start date of the import
:return: boolean value
"""
self._db = self._create_db()
if browser_name == None:
self._browser_name = self._load_json('tmp', 'browser_name')['browser_name']
else:
self._browser_name = browser_name
self._save_json({'browser_name': self._browser_name}, 'tmp', 'browser_name')
self._browser_specs = self._load_json(self._root_path('data'), self._browser_name)
if min_date:
self._min_date = min_date
if self._db:
if file_path:
status = self._import_data(file_path=file_path)
else:
status = self._import_data()
if status:
self.date_range = self._date_range()
self.num_domains = len(self.visits(date=False, n=None, ascending=False, plot=False))
self.ready = True
return True
else:
return False
def _state(self):
"""Helper function to keep track of the current state of the temporary database.
:return: boolean value
"""
db = 'tmp/browsing_history.db'
if os.path.isfile(db):
try:
self._db = db
self._browser_name = self._load_json('tmp', 'browser_name')['browser_name']
self._browser_specs = self._load_json(self._root_path('data'), self._browser_name)
self.date_range = self._date_range()
self.num_domains = len(self.visits(date=False, n=None, ascending=False, plot=False))
return True
except TypeError:
return False
else:
return False
def _query(self, q):
"""Helper function to query the sqlite database.
:param str q: Sqlite query
:return: List of tuples
"""
if self._db:
with sqlite3.connect(self._db) as conn: # connection to db
c = conn.cursor()
c.execute(q)
return c.fetchall()
else:
return []
def _update_db(self, x, kind='domains'):
"""Update function for the sqlite database.
:param list x: URL ids
:param str kind: What kind of data should be updated. domains (default), keywords, urls
:return: nothing
"""
#self.ready = False
try:
conn = sqlite3.connect(self._db)
c = conn.cursor()
if isinstance(x, str):
if kind == 'keywords':
pat = re.compile(r'(?:\?q=|\?p=|\?query=|search?q=|\?q\d=|\&q\d=|\?k=|\?text=|\&q=|key=|\?search=|\&search=|\&searchTerm=|\?searchTerm=)([a-zA-Z0-9äöüïéàèáÜÄÖ\%\+\-\*\s]+)', re.IGNORECASE)
_ids = self._keywords[x]['ids']
else:
_ids = self._domains[x]['ids']
elif isinstance(x, list) and kind == 'urls':
_ids = x
else:
raise ValueError('Input type unsupported: expects string or list')
for i in _ids:
entry = c.execute("SELECT url, rev_host FROM urls WHERE id = ?;", (i,)).fetchall()
url = self._is_url(entry[0][0], r=True)
if url:
hashed_url = self._hash_domain(url)
unique_id = '{0}-{1}-{2}'.format('anonymisiert', hashed_url, i)
if kind == 'keywords':
new_entry = re.sub(pat, unique_id, entry[0][0])
c.execute('UPDATE urls SET url = ?, title = ? WHERE id = ?;', (new_entry, '***', i))
conn.commit()
elif kind == 'urls':
domain = '{0}/{1}'.format(self._stem_url(entry[0][0]), '***')
c.execute('UPDATE urls SET url = ?, title = ?, redirect_urls = ? WHERE id = ?;', (domain, '***', '***', i))
conn.commit()
elif kind == 'domains':
c.execute('UPDATE urls SET url = ?, title = ?, rev_host = ?, redirect_urls = ? WHERE id = ?;', (unique_id, '***', '***', '***', i))
conn.commit()
else:
raise ValueError('{0} is not a valid kind.'.format(kind))
else:
continue
except sqlite3.OperationalError as e:
print(e)
finally:
c.close()
conn.close()
def _date_conv(self, date):
"""Helper function to convert the date(s).
:param date: string or list in %Y-%m-%d format
:return: start (int), end (int), date (string)
"""
if isinstance(date, list):
date_str = 'between {0} and {1}'.format(date[0], date[1])
t = int(time.mktime(datetime.strptime(date[0], "%Y-%m-%d").timetuple()) * 1000000)
tk = int(time.mktime(datetime.strptime(date[1], "%Y-%m-%d").timetuple()) * 1000000)
elif isinstance(date, str):
date_str = 'on {0}'.format(date)
t = int(time.mktime(datetime.strptime(date, "%Y-%m-%d").timetuple()))
tk = datetime.strptime(date, "%Y-%m-%d") + timedelta(days=1)
tk = int(time.mktime(tk.timetuple()) * 1000000)
return t, tk, date_str
def visits(self, date=False, n=25, ascending=False, plot=False):
"""Function to load all URLs from the database for a certain date or date range.
:param str date: A date (e.g., '2016-10-15') or a date range as list (e.g., ['2016-10-15','2016-10-25'])
:param int n: the number of websites that should be plotted, default = top 25; for all websites set n = None
:param boolean ascending: order
:param plot: boolean value
:return: OrderedDict
"""
if date:
t, tk, date_str = self._date_conv(date)
else:
date_str = 'between {0} and {1}'.format(self.date_range[0], self.date_range[1])
if date:
visits = self._query(
"SELECT url, visit_count, urls.id FROM urls, visits WHERE urls.id = visits.url_id AND visit_date >= {0} AND visit_date < {1};".format(
t, tk))
else:
visits = self._query("SELECT url, visit_count, urls.id FROM urls, visits WHERE urls.id = visits.url_id;")
d = {}
unique_id = set()
for visit in visits:
domain = self._stem_url(visit[0])
count = visit[1]
if domain not in d.keys():
d[domain] = 0
if visit[2] not in unique_id:
unique_id.add(visit[2])
d[domain] += count
total_n = sum(d.values())
if n == None:
n = total_n
if ascending == False:
title = 'Top {0} visited websites {1} (n={2})'.format(n, date_str, total_n)
od = OrderedDict(sorted(d.items(), key=lambda t: t[1])[-n:])
else:
title = 'Least {0} visited websites {1} (n={2})'.format(n, date_str, total_n)
od = OrderedDict(sorted(d.items(), key=lambda t: t[1])[:n])
source = {'x': list(od.keys()), 'y': list(od.values()),
'perc': [round((v / total_n) * 100, 2) for v in list(od.values())]}
if plot == True:
self._vbarplot(source, title)
else:
return od
def entries(self, sort_by='date', q=None):
"""Function to load all entries from the database.
        :param str sort_by: Sort order: date (default), domains or frequency
        :param str q: Search term
        :return: List of tuples
"""
d = {}
if q == None:
visits = self._query("SELECT urls.id, visit_date, url, visit_count FROM visits, urls WHERE visits.url_id = urls.id;")
else:
visits = self._query("SELECT urls.id, visit_date, url, visit_count FROM visits, urls WHERE visits.url_id = urls.id AND url LIKE '%{0}%';".format(q))
# Filtering URLs only
visits = [(e[0], self._get_date(e[1]), e[2], e[3], e[1]) for e in visits]
# Sorting
if sort_by == 'domains' or sort_by == None:
visits = sorted(visits, key=lambda t: t[2])
elif sort_by == 'frequency':
visits = sorted(visits, key=lambda t: t[3], reverse=True)
elif sort_by == 'date' or sort_by == None:
visits = sorted(visits, key=lambda t: t[4], reverse=True)
self._entries = visits
return visits
def select_domains(self, sort_by='domains', q=None, stem_urls=True):
"""Function to load all URLs from the database.
:param str sort_by: Order. Domains or frequency
:param str q: Search term
:param boolean stem_urls: Whether to return domains or urls
:return: OrderedDict
"""
d = {}
if q == None:
visits = self._query("SELECT id, url, visit_count FROM urls;")
else:
visits = self._query("SELECT id, url, visit_count FROM urls WHERE url LIKE '%{0}%';".format(q))
for visit in visits:
if stem_urls:
domain = self._stem_url(visit[1])
else:
domain = visit[1]
count = visit[2]
if domain in d.keys():
d[domain]['ids'].append(visit[0])
else:
d[domain] = {'ids': [], 'count': 0}
d[domain]['ids'].append(visit[0])
d[domain]['count'] += count
if sort_by == 'domains' or sort_by == None:
od = OrderedDict(sorted(d.items(), key=lambda t: t[0]))
elif sort_by == 'frequency':
od = OrderedDict(sorted(d.items(), key=lambda t: t[1]['count'], reverse=True))
self._domains = od
return od
def search_terms(self, sort_by='keywords', q=None):
"""Extracts search terms from urls in the database.
:param str sort_by: specifies how the OrderedDict should be sorted. Default is keywords.
:param str q: optional argument for a specific search term
:return: OrderedDict
"""
d = {}
pat = re.compile(r'(?:\?q=|\?p=|\?query=|search?q=|\?q\d=|\&q\d=|\?k=|\?text=|\&q=|key=|\?search=|\&search=|\&searchTerm=|\?searchTerm=)([a-zA-Z0-9äöüïéàèáÜÄÖ\%\+\-\*\s\.\,]+)', re.IGNORECASE)
if q:
entries = self._query("SELECT id, url FROM urls WHERE url LIKE '%{0}%';".format(q))
else:
entries = self._query('SELECT id, url FROM urls;')
for entry in entries:
domain = self._stem_url(entry[1])
matches = re.findall(pat, entry[1])
if matches:
for match in matches:
term = urllib.parse.unquote_plus(match)
if term not in d.keys():
d[term] = {'ids': [], 'count': 1, 'urls': [domain], 'match': match}
d[term]['ids'].append(entry[0])
else:
d[term]['ids'].append(entry[0])
d[term]['count'] += 1
if domain not in d[term]['urls']:
d[term]['urls'].append(domain)
if sort_by == 'keywords' or sort_by == None:
od = OrderedDict(sorted(d.items(), key=lambda t: t[0]))
elif sort_by == 'frequency':
od = OrderedDict(sorted(d.items(), key=lambda t: t[1]['count'], reverse=True))
self._keywords = od
return od
def export(self):
"""Writes the browsing history to a CSV file.
:return: Boolean value
"""
data = self._query(
"SELECT url_id, visits.id, url, title, rev_host, visit_count, typed, last_visit_date, redirect_urls, referrer, visit_date, visit_type FROM visits, urls WHERE visits.url_id = urls.id;")
if data:
data = [t + (self._browser_name, self.os_full) for t in data]
header = ['url_id', 'visits_id', 'url', 'title', 'rev_host', 'visit_count', 'typed', 'last_visit_date',
'redirect_urls', 'referrer', 'visit_date', 'visit_type', 'browser', 'operation system']
with open(os.path.join(self._file_path,'tmp', 'Export_Browserverlauf.csv'), 'w', encoding='utf-8') as f:
writer = csv.writer(f, delimiter=';', lineterminator='\n')
writer.writerow(header)
writer.writerows(data)
return True
else:
return False
def _date_range(self):
"""Helper function.
:return: Minimum and maximum date (timestamps)
"""
min_date, max_date = self._query("SELECT min(visit_date), max(visit_date) FROM visits;")[0]
if min_date and max_date:
min_date = self._get_date(min_date)
max_date = self._get_date(max_date)
return (min_date, max_date)
else:
return (' ', ' ')
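    # Domains selected for anonymisation are replaced by a salted SHA-256 hash
    # (see _update_db), so the original domain cannot be read from the export.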
def _hash_domain(self, domain):
"""Helper function to hash the domain.
:param domain: Domain (string)
:return: Hashed domain
"""
salt = uuid.uuid4().hex
return hashlib.sha256(salt.encode() + domain.encode()).hexdigest() + '-' + salt
def _get_date(self, timestamp):
"""Helper function to convert timestamps into date strings.
:param timestamp: Timestamp
:return: Date string (e.g., '13.05.2014 08:34:45')
"""
date = datetime.fromtimestamp(timestamp)
return date.strftime('%d.%m.%Y %H:%M:%S')
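    # Each browser uses its own epoch: Chrome counts microseconds and IE/Edge
    # 100-nanosecond ticks since 1601-01-01, Safari seconds since 2001-01-01 and
    # Firefox microseconds since the Unix epoch; all are normalised to POSIX time.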
def _convert_timestamp(self, timestamp, browser_name=None, tz='utc'):
"""Helper function to convert different timestamps formats into date strings or POSIX timestamp.
:param timestamp: Timestamp
:return: POSIX timestamp (UTC)
"""
if browser_name == 'Chrome':
date = datetime(1601, 1, 1) + timedelta(microseconds=timestamp)
elif browser_name == 'IE11':
date = datetime(1601, 1, 1) + timedelta(microseconds=timestamp * 0.1)
elif browser_name == 'Safari':
date = datetime(2001, 1, 1) + timedelta(seconds=timestamp)
elif browser_name == 'Firefox':
date = datetime.fromtimestamp(timestamp / 1000000)
else:
date = datetime.fromtimestamp(timestamp)
return date.timestamp()
def _get_dto(self, timestamp):
"""Helper function to convert a timestamp to a datetime object
:param timestamp: Timestamp
:return: Datetime object
"""
return datetime.fromtimestamp(timestamp / 1000000)
def _min_date(self, num_days):
"""Helper function to determine the minimum date
:param int num_days: Number days to go back in time
:return: timestamp (UTC)
"""
today = datetime.today()
days = timedelta(num_days)
min_date = today - days
return min_date.timestamp()
def _stem_url(self, url):
"""Helper function to stem URLs.
:param str url: URL
:return str: Domain
"""
anonym_pattern = re.compile('anonymisiert-[\w]+\-[\w]+')
stemmed_url = self._is_url(url, r=True)
if stemmed_url:
if stemmed_url[:4] == 'www.':
return stemmed_url[4:]
else:
return stemmed_url
else:
# checking for domain made anonymous
if re.findall(anonym_pattern, url):
return re.findall(anonym_pattern, url)[0]
else:
# check if url is already stemmed
if url[:-5:-1] == '***/':
return url[:-4]
else:
return url
def _is_url(self, url, r=False):
"""Helper function to check if a string is an URL.
:param url: URL (string)
:param r: Whether the URL should be return or not
:return: URL (string) or boolean value
"""
url_pattern = re.compile('(?<=\:\/\/)[a-z0-9\.\-\:]+')
match = re.findall(url_pattern, url)
if match:
if r:
return match[0]
else:
return True
else:
return False
def _root_path(self, relative_path):
"""Helper function for path handling after bundling with pyinstaller.
        :param str relative_path: Relative path
"""
# Adapted from max' StackOverflow answer:
# https://stackoverflow.com/questions/7674790/bundling-data-files-with-pyinstaller-onefile/13790741#13790741
# on 2017-07-31
try:
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath('.')
return os.path.join(base_path, relative_path)
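# Module-level counterpart of BrowsingHistory._root_path, needed before the
# BrowsingHistory and Flask objects below are created.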
def root_path(relative_path):
"""Helper function for path handling after app bundling
    :param str relative_path: Relative path
"""
# Adapted from StackOverflow answer: https://stackoverflow.com/questions/7674790/bundling-data-files-with-pyinstaller-onefile/13790741#13790741; 2017-07-31
try:
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath('.')
return os.path.join(base_path, relative_path)
if not os.path.isdir(root_path('uploads')):
os.mkdir(root_path('uploads'))
ALLOWED_EXTENSIONS = set(['sqlite', 'dat', 'plist', 'History', 'db'])
FILE_PATH = os.path.split(os.path.realpath(__file__))[0]
bh = BrowsingHistory()
app = Flask(__name__, root_path=root_path('.'))
app.secret_key = '8927-bdjbj20AWER$_'
#app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
# Taken from Flask doc example: http://flask.pocoo.org/docs/0.12/
if '.' in filename:
        return filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
else:
return filename in ALLOWED_EXTENSIONS
def _os_name(_os_full):
pat = re.compile(r'(?<=Darwin\s)\d{1,2}')
match = re.findall(pat, _os_full)
if match:
if match[0] == '10':
return 'Mac OS X Snow Leopard'
elif match[0] == '11':
return 'Mac OS X Lion'
elif match[0] == '12':
return 'OS X Mountain Lion'
elif match[0] == '13':
return 'OS X Mavericks'
elif match[0] == '14':
return 'OS X Yosemite'
elif match[0] == '15':
return 'OS X El Capitan'
elif match[0] == '16':
return 'macOS Sierra'
elif match[0] == '17':
return 'macOS High Sierra'
else:
return _os_full
@app.route('/exit')
def shutdown_server():
# Taken from Flask doc example: http://flask.pocoo.org/docs/0.12/
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
return 'Das Programm wurde beendet. Sie können das Fenster schliessen.'
@app.route('/', methods=['GET', 'POST'])
def index():
# Adapted from Flask doc example: http://flask.pocoo.org/docs/0.12/
os_name = _os_name(bh.os_full)
if request.method == 'GET':
if request.args.get('load'):
return render_template('index.html', os=os_name)
elif request.args.get('notFound'):
flash('Der Browserverlauf wurde nicht gefunden. Bitte wählen Sie die Datei manuell aus.')
not_found = True
return render_template('index.html', os=os_name)
elif request.args.get('fileError'):
flash('Die Datei konnte nicht gelesen werden.')
not_found = True
return render_template('index.html', os=os_name)
else:
if bh.ready:
return redirect(url_for('dashboard'))
else:
return render_template('index.html', os=os_name)
elif request.method == 'POST':
browser_name = request.form.get('browser_name')
if 'file' in request.files:
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(url_for('index', notFound=True, os=os_name))
file = request.files['file']
            # if the user does not select a file, the browser also
            # submits an empty part without a filename
if file.filename == '':
flash('No selected file')
return redirect(url_for('index', notFound=True, os=os_name))
if file and allowed_file(file.filename):
if len(os.listdir(root_path('uploads'))) >= 1:
for f in os.listdir(root_path('uploads')):
os.remove(os.path.join(root_path('uploads'), f))
filename = secure_filename(file.filename)
file.save(os.path.join(root_path('uploads'), filename))
state = bh.load(file_path=os.path.join(root_path('uploads'), filename), browser_name=browser_name)
else:
return redirect(url_for('index', fileError=True, os=os_name))
else:
state = bh.load(browser_name=browser_name)
if state:
return redirect(url_for('dashboard'))
else:
return redirect(url_for('index', notFound=True, client=browser_name, os=os_name))
@app.route('/load')
def load():
return redirect(url_for('index', load=True))
@app.route('/dashboard')
def dashboard():
if bh.ready == False:
return redirect(url_for('index'))
else:
date_range = bh.date_range
num_domains = bh.num_domains
browser_name = bh._browser_name
top_10 = bh.visits(date = False, n = 10, ascending = False, plot = False)
top_10 = OrderedDict(sorted(top_10.items(), key=lambda t: t[1], reverse=True))
return render_template('dashboard.html', date_range=date_range, num_domains=num_domains, browser_name=browser_name, top_10=top_10)
@app.route('/select', methods=['GET', 'POST'])
def select():
if request.method == 'POST':
selection = request.form.getlist('check')
for domain in selection:
bh._update_db(domain, kind='domains')
domains = bh.select_domains()
elif request.method == 'GET':
if request.args.get('sort') or request.args.get('q'):
domains = bh.select_domains(sort_by=request.args.get('sort'), q=request.args.get('q'))
else:
domains = bh.select_domains()
return render_template('select_domains.html', domains=domains)
@app.route('/search-terms', methods=['POST', 'GET'])
def search_terms():
if request.method == 'POST':
selection = request.form.getlist('check')
for search_term in selection:
bh._update_db(search_term, kind='keywords')
search_terms = bh.search_terms()
elif request.method == 'GET':
if request.args.get('sort') or request.args.get('q'):
search_terms = bh.search_terms(sort_by=request.args.get('sort'), q=request.args.get('q'))
else:
search_terms = bh.search_terms()
return render_template('search_terms.html', search_terms=search_terms)
@app.route('/export')
def export():
# Adapted from Flask doc example: http://flask.pocoo.org/docs/0.12/
if bh.export():
return send_from_directory(os.path.join(FILE_PATH, 'tmp'), 'Export_Browserverlauf.csv', as_attachment=True)
else:
flash('Export nicht möglich. Bitte laden Sie zuerst einen Browserverlauf.')
return render_template('index.html', os=' '.join([bh.os, bh.os_release]))
@app.route('/log')
def get_log():
# Adapted from Flask doc example: http://flask.pocoo.org/docs/0.12/
if 'server.log' in os.listdir(os.path.join(FILE_PATH, 'tmp')):
return send_from_directory(os.path.join(FILE_PATH, 'tmp'), 'server.log', as_attachment=True)
else:
flash('Es ist kein Log-File gefunden worden.')
return render_template('index.html', os=' '.join([bh.os, bh.os_release]))
@app.route('/faqs')
def faqs():
return render_template('faqs.html')
@app.route('/contact')
def contact():
return render_template('contact.html')
@app.route('/entries', methods=['POST', 'GET'])
def list_entries():
if request.method == 'GET':
if request.args.get('sort') or request.args.get('q'):
urls = bh.entries(sort_by=request.args.get('sort'), q=request.args.get('q'))
else:
urls = bh.entries()
elif request.method == 'POST':
selection = request.form.getlist('check')
bh._update_db(selection, kind='urls')
urls = bh.entries()
return render_template('entries.html', domains=urls)
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.errorhandler(405)
def method_not_allowed(e):
return render_template('405.html'), 405
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
if __name__ == '__main__':
print('STATUS: BrowsingHistoryEditor wird gestartet ...')
if not app.debug:
import logging
from logging import FileHandler
file_handler = FileHandler(os.path.join(FILE_PATH, 'tmp', 'server.log'))
file_handler.setLevel(logging.WARNING)
app.logger.addHandler(file_handler)
logging.basicConfig(filename=os.path.join(FILE_PATH, 'tmp', 'server.log'), level=logging.DEBUG)
webbrowser.open('http://localhost:5000', new=2)
print('STATUS: BrowsingHistoryEditor läuft auf http://localhost:5000 (Drücken Sie CTRL+C, um das Programm zu beenden)')
app.run(host='localhost', port=5000, debug=False)
|
grwllrnc/BrowsingHistoryEditor
|
main.py
|
Python
|
mit
| 44,796
|
[
"VisIt"
] |
688800071c8bb99be8bbeb30931cea2f2d2f71e4a069f6b525a64d73c699f507
|
import os, sys, re, inspect, types, errno, pprint, subprocess, io, shutil, time, copy
import path_tool
path_tool.activate_module('FactorySystem')
path_tool.activate_module('argparse')
from ParseGetPot import ParseGetPot
from socket import gethostname
#from options import *
from util import *
from RunParallel import RunParallel
from CSVDiffer import CSVDiffer
from XMLDiffer import XMLDiffer
from Tester import Tester
from PetscJacobianTester import PetscJacobianTester
from InputParameters import InputParameters
from Factory import Factory
from Parser import Parser
from Warehouse import Warehouse
import argparse
from optparse import OptionParser, OptionGroup, Values
from timeit import default_timer as clock
class TestHarness:
@staticmethod
def buildAndRun(argv, app_name, moose_dir):
if '--store-timing' in argv:
harness = TestTimer(argv, app_name, moose_dir)
else:
harness = TestHarness(argv, app_name, moose_dir)
harness.findAndRunTests()
sys.exit(harness.error_code)
def __init__(self, argv, app_name, moose_dir):
self.factory = Factory()
# Build a Warehouse to hold the MooseObjects
self.warehouse = Warehouse()
        # Get dependent applications and load dynamic tester plugins
# If applications have new testers, we expect to find them in <app_dir>/scripts/TestHarness/testers
dirs = [os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))]
sys.path.append(os.path.join(moose_dir, 'framework', 'scripts')) # For find_dep_apps.py
        # Use the find_dep_apps script to get the dependent applications for an app
import find_dep_apps
depend_app_dirs = find_dep_apps.findDepApps(app_name)
dirs.extend([os.path.join(my_dir, 'scripts', 'TestHarness') for my_dir in depend_app_dirs.split('\n')])
# Finally load the plugins!
self.factory.loadPlugins(dirs, 'testers', Tester)
self.test_table = []
self.num_passed = 0
self.num_failed = 0
self.num_skipped = 0
self.num_pending = 0
self.host_name = gethostname()
self.moose_dir = moose_dir
self.base_dir = os.getcwd()
self.run_tests_dir = os.path.abspath('.')
self.code = '2d2d6769726c2d6d6f6465'
self.error_code = 0x0
# Assume libmesh is a peer directory to MOOSE if not defined
if os.environ.has_key("LIBMESH_DIR"):
self.libmesh_dir = os.environ['LIBMESH_DIR']
else:
self.libmesh_dir = os.path.join(self.moose_dir, 'libmesh', 'installed')
self.file = None
# Parse arguments
self.parseCLArgs(argv)
self.checks = {}
self.checks['platform'] = getPlatforms()
# The TestHarness doesn't strictly require the existence of libMesh in order to run. Here we allow the user
# to select whether they want to probe for libMesh configuration options.
if self.options.skip_config_checks:
self.checks['compiler'] = set(['ALL'])
self.checks['petsc_version'] = 'N/A'
self.checks['library_mode'] = set(['ALL'])
self.checks['mesh_mode'] = set(['ALL'])
self.checks['dtk'] = set(['ALL'])
self.checks['unique_ids'] = set(['ALL'])
self.checks['vtk'] = set(['ALL'])
self.checks['tecplot'] = set(['ALL'])
self.checks['dof_id_bytes'] = set(['ALL'])
self.checks['petsc_debug'] = set(['ALL'])
self.checks['curl'] = set(['ALL'])
self.checks['tbb'] = set(['ALL'])
self.checks['superlu'] = set(['ALL'])
self.checks['unique_id'] = set(['ALL'])
self.checks['cxx11'] = set(['ALL'])
self.checks['asio'] = set(['ALL'])
else:
self.checks['compiler'] = getCompilers(self.libmesh_dir)
self.checks['petsc_version'] = getPetscVersion(self.libmesh_dir)
self.checks['library_mode'] = getSharedOption(self.libmesh_dir)
self.checks['mesh_mode'] = getLibMeshConfigOption(self.libmesh_dir, 'mesh_mode')
self.checks['dtk'] = getLibMeshConfigOption(self.libmesh_dir, 'dtk')
self.checks['unique_ids'] = getLibMeshConfigOption(self.libmesh_dir, 'unique_ids')
self.checks['vtk'] = getLibMeshConfigOption(self.libmesh_dir, 'vtk')
self.checks['tecplot'] = getLibMeshConfigOption(self.libmesh_dir, 'tecplot')
self.checks['dof_id_bytes'] = getLibMeshConfigOption(self.libmesh_dir, 'dof_id_bytes')
self.checks['petsc_debug'] = getLibMeshConfigOption(self.libmesh_dir, 'petsc_debug')
self.checks['curl'] = getLibMeshConfigOption(self.libmesh_dir, 'curl')
self.checks['tbb'] = getLibMeshConfigOption(self.libmesh_dir, 'tbb')
self.checks['superlu'] = getLibMeshConfigOption(self.libmesh_dir, 'superlu')
self.checks['unique_id'] = getLibMeshConfigOption(self.libmesh_dir, 'unique_id')
self.checks['cxx11'] = getLibMeshConfigOption(self.libmesh_dir, 'cxx11')
self.checks['asio'] = getIfAsioExists(self.moose_dir)
# Override the MESH_MODE option if using '--parallel-mesh' option
if self.options.parallel_mesh == True or \
(self.options.cli_args != None and \
self.options.cli_args.find('--parallel-mesh') != -1):
option_set = set(['ALL', 'PARALLEL'])
self.checks['mesh_mode'] = option_set
method = set(['ALL', self.options.method.upper()])
self.checks['method'] = method
self.initialize(argv, app_name)
"""
Recursively walks the current tree looking for tests to run
Error codes:
0x0 - Success
0x0* - Parser error
0x1* - TestHarness error
"""
def findAndRunTests(self, find_only=False):
self.error_code = 0x0
self.preRun()
self.start_time = clock()
try:
# PBS STUFF
if self.options.pbs:
# Check to see if we are using the PBS Emulator.
# Its expensive, so it must remain outside of the os.walk for loop.
self.options.PBSEmulator = self.checkPBSEmulator()
if self.options.pbs and os.path.exists(self.options.pbs):
self.options.processingPBS = True
self.processPBSResults()
else:
self.options.processingPBS = False
self.base_dir = os.getcwd()
for dirpath, dirnames, filenames in os.walk(self.base_dir, followlinks=True):
                # Prune submodule paths when searching for tests
if self.base_dir != dirpath and os.path.exists(os.path.join(dirpath, '.git')):
dirnames[:] = []
# walk into directories that aren't contrib directories
if "contrib" not in os.path.relpath(dirpath, os.getcwd()):
for file in filenames:
# set cluster_handle to be None initially (happens for each test)
self.options.cluster_handle = None
# See if there were other arguments (test names) passed on the command line
if file == self.options.input_file_name: #and self.test_match.search(file):
saved_cwd = os.getcwd()
sys.path.append(os.path.abspath(dirpath))
os.chdir(dirpath)
if self.prunePath(file):
continue
# Build a Parser to parse the objects
parser = Parser(self.factory, self.warehouse)
# Parse it
self.error_code = self.error_code | parser.parse(file)
# Retrieve the tests from the warehouse
testers = self.warehouse.getActiveObjects()
# Augment the Testers with additional information directly from the TestHarness
for tester in testers:
self.augmentParameters(file, tester)
# Short circuit this loop if we've only been asked to parse Testers
# Note: The warehouse will accumulate all testers in this mode
if find_only:
self.warehouse.markAllObjectsInactive()
continue
# Clear out the testers, we won't need them to stick around in the warehouse
self.warehouse.clear()
if self.options.enable_recover:
testers = self.appendRecoverableTests(testers)
# Handle PBS tests.cluster file
if self.options.pbs:
(tester, command) = self.createClusterLauncher(dirpath, testers)
if command is not None:
self.runner.run(tester, command)
else:
# Go through the Testers and run them
for tester in testers:
                        # Double the allotted time for tests when running with the valgrind option
tester.setValgrindMode(self.options.valgrind_mode)
# When running in valgrind mode, we end up with a ton of output for each failed
# test. Therefore, we limit the number of fails...
if self.options.valgrind_mode and self.num_failed > self.options.valgrind_max_fails:
(should_run, reason) = (False, 'Max Fails Exceeded')
elif self.num_failed > self.options.max_fails:
(should_run, reason) = (False, 'Max Fails Exceeded')
elif tester.parameters().isValid('error_code'):
(should_run, reason) = (False, 'skipped (Parser Error)')
else:
(should_run, reason) = tester.checkRunnableBase(self.options, self.checks)
if should_run:
command = tester.getCommand(self.options)
# This method spawns another process and allows this loop to continue looking for tests
# RunParallel will call self.testOutputAndFinish when the test has completed running
# This method will block when the maximum allowed parallel processes are running
self.runner.run(tester, command)
else: # This job is skipped - notify the runner
if reason != '':
if (self.options.report_skipped and reason.find('skipped') != -1) or reason.find('skipped') == -1:
self.handleTestResult(tester.parameters(), '', reason)
self.runner.jobSkipped(tester.parameters()['test_name'])
os.chdir(saved_cwd)
sys.path.pop()
except KeyboardInterrupt:
print '\nExiting due to keyboard interrupt...'
sys.exit(0)
self.runner.join()
# Wait for all tests to finish
if self.options.pbs and self.options.processingPBS == False:
print '\n< checking batch status >\n'
self.options.processingPBS = True
self.processPBSResults()
self.cleanup()
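        # Any test failure sets the TestHarness bit (0x1*) on top of whatever
        # parser-error bits were already OR-ed into error_code while walking the tree.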
if self.num_failed:
self.error_code = self.error_code | 0x10
return
def createClusterLauncher(self, dirpath, testers):
self.options.test_serial_number = 0
command = None
tester = None
# Create the tests.cluster input file
# Loop through each tester and create a job
for tester in testers:
(should_run, reason) = tester.checkRunnableBase(self.options, self.checks)
if should_run:
if self.options.cluster_handle == None:
self.options.cluster_handle = open(dirpath + '/' + self.options.pbs + '.cluster', 'w')
self.options.cluster_handle.write('[Jobs]\n')
# This returns the command to run as well as builds the parameters of the test
# The resulting command once this loop has completed is sufficient to launch
# all previous jobs
command = tester.getCommand(self.options)
self.options.cluster_handle.write('[]\n')
self.options.test_serial_number += 1
else: # This job is skipped - notify the runner
if (reason != ''):
self.handleTestResult(tester.parameters(), '', reason)
self.runner.jobSkipped(tester.parameters()['test_name'])
# Close the tests.cluster file
if self.options.cluster_handle is not None:
self.options.cluster_handle.close()
self.options.cluster_handle = None
# Return the final tester/command (sufficient to run all tests)
return (tester, command)
def prunePath(self, filename):
test_dir = os.path.abspath(os.path.dirname(filename))
# Filter tests that we want to run
# Under the new format, we will filter based on directory not filename since it is fixed
prune = True
if len(self.tests) == 0:
prune = False # No filter
else:
for item in self.tests:
if test_dir.find(item) > -1:
prune = False
# Return the inverse of will_run to indicate that this path should be pruned
return prune
def augmentParameters(self, filename, tester):
params = tester.parameters()
# We are going to do some formatting of the path that is printed
# Case 1. If the test directory (normally matches the input_file_name) comes first,
# we will simply remove it from the path
# Case 2. If the test directory is somewhere in the middle then we should preserve
# the leading part of the path
test_dir = os.path.abspath(os.path.dirname(filename))
relative_path = test_dir.replace(self.run_tests_dir, '')
relative_path = relative_path.replace('/' + self.options.input_file_name + '/', ':')
relative_path = re.sub('^[/:]*', '', relative_path) # Trim slashes and colons
formatted_name = relative_path + '.' + tester.name()
params['test_name'] = formatted_name
params['test_dir'] = test_dir
params['relative_path'] = relative_path
params['executable'] = self.executable
params['hostname'] = self.host_name
params['moose_dir'] = self.moose_dir
params['base_dir'] = self.base_dir
if params.isValid('prereq'):
if type(params['prereq']) != list:
print "Option 'prereq' needs to be of type list in " + params['test_name']
sys.exit(1)
params['prereq'] = [relative_path.replace('/tests/', '') + '.' + item for item in params['prereq']]
# This method splits a lists of tests into two pieces each, the first piece will run the test for
# approx. half the number of timesteps and will write out a restart file. The second test will
# then complete the run using the MOOSE recover option.
def appendRecoverableTests(self, testers):
new_tests = []
for part1 in testers:
if part1.parameters()['recover'] == True:
# Clone the test specs
part2 = copy.deepcopy(part1)
# Part 1:
part1_params = part1.parameters()
part1_params['test_name'] += '_part1'
part1_params['cli_args'].append('--half-transient :Outputs/checkpoint=true')
part1_params['skip_checks'] = True
# Part 2:
part2_params = part2.parameters()
part2_params['prereq'].append(part1.parameters()['test_name'])
part2_params['delete_output_before_running'] = False
part2_params['cli_args'].append('--recover')
part2_params.addParam('caveats', ['recover'], "")
new_tests.append(part2)
testers.extend(new_tests)
return testers
## Finish the test by inspecting the raw output
def testOutputAndFinish(self, tester, retcode, output, start=0, end=0):
caveats = []
test = tester.specs # Need to refactor
if test.isValid('caveats'):
caveats = test['caveats']
if self.options.pbs and self.options.processingPBS == False:
(reason, output) = self.buildPBSBatch(output, tester)
elif self.options.dry_run:
reason = 'DRY_RUN'
output += '\n'.join(tester.processResultsCommand(self.moose_dir, self.options))
else:
(reason, output) = tester.processResults(self.moose_dir, retcode, self.options, output)
if self.options.scaling and test['scale_refine']:
caveats.append('scaled')
did_pass = True
if reason == '':
            # It ran OK, but is this test set to be skipped on any platform, compiler, or for some other reason?
if self.options.extra_info:
checks = ['platform', 'compiler', 'petsc_version', 'mesh_mode', 'method', 'library_mode', 'dtk', 'unique_ids']
for check in checks:
if not 'ALL' in test[check]:
caveats.append(', '.join(test[check]))
if len(caveats):
result = '[' + ', '.join(caveats).upper() + '] OK'
elif self.options.pbs and self.options.processingPBS == False:
result = 'LAUNCHED'
else:
result = 'OK'
elif reason == 'DRY_RUN':
result = 'DRY_RUN'
else:
result = 'FAILED (%s)' % reason
did_pass = False
if self.options.pbs and self.options.processingPBS == False and did_pass == True:
# Handle the launch result, but do not add it to the results table (except if we learned that QSUB failed to launch for some reason)
self.handleTestResult(tester.specs, output, result, start, end, False)
return did_pass
else:
self.handleTestResult(tester.specs, output, result, start, end)
return did_pass
def getTiming(self, output):
time = ''
m = re.search(r"Active time=(\S+)", output)
if m != None:
return m.group(1)
def getSolveTime(self, output):
time = ''
m = re.search(r"solve().*", output)
if m != None:
return m.group().split()[5]
def checkExpectError(self, output, expect_error):
if re.search(expect_error, output, re.MULTILINE | re.DOTALL) == None:
#print "%" * 100, "\nExpect Error Pattern not found:\n", expect_error, "\n", "%" * 100, "\n"
return False
else:
return True
# PBS Defs
def checkPBSEmulator(self):
try:
qstat_process = subprocess.Popen(['qstat', '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
qstat_output = qstat_process.communicate()
except OSError:
# qstat binary is not available
print 'qstat not available. Perhaps you need to load the PBS module?'
sys.exit(1)
if len(qstat_output[1]):
# The PBS Emulator has no --version argument, and thus returns output to stderr
return True
else:
return False
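    # Re-parses every test spec referenced in the PBS batch file and maps the
    # qstat job states (F/R/E/Q) back onto TestHarness results.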
def processPBSResults(self):
# If batch file exists, check the contents for pending tests.
if os.path.exists(self.options.pbs):
# Build a list of launched jobs
batch_file = open(self.options.pbs)
batch_list = [y.split(':') for y in [x for x in batch_file.read().split('\n')]]
batch_file.close()
del batch_list[-1:]
            # Loop through launched jobs and match the TEST_NAME to determine the correct stdout (Output_Path)
for job in batch_list:
file = '/'.join(job[2].split('/')[:-2]) + '/' + job[3]
# Build a Warehouse to hold the MooseObjects
warehouse = Warehouse()
# Build a Parser to parse the objects
parser = Parser(self.factory, warehouse)
# Parse it
parser.parse(file)
# Retrieve the tests from the warehouse
testers = warehouse.getAllObjects()
for tester in testers:
self.augmentParameters(file, tester)
for tester in testers:
# Build the requested Tester object
if job[1] == tester.parameters()['test_name']:
# Create Test Type
# test = self.factory.create(tester.parameters()['type'], tester)
# Get job status via qstat
qstat = ['qstat', '-f', '-x', str(job[0])]
qstat_command = subprocess.Popen(qstat, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
qstat_stdout = qstat_command.communicate()[0]
if qstat_stdout != None:
output_value = re.search(r'job_state = (\w+)', qstat_stdout).group(1)
else:
return ('QSTAT NOT FOUND', '')
# Report the current status of JOB_ID
if output_value == 'F':
# F = Finished. Get the exit code reported by qstat
exit_code = int(re.search(r'Exit_status = (-?\d+)', qstat_stdout).group(1))
# Read the stdout file
if os.path.exists(job[2]):
output_file = open(job[2], 'r')
# Not sure I am doing this right: I have to change the TEST_DIR to match the temporary cluster_launcher TEST_DIR location, thus violating the tester.specs...
tester.parameters()['test_dir'] = '/'.join(job[2].split('/')[:-1])
outfile = output_file.read()
output_file.close()
self.testOutputAndFinish(tester, exit_code, outfile)
else:
# I ran into this scenario when the cluster went down, but launched/completed my job :)
self.handleTestResult(tester.specs, '', 'FAILED (NO STDOUT FILE)', 0, 0, True)
elif output_value == 'R':
# Job is currently running
self.handleTestResult(tester.specs, '', 'RUNNING', 0, 0, True)
elif output_value == 'E':
# Job is exiting
self.handleTestResult(tester.specs, '', 'EXITING', 0, 0, True)
elif output_value == 'Q':
# Job is currently queued
self.handleTestResult(tester.specs, '', 'QUEUED', 0, 0, True)
else:
return ('BATCH FILE NOT FOUND', '')
def buildPBSBatch(self, output, tester):
# Create/Update the batch file
if 'command not found' in output:
return ('QSUB NOT FOUND', '')
else:
# Get the Job information from the ClusterLauncher
results = re.findall(r'JOB_NAME: (\w+) JOB_ID:.* (\d+).*TEST_NAME: (\S+)', output)
if len(results) != 0:
file_name = self.options.pbs
job_list = open(os.path.abspath(os.path.join(tester.specs['executable'], os.pardir)) + '/' + file_name, 'a')
for result in results:
(test_dir, job_id, test_name) = result
qstat_command = subprocess.Popen(['qstat', '-f', '-x', str(job_id)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
qstat_stdout = qstat_command.communicate()[0]
# Get the Output_Path from qstat stdout
if qstat_stdout != None:
output_value = re.search(r'Output_Path(.*?)(^ +)', qstat_stdout, re.S | re.M).group(1)
output_value = output_value.split(':')[1].replace('\n', '').replace('\t', '').strip()
else:
job_list.close()
return ('QSTAT NOT FOUND', '')
# Write job_id, test['test_name'], and Output_Path to the batch file
job_list.write(str(job_id) + ':' + test_name + ':' + output_value + ':' + self.options.input_file_name + '\n')
# Return to TestHarness and inform we have launched the job
job_list.close()
return ('', 'LAUNCHED')
else:
return ('QSTAT INVALID RESULTS', output)
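# Illustrative note (not part of the original harness): each line written to the
# PBS batch file above has the colon-delimited form
#   <job_id>:<test_name>:<Output_Path>:<input_file_name>
# for example (all values hypothetical):
#   12345.pbsserver:kernels/simple_diffusion.test:/scratch/run_001/job.o12345:tests
# processPBSResults() later splits each line on ':' and relies on this ordering
# (job[0] = job id, job[1] = test name, job[2] = stdout path, job[3] = spec file name).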
def cleanPBSBatch(self):
# Open the PBS batch file and assign it to a list
if os.path.exists(self.options.pbs_cleanup):
batch_file = open(self.options.pbs_cleanup, 'r')
batch_list = [line.split(':') for line in batch_file.read().split('\n')]
batch_file.close()
del batch_list[-1:]
else:
print 'PBS batch file not found:', self.options.pbs_cleanup
sys.exit(1)
# Loop through launched jobs and delete what's found.
for job in batch_list:
if os.path.exists(job[2]):
batch_dir = os.path.abspath(os.path.join(job[2], os.pardir)).split('/')
if os.path.exists('/'.join(batch_dir)):
shutil.rmtree('/'.join(batch_dir))
if os.path.exists('/'.join(batch_dir[:-1]) + '/' + self.options.pbs_cleanup + '.cluster'):
os.remove('/'.join(batch_dir[:-1]) + '/' + self.options.pbs_cleanup + '.cluster')
os.remove(self.options.pbs_cleanup)
# END PBS Defs
## Update global variables and print output based on the test result
# Containing OK means it passed, skipped means skipped, anything else means it failed
def handleTestResult(self, specs, output, result, start=0, end=0, add_to_table=True):
timing = ''
if self.options.timing:
timing = self.getTiming(output)
elif self.options.store_time:
timing = self.getSolveTime(output)
# Only add to the test_table if told to. We now have enough cases where we wish to print to the screen, but not
# in the 'Final Test Results' area.
if add_to_table:
self.test_table.append( (specs, output, result, timing, start, end) )
if result.find('OK') != -1 or result.find('DRY_RUN') != -1:
self.num_passed += 1
elif result.find('skipped') != -1:
self.num_skipped += 1
elif result.find('deleted') != -1:
self.num_skipped += 1
elif result.find('LAUNCHED') != -1 or result.find('RUNNING') != -1 or result.find('QUEUED') != -1 or result.find('EXITING') != -1:
self.num_pending += 1
else:
self.num_failed += 1
self.postRun(specs, timing)
if self.options.show_directory:
print printResult(specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options)
else:
print printResult(specs['test_name'], result, timing, start, end, self.options)
if self.options.verbose or ('FAILED' in result and not self.options.quiet):
output = output.replace('\r', '\n') # replace the carriage returns with newlines
lines = output.split('\n')
color = ''
if 'EXODIFF' in result or 'CSVDIFF' in result:
color = 'YELLOW'
elif 'FAILED' in result:
color = 'RED'
else:
color = 'GREEN'
test_name = colorText(specs['test_name'] + ": ", color, colored=self.options.colored, code=self.options.code)
output = test_name + ("\n" + test_name).join(lines)
print output
# Print result line again at the bottom of the output for failed tests
if self.options.show_directory:
print printResult(specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options), "(reprint)"
else:
print printResult(specs['test_name'], result, timing, start, end, self.options), "(reprint)"
if not 'skipped' in result:
if self.options.file:
if self.options.show_directory:
self.file.write(printResult( specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options, color=False) + '\n')
self.file.write(output)
else:
self.file.write(printResult( specs['test_name'], result, timing, start, end, self.options, color=False) + '\n')
self.file.write(output)
if self.options.sep_files or (self.options.fail_files and 'FAILED' in result) or (self.options.ok_files and result.find('OK') != -1):
fname = os.path.join(specs['test_dir'], specs['test_name'].split('/')[-1] + '.' + result[:6] + '.txt')
f = open(fname, 'w')
f.write(printResult( specs['test_name'], result, timing, start, end, self.options, color=False) + '\n')
f.write(output)
f.close()
# Write the app_name to a file, if the tests passed
def writeState(self, app_name):
# If we encounter bitten_status_moose environment, build a line itemized list of applications which passed their tests
if os.environ.has_key("BITTEN_STATUS_MOOSE"):
result_file = open(os.path.join(self.moose_dir, 'test_results.log'), 'a')
result_file.write(os.path.split(app_name)[1].split('-')[0] + '\n')
result_file.close()
# Print final results, close open files, and exit with the correct error code
def cleanup(self):
# Print the results table again if a bunch of output was spewed to the screen between
# tests as they were running
if self.options.verbose or (self.num_failed != 0 and not self.options.quiet):
print '\n\nFinal Test Results:\n' + ('-' * (TERM_COLS-1))
for (test, output, result, timing, start, end) in sorted(self.test_table, key=lambda x: x[2], reverse=True):
if self.options.show_directory:
print printResult(test['relative_path'] + '/' + test['test_name'].split('/')[-1], result, timing, start, end, self.options)
else:
print printResult(test['test_name'], result, timing, start, end, self.options)
time = clock() - self.start_time
print '-' * (TERM_COLS-1)
print 'Ran %d tests in %.1f seconds' % (self.num_passed+self.num_failed, time)
if self.num_passed:
summary = '<g>%d passed</g>'
else:
summary = '<b>%d passed</b>'
summary += ', <b>%d skipped</b>'
if self.num_pending:
summary += ', <c>%d pending</c>'
else:
summary += ', <b>%d pending</b>'
if self.num_failed:
summary += ', <r>%d FAILED</r>'
else:
summary += ', <b>%d failed</b>'
# Mask off TestHarness error codes to report parser errors
if self.error_code & 0x0F:
summary += ', <r>FATAL PARSER ERROR</r>'
print colorText( summary % (self.num_passed, self.num_skipped, self.num_pending, self.num_failed), "", html = True, \
colored=self.options.colored, code=self.options.code )
if self.options.pbs:
print '\nYour PBS batch file:', self.options.pbs
if self.file:
self.file.close()
if self.num_failed == 0:
self.writeState(self.executable)
def initialize(self, argv, app_name):
# Initialize the parallel runner with how many tests to run in parallel
self.runner = RunParallel(self, self.options.jobs, self.options.load)
## Save executable-under-test name to self.executable
self.executable = os.getcwd() + '/' + app_name + '-' + self.options.method
# Save the output dir since the current working directory changes during tests
self.output_dir = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), self.options.output_dir)
# Create the output dir if they ask for it. It is easier to ask for forgiveness than permission
if self.options.output_dir:
try:
os.makedirs(self.output_dir)
except OSError, ex:
if ex.errno == errno.EEXIST: pass
else: raise
# Open the file to redirect output to and set the quiet option for file output
if self.options.file:
self.file = open(os.path.join(self.output_dir, self.options.file), 'w')
if self.options.file or self.options.fail_files or self.options.sep_files:
self.options.quiet = True
## Parse command line options and assign them to self.options
def parseCLArgs(self, argv):
parser = argparse.ArgumentParser(description='A tool used to test MOOSE based applications')
parser.add_argument('test_name', nargs=argparse.REMAINDER)
parser.add_argument('--opt', action='store_const', dest='method', const='opt', help='test the app_name-opt binary')
parser.add_argument('--dbg', action='store_const', dest='method', const='dbg', help='test the app_name-dbg binary')
parser.add_argument('--devel', action='store_const', dest='method', const='devel', help='test the app_name-devel binary')
parser.add_argument('--oprof', action='store_const', dest='method', const='oprof', help='test the app_name-oprof binary')
parser.add_argument('--pro', action='store_const', dest='method', const='pro', help='test the app_name-pro binary')
parser.add_argument('-j', '--jobs', nargs=1, metavar='int', action='store', type=int, dest='jobs', default=1, help='run test binaries in parallel')
parser.add_argument('-e', action='store_true', dest='extra_info', help='Display "extra" information including all caveats and deleted tests')
parser.add_argument('-c', '--no-color', action='store_false', dest='colored', help='Do not show colored output')
parser.add_argument('--heavy', action='store_true', dest='heavy_tests', help='Run tests marked with HEAVY : True')
parser.add_argument('--all-tests', action='store_true', dest='all_tests', help='Run normal tests and tests marked with HEAVY : True')
parser.add_argument('-g', '--group', action='store', type=str, dest='group', default='ALL', help='Run only tests in the named group')
parser.add_argument('--not_group', action='store', type=str, dest='not_group', help='Run only tests NOT in the named group')
# parser.add_argument('--dofs', action='store', dest='dofs', help='This option is for automatic scaling which is not currently implemented in MOOSE 2.0')
parser.add_argument('--dbfile', nargs='?', action='store', dest='dbFile', help='Location of the timing database file. If not set, assumes $HOME/timingDB/timing.sqlite')
parser.add_argument('-l', '--load-average', action='store', type=float, dest='load', default=64.0, help='Do not run additional tests if the load average is at least LOAD')
parser.add_argument('-t', '--timing', action='store_true', dest='timing', help='Report Timing information for passing tests')
parser.add_argument('-s', '--scale', action='store_true', dest='scaling', help='Scale problems that have SCALE_REFINE set')
parser.add_argument('-i', nargs=1, action='store', type=str, dest='input_file_name', default='tests', help='The default test specification file to look for (default="tests").')
parser.add_argument('--libmesh_dir', nargs=1, action='store', type=str, dest='libmesh_dir', help='Currently only needed for bitten code coverage')
parser.add_argument('--skip-config-checks', action='store_true', dest='skip_config_checks', help='Skip configuration checks (all tests will run regardless of restrictions)')
parser.add_argument('--parallel', '-p', nargs='?', action='store', type=int, dest='parallel', const=1, help='Number of processors to use when running mpiexec')
parser.add_argument('--n-threads', nargs=1, action='store', type=int, dest='nthreads', default=1, help='Number of threads to use when running mpiexec')
parser.add_argument('-d', action='store_true', dest='debug_harness', help='Turn on Test Harness debugging')
parser.add_argument('--recover', action='store_true', dest='enable_recover', help='Run a test in recover mode')
parser.add_argument('--valgrind', action='store_const', dest='valgrind_mode', const='NORMAL', help='Run normal valgrind tests')
parser.add_argument('--valgrind-heavy', action='store_const', dest='valgrind_mode', const='HEAVY', help='Run heavy valgrind tests')
parser.add_argument('--valgrind-max-fails', nargs=1, type=int, dest='valgrind_max_fails', default=5, help='The number of valgrind tests allowed to fail before any additional valgrind tests will run')
parser.add_argument('--max-fails', nargs=1, type=int, dest='max_fails', default=50, help='The number of tests allowed to fail before any additional tests will run')
parser.add_argument('--pbs', nargs='?', metavar='batch_file', dest='pbs', const='generate', help='Enable launching tests via PBS. If no batch file is specified one will be created for you')
parser.add_argument('--pbs-cleanup', nargs=1, metavar='batch_file', help='Clean up the directories/files created by PBS. You must supply the same batch_file used to launch PBS.')
parser.add_argument('--re', action='store', type=str, dest='reg_exp', help='Run tests that match --re=regular_expression')
# Options that pass straight through to the executable
parser.add_argument('--parallel-mesh', action='store_true', dest='parallel_mesh', help='Pass "--parallel-mesh" to executable')
parser.add_argument('--error', action='store_true', help='Run the tests with warnings as errors (Pass "--error" to executable)')
parser.add_argument('--error-unused', action='store_true', help='Run the tests with errors on unused parameters (Pass "--error-unused" to executable)')
# Option to use for passing unwrapped options to the executable
parser.add_argument('--cli-args', nargs='?', type=str, dest='cli_args', help='Append the following list of arguments to the command line (Encapsulate the command in quotes)')
parser.add_argument('--dry-run', action='store_true', dest='dry_run', help="Pass --dry-run to print commands to run, but don't actually run them")
outputgroup = parser.add_argument_group('Output Options', 'These options control the output of the test harness. The sep-files options write output to files named test_name.TEST_RESULT.txt. All file output will overwrite old files')
outputgroup.add_argument('-v', '--verbose', action='store_true', dest='verbose', help='show the output of every test')
outputgroup.add_argument('-q', '--quiet', action='store_true', dest='quiet', help='only show the result of every test, don\'t show test output even if it fails')
outputgroup.add_argument('--no-report', action='store_false', dest='report_skipped', help='do not report skipped tests')
outputgroup.add_argument('--show-directory', action='store_true', dest='show_directory', help='Print test directory path in out messages')
outputgroup.add_argument('-o', '--output-dir', nargs=1, metavar='directory', dest='output_dir', default='', help='Save all output files in the directory, and create it if necessary')
outputgroup.add_argument('-f', '--file', nargs=1, action='store', dest='file', help='Write verbose output of each test to FILE and quiet output to terminal')
outputgroup.add_argument('-x', '--sep-files', action='store_true', dest='sep_files', help='Write the output of each test to a separate file. Only quiet output to terminal. This is equivalent to \'--sep-files-fail --sep-files-ok\'')
outputgroup.add_argument('--sep-files-ok', action='store_true', dest='ok_files', help='Write the output of each passed test to a separate file')
outputgroup.add_argument('-a', '--sep-files-fail', action='store_true', dest='fail_files', help='Write the output of each FAILED test to a separate file. Only quiet output to terminal.')
outputgroup.add_argument("--store-timing", action="store_true", dest="store_time", help="Store timing in the SQL database: $HOME/timingDB/timing.sqlite A parent directory (timingDB) must exist.")
outputgroup.add_argument("--revision", nargs=1, action="store", type=str, dest="revision", help="The current revision being tested. Required when using --store-timing.")
outputgroup.add_argument("--yaml", action="store_true", dest="yaml", help="Dump the parameters for the testers in Yaml Format")
outputgroup.add_argument("--dump", action="store_true", dest="dump", help="Dump the parameters for the testers in GetPot Format")
code = True
if self.code.decode('hex') in argv:
del argv[argv.index(self.code.decode('hex'))]
code = False
self.options = parser.parse_args(argv[1:])
self.tests = self.options.test_name
self.options.code = code
# Convert all list based options of length one to scalars
for key, value in vars(self.options).items():
if type(value) == list and len(value) == 1:
tmp_str = getattr(self.options, key)
setattr(self.options, key, value[0])
self.checkAndUpdateCLArgs()
## Called after options are parsed from the command line
# Exit if options don't make any sense, print warnings if they are merely weird
def checkAndUpdateCLArgs(self):
opts = self.options
if opts.output_dir and not (opts.file or opts.sep_files or opts.fail_files or opts.ok_files):
print 'WARNING: --output-dir is specified but no output files will be saved, use -f or a --sep-files option'
if opts.group == opts.not_group:
print 'ERROR: The group and not_group options cannot specify the same group'
sys.exit(1)
if opts.store_time and not (opts.revision):
print 'ERROR: --store-timing is specified but no revision'
sys.exit(1)
if opts.store_time:
# timing returns Active Time, while store_timing returns Solve Time.
# Thus we need to turn off timing.
opts.timing = False
opts.scaling = True
if opts.valgrind_mode and (opts.parallel > 1 or opts.nthreads > 1):
print 'ERROR: --parallel and/or --threads can not be used with --valgrind'
sys.exit(1)
# Update any keys from the environment as necessary
if not self.options.method:
if os.environ.has_key('METHOD'):
self.options.method = os.environ['METHOD']
else:
self.options.method = 'opt'
if not self.options.valgrind_mode:
self.options.valgrind_mode = ''
# Update libmesh_dir to reflect arguments
if opts.libmesh_dir:
self.libmesh_dir = opts.libmesh_dir
# Generate a batch file if the PBS argument is supplied without a file
if opts.pbs == 'generate':
largest_serial_num = 0
for name in os.listdir('.'):
m = re.search('pbs_(\d{3})', name)
if m != None and int(m.group(1)) > largest_serial_num:
largest_serial_num = int(m.group(1))
opts.pbs = "pbs_" + str(largest_serial_num+1).zfill(3)
# When running heavy tests, we'll make sure we use --no-report
if opts.heavy_tests:
self.options.report_skipped = False
def postRun(self, specs, timing):
return
def preRun(self):
if self.options.yaml:
self.factory.printYaml("Tests")
sys.exit(0)
elif self.options.dump:
self.factory.printDump("Tests")
sys.exit(0)
if self.options.pbs_cleanup:
self.cleanPBSBatch()
sys.exit(0)
def getOptions(self):
return self.options
#################################################################################################################################
# The TestTimer TestHarness
# This method finds and stores timing for individual tests. It is activated with --store-timing
#################################################################################################################################
CREATE_TABLE = """create table timing
(
app_name text,
test_name text,
revision text,
date int,
seconds real,
scale int,
load real
);"""
class TestTimer(TestHarness):
def __init__(self, argv, app_name, moose_dir):
TestHarness.__init__(self, argv, app_name, moose_dir)
try:
from sqlite3 import dbapi2 as sqlite
except:
print 'Error: --store-timing requires the sqlite3 python module.'
sys.exit(1)
self.app_name = app_name
self.db_file = self.options.dbFile
if not self.db_file:
home = os.environ['HOME']
self.db_file = os.path.join(home, 'timingDB/timing.sqlite')
if not os.path.exists(self.db_file):
print 'Warning: creating new database at default location: ' + str(self.db_file)
self.createDB(self.db_file)
else:
print 'Warning: Assuming database location ' + self.db_file
def createDB(self, fname):
from sqlite3 import dbapi2 as sqlite
print 'Creating empty database at ' + fname
con = sqlite.connect(fname)
cr = con.cursor()
cr.execute(CREATE_TABLE)
con.commit()
def preRun(self):
from sqlite3 import dbapi2 as sqlite
# Delete previous data if app_name and repo revision are found
con = sqlite.connect(self.db_file)
cr = con.cursor()
cr.execute('delete from timing where app_name = ? and revision = ?', (self.app_name, self.options.revision))
con.commit()
# After the run store the results in the database
def postRun(self, test, timing):
from sqlite3 import dbapi2 as sqlite
con = sqlite.connect(self.db_file)
cr = con.cursor()
timestamp = int(time.time())
load = os.getloadavg()[0]
# accumulate the test results
data = []
sum_time = 0
num = 0
parse_failed = False
# We're only interested in storing scaled data
if timing != None and test['scale_refine'] != 0:
sum_time += float(timing)
num += 1
data.append( (self.app_name, test['test_name'].split('/').pop(), self.options.revision, timestamp, timing, test['scale_refine'], load) )
# Insert the data into the database
cr.executemany('insert into timing values (?,?,?,?,?,?,?)', data)
con.commit()
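# Minimal sketch (not part of the original harness) of how the timing database
# written by TestTimer.postRun() above could be inspected afterwards. The column
# layout follows CREATE_TABLE; the function name and arguments are illustrative.
def _example_read_timing_db(db_file, app_name, revision):
    from sqlite3 import dbapi2 as sqlite
    con = sqlite.connect(db_file)
    cr = con.cursor()
    # Fetch the stored (test_name, seconds, scale, load) rows for one app/revision
    cr.execute('select test_name, seconds, scale, load from timing '
               'where app_name = ? and revision = ?', (app_name, revision))
    return cr.fetchall()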
|
giopastor/moose
|
python/TestHarness/TestHarness.py
|
Python
|
lgpl-2.1
| 43,648
|
[
"MOOSE",
"VTK"
] |
9e927497387b03dcd8a74da6b3d72a63dbc470df04edf88070f34ce1f3d562ed
|
#!/usr/bin/env python
###############################################################################
#
# __template__.py - Description!
#
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__author__ = "Josh Daly"
__copyright__ = "Copyright 2014"
__credits__ = ["Josh Daly"]
__license__ = "GPL3"
__version__ = "0.0.1"
__maintainer__ = "Josh Daly"
__email__ = ""
__status__ = "Development"
###############################################################################
import argparse
import sys
import glob
from multiprocessing import Pool
from subprocess import Popen, PIPE
from Bio import SeqIO
from Bio.Seq import Seq
#import os
#import errno
#import numpy as np
#np.seterr(all='raise')
#import matplotlib as mpl
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import axes3d, Axes3D
#from pylab import plot,subplot,axis,stem,show,figure
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# put classes here
###############################################################################
###############################################################################
###############################################################################
###############################################################################
def runCommand(cmd):
"""Run a command and take care of stdout
expects 'cmd' to be a string like "foo -b ar"
returns (stdout, stderr)
"""
p = Popen(cmd.split(' '), stdout=PIPE)
return p.communicate()
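# Illustrative usage of runCommand (the command string is a hypothetical example):
#   stdout, stderr = runCommand("makeblastdb -version")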
def doWork( args ):
""" Main wrapper"""
# objects
vectorsFasta = {}
vectorsBlast = []
# read in fasta file
for accession,sequence in SeqIO.to_dict(SeqIO.parse(args.fasta,"fasta")).items():
vectorsFasta[accession] = sequence.seq
# read in vector counts file
with open(args.vectorCounts, 'r') as fh:
for l in fh:
tabs = l.rstrip().split("\t")
count = int(tabs[0])
pidsqid = tabs[2]
if count >= args.threshold:
print pidsqid
"""
# parse fasta file using biopython
for accession,sequence in SeqIO.to_dict(SeqIO.parse(c_file,"fasta")).items():
if accession in genomes_dict:
pass
else:
#print accession
genomes_dict[accession] = [len(sequence),img_id, sequence.seq
"""
###############################################################################
###############################################################################
###############################################################################
###############################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f','--fasta', help="...")
parser.add_argument('-v','--vectorCounts', help="...")
parser.add_argument('-t','--threshold', type=int, default=0,help="...")
#parser.add_argument('input_file2', help="gut_img_ids")
#parser.add_argument('input_file3', help="oral_img_ids")
#parser.add_argument('input_file4', help="ids_present_gut_and_oral.csv")
#parser.add_argument('output_file', help="output file")
#parser.add_argument('positional_arg3', nargs='+', help="Multiple values")
#parser.add_argument('-X', '--optional_X', action="store_true", default=False, help="flag")
#parser.add_argument('-X', '--optional_X', action="store_true", type=int,default=False, help="flag")
# parse the arguments
args = parser.parse_args()
# do what we came here to do
doWork(args)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
|
JoshDaly/scriptShed
|
parse_blast_vectors.trackm.py
|
Python
|
gpl-2.0
| 4,899
|
[
"Biopython"
] |
364fb867e91e74750c7b770896e649be285f0ce62778578ca7ec96100bb6b6dd
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Interface with command line GULP.
http://projects.ivec.org
WARNING: you need to have GULP installed on your system.
"""
__author__ = "Bharat Medasani, Wenhao Sun"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Bharat Medasani"
__email__ = "[email protected],[email protected]"
__status__ = "Production"
__date__ = "$Jun 22, 2013M$"
import subprocess
import os
import re
from pymatgen.core.periodic_table import Element
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.bond_valence import BVAnalyzer
from monty.tempfile import ScratchDir
_anions = set(map(Element, ["O", "S", "F", "Cl", "Br", "N", "P"]))
_cations = set(map(Element, [
"Li", "Na", "K", # alkali metals
"Be", "Mg", "Ca", # alkaline metals
"Al", "Sc", "Ti", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu", "Zn", "Ge", "As",
"Y", "Zr", "Nb", "Mo", "Tc", "Ru", "Rh", "Pd", "Ag", "Cd", "In", "Sn", "Sb",
"Hf", "Ta", "W", "Re", "Os", "Ir", "Pt", "Au", "Hg", "Tl", "Pb", "Bi",
"La", "Ce", "Pr", "Nd", "Pm", "Sm", "Eu", "Gd", "Tb", "Dy", "Ho", "Er",
"Tm", "Yb", "Lu"
]))
_gulp_kw = {
# Control of calculation type
"angle", "bond", "cosmo", "cosmic", "cost", "defect", "distance",
"eem", "efg", "fit", "free_energy", "gasteiger", "genetic",
"gradients", "md", "montecarlo", "noautobond", "noenergy", "optimise",
"pot", "predict", "preserve_Q", "property", "phonon", "qeq", "qbond",
"single", "sm", "static_first", "torsion", "transition_state",
# Geometric variable specification
"breathe", "bulk_noopt", "cellonly", "conp", "conv", "isotropic",
"orthorhombic", "nobreathe", "noflgs", "shell", "unfix",
# Algorithm
"c6", "dipole", "fbfgs", "fix_molecule", "full", "hill", "kfull",
"marvinSE", "madelung", "minimum_image", "molecule", "molmec", "molq",
"newda", "noanisotropic_2b", "nod2sym", "nodsymmetry",
"noelectrostatics", "noexclude", "nofcentral", "nofirst_point",
"noksymmetry", "nolist_md", "nomcediff", "nonanal", "noquicksearch",
"noreal", "norecip", "norepulsive", "nosasinitevery", "nosderv",
"nozeropt", "numerical", "qiter", "qok", "spatial", "storevectors",
"nomolecularinternalke", "voight", "zsisa",
# Optimisation method
"conjugate", "dfp", "lbfgs", "numdiag", "positive", "rfo", "unit",
# Output control
"average", "broaden_dos", "cartesian", "compare", "conserved",
"dcharge", "dynamical_matrix",
"eigenvectors", "global", "hessian", "hexagonal", "intensity", "linmin",
"meanke", "nodensity_out", "nodpsym", "nofirst_point", "nofrequency",
"nokpoints", "operators", "outcon", "prt_eam", "prt_two",
"prt_regi_before", "qsas", "restore", "save", "terse",
# Structure control
"full", "hexagonal", "lower_symmetry", "nosymmetry",
# PDF control
"PDF", "PDFcut", "PDFbelow", "PDFkeep", "coreinfo", "nowidth", "nopartial",
# Miscellaneous
"nomodcoord", "oldunits", "zero_potential"
}
class GulpIO:
"""
To generate GULP input and process output
"""
def keyword_line(self, *args):
r"""
Checks if the input args are proper gulp keywords and
generates the 1st line of gulp input. Full keywords are expected.
Args:
\\*args: 1st line keywords
"""
# if len(list(filter(lambda x: x in _gulp_kw, args))) != len(args):
# raise GulpError("Wrong keywords given")
gin = " ".join(args)
gin += "\n"
return gin
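# Illustrative example (not part of the original docstring): keyword_line("optimise",
# "conp", "qok") simply joins the keywords and returns the line "optimise conp qok\n".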
def structure_lines(self, structure, cell_flg=True, frac_flg=True,
anion_shell_flg=True, cation_shell_flg=False,
symm_flg=True):
"""
Generates GULP input string corresponding to pymatgen structure.
Args:
structure: pymatgen Structure object
cell_flg (default = True): Option to use lattice parameters.
frac_flg (default = True): If True, fractional coordinates
are used. Else, cartesian coordinates in Angstroms are used.
******
GULP convention is to use fractional coordinates for periodic
structures and cartesian coordinates for non-periodic
structures.
******
anion_shell_flg (default = True): If True, anions are considered
polarizable.
cation_shell_flg (default = False): If True, cations are
considered polarizable.
symm_flg (default = True): If True, symmetry information is also
written.
Returns:
string containing structure for GULP input
"""
gin = ""
if cell_flg:
gin += "cell\n"
l = structure.lattice
lat_str = "{0:6f} {1:6f} {2:6f} {3:6f} {4:6f} {5:6f}".format(
l.a, l.b, l.c, l.alpha, l.beta, l.gamma
)
gin += lat_str + "\n"
if frac_flg:
gin += "frac\n"
coord_attr = "frac_coords"
else:
gin += "cart\n"
coord_attr = "coords"
for site in structure.sites:
coord = [str(i) for i in getattr(site, coord_attr)]
specie = site.specie
core_site_desc = specie.symbol + " core " + " ".join(coord) + "\n"
gin += core_site_desc
if ((specie in _anions and anion_shell_flg) or
(specie in _cations and cation_shell_flg)):
shel_site_desc = specie.symbol + " shel " + " ".join(
coord) + "\n"
gin += shel_site_desc
else:
pass
if symm_flg:
gin += "space\n"
gin += str(SpacegroupAnalyzer(structure).get_space_group_number()) + "\n"
return gin
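# Minimal usage sketch (illustrative only): building a simple two-atom MgO cell
# and turning it into GULP structure lines with the method above. The lattice
# constant and the flags chosen here are assumptions made for the example.
#   lattice = Lattice.cubic(4.212)
#   structure = Structure(lattice, ["Mg", "O"], [[0, 0, 0], [0.5, 0.5, 0.5]])
#   gio = GulpIO()
#   lines = gio.structure_lines(structure, anion_shell_flg=False, symm_flg=False)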
def specie_potential_lines(self, structure, potential, **kwargs):
r"""
Generates GULP input specie and potential string for pymatgen
structure.
Args:
structure: pymatgen.core.structure.Structure object
potential: String specifying the type of potential used
\\*\\*kwargs: Additional parameters related to potential. For
potential == "buckingham",
anion_shell_flg (default = False):
If True, anions are considered polarizable.
anion_core_chrg=float
anion_shell_chrg=float
cation_shell_flg (default = False):
If True, cations are considered polarizable.
cation_core_chrg=float
cation_shell_chrg=float
Returns:
string containing specie and potential specification for gulp
input.
"""
raise NotImplementedError("gulp_specie_potential not yet implemented."
"\nUse library_line instead")
def library_line(self, file_name):
"""
Specifies GULP library file to read species and potential parameters.
If a library is used, do not specify species and potential
parameters in the input file, and vice versa. Make sure the elements of
the structure are present in the library file.
Args:
file_name: Name of GULP library file
Returns:
GULP input string specifying library option
"""
gulplib_set = 'GULP_LIB' in os.environ.keys()
def readable(f):
return os.path.isfile(f) and os.access(f, os.R_OK)
gin = ""
dirpath, fname = os.path.split(file_name)
if dirpath and readable(file_name): # Full path specified
gin = 'library ' + file_name
else:
fpath = os.path.join(os.getcwd(), file_name) # Check current dir
if readable(fpath):
gin = 'library ' + fpath
elif gulplib_set: # Check the GULP_LIB path
fpath = os.path.join(os.environ['GULP_LIB'], file_name)
if readable(fpath):
gin = 'library ' + file_name
if gin:
return gin + "\n"
else:
raise GulpError('GULP Library not found')
def buckingham_input(self, structure, keywords, library=None,
uc=True, valence_dict=None):
"""
Gets a GULP input for an oxide structure and buckingham potential
from library.
Args:
structure: pymatgen.core.structure.Structure
keywords: GULP first line keywords.
library (Default=None): File containing the species and potential.
uc (Default=True): Unit Cell Flag.
valence_dict: {El: valence}
"""
gin = self.keyword_line(*keywords)
gin += self.structure_lines(structure, symm_flg=not uc)
if not library:
gin += self.buckingham_potential(structure, valence_dict)
else:
gin += self.library_line(library)
return gin
def buckingham_potential(self, structure, val_dict=None):
"""
Generate species, buckingham, and spring options for an oxide structure
using the parameters in default libraries.
Ref:
1. G.V. Lewis and C.R.A. Catlow, J. Phys. C: Solid State Phys.,
18, 1149-1161 (1985)
2. T.S.Bush, J.D.Gale, C.R.A.Catlow and P.D. Battle,
J. Mater Chem., 4, 831-837 (1994)
Args:
structure: pymatgen.core.structure.Structure
val_dict (Needed if structure is not charge neutral): {El:valence}
dict, where El is element.
"""
if not val_dict:
try:
# If structure is oxidation state decorated, use that first.
el = [site.specie.symbol for site in structure]
valences = [site.specie.oxi_state for site in structure]
val_dict = dict(zip(el, valences))
except AttributeError:
bv = BVAnalyzer()
el = [site.specie.symbol for site in structure]
valences = bv.get_valences(structure)
val_dict = dict(zip(el, valences))
# Try bush library first
bpb = BuckinghamPotential('bush')
bpl = BuckinghamPotential('lewis')
gin = ""
for key in val_dict.keys():
use_bush = True
el = re.sub(r'[1-9,+,\-]', '', key)
if el not in bpb.species_dict.keys():
use_bush = False
elif val_dict[key] != bpb.species_dict[el]['oxi']:
use_bush = False
if use_bush:
gin += "species \n"
gin += bpb.species_dict[el]['inp_str']
gin += "buckingham \n"
gin += bpb.pot_dict[el]
gin += "spring \n"
gin += bpb.spring_dict[el]
continue
# Try lewis library next if element is not in bush
# use_lewis = True
if el != "O": # For metals the key is "Metal_OxiState+"
k = el + '_' + str(int(val_dict[key])) + '+'
if k not in bpl.species_dict.keys():
# use_lewis = False
raise GulpError("Element {} not in library".format(k))
gin += "species\n"
gin += bpl.species_dict[k]
gin += "buckingham\n"
gin += bpl.pot_dict[k]
else:
gin += "species\n"
k = "O_core"
gin += bpl.species_dict[k]
k = "O_shel"
gin += bpl.species_dict[k]
gin += "buckingham\n"
gin += bpl.pot_dict[key]
gin += 'spring\n'
gin += bpl.spring_dict[key]
return gin
def tersoff_input(self, structure, periodic=False, uc=True, *keywords):
"""
Gets a GULP input with Tersoff potential for an oxide structure
Args:
structure: pymatgen.core.structure.Structure
periodic (Default=False): Flag denoting whether periodic
boundary conditions are used
library (Default=None): File containing the species and potential.
uc (Default=True): Unit Cell Flag.
keywords: GULP first line keywords.
"""
# gin="static noelectrostatics \n "
gin = self.keyword_line(*keywords)
gin += self.structure_lines(
structure, cell_flg=periodic, frac_flg=periodic,
anion_shell_flg=False, cation_shell_flg=False, symm_flg=not uc
)
gin += self.tersoff_potential(structure)
return gin
def tersoff_potential(self, structure):
"""
Generate the species, tersoff potential lines for an oxide structure
Args:
structure: pymatgen.core.structure.Structure
"""
bv = BVAnalyzer()
el = [site.specie.symbol for site in structure]
valences = bv.get_valences(structure)
el_val_dict = dict(zip(el, valences))
gin = "species \n"
qerfstring = "qerfc\n"
for key in el_val_dict.keys():
if key != "O" and el_val_dict[key] % 1 != 0:
raise SystemError("Oxide has mixed valence on metal")
specie_string = key + " core " + str(el_val_dict[key]) + "\n"
gin += specie_string
qerfstring += key + " " + key + " 0.6000 10.0000 \n"
gin += "# noelectrostatics \n Morse \n"
met_oxi_ters = TersoffPotential().data
for key in el_val_dict.keys():
if key != "O":
metal = key + "(" + str(int(el_val_dict[key])) + ")"
ters_pot_str = met_oxi_ters[metal]
gin += ters_pot_str
gin += qerfstring
return gin
def get_energy(self, gout):
"""
Args:
gout (str): GULP output string.
Returns:
Energy
"""
energy = None
for line in gout.split("\n"):
if "Total lattice energy" in line and "eV" in line:
energy = line.split()
elif "Non-primitive unit cell" in line and "eV" in line:
energy = line.split()
if energy:
return float(energy[4])
else:
raise GulpError("Energy not found in Gulp output")
def get_relaxed_structure(self, gout):
"""
Args:
gout (str): GULP output string.
Returns:
(Structure) relaxed structure.
"""
# Find the structure lines
structure_lines = []
cell_param_lines = []
output_lines = gout.split("\n")
no_lines = len(output_lines)
i = 0
# Compute the input lattice parameters
while i < no_lines:
line = output_lines[i]
if "Full cell parameters" in line:
i += 2
line = output_lines[i]
a = float(line.split()[8])
alpha = float(line.split()[11])
line = output_lines[i + 1]
b = float(line.split()[8])
beta = float(line.split()[11])
line = output_lines[i + 2]
c = float(line.split()[8])
gamma = float(line.split()[11])
i += 3
break
elif "Cell parameters" in line:
i += 2
line = output_lines[i]
a = float(line.split()[2])
alpha = float(line.split()[5])
line = output_lines[i + 1]
b = float(line.split()[2])
beta = float(line.split()[5])
line = output_lines[i + 2]
c = float(line.split()[2])
gamma = float(line.split()[5])
i += 3
break
else:
i += 1
while i < no_lines:
line = output_lines[i]
if "Final fractional coordinates of atoms" in line:
# read the site coordinates in the following lines
i += 6
line = output_lines[i]
while line[0:2] != '--':
structure_lines.append(line)
i += 1
line = output_lines[i]
# read the cell parameters
i += 9
line = output_lines[i]
if "Final cell parameters" in line:
i += 3
for del_i in range(6):
line = output_lines[i + del_i]
cell_param_lines.append(line)
break
else:
i += 1
# Process the structure lines
if structure_lines:
sp = []
coords = []
for line in structure_lines:
fields = line.split()
if fields[2] == 'c':
sp.append(fields[1])
coords.append(list(float(x) for x in fields[3:6]))
else:
raise IOError("No structure found")
if cell_param_lines:
a = float(cell_param_lines[0].split()[1])
b = float(cell_param_lines[1].split()[1])
c = float(cell_param_lines[2].split()[1])
alpha = float(cell_param_lines[3].split()[1])
beta = float(cell_param_lines[4].split()[1])
gamma = float(cell_param_lines[5].split()[1])
latt = Lattice.from_parameters(a, b, c, alpha, beta, gamma)
return Structure(latt, sp, coords)
class GulpCaller:
"""
Class to run gulp from commandline
"""
def __init__(self, cmd='gulp'):
"""
Initialize with the executable if not in the standard path
Args:
cmd: Command. Defaults to gulp.
"""
def is_exe(f):
return os.path.isfile(f) and os.access(f, os.X_OK)
fpath, fname = os.path.split(cmd)
if fpath:
if is_exe(cmd):
self._gulp_cmd = cmd
return
else:
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
file = os.path.join(path, cmd)
if is_exe(file):
self._gulp_cmd = file
return
raise GulpError("Executable not found")
def run(self, gin):
"""
Run GULP using the gin as input
Args:
gin: GULP input string
Returns:
gout: GULP output string
"""
with ScratchDir("."):
p = subprocess.Popen(
self._gulp_cmd, stdout=subprocess.PIPE,
stdin=subprocess.PIPE, stderr=subprocess.PIPE
)
out, err = p.communicate(bytearray(gin, "utf-8"))
out = out.decode("utf-8")
err = err.decode("utf-8")
if "Error" in err or "error" in err:
print(gin)
print("----output_0---------")
print(out)
print("----End of output_0------\n\n\n")
print("----output_1--------")
print(out)
print("----End of output_1------")
raise GulpError(err)
# We may not need this
if "ERROR" in out:
raise GulpError(out)
# Sometimes optimisation may fail to reach convergence
conv_err_string = "Conditions for a minimum have not been satisfied"
if conv_err_string in out:
raise GulpConvergenceError()
gout = ""
for line in out.split("\n"):
gout = gout + line + "\n"
return gout
def get_energy_tersoff(structure, gulp_cmd='gulp'):
"""
Compute the energy of a structure using Tersoff potential.
Args:
structure: pymatgen.core.structure.Structure
gulp_cmd: GULP command if not in standard place
"""
gio = GulpIO()
gc = GulpCaller(gulp_cmd)
gin = gio.tersoff_input(structure)
gout = gc.run(gin)
return gio.get_energy(gout)
def get_energy_buckingham(structure, gulp_cmd='gulp',
keywords=('optimise', 'conp', 'qok'),
valence_dict=None):
"""
Compute the energy of a structure using Buckingham potential.
Args:
structure: pymatgen.core.structure.Structure
gulp_cmd: GULP command if not in standard place
keywords: GULP first line keywords
valence_dict: {El: valence}. Needed if the structure is not charge
neutral.
"""
gio = GulpIO()
gc = GulpCaller(gulp_cmd)
gin = gio.buckingham_input(
structure, keywords, valence_dict=valence_dict
)
gout = gc.run(gin)
return gio.get_energy(gout)
def get_energy_relax_structure_buckingham(structure,
gulp_cmd='gulp',
keywords=('optimise', 'conp'),
valence_dict=None):
"""
Relax a structure and compute the energy using Buckingham potential.
Args:
structure: pymatgen.core.structure.Structure
gulp_cmd: GULP command if not in standard place
keywords: GULP first line keywords
valence_dict: {El: valence}. Needed if the structure is not charge
neutral.
"""
gio = GulpIO()
gc = GulpCaller(gulp_cmd)
gin = gio.buckingham_input(
structure, keywords, valence_dict=valence_dict
)
gout = gc.run(gin)
energy = gio.get_energy(gout)
relax_structure = gio.get_relaxed_structure(gout)
return energy, relax_structure
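# Minimal usage sketch (illustrative; requires a working GULP install and, when a
# library file is used, the GULP_LIB environment variable). The function name and
# the printed message are assumptions made for the example.
def _example_relax_and_report(structure):
    # Relax the given pymatgen Structure with the Buckingham helper above and
    # report the GULP lattice energy in eV.
    energy, relaxed = get_energy_relax_structure_buckingham(structure)
    print("GULP relaxed energy (eV): {}".format(energy))
    return relaxed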
class GulpError(Exception):
"""
Exception class for GULP.
Raised when the GULP gives an error
"""
def __init__(self, msg):
"""
Args:
msg (str): Message
"""
self.msg = msg
def __str__(self):
return "GulpError : " + self.msg
class GulpConvergenceError(Exception):
"""
Exception class for GULP.
Raised when proper convergence is not reached in Mott-Littleton
defect energy optimisation procedure in GULP
"""
def __init__(self, msg=""):
"""
Args:
msg (str): Message
"""
self.msg = msg
def __str__(self):
return self.msg
class BuckinghamPotential:
"""
Generate the Buckingham Potential Table from the bush.lib and lewis.lib.
Ref:
T.S.Bush, J.D.Gale, C.R.A.Catlow and P.D. Battle, J. Mater Chem.,
4, 831-837 (1994).
G.V. Lewis and C.R.A. Catlow, J. Phys. C: Solid State Phys., 18,
1149-1161 (1985)
"""
def __init__(self, bush_lewis_flag):
"""
Args:
bush_lewis_flag (str): Flag for using Bush or Lewis potential.
"""
assert bush_lewis_flag in {'bush', 'lewis'}
pot_file = "bush.lib" if bush_lewis_flag == "bush" else "lewis.lib"
with open(os.path.join(os.environ["GULP_LIB"], pot_file), 'rt') as f:
# In lewis.lib there is no shell for cation
species_dict, pot_dict, spring_dict = {}, {}, {}
sp_flg, pot_flg, spring_flg = False, False, False
for row in f:
if row[0] == "#":
continue
if row.split()[0] == "species":
sp_flg, pot_flg, spring_flg = True, False, False
continue
if row.split()[0] == "buckingham":
sp_flg, pot_flg, spring_flg = False, True, False
continue
if row.split()[0] == "spring":
sp_flg, pot_flg, spring_flg = False, False, True
continue
elmnt = row.split()[0]
if sp_flg:
if bush_lewis_flag == "bush":
if elmnt not in species_dict.keys():
species_dict[elmnt] = {'inp_str': '', 'oxi': 0}
species_dict[elmnt]['inp_str'] += row
species_dict[elmnt]['oxi'] += float(row.split()[2])
elif bush_lewis_flag == "lewis":
if elmnt == "O":
if row.split()[1] == "core":
species_dict["O_core"] = row
if row.split()[1] == "shel":
species_dict["O_shel"] = row
else:
metal = elmnt.split('_')[0]
# oxi_state = metaloxi.split('_')[1][0]
species_dict[elmnt] = metal + " core " + row.split()[2] + "\n"
continue
if pot_flg:
if bush_lewis_flag == "bush":
pot_dict[elmnt] = row
elif bush_lewis_flag == "lewis":
if elmnt == "O":
pot_dict["O"] = row
else:
metal = elmnt.split('_')[0]
# oxi_state = metaloxi.split('_')[1][0]
pot_dict[elmnt] = metal + " " + " ".join(
row.split()[1:]) + "\n"
continue
if spring_flg:
spring_dict[elmnt] = row
if bush_lewis_flag == "bush":
# Fill the null keys in spring dict with empty strings
for key in pot_dict.keys():
if key not in spring_dict.keys():
spring_dict[key] = ""
self.species_dict = species_dict
self.pot_dict = pot_dict
self.spring_dict = spring_dict
class TersoffPotential:
"""
Generate the Tersoff potential table from the "OxideTersoffPotentials" file
"""
def __init__(self):
"""
Init TersoffPotential
"""
module_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(module_dir, "OxideTersoffPotentials"), "r") as f:
data = dict()
for row in f:
metaloxi = row.split()[0]
line = row.split(")")
data[metaloxi] = line[1]
self.data = data
|
gVallverdu/pymatgen
|
pymatgen/command_line/gulp_caller.py
|
Python
|
mit
| 26,619
|
[
"GULP",
"pymatgen"
] |
4c73ae2ab8d53f59079effd37c0ba04fbc6a8e794075e50dc9490fd1a0045be0
|
"""Support for Ecobee Thermostats."""
import collections
from typing import Optional
import voluptuous as vol
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_DRY,
CURRENT_HVAC_FAN,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
FAN_AUTO,
FAN_ON,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_NONE,
SUPPORT_AUX_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_TEMPERATURE,
STATE_ON,
TEMP_FAHRENHEIT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util.temperature import convert
from .const import _LOGGER, DOMAIN, ECOBEE_MODEL_TO_NAME, MANUFACTURER
from .util import ecobee_date, ecobee_time
ATTR_COOL_TEMP = "cool_temp"
ATTR_END_DATE = "end_date"
ATTR_END_TIME = "end_time"
ATTR_FAN_MIN_ON_TIME = "fan_min_on_time"
ATTR_FAN_MODE = "fan_mode"
ATTR_HEAT_TEMP = "heat_temp"
ATTR_RESUME_ALL = "resume_all"
ATTR_START_DATE = "start_date"
ATTR_START_TIME = "start_time"
ATTR_VACATION_NAME = "vacation_name"
DEFAULT_RESUME_ALL = False
PRESET_TEMPERATURE = "temp"
PRESET_VACATION = "vacation"
PRESET_HOLD_NEXT_TRANSITION = "next_transition"
PRESET_HOLD_INDEFINITE = "indefinite"
AWAY_MODE = "awayMode"
PRESET_HOME = "home"
PRESET_SLEEP = "sleep"
# Order matters, because for reverse mapping we don't want to map HEAT to AUX
ECOBEE_HVAC_TO_HASS = collections.OrderedDict(
[
("heat", HVAC_MODE_HEAT),
("cool", HVAC_MODE_COOL),
("auto", HVAC_MODE_AUTO),
("off", HVAC_MODE_OFF),
("auxHeatOnly", HVAC_MODE_HEAT),
]
)
ECOBEE_HVAC_ACTION_TO_HASS = {
# Map to None if we do not know how to represent.
"heatPump": CURRENT_HVAC_HEAT,
"heatPump2": CURRENT_HVAC_HEAT,
"heatPump3": CURRENT_HVAC_HEAT,
"compCool1": CURRENT_HVAC_COOL,
"compCool2": CURRENT_HVAC_COOL,
"auxHeat1": CURRENT_HVAC_HEAT,
"auxHeat2": CURRENT_HVAC_HEAT,
"auxHeat3": CURRENT_HVAC_HEAT,
"fan": CURRENT_HVAC_FAN,
"humidifier": None,
"dehumidifier": CURRENT_HVAC_DRY,
"ventilator": CURRENT_HVAC_FAN,
"economizer": CURRENT_HVAC_FAN,
"compHotWater": None,
"auxHotWater": None,
}
PRESET_TO_ECOBEE_HOLD = {
PRESET_HOLD_NEXT_TRANSITION: "nextTransition",
PRESET_HOLD_INDEFINITE: "indefinite",
}
SERVICE_CREATE_VACATION = "create_vacation"
SERVICE_DELETE_VACATION = "delete_vacation"
SERVICE_RESUME_PROGRAM = "resume_program"
SERVICE_SET_FAN_MIN_ON_TIME = "set_fan_min_on_time"
DTGROUP_INCLUSIVE_MSG = (
f"{ATTR_START_DATE}, {ATTR_START_TIME}, {ATTR_END_DATE}, "
f"and {ATTR_END_TIME} must be specified together"
)
CREATE_VACATION_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_VACATION_NAME): vol.All(cv.string, vol.Length(max=12)),
vol.Required(ATTR_COOL_TEMP): vol.Coerce(float),
vol.Required(ATTR_HEAT_TEMP): vol.Coerce(float),
vol.Inclusive(
ATTR_START_DATE, "dtgroup", msg=DTGROUP_INCLUSIVE_MSG
): ecobee_date,
vol.Inclusive(
ATTR_START_TIME, "dtgroup", msg=DTGROUP_INCLUSIVE_MSG
): ecobee_time,
vol.Inclusive(ATTR_END_DATE, "dtgroup", msg=DTGROUP_INCLUSIVE_MSG): ecobee_date,
vol.Inclusive(ATTR_END_TIME, "dtgroup", msg=DTGROUP_INCLUSIVE_MSG): ecobee_time,
vol.Optional(ATTR_FAN_MODE, default="auto"): vol.Any("auto", "on"),
vol.Optional(ATTR_FAN_MIN_ON_TIME, default=0): vol.All(
int, vol.Range(min=0, max=60)
),
}
)
DELETE_VACATION_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_VACATION_NAME): vol.All(cv.string, vol.Length(max=12)),
}
)
RESUME_PROGRAM_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_RESUME_ALL, default=DEFAULT_RESUME_ALL): cv.boolean,
}
)
SET_FAN_MIN_ON_TIME_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_FAN_MIN_ON_TIME): vol.Coerce(int),
}
)
SUPPORT_FLAGS = (
SUPPORT_TARGET_TEMPERATURE
| SUPPORT_PRESET_MODE
| SUPPORT_AUX_HEAT
| SUPPORT_TARGET_TEMPERATURE_RANGE
| SUPPORT_FAN_MODE
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the ecobee thermostat."""
data = hass.data[DOMAIN]
devices = [Thermostat(data, index) for index in range(len(data.ecobee.thermostats))]
async_add_entities(devices, True)
def create_vacation_service(service):
"""Create a vacation on the target thermostat."""
entity_id = service.data[ATTR_ENTITY_ID]
for thermostat in devices:
if thermostat.entity_id == entity_id:
thermostat.create_vacation(service.data)
thermostat.schedule_update_ha_state(True)
break
def delete_vacation_service(service):
"""Delete a vacation on the target thermostat."""
entity_id = service.data[ATTR_ENTITY_ID]
vacation_name = service.data[ATTR_VACATION_NAME]
for thermostat in devices:
if thermostat.entity_id == entity_id:
thermostat.delete_vacation(vacation_name)
thermostat.schedule_update_ha_state(True)
break
def fan_min_on_time_set_service(service):
"""Set the minimum fan on time on the target thermostats."""
entity_id = service.data.get(ATTR_ENTITY_ID)
fan_min_on_time = service.data[ATTR_FAN_MIN_ON_TIME]
if entity_id:
target_thermostats = [
device for device in devices if device.entity_id in entity_id
]
else:
target_thermostats = devices
for thermostat in target_thermostats:
thermostat.set_fan_min_on_time(str(fan_min_on_time))
thermostat.schedule_update_ha_state(True)
def resume_program_set_service(service):
"""Resume the program on the target thermostats."""
entity_id = service.data.get(ATTR_ENTITY_ID)
resume_all = service.data.get(ATTR_RESUME_ALL)
if entity_id:
target_thermostats = [
device for device in devices if device.entity_id in entity_id
]
else:
target_thermostats = devices
for thermostat in target_thermostats:
thermostat.resume_program(resume_all)
thermostat.schedule_update_ha_state(True)
hass.services.async_register(
DOMAIN,
SERVICE_CREATE_VACATION,
create_vacation_service,
schema=CREATE_VACATION_SCHEMA,
)
hass.services.async_register(
DOMAIN,
SERVICE_DELETE_VACATION,
delete_vacation_service,
schema=DELETE_VACATION_SCHEMA,
)
hass.services.async_register(
DOMAIN,
SERVICE_SET_FAN_MIN_ON_TIME,
fan_min_on_time_set_service,
schema=SET_FAN_MIN_ON_TIME_SCHEMA,
)
hass.services.async_register(
DOMAIN,
SERVICE_RESUME_PROGRAM,
resume_program_set_service,
schema=RESUME_PROGRAM_SCHEMA,
)
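# Illustrative service payload (all values hypothetical) matching
# CREATE_VACATION_SCHEMA above, as it might be passed to the
# ecobee.create_vacation service from an automation:
#   {
#       "entity_id": "climate.living_room",
#       "vacation_name": "skiing",
#       "cool_temp": 78.0,
#       "heat_temp": 60.0,
#       "start_date": "2022-01-01",
#       "start_time": "10:00:00",
#       "end_date": "2022-01-10",
#       "end_time": "17:00:00",
#   }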
class Thermostat(ClimateDevice):
"""A thermostat class for Ecobee."""
def __init__(self, data, thermostat_index):
"""Initialize the thermostat."""
self.data = data
self.thermostat_index = thermostat_index
self.thermostat = self.data.ecobee.get_thermostat(self.thermostat_index)
self._name = self.thermostat["name"]
self.vacation = None
self._last_active_hvac_mode = HVAC_MODE_AUTO
self._operation_list = []
if (
self.thermostat["settings"]["heatStages"]
or self.thermostat["settings"]["hasHeatPump"]
):
self._operation_list.append(HVAC_MODE_HEAT)
if self.thermostat["settings"]["coolStages"]:
self._operation_list.append(HVAC_MODE_COOL)
if len(self._operation_list) == 2:
self._operation_list.insert(0, HVAC_MODE_AUTO)
self._operation_list.append(HVAC_MODE_OFF)
self._preset_modes = {
comfort["climateRef"]: comfort["name"]
for comfort in self.thermostat["program"]["climates"]
}
self._fan_modes = [FAN_AUTO, FAN_ON]
self.update_without_throttle = False
async def async_update(self):
"""Get the latest state from the thermostat."""
if self.update_without_throttle:
await self.data.update(no_throttle=True)
self.update_without_throttle = False
else:
await self.data.update()
self.thermostat = self.data.ecobee.get_thermostat(self.thermostat_index)
if self.hvac_mode is not HVAC_MODE_OFF:
self._last_active_hvac_mode = self.hvac_mode
@property
def available(self):
"""Return if device is available."""
return self.thermostat["runtime"]["connected"]
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def name(self):
"""Return the name of the Ecobee Thermostat."""
return self.thermostat["name"]
@property
def unique_id(self):
"""Return a unique identifier for this ecobee thermostat."""
return self.thermostat["identifier"]
@property
def device_info(self):
"""Return device information for this ecobee thermostat."""
try:
model = f"{ECOBEE_MODEL_TO_NAME[self.thermostat['modelNumber']]} Thermostat"
except KeyError:
_LOGGER.error(
"Model number for ecobee thermostat %s not recognized. "
"Please visit this link and provide the following information: "
"https://github.com/home-assistant/home-assistant/issues/27172 "
"Unrecognized model number: %s",
self.name,
self.thermostat["modelNumber"],
)
return None
return {
"identifiers": {(DOMAIN, self.thermostat["identifier"])},
"name": self.name,
"manufacturer": MANUFACTURER,
"model": model,
}
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def current_temperature(self):
"""Return the current temperature."""
return self.thermostat["runtime"]["actualTemperature"] / 10.0
@property
def target_temperature_low(self):
"""Return the lower bound temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_AUTO:
return self.thermostat["runtime"]["desiredHeat"] / 10.0
return None
@property
def target_temperature_high(self):
"""Return the upper bound temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_AUTO:
return self.thermostat["runtime"]["desiredCool"] / 10.0
return None
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_AUTO:
return None
if self.hvac_mode == HVAC_MODE_HEAT:
return self.thermostat["runtime"]["desiredHeat"] / 10.0
if self.hvac_mode == HVAC_MODE_COOL:
return self.thermostat["runtime"]["desiredCool"] / 10.0
return None
@property
def fan(self):
"""Return the current fan status."""
if "fan" in self.thermostat["equipmentStatus"]:
return STATE_ON
return HVAC_MODE_OFF
@property
def fan_mode(self):
"""Return the fan setting."""
return self.thermostat["runtime"]["desiredFanMode"]
@property
def fan_modes(self):
"""Return the available fan modes."""
return self._fan_modes
@property
def preset_mode(self):
"""Return current preset mode."""
events = self.thermostat["events"]
for event in events:
if not event["running"]:
continue
if event["type"] == "hold":
if event["holdClimateRef"] in self._preset_modes:
return self._preset_modes[event["holdClimateRef"]]
# Any hold not based on a climate is a temp hold
return PRESET_TEMPERATURE
if event["type"].startswith("auto"):
# All auto modes are treated as holds
return event["type"][4:].lower()
if event["type"] == "vacation":
self.vacation = event["name"]
return PRESET_VACATION
return self._preset_modes[self.thermostat["program"]["currentClimateRef"]]
@property
def hvac_mode(self):
"""Return current operation."""
return ECOBEE_HVAC_TO_HASS[self.thermostat["settings"]["hvacMode"]]
@property
def hvac_modes(self):
"""Return the operation modes list."""
return self._operation_list
@property
def current_humidity(self) -> Optional[int]:
"""Return the current humidity."""
return self.thermostat["runtime"]["actualHumidity"]
@property
def hvac_action(self):
"""Return current HVAC action.
Ecobee returns a CSV string with different equipment that is active.
We are prioritizing any heating/cooling equipment, otherwise look at
drying/fanning. Idle if nothing is going on.
We are unable to map all actions to HA equivalents.
"""
if self.thermostat["equipmentStatus"] == "":
return CURRENT_HVAC_IDLE
actions = [
ECOBEE_HVAC_ACTION_TO_HASS[status]
for status in self.thermostat["equipmentStatus"].split(",")
if ECOBEE_HVAC_ACTION_TO_HASS[status] is not None
]
for action in (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_COOL,
CURRENT_HVAC_DRY,
CURRENT_HVAC_FAN,
):
if action in actions:
return action
return CURRENT_HVAC_IDLE
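# Illustrative example (assuming the ECOBEE_HVAC_ACTION_TO_HASS mapping defined
# with the other constants includes "fan" and "compCool1"): an equipmentStatus of
# "fan,compCool1" yields [CURRENT_HVAC_FAN, CURRENT_HVAC_COOL], and the priority
# loop above reports CURRENT_HVAC_COOL rather than the fan action.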
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
status = self.thermostat["equipmentStatus"]
return {
"fan": self.fan,
"climate_mode": self._preset_modes[
self.thermostat["program"]["currentClimateRef"]
],
"equipment_running": status,
"fan_min_on_time": self.thermostat["settings"]["fanMinOnTime"],
}
@property
def is_aux_heat(self):
"""Return true if aux heater."""
return "auxHeat" in self.thermostat["equipmentStatus"]
def set_preset_mode(self, preset_mode):
"""Activate a preset."""
if preset_mode == self.preset_mode:
return
self.update_without_throttle = True
# If we are currently in vacation mode, cancel it.
if self.preset_mode == PRESET_VACATION:
self.data.ecobee.delete_vacation(self.thermostat_index, self.vacation)
if preset_mode == PRESET_AWAY:
self.data.ecobee.set_climate_hold(
self.thermostat_index, "away", "indefinite"
)
elif preset_mode == PRESET_TEMPERATURE:
self.set_temp_hold(self.current_temperature)
elif preset_mode in (PRESET_HOLD_NEXT_TRANSITION, PRESET_HOLD_INDEFINITE):
self.data.ecobee.set_climate_hold(
self.thermostat_index,
PRESET_TO_ECOBEE_HOLD[preset_mode],
self.hold_preference(),
)
elif preset_mode == PRESET_NONE:
self.data.ecobee.resume_program(self.thermostat_index)
elif preset_mode in self.preset_modes:
climate_ref = None
for comfort in self.thermostat["program"]["climates"]:
if comfort["name"] == preset_mode:
climate_ref = comfort["climateRef"]
break
if climate_ref is not None:
self.data.ecobee.set_climate_hold(
self.thermostat_index, climate_ref, self.hold_preference()
)
else:
_LOGGER.warning("Received unknown preset mode: %s", preset_mode)
else:
self.data.ecobee.set_climate_hold(
self.thermostat_index, preset_mode, self.hold_preference()
)
@property
def preset_modes(self):
"""Return available preset modes."""
return list(self._preset_modes.values())
def set_auto_temp_hold(self, heat_temp, cool_temp):
"""Set temperature hold in auto mode."""
if cool_temp is not None:
cool_temp_setpoint = cool_temp
else:
cool_temp_setpoint = self.thermostat["runtime"]["desiredCool"] / 10.0
if heat_temp is not None:
heat_temp_setpoint = heat_temp
else:
heat_temp_setpoint = self.thermostat["runtime"]["desiredHeat"] / 10.0
self.data.ecobee.set_hold_temp(
self.thermostat_index,
cool_temp_setpoint,
heat_temp_setpoint,
self.hold_preference(),
)
_LOGGER.debug(
"Setting ecobee hold_temp to: heat=%s, is=%s, cool=%s, is=%s",
heat_temp,
isinstance(heat_temp, (int, float)),
cool_temp,
isinstance(cool_temp, (int, float)),
)
self.update_without_throttle = True
def set_fan_mode(self, fan_mode):
"""Set the fan mode. Valid values are "on" or "auto"."""
if fan_mode.lower() != STATE_ON and fan_mode.lower() != HVAC_MODE_AUTO:
error = "Invalid fan_mode value: Valid values are 'on' or 'auto'"
_LOGGER.error(error)
return
cool_temp = self.thermostat["runtime"]["desiredCool"] / 10.0
heat_temp = self.thermostat["runtime"]["desiredHeat"] / 10.0
self.data.ecobee.set_fan_mode(
self.thermostat_index,
fan_mode,
cool_temp,
heat_temp,
self.hold_preference(),
)
_LOGGER.info("Setting fan mode to: %s", fan_mode)
def set_temp_hold(self, temp):
"""Set temperature hold in modes other than auto.
Ecobee API: It is good practice to set the heat and cool hold
temperatures to be the same, if the thermostat is in either heat, cool,
auxHeatOnly, or off mode. If the thermostat is in auto mode, an
additional rule is required. The cool hold temperature must be greater
than the heat hold temperature by at least the amount in the
heatCoolMinDelta property.
https://www.ecobee.com/home/developer/api/examples/ex5.shtml
"""
if self.hvac_mode == HVAC_MODE_HEAT or self.hvac_mode == HVAC_MODE_COOL:
heat_temp = temp
cool_temp = temp
else:
delta = self.thermostat["settings"]["heatCoolMinDelta"] / 10
heat_temp = temp - delta
cool_temp = temp + delta
self.set_auto_temp_hold(heat_temp, cool_temp)
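# Illustrative example (hypothetical values): in auto mode with a
# heatCoolMinDelta of 50 (i.e. 5.0 degrees), set_temp_hold(70) requests
# heat_temp=65.0 and cool_temp=75.0, which satisfies the minimum-delta rule
# quoted in the docstring above.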
def set_temperature(self, **kwargs):
"""Set new target temperature."""
low_temp = kwargs.get(ATTR_TARGET_TEMP_LOW)
high_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH)
temp = kwargs.get(ATTR_TEMPERATURE)
if self.hvac_mode == HVAC_MODE_AUTO and (
low_temp is not None or high_temp is not None
):
self.set_auto_temp_hold(low_temp, high_temp)
elif temp is not None:
self.set_temp_hold(temp)
else:
_LOGGER.error("Missing valid arguments for set_temperature in %s", kwargs)
def set_humidity(self, humidity):
"""Set the humidity level."""
self.data.ecobee.set_humidity(self.thermostat_index, humidity)
def set_hvac_mode(self, hvac_mode):
"""Set HVAC mode (auto, auxHeatOnly, cool, heat, off)."""
ecobee_value = next(
(k for k, v in ECOBEE_HVAC_TO_HASS.items() if v == hvac_mode), None
)
if ecobee_value is None:
_LOGGER.error("Invalid mode for set_hvac_mode: %s", hvac_mode)
return
self.data.ecobee.set_hvac_mode(self.thermostat_index, ecobee_value)
self.update_without_throttle = True
def set_fan_min_on_time(self, fan_min_on_time):
"""Set the minimum fan on time."""
self.data.ecobee.set_fan_min_on_time(self.thermostat_index, fan_min_on_time)
self.update_without_throttle = True
def resume_program(self, resume_all):
"""Resume the thermostat schedule program."""
self.data.ecobee.resume_program(
self.thermostat_index, "true" if resume_all else "false"
)
self.update_without_throttle = True
def hold_preference(self):
"""Return user preference setting for hold time."""
# Values returned from thermostat are 'useEndTime4hour',
# 'useEndTime2hour', 'nextTransition', 'indefinite', 'askMe'
default = self.thermostat["settings"]["holdAction"]
if default == "nextTransition":
return default
# add further conditions if other hold durations should be
# supported; note that this should not include 'indefinite'
# as an indefinite away hold is interpreted as away_mode
return "nextTransition"
def create_vacation(self, service_data):
"""Create a vacation with user-specified parameters."""
vacation_name = service_data[ATTR_VACATION_NAME]
cool_temp = convert(
service_data[ATTR_COOL_TEMP],
self.hass.config.units.temperature_unit,
TEMP_FAHRENHEIT,
)
heat_temp = convert(
service_data[ATTR_HEAT_TEMP],
self.hass.config.units.temperature_unit,
TEMP_FAHRENHEIT,
)
start_date = service_data.get(ATTR_START_DATE)
start_time = service_data.get(ATTR_START_TIME)
end_date = service_data.get(ATTR_END_DATE)
end_time = service_data.get(ATTR_END_TIME)
fan_mode = service_data[ATTR_FAN_MODE]
fan_min_on_time = service_data[ATTR_FAN_MIN_ON_TIME]
kwargs = {
key: value
for key, value in {
"start_date": start_date,
"start_time": start_time,
"end_date": end_date,
"end_time": end_time,
"fan_mode": fan_mode,
"fan_min_on_time": fan_min_on_time,
}.items()
if value is not None
}
_LOGGER.debug(
"Creating a vacation on thermostat %s with name %s, cool temp %s, heat temp %s, "
"and the following other parameters: %s",
self.name,
vacation_name,
cool_temp,
heat_temp,
kwargs,
)
self.data.ecobee.create_vacation(
self.thermostat_index, vacation_name, cool_temp, heat_temp, **kwargs
)
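# Note on the call above: the dict comprehension drops any optional parameter the
# service call left unset, so only explicitly provided start/end dates and times,
# fan mode and fan_min_on_time are forwarded to the ecobee API client's
# create_vacation call.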
def delete_vacation(self, vacation_name):
"""Delete a vacation with the specified name."""
_LOGGER.debug(
"Deleting a vacation on thermostat %s with name %s",
self.name,
vacation_name,
)
self.data.ecobee.delete_vacation(self.thermostat_index, vacation_name)
def turn_on(self):
"""Set the thermostat to the last active HVAC mode."""
_LOGGER.debug(
"Turning on ecobee thermostat %s in %s mode",
self.name,
self._last_active_hvac_mode,
)
self.set_hvac_mode(self._last_active_hvac_mode)
|
Teagan42/home-assistant
|
homeassistant/components/ecobee/climate.py
|
Python
|
apache-2.0
| 23,976
|
[
"VisIt"
] |
b093ce9505bc0378c5d0f65b88c2067744a782e9c88e63f309bc499b0c2bb284
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Zsolt Foldvari
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2009 Brian Matherly
# Copyright (C) 2010 Peter Landgren
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011-2017 Paul Franklin
# Copyright (C) 2012 Craig Anderson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Report output generator based on Cairo.
"""
#------------------------------------------------------------------------
#
# Python modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from math import radians
import re
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.plug.docgen import (BaseDoc, TextDoc, DrawDoc, ParagraphStyle,
TableCellStyle, SOLID, FONT_SANS_SERIF, FONT_SERIF,
FONT_MONOSPACE, PARA_ALIGN_CENTER, PARA_ALIGN_LEFT)
from gramps.gen.plug.report import utils
from gramps.gen.errors import PluginError
from gramps.gen.plug.docbackend import CairoBackend
from gramps.gen.utils.image import resize_to_buffer
from gramps.gui.utils import SystemFonts
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
log = logging.getLogger(".libcairodoc")
#-------------------------------------------------------------------------
#
# Pango modules
#
#-------------------------------------------------------------------------
from gi.repository import Pango, PangoCairo
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
# each element draws some extra information useful for debugging
DEBUG = False
#------------------------------------------------------------------------
#
# Font selection
#
#------------------------------------------------------------------------
_TTF_FREEFONT = {
FONT_SERIF: 'FreeSerif',
FONT_SANS_SERIF: 'FreeSans',
FONT_MONOSPACE: 'FreeMono',
}
_MS_TTFONT = {
FONT_SERIF: 'Times New Roman',
FONT_SANS_SERIF: 'Arial',
FONT_MONOSPACE: 'Courier New',
}
_GNOME_FONT = {
FONT_SERIF: 'Serif',
FONT_SANS_SERIF: 'Sans',
FONT_MONOSPACE: 'Monospace',
}
font_families = _GNOME_FONT
# FIXME debug logging does not work here.
def set_font_families():
"""Set the used font families depending on availability.
"""
global font_families
fonts = SystemFonts()
family_names = fonts.get_system_fonts()
fam = [f for f in _TTF_FREEFONT.values() if f in family_names]
if len(fam) == len(_TTF_FREEFONT):
font_families = _TTF_FREEFONT
log.debug('Using FreeFonts: %s' % font_families)
return
fam = [f for f in _MS_TTFONT.values() if f in family_names]
if len(fam) == len(_MS_TTFONT):
font_families = _MS_TTFONT
log.debug('Using MS TrueType fonts: %s' % font_families)
return
fam = [f for f in _GNOME_FONT.values() if f in family_names]
if len(fam) == len(_GNOME_FONT):
font_families = _GNOME_FONT
log.debug('Using Gnome fonts: %s' % font_families)
return
log.debug('No fonts found.')
set_font_families()
#------------------------------------------------------------------------
#
# Converter functions
#
#------------------------------------------------------------------------
def fontstyle_to_fontdescription(font_style):
"""Convert a FontStyle instance to a Pango.FontDescription one.
Font color and underline are not implemented in Pango.FontDescription,
and have to be set with Pango.Layout.set_attributes(attrlist) method.
"""
if font_style.get_bold():
f_weight = Pango.Weight.BOLD
else:
f_weight = Pango.Weight.NORMAL
if font_style.get_italic():
f_style = Pango.Style.ITALIC
else:
f_style = Pango.Style.NORMAL
font_description = Pango.FontDescription(font_families[font_style.face])
font_description.set_size(int(round(font_style.get_size() * Pango.SCALE)))
font_description.set_weight(f_weight)
font_description.set_style(f_style)
return font_description
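# Illustrative usage sketch (hypothetical `font_style`, a gramps FontStyle with
# bold serif text at 12pt):
#     desc = fontstyle_to_fontdescription(font_style)
#     # desc now carries the mapped family (e.g. 'FreeSerif' when the FreeFonts
#     # are available), size 12 * Pango.SCALE, Pango.Weight.BOLD and normal style.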
def tabstops_to_tabarray(tab_stops, dpi):
"""Convert a list of tabs given in cm to a Pango.TabArray.
"""
tab_array = Pango.TabArray.new(initial_size=len(tab_stops),
positions_in_pixels=False)
for index, tab_stop in enumerate(tab_stops):
location = tab_stop * dpi * Pango.SCALE / 2.54
tab_array.set_tab(index, Pango.TabAlign.LEFT, int(location))
return tab_array
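# Illustrative example (hypothetical values): a single tab stop at 1.0 cm with
# dpi = 96 is stored as int(1.0 * 96 * Pango.SCALE / 2.54) Pango units
# (roughly 38700), since positions_in_pixels=False means Pango units are used.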
def raw_length(s):
"""
Return the length of the raw string after all pango markup has been removed.
"""
s = re.sub('<.*?>', '', s)
s = s.replace('&amp;', '&')
s = s.replace('&lt;', '<')
s = s.replace('&gt;', '>')
s = s.replace('&quot;', '"')
s = s.replace('&apos;', "'")
return len(s)
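# Illustrative example: raw_length('<b>A &amp; B</b>') returns 5, because the
# tag stripping removes '<b>' and '</b>' and the entity replacements above
# collapse '&amp;' to a single '&'.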
###------------------------------------------------------------------------
###
### Table row style
###
###------------------------------------------------------------------------
##class RowStyle(list):
##"""Specifies the format of a table row.
##RowStyle extends the available styles in
##The RowStyle contains the width of each column as a percentage of the
##width of the full row. Note! The width of the row is not known until
##divide() or draw() method is called.
##"""
##def __init__(self):
##self.columns = []
##def set_columns(self, columns):
##"""Set the number of columns.
##@param columns: number of columns that should be used.
##@param type: int
##"""
##self.columns = columns
##def get_columns(self):
##"""Return the number of columns.
##"""
##return self.columns
##def set_column_widths(self, clist):
##"""Set the width of all the columns at once.
##@param clist: list of width of columns in % of the full row.
##@param type: list
##"""
##self.columns = len(clist)
##for i in range(self.columns):
##self.colwid[i] = clist[i]
##def set_column_width(self, index, width):
##"""
##Set the width of a specified column to the specified width.
##@param index: column being set (index starts at 0)
##@param width: percentage of the table width assigned to the column
##"""
##self.colwid[index] = width
##def get_column_width(self, index):
##"""
##Return the column width of the specified column as a percentage of
##the entire table width.
##@param index: column to return (index starts at 0)
##"""
##return self.colwid[index]
class FrameStyle:
"""Define the style properties of a Frame.
- width: Width of the frame in cm.
- height: Height of the frame in cm.
- align: Horizontal position to entire page.
Available values: 'left','center', 'right'.
- spacing: Tuple of spacing around the frame in cm. Order of values:
(left, right, top, bottom).
"""
def __init__(self, width=0, height=0, align='left', spacing=(0, 0, 0, 0)):
self.width = width
self.height = height
self.align = align
self.spacing = spacing
#------------------------------------------------------------------------
#
# Document element classes
#
#------------------------------------------------------------------------
class GtkDocBaseElement:
"""Base of all document elements.
Support document element structuring and can render itself onto
a Cairo surface.
There are two categories of methods:
1. hierarchy building methods (add_child, get_children, set_parent,
get_parent);
2. rendering methods (divide, draw).
The hierarchy building methods generally don't have to be overridden in
the subclass, while the rendering methods (divide, draw) must be
implemented in the subclasses.
"""
_type = 'BASE'
_allowed_children = []
def __init__(self, style=None):
self._parent = None
self._children = []
self._style = style
def get_type(self):
"""Get the type of this element.
"""
return self._type
def set_parent(self, parent):
"""Set the parent element of this element.
"""
self._parent = parent
def get_parent(self):
"""Get the parent element of this element.
"""
return self._parent
def add_child(self, element):
"""Add a child element.
Returns False if the child cannot be added (e.g. not an allowed type),
or True otherwise.
"""
# check if it is an allowed child for this type
if element.get_type() not in self._allowed_children:
log.debug("%r is not an allowed child for %r" %
(element.__class__, self.__class__))
return False
# append the child and set its parent
self._children.append(element)
element.set_parent(self)
return True
def get_children(self):
"""Get the list of children of this element.
"""
return self._children
def get_marks(self):
"""Get the list of index marks for this element.
"""
marks = []
for child in self._children:
marks.extend(child.get_marks())
return marks
def divide(self, layout, width, height, dpi_x, dpi_y):
"""Divide the element into two depending on available space.
@param layout: pango layout to write on
@param type: Pango.Layout
@param width: width of available space for this element
@param type: device points
@param height: height of available space for this element
@param type: device points
@param dpi_x: the horizontal resolution
@param type: dots per inch
@param dpi_y: the vertical resolution
@param type: dots per inch
@return: the divided element, and the height of the first part
@rtype: (GtkDocXXX-1, GtkDocXXX-2), device points
"""
raise NotImplementedError
def draw(self, cairo_context, pango_layout, width, dpi_x, dpi_y):
"""Draw itself onto a cairo surface.
@param cairo_context: context to draw on
@param type: cairo.Context class
@param pango_layout: pango layout to write on
@param type: Pango.Layout class
@param width: width of available space for this element
@param type: device points
@param dpi_x: the horizontal resolution
@param type: dots per inch
@param dpi_y: the vertical resolution
@param type: dots per inch
@return: height of the element
@rtype: device points
"""
raise NotImplementedError
class GtkDocDocument(GtkDocBaseElement):
"""The whole document or a page.
"""
_type = 'DOCUMENT'
_allowed_children = ['PARAGRAPH', 'PAGEBREAK', 'TABLE', 'IMAGE', 'FRAME',
'TOC', 'INDEX']
def draw(self, cairo_context, pango_layout, width, dpi_x, dpi_y):
x = y = elem_height = 0
for elem in self._children:
cairo_context.translate(x, elem_height)
elem_height = elem.draw(cairo_context, pango_layout,
width, dpi_x, dpi_y)
y += elem_height
return y
def has_toc(self):
for elem in self._children:
if elem.get_type() == 'TOC':
return True
return False
def has_index(self):
for elem in self._children:
if elem.get_type() == 'INDEX':
return True
return False
class GtkDocPagebreak(GtkDocBaseElement):
"""Implement a page break.
"""
_type = 'PAGEBREAK'
_allowed_children = []
def divide(self, layout, width, height, dpi_x, dpi_y):
return (None, None), 0
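# Note on the return value above: handing back (None, None) with zero height makes
# the paginator start a new page (e1 is None) without re-queueing the pagebreak
# element itself, so the break is consumed exactly once (see paginate() below).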
class GtkDocTableOfContents(GtkDocBaseElement):
"""Implement a table of contents.
"""
_type = 'TOC'
_allowed_children = []
def divide(self, layout, width, height, dpi_x, dpi_y):
return (self, None), 0
def draw(self, cr, layout, width, dpi_x, dpi_y):
return 0
class GtkDocAlphabeticalIndex(GtkDocBaseElement):
"""Implement an alphabetical index.
"""
_type = 'INDEX'
_allowed_children = []
def divide(self, layout, width, height, dpi_x, dpi_y):
return (self, None), 0
def draw(self, cr, layout, width, dpi_x, dpi_y):
return 0
class GtkDocParagraph(GtkDocBaseElement):
"""Paragraph.
"""
_type = 'PARAGRAPH'
_allowed_children = []
# line spacing is not defined in ParagraphStyle
spacingfractionfont = 0.2
def __init__(self, style, leader=None):
GtkDocBaseElement.__init__(self, style)
if leader:
self._text = leader + '\t'
# FIXME append new tab to the existing tab list
self._style.set_tabs([-1 * self._style.get_first_indent()])
else:
self._text = ''
self._plaintext = None
self._attrlist = None
self._marklist = []
def add_text(self, text):
if self._plaintext is not None:
raise PluginError('CairoDoc: text is already parsed.'
' You cannot add text anymore')
self._text = self._text + text
def add_mark(self, mark):
"""
Add an index mark to this paragraph
"""
self._marklist.append((mark, raw_length(self._text)))
def get_marks(self):
"""
Return a list of index marks for this paragraph
"""
return [elem[0] for elem in self._marklist]
def __set_marklist(self, marklist):
"""
Internal method to allow for splitting of paragraphs
"""
self._marklist = marklist
def __set_plaintext(self, plaintext):
"""
Internal method to allow for splitting of paragraphs
"""
if not isinstance(plaintext, str):
self._plaintext = plaintext.decode('utf-8')
else:
self._plaintext = plaintext
def __set_attrlist(self, attrlist):
"""
Internal method to allow for splitting of paragraphs
"""
self._attrlist = attrlist
def __parse_text(self):
"""
Parse the markup text. This method will only do this if not
done already
"""
if self._plaintext is None:
parse_ok, self._attrlist, self._plaintext, accel_char= \
Pango.parse_markup(self._text, -1, '\000')
def divide(self, layout, width, height, dpi_x, dpi_y):
self.__parse_text()
l_margin = self._style.get_left_margin() * dpi_x / 2.54
r_margin = self._style.get_right_margin() * dpi_x / 2.54
t_margin = self._style.get_top_margin() * dpi_y / 2.54
b_margin = self._style.get_bottom_margin() * dpi_y / 2.54
h_padding = self._style.get_padding() * dpi_x / 2.54
v_padding = self._style.get_padding() * dpi_y / 2.54
f_indent = self._style.get_first_indent() * dpi_x / 2.54
# calculate real width available for text
text_width = width - l_margin - 2 * h_padding - r_margin
if f_indent < 0:
text_width -= f_indent
layout.set_width(int(text_width * Pango.SCALE))
# set paragraph properties
layout.set_wrap(Pango.WrapMode.WORD_CHAR)
layout.set_indent(int(f_indent * Pango.SCALE))
layout.set_tabs(tabstops_to_tabarray(self._style.get_tabs(), dpi_x))
#
align = self._style.get_alignment_text()
if align == 'left':
layout.set_justify(False)
layout.set_alignment(Pango.Alignment.LEFT)
elif align == 'right':
layout.set_justify(False)
layout.set_alignment(Pango.Alignment.RIGHT)
elif align == 'center':
layout.set_justify(False)
layout.set_alignment(Pango.Alignment.CENTER)
elif align == 'justify':
#We have a problem, in pango, justify works only on full lines,
# and we need an alignment for the partial lines. We don't know
# for justify what alignment the user wants however. We assume
# here LEFT ...
layout.set_alignment(Pango.Alignment.LEFT)
layout.set_justify(True)
else:
raise ValueError
#
font_style = self._style.get_font()
layout.set_font_description(fontstyle_to_fontdescription(font_style))
#set line spacing based on font:
spacing = font_style.get_size() * self.spacingfractionfont
layout.set_spacing(int(round(spacing * Pango.SCALE)))
text_height = height - t_margin - 2 * v_padding
# calculate where to cut the paragraph
layout.set_text(self._plaintext, -1)
layout.set_attributes(self._attrlist)
layout_width, layout_height = layout.get_pixel_size()
line_count = layout.get_line_count()
spacing = layout.get_spacing() / Pango.SCALE
# if all paragraph fits we don't need to cut
if layout_height - spacing <= text_height:
paragraph_height = layout_height + spacing + t_margin + (2 * v_padding)
if height - paragraph_height > b_margin:
paragraph_height += b_margin
return (self, None), paragraph_height
# we need to cut paragraph:
# 1. if paragraph part of a cell, we do not divide if only small part,
# of paragraph can be shown, instead move to next page
if line_count < 4 and self._parent._type == 'CELL':
return (None, self), 0
lineiter = layout.get_iter()
linenr = 0
linerange = lineiter.get_line_yrange()
# 2. if nothing fits, move to next page without split
# there is a spacing above and under the text
if linerange[1] - linerange[0] + 2.*spacing \
> text_height * Pango.SCALE:
return (None, self), 0
# 3. split the paragraph
startheight = linerange[0]
endheight = linerange[1]
splitline = -1
if lineiter.at_last_line():
#only one line of text that does not fit
return (None, self), 0
while not lineiter.at_last_line():
#go to next line, see if all fits, if not split
lineiter.next_line()
linenr += 1
linerange = lineiter.get_line_yrange()
if linerange[1] - startheight + 2.*spacing \
> text_height * Pango.SCALE:
splitline = linenr
break
endheight = linerange[1]
if splitline == -1:
print('CairoDoc STRANGE ')
return (None, self), 0
#we split at splitline
# get index of first character which doesn't fit on available height
layout_line = layout.get_line(splitline)
index = layout_line.start_index
# and divide the text, first create the second part
new_style = ParagraphStyle(self._style)
new_style.set_top_margin(0)
#we split a paragraph, text should begin in correct position: no indent
#as if the paragraph just continues from normal text
new_style.set_first_indent(0)
new_paragraph = GtkDocParagraph(new_style)
#index is in bytecode in the text..
new_paragraph.__set_plaintext(self._plaintext.encode('utf-8')[index:])
#now recalculate the attrilist:
newattrlist = layout.get_attributes().copy()
newattrlist.filter(self.filterattr, index)
## GTK3 PROBLEM: get_iterator no longer available!!
## REFERENCES:
## http://www.gramps-project.org/bugs/view.php?id=6208
## https://bugzilla.gnome.org/show_bug.cgi?id=646788
## workaround: https://github.com/matasbbb/pitivit/commit/da815339e5ce3631b122a72158ba9ffcc9ee4372
## OLD EASY CODE:
## oldattrlist = newattrlist.get_iterator()
## while oldattrlist.next():
## vals = oldattrlist.get_attrs()
## #print (vals)
## for attr in vals:
## newattr = attr.copy()
## newattr.start_index -= index if newattr.start_index > index \
## else 0
## newattr.end_index -= index
## newattrlist.insert(newattr)
## ## START OF WORKAROUND
oldtext = self._text
pos = 0
realpos = 0
markstarts = []
#index is in bytecode in the text.. !!
while pos < index:
if realpos >= len(oldtext):
break
char = oldtext[realpos]
if char == '<' and oldtext[realpos+1] != '/':
# a markup starts
end = realpos + oldtext[realpos:].find('>') + 1
markstarts += [oldtext[realpos:end]]
realpos = end
elif char == '<':
#this is the closing tag, we did not stop yet, so remove tag!
realpos = realpos + oldtext[realpos:].find('>') + 1
markstarts.pop()
else:
pos += len(char.encode('utf-8'))
realpos += 1
#now construct the marked up text to use
newtext = ''.join(markstarts)
newtext += oldtext[realpos:]
#have it parsed
parse_ok, newattrlist, _plaintext, accel_char= \
Pango.parse_markup(newtext, -1, '\000')
## ##END OF WORKAROUND
new_paragraph.__set_attrlist(newattrlist)
# then update the first one
self.__set_plaintext(self._plaintext.encode('utf-8')[:index])
self._style.set_bottom_margin(0)
# split the list of index marks
para1 = []
para2 = []
for mark, position in self._marklist:
if position < index:
para1.append((mark, position))
else:
para2.append((mark, position - index))
self.__set_marklist(para1)
new_paragraph.__set_marklist(para2)
paragraph_height = endheight - startheight + spacing + t_margin + 2 * v_padding
return (self, new_paragraph), paragraph_height
def filterattr(self, attr, index):
"""callback to filter out attributes in the removed piece at beginning
"""
if attr.start_index > index or \
(attr.start_index < index and attr.end_index > index):
return False
return True
def draw(self, cr, layout, width, dpi_x, dpi_y):
self.__parse_text()
l_margin = self._style.get_left_margin() * dpi_x / 2.54
r_margin = self._style.get_right_margin() * dpi_x / 2.54
t_margin = self._style.get_top_margin() * dpi_y / 2.54
b_margin = self._style.get_bottom_margin() * dpi_y / 2.54
h_padding = self._style.get_padding() * dpi_x / 2.54
v_padding = self._style.get_padding() * dpi_y / 2.54
f_indent = self._style.get_first_indent() * dpi_x / 2.54
# calculate real width available for text
text_width = width - l_margin - 2 * h_padding - r_margin
if f_indent < 0:
text_width -= f_indent
layout.set_width(int(text_width * Pango.SCALE))
# set paragraph properties
layout.set_wrap(Pango.WrapMode.WORD_CHAR)
layout.set_indent(int(f_indent * Pango.SCALE))
layout.set_tabs(tabstops_to_tabarray(self._style.get_tabs(), dpi_x))
#
align = self._style.get_alignment_text()
if align == 'left':
layout.set_justify(False)
layout.set_alignment(Pango.Alignment.LEFT)
elif align == 'right':
layout.set_justify(False)
layout.set_alignment(Pango.Alignment.RIGHT)
elif align == 'center':
layout.set_justify(False)
layout.set_alignment(Pango.Alignment.CENTER)
elif align == 'justify':
#We have a problem, in pango, justify works only on full lines,
# and we need an alignment for the partial lines. We don't know
# for justify what alignment the user wants however. We assume
# here LEFT ...
layout.set_alignment(Pango.Alignment.LEFT)
layout.set_justify(True)
#
font_style = self._style.get_font()
layout.set_font_description(fontstyle_to_fontdescription(font_style))
#set line spacing based on font:
spacing = font_style.get_size() * self.spacingfractionfont
layout.set_spacing(int(round(spacing * Pango.SCALE)))
# layout the text
layout.set_text(self._plaintext, -1)
layout.set_attributes(self._attrlist)
layout_width, layout_height = layout.get_pixel_size()
# render the layout onto the cairo surface
x = l_margin + h_padding
if f_indent < 0:
x += f_indent
# 3/4 of the spacing is added above the text, 1/4 is added below
cr.move_to(x, t_margin + v_padding + spacing * 0.75)
cr.set_source_rgb(*utils.rgb_color(font_style.get_color()))
PangoCairo.show_layout(cr, layout)
# calculate the full paragraph height
height = layout_height + spacing + t_margin + 2*v_padding + b_margin
# draw the borders
if self._style.get_top_border():
cr.move_to(l_margin, t_margin)
cr.rel_line_to(width - l_margin - r_margin, 0)
if self._style.get_right_border():
cr.move_to(width - r_margin, t_margin)
cr.rel_line_to(0, height - t_margin - b_margin)
if self._style.get_bottom_border():
cr.move_to(l_margin, height - b_margin)
cr.rel_line_to(width - l_margin - r_margin, 0)
if self._style.get_left_border():
cr.move_to(l_margin, t_margin)
cr.rel_line_to(0, height - t_margin - b_margin)
cr.set_line_width(1)
cr.set_source_rgb(0, 0, 0)
cr.stroke()
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(1.0, 0, 0)
cr.rectangle(0, 0, width, height)
cr.stroke()
cr.set_source_rgb(0, 0, 1.0)
cr.rectangle(l_margin, t_margin,
width-l_margin-r_margin, height-t_margin-b_margin)
cr.stroke()
return height
class GtkDocTable(GtkDocBaseElement):
"""Implement a table.
"""
_type = 'TABLE'
_allowed_children = ['ROW']
def divide(self, layout, width, height, dpi_x, dpi_y):
#calculate real table width
table_width = width * self._style.get_width() / 100
# calculate the height of each row
table_height = 0
row_index = 0
while row_index < len(self._children):
row = self._children[row_index]
(r1, r2), row_height = row.divide(layout, table_width, height,
dpi_x, dpi_y)
if r2 is not None:
#break the table in two parts
break
table_height += row_height
row_index += 1
height -= row_height
# divide the table if any row did not fit
new_table = None
if row_index < len(self._children):
new_table = GtkDocTable(self._style)
#add the split row
new_table.add_child(r2)
list(map(new_table.add_child, self._children[row_index+1:]))
del self._children[row_index+1:]
return (self, new_table), table_height
def draw(self, cr, layout, width, dpi_x, dpi_y):
#calculate real table width
table_width = width * self._style.get_width() / 100
# TODO is a table always left aligned??
table_height = 0
# draw all the rows
for row in self._children:
cr.save()
cr.translate(0, table_height)
row_height = row.draw(cr, layout, table_width, dpi_x, dpi_y)
cr.restore()
table_height += row_height
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(1.0, 0, 0)
cr.rectangle(0, 0, table_width, table_height)
cr.stroke()
return table_height
class GtkDocTableRow(GtkDocBaseElement):
"""Implement a row in a table.
"""
_type = 'ROW'
_allowed_children = ['CELL']
def divide(self, layout, width, height, dpi_x, dpi_y):
# the highest cell gives the height of the row
cell_heights = []
dividedrow = False
cell_width_iter = self._style.__iter__()
new_row = GtkDocTableRow(self._style)
for cell in self._children:
cell_width = 0
for i in range(cell.get_span()):
cell_width += next(cell_width_iter)
cell_width = cell_width * width / 100
(c1, c2), cell_height = cell.divide(layout, cell_width, height,
dpi_x, dpi_y)
cell_heights.append(cell_height)
if c2 is None:
emptycell = GtkDocTableCell(c1._style, c1.get_span())
new_row.add_child(emptycell)
else:
dividedrow = True
new_row.add_child(c2)
# save height [inch] of the row to be able to draw exact cell border
row_height = max(cell_heights)
self.height = row_height / dpi_y
# return the new row if dividing was needed
if dividedrow:
if row_height == 0:
for cell in self._children:
cell._style.set_top_border(False)
cell._style.set_left_border(False)
cell._style.set_right_border(False)
return (self, new_row), row_height
else:
return (self, None), row_height
def draw(self, cr, layout, width, dpi_x, dpi_y):
cr.save()
# get the height of this row
row_height = self.height * dpi_y
# draw all the cells in the row
cell_width_iter = self._style.__iter__()
for cell in self._children:
cell_width = 0
for i in range(cell.get_span()):
cell_width += next(cell_width_iter)
cell_width = cell_width * width / 100
cell.draw(cr, layout, cell_width, row_height, dpi_x, dpi_y)
cr.translate(cell_width, 0)
cr.restore()
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(0, 0, 1.0)
cr.rectangle(0, 0, width, row_height)
cr.stroke()
return row_height
class GtkDocTableCell(GtkDocBaseElement):
"""Implement a cell in a table row.
"""
_type = 'CELL'
_allowed_children = ['PARAGRAPH', 'IMAGE']
def __init__(self, style, span=1):
GtkDocBaseElement.__init__(self, style)
self._span = span
def get_span(self):
return self._span
def divide(self, layout, width, height, dpi_x, dpi_y):
h_padding = self._style.get_padding() * dpi_x / 2.54
v_padding = self._style.get_padding() * dpi_y / 2.54
# calculate real available width
width -= 2 * h_padding
available_height = height
# calculate height of each child
cell_height = 0
new_cell = None
e2 = None
childnr = 0
for child in self._children:
if new_cell is None:
(e1, e2), child_height = child.divide(layout, width,
available_height, dpi_x, dpi_y)
cell_height += child_height
available_height -= child_height
if e2 is not None:
#divide the cell
new_style = TableCellStyle(self._style)
if e1 is not None:
new_style.set_top_border(False)
new_cell = GtkDocTableCell(new_style, self._span)
new_cell.add_child(e2)
# then update this cell
self._style.set_bottom_border(False)
if e1 is not None:
childnr += 1
else:
#cell has been divided
new_cell.add_child(child)
self._children = self._children[:childnr]
# calculate real height
if cell_height != 0:
cell_height += 2 * v_padding
# a cell can't be divided, return the height
return (self, new_cell), cell_height
def draw(self, cr, layout, width, cell_height, dpi_x, dpi_y):
"""Draw a cell.
This draw method is a bit different from the others, as common
cell height of all cells in a row is also given as parameter.
This is needed to be able to draw proper vertical borders around
each cell, i.e. the border should be as long as the highest cell
in the given row.
"""
h_padding = self._style.get_padding() * dpi_x / 2.54
v_padding = self._style.get_padding() * dpi_y / 2.54
# calculate real available width
i_width = width - 2 * h_padding
# draw children
cr.save()
cr.translate(h_padding, v_padding)
for child in self._children:
child_height = child.draw(cr, layout, i_width, dpi_x, dpi_y)
cr.translate(0, child_height)
cr.restore()
# draw the borders
if self._style.get_top_border():
cr.move_to(0, 0)
cr.rel_line_to(width , 0)
if self._style.get_right_border():
cr.move_to(width, 0)
cr.rel_line_to(0, cell_height)
if self._style.get_bottom_border():
cr.move_to(0, cell_height)
cr.rel_line_to(width, 0)
if self._style.get_left_border():
cr.move_to(0, 0)
cr.line_to(0, cell_height)
cr.set_line_width(1)
cr.set_source_rgb(0, 0, 0)
cr.stroke()
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(0, 1.0, 0)
cr.rectangle(0, 0, width, cell_height)
cr.stroke()
return cell_height
class GtkDocPicture(GtkDocBaseElement):
"""Implement an image.
"""
_type = 'IMAGE'
_allowed_children = []
def __init__(self, style, filename, width, height, crop=None):
GtkDocBaseElement.__init__(self, style)
self._filename = filename
self._width = width
self._height = height
self._crop = crop
def divide(self, layout, width, height, dpi_x, dpi_y):
img_width = self._width * dpi_x / 2.54
img_height = self._height * dpi_y / 2.54
# image can't be divided, a new page must begin
# if it can't fit on the current one
if img_height <= height:
return (self, None), img_height
else:
return (None, self), 0
def draw(self, cr, layout, width, dpi_x, dpi_y):
from gi.repository import Gtk, Gdk
img_width = self._width * dpi_x / 2.54
img_height = self._height * dpi_y / 2.54
if self._style == 'right':
l_margin = width - img_width
elif self._style == 'center':
l_margin = (width - img_width) / 2.0
else:
l_margin = 0
# load the image and get its extents
pixbuf = resize_to_buffer(self._filename, [img_width, img_height],
self._crop)
pixbuf_width = pixbuf.get_width()
pixbuf_height = pixbuf.get_height()
# calculate the scale to fit image into the set extents
scale = min(img_width / pixbuf_width, img_height / pixbuf_height)
# draw the image
cr.save()
cr.translate(l_margin, 0)
cr.scale(scale, scale)
Gdk.cairo_set_source_pixbuf(cr, pixbuf,
(img_width / scale - pixbuf_width) / 2,
(img_height / scale - pixbuf_height) / 2)
cr.rectangle(0 , 0, img_width / scale, img_height / scale)
##gcr.set_source_pixbuf(pixbuf,
##(img_width - pixbuf_width) / 2,
##(img_height - pixbuf_height) / 2)
##cr.rectangle(0 , 0, img_width, img_height)
##cr.scale(scale, scale)
cr.fill()
cr.restore()
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(1.0, 0, 0)
cr.rectangle(l_margin, 0, img_width, img_height)
cr.stroke()
return (img_height)
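# Illustrative example (hypothetical sizes): if the loaded pixbuf ends up
# 600x400 pixels while the reserved slot is 300x300 device points, the code above
# computes scale = min(300/600, 300/400) = 0.5, so the image is rendered at
# 300x200 and centred vertically inside the reserved box.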
class GtkDocFrame(GtkDocBaseElement):
"""Implement a frame.
"""
_type = 'FRAME'
_allowed_children = ['LINE', 'POLYGON', 'BOX', 'TEXT']
def divide(self, layout, width, height, dpi_x, dpi_y):
frame_width = round(self._style.width * dpi_x / 2.54)
frame_height = round(self._style.height * dpi_y / 2.54)
t_margin = self._style.spacing[2] * dpi_y / 2.54
b_margin = self._style.spacing[3] * dpi_y / 2.54
# frame can't be divided, a new page must begin
# if it can't fit on the current one
if frame_height + t_margin + b_margin <= height:
return (self, None), frame_height + t_margin + b_margin
elif frame_height + t_margin <= height:
return (self, None), height
else:
return (None, self), 0
def draw(self, cr, layout, width, dpi_x, dpi_y):
frame_width = self._style.width * dpi_x / 2.54
frame_height = self._style.height * dpi_y / 2.54
l_margin = self._style.spacing[0] * dpi_x / 2.54
r_margin = self._style.spacing[1] * dpi_x / 2.54
t_margin = self._style.spacing[2] * dpi_y / 2.54
b_margin = self._style.spacing[3] * dpi_y / 2.54
if self._style.align == 'left':
x_offset = l_margin
elif self._style.align == 'right':
x_offset = width - r_margin - frame_width
elif self._style.align == 'center':
x_offset = (width - frame_width) / 2.0
else:
raise ValueError
# draw each element in the frame
cr.save()
cr.translate(x_offset, t_margin)
cr.rectangle(0, 0, frame_width, frame_height)
cr.clip()
for elem in self._children:
elem.draw(cr, layout, frame_width, dpi_x, dpi_y)
cr.restore()
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(1.0, 0, 0)
cr.rectangle(x_offset, t_margin, frame_width, frame_height)
cr.stroke()
return frame_height + t_margin + b_margin
class GtkDocLine(GtkDocBaseElement):
"""Implement a line.
"""
_type = 'LINE'
_allowed_children = []
def __init__(self, style, x1, y1, x2, y2):
GtkDocBaseElement.__init__(self, style)
self._start = (x1, y1)
self._end = (x2, y2)
def draw(self, cr, layout, width, dpi_x, dpi_y):
start = (self._start[0] * dpi_x / 2.54, self._start[1] * dpi_y / 2.54)
end = (self._end[0] * dpi_x / 2.54, self._end[1] * dpi_y / 2.54)
line_color = utils.rgb_color(self._style.get_color())
cr.save()
cr.set_source_rgb(*line_color)
cr.set_line_width(self._style.get_line_width())
# TODO line style
line_style = self._style.get_line_style()
if line_style != SOLID:
cr.set_dash(self._style.get_dash_style(line_style), 0)
cr.move_to(*start)
cr.line_to(*end)
cr.stroke()
cr.restore()
return 0
class GtkDocPolygon(GtkDocBaseElement):
"""Implement a line.
"""
_type = 'POLYGON'
_allowed_children = []
def __init__(self, style, path):
GtkDocBaseElement.__init__(self, style)
self._path = path
def draw(self, cr, layout, width, dpi_x, dpi_y):
path = [(x * dpi_x / 2.54, y * dpi_y / 2.54) for (x, y) in self._path]
path_start = path.pop(0)
path_stroke_color = utils.rgb_color(self._style.get_color())
path_fill_color = utils.rgb_color(self._style.get_fill_color())
cr.save()
cr.move_to(*path_start)
for (x, y) in path:
cr.line_to(x, y)
cr.close_path()
cr.set_source_rgb(*path_fill_color)
cr.fill_preserve()
cr.set_source_rgb(*path_stroke_color)
cr.set_line_width(self._style.get_line_width())
# TODO line style
line_style = self._style.get_line_style()
if line_style != SOLID:
cr.set_dash(self._style.get_dash_style(line_style), 0)
cr.stroke()
cr.restore()
return 0
class GtkDocBox(GtkDocBaseElement):
"""Implement a box with optional shadow around it.
"""
_type = 'BOX'
_allowed_children = []
def __init__(self, style, x, y, width, height):
GtkDocBaseElement.__init__(self, style)
self._x = x
self._y = y
self._width = width
self._height = height
def draw(self, cr, layout, width, dpi_x, dpi_y):
box_x = self._x * dpi_x / 2.54
box_y = self._y * dpi_y / 2.54
box_width = self._width * dpi_x / 2.54
box_height = self._height * dpi_y / 2.54
box_stroke_color = utils.rgb_color((0, 0, 0))
box_fill_color = utils.rgb_color(self._style.get_fill_color())
shadow_color = utils.rgb_color((192, 192, 192))
cr.save()
cr.set_line_width(self._style.get_line_width())
# TODO line style
line_style = self._style.get_line_style()
if line_style != SOLID:
cr.set_dash(self._style.get_dash_style(line_style), 0)
if self._style.get_shadow():
shadow_x = box_x + self._style.get_shadow_space() * dpi_x / 2.54
shadow_y = box_y + self._style.get_shadow_space() * dpi_y / 2.54
cr.set_source_rgb(*shadow_color)
cr.rectangle(shadow_x, shadow_y, box_width, box_height)
cr.fill()
cr.rectangle(box_x, box_y, box_width, box_height)
cr.set_source_rgb(*box_fill_color)
cr.fill_preserve()
cr.set_source_rgb(*box_stroke_color)
cr.stroke()
cr.restore()
return 0
class GtkDocText(GtkDocBaseElement):
"""Implement a text on graphical reports.
"""
_type = 'TEXT'
_allowed_children = []
# line spacing is not defined in ParagraphStyle
spacingfractionfont = 0.2
def __init__(self, style, vertical_alignment, text, x, y,
angle=0, mark=None):
GtkDocBaseElement.__init__(self, style)
self._align_y = vertical_alignment
self._text = text
self._x = x
self._y = y
self._angle = angle
self._marklist = []
if mark:
self._marklist = [mark]
def draw(self, cr, layout, width, dpi_x, dpi_y):
text_x = self._x * dpi_x / 2.54
text_y = self._y * dpi_y / 2.54
# turn off text wrapping
layout.set_width(-1)
# set paragraph properties
align = self._style.get_alignment_text()
if align == 'left':
layout.set_justify(False)
layout.set_alignment(Pango.Alignment.LEFT)
elif align == 'right':
layout.set_justify(False)
layout.set_alignment(Pango.Alignment.RIGHT)
elif align == 'center':
layout.set_justify(False)
layout.set_alignment(Pango.Alignment.CENTER)
elif align == 'justify':
#We have a problem, in pango, justify works only on full lines,
# and we need an alignment for the partial lines. We don't know
# for justify what alignment the user wants however. We assume
# here CENTER ...
layout.set_alignment(Pango.Alignment.CENTER)
layout.set_justify(True)
else:
raise ValueError
#
font_style = self._style.get_font()
layout.set_font_description(fontstyle_to_fontdescription(font_style))
#set line spacing based on font:
spacing = font_style.get_size() * self.spacingfractionfont
layout.set_spacing(int(round(spacing * Pango.SCALE)))
# layout the text
layout.set_markup(self._text)
layout_width, layout_height = layout.get_pixel_size()
# calculate horizontal and vertical alignment shift
if align == 'left':
align_x = 0
elif align == 'right':
align_x = - layout_width
elif align == 'center' or align == 'justify':
align_x = - layout_width / 2
else:
raise ValueError
if self._align_y == 'top':
align_y = 0
elif self._align_y == 'center':
align_y = - layout_height / 2
elif self._align_y == 'bottom':
align_y = - layout_height
else:
raise ValueError
# render the layout onto the cairo surface
cr.save()
cr.translate(text_x, text_y)
cr.rotate(radians(self._angle))
cr.move_to(align_x, align_y)
cr.set_source_rgb(*utils.rgb_color(font_style.get_color()))
PangoCairo.show_layout(cr, layout)
cr.restore()
return layout_height
def get_marks(self):
"""
Return the index mark for this text
"""
return self._marklist
#------------------------------------------------------------------------
#
# CairoDoc class
#
#------------------------------------------------------------------------
class CairoDoc(BaseDoc, TextDoc, DrawDoc):
"""Act as an abstract document that can render onto a cairo context.
Maintains an abstract model of the document. The root of this abstract
document is self._doc. The model is built via the subclassed BaseDoc, and
the implemented TextDoc, DrawDoc interface methods.
It can render the model onto cairo context pages, according to the received
page style.
"""
# BaseDoc implementation
EXT = 'pdf'
def open(self, filename):
fe = filename.split('.')
if len(fe) == 1:
filename = filename + '.' + self.EXT
elif fe[-1] != self.EXT:
# NOTE: the warning will be bogus
# if the EXT isn't properly overridden by derived class
log.warning(_(
"""Mismatch between selected extension %(ext)s and actual format.
Writing to %(filename)s in format %(impliedext)s.""") %
{'ext' : fe[-1],
'filename' : filename,
'impliedext' : self.EXT} )
self._backend = CairoBackend(filename)
self._doc = GtkDocDocument()
self._active_element = self._doc
self._pages = []
self._elements_to_paginate = []
self._links_error = False
def close(self):
self.run()
# TextDoc implementation
def page_break(self):
self._active_element.add_child(GtkDocPagebreak())
def start_bold(self):
self.__write_text('<b>', markup=True)
def end_bold(self):
self.__write_text('</b>', markup=True)
def start_superscript(self):
self.__write_text('<small><sup>', markup=True)
def end_superscript(self):
self.__write_text('</sup></small>', markup=True)
def start_paragraph(self, style_name, leader=None):
style_sheet = self.get_style_sheet()
style = style_sheet.get_paragraph_style(style_name)
new_paragraph = GtkDocParagraph(style, leader)
self._active_element.add_child(new_paragraph)
self._active_element = new_paragraph
def end_paragraph(self):
self._active_element = self._active_element.get_parent()
def start_table(self, name, style_name):
style_sheet = self.get_style_sheet()
style = style_sheet.get_table_style(style_name)
new_table = GtkDocTable(style)
self._active_element.add_child(new_table)
self._active_element = new_table
# we need to remember the column width list from the table style.
# this is an ugly hack, but got no better idea.
self._active_row_style = list(map(style.get_column_width,
list(range(style.get_columns()))))
if self.get_rtl_doc():
self._active_row_style.reverse()
def end_table(self):
self._active_element = self._active_element.get_parent()
def start_row(self):
new_row = GtkDocTableRow(self._active_row_style)
self._active_element.add_child(new_row)
self._active_element = new_row
def end_row(self):
if self.get_rtl_doc():
self._active_element._children.reverse()
self._active_element = self._active_element.get_parent()
def start_cell(self, style_name, span=1):
style_sheet = self.get_style_sheet()
style = style_sheet.get_cell_style(style_name)
new_cell = GtkDocTableCell(style, span)
self._active_element.add_child(new_cell)
self._active_element = new_cell
def end_cell(self):
self._active_element = self._active_element.get_parent()
def write_styled_note(self, styledtext, format, style_name,
contains_html=False, links=False):
"""
Convenience function to write a styledtext to the cairo doc.
styledtext : assumed a StyledText object to write
format : = 0 : Flowed, = 1 : Preformatted
style_name : name of the style to use for default presentation
contains_html: bool, the backend should not check if html is present.
If contains_html=True, then the textdoc is free to handle that in
some way. Eg, a textdoc could remove all tags, or could make sure
a link is clickable. CairoDoc does nothing different for html notes
links: bool, true if URLs should be made clickable
"""
text = str(styledtext)
s_tags = styledtext.get_tags()
#FIXME: following split should be regex to match \n\s*\n instead?
markuptext = self._backend.add_markup_from_styled(text, s_tags,
split='\n\n')
if format == 1:
#preformatted, retain whitespace. Cairo retains \n automatically,
#so use \n\n for paragraph detection
#FIXME: following split should be regex to match \n\s*\n instead?
for line in markuptext.split('\n\n'):
self.start_paragraph(style_name)
self.__write_text(line, markup=True, links=links)
self.end_paragraph()
elif format == 0:
#flowed
#FIXME: following split should be regex to match \n\s*\n instead?
for line in markuptext.split('\n\n'):
self.start_paragraph(style_name)
#flowed, normal whitespace goes away, but we keep linebreak
lines = line.split('\n')
newlines = []
for singleline in lines:
newlines.append(' '.join(singleline.split()))
self.__write_text('\n'.join(newlines), markup=True, links=links)
self.end_paragraph()
def __markup(self, text, markup=None):
if not markup:
# We need to escape the text here for later Pango.Layout.set_markup
# calls. This way we save the markup created by the report
# The markup in the note editor is not in the text so is not
# considered. It must be added by pango too
text = self._backend.ESCAPE_FUNC()(text)
return text
def __write_text(self, text, mark=None, markup=False, links=False):
"""
@param text: text to write.
@param mark: IndexMark to use for indexing
@param markup: True if text already contains markup info.
Then text will no longer be escaped
@param links: True if URLs should be made clickable
"""
if links == True:
import cairo
if cairo.cairo_version() < 11210 and self._links_error == False:
# Cairo v1.12 is supposed to be the first version
# that supports clickable links
print("""
WARNING: This version of cairo (%s) does NOT support clickable links.
The first version that is supposed to support them is v1.12. See the roadmap:
http://www.cairographics.org/roadmap/
The work around is to save to another format that supports clickable
links (like ODF) and write PDF from that format.
""" % cairo.version)
self._links_error = True
text = self.__markup(text, markup)
if mark:
self._active_element.add_mark(mark)
self._active_element.add_text(text)
def write_text(self, text, mark=None, links=False):
"""Write a normal piece of text according to the
present style
@param text: text to write.
@param mark: IndexMark to use for indexing
@param links: True if URLs should be made clickable
"""
self.__write_text(text, mark, links=links)
def write_markup(self, text, s_tags, mark=None):
"""
Writes the text in the current paragraph. Should only be used after a
start_paragraph and before an end_paragraph.
@param text: text to write. The text is assumed to be _not_ escaped
@param s_tags: assumed to be list of styledtexttags to apply to the
text
@param mark: IndexMark to use for indexing
"""
markuptext = self._backend.add_markup_from_styled(text, s_tags)
self.__write_text(markuptext, mark=mark, markup=True)
def add_media(self, name, pos, x_cm, y_cm, alt='',
style_name=None, crop=None):
new_image = GtkDocPicture(pos, name, x_cm, y_cm, crop=crop)
self._active_element.add_child(new_image)
if len(alt):
style_sheet = self.get_style_sheet()
style = style_sheet.get_paragraph_style(style_name)
style.set_alignment(PARA_ALIGN_CENTER)
# Center the caption under the image
if pos == "right":
style.set_left_margin(self.get_usable_width() - new_image._width)
else:
style.set_right_margin(self.get_usable_width() - new_image._width)
new_paragraph = GtkDocParagraph(style)
new_paragraph.add_text('\n'.join(alt))
self._active_element.add_child(new_paragraph)
def insert_toc(self):
"""
Insert a Table of Contents at this point in the document.
"""
self._doc.add_child(GtkDocTableOfContents())
def insert_index(self):
"""
Insert an Alphabetical Index at this point in the document.
"""
self._doc.add_child(GtkDocAlphabeticalIndex())
# DrawDoc implementation
def start_page(self):
# if this is not the first page we need to "close" the previous one
children = self._doc.get_children()
if children and children[-1].get_type() != 'PAGEBREAK':
self._doc.add_child(GtkDocPagebreak())
new_frame_style = FrameStyle(width=self.get_usable_width(),
height=self.get_usable_height())
new_frame = GtkDocFrame(new_frame_style)
self._active_element.add_child(new_frame)
self._active_element = new_frame
def end_page(self):
self._active_element = self._active_element.get_parent()
def draw_line(self, style_name, x1, y1, x2, y2):
style_sheet = self.get_style_sheet()
style = style_sheet.get_draw_style(style_name)
new_line = GtkDocLine(style, x1, y1, x2, y2)
self._active_element.add_child(new_line)
def draw_path(self, style_name, path):
style_sheet = self.get_style_sheet()
style = style_sheet.get_draw_style(style_name)
new_polygon = GtkDocPolygon(style, path)
self._active_element.add_child(new_polygon)
def draw_box(self, style_name, text, x, y, w, h, mark=None):
""" @param mark: IndexMark to use for indexing """
# we handle the box and...
style_sheet = self.get_style_sheet()
style = style_sheet.get_draw_style(style_name)
new_box = GtkDocBox(style, x, y, w, h)
self._active_element.add_child(new_box)
# ...the text separately
paragraph_style_name = style.get_paragraph_style()
if paragraph_style_name:
paragraph_style = style_sheet.get_paragraph_style(paragraph_style_name)
paragraph_style.set_alignment(PARA_ALIGN_LEFT)
# horizontal position of the text is not included in the style,
# we assume that it is the size of the shadow, or 0.2mm
if style.get_shadow():
x_offset = style.get_shadow_space()
else:
x_offset = 0.2
new_text = GtkDocText(paragraph_style, 'center',
self.__markup(text),
x + x_offset, y + h / 2, angle=0, mark=mark)
self._active_element.add_child(new_text)
def draw_text(self, style_name, text, x, y, mark=None):
""" @param mark: IndexMark to use for indexing """
style_sheet = self.get_style_sheet()
style = style_sheet.get_draw_style(style_name)
paragraph_style_name = style.get_paragraph_style()
paragraph_style = style_sheet.get_paragraph_style(paragraph_style_name)
paragraph_style.set_alignment(PARA_ALIGN_LEFT)
new_text = GtkDocText(paragraph_style, 'top',
self.__markup(text), x, y, angle=0, mark=mark)
self._active_element.add_child(new_text)
def center_text(self, style_name, text, x, y, mark=None):
""" @param mark: IndexMark to use for indexing """
style_sheet = self.get_style_sheet()
style = style_sheet.get_draw_style(style_name)
paragraph_style_name = style.get_paragraph_style()
paragraph_style = style_sheet.get_paragraph_style(paragraph_style_name)
paragraph_style.set_alignment(PARA_ALIGN_CENTER)
new_text = GtkDocText(paragraph_style, 'top',
self.__markup(text), x, y, angle=0, mark=mark)
self._active_element.add_child(new_text)
def rotate_text(self, style_name, text, x, y, angle, mark=None):
""" @param mark: IndexMark to use for indexing """
style_sheet = self.get_style_sheet()
style = style_sheet.get_draw_style(style_name)
paragraph_style_name = style.get_paragraph_style()
paragraph_style = style_sheet.get_paragraph_style(paragraph_style_name)
paragraph_style.set_alignment(PARA_ALIGN_CENTER)
new_text = GtkDocText(paragraph_style, 'center',
self.__markup('\n'.join(text)), x, y, angle, mark)
self._active_element.add_child(new_text)
# paginating and drawing interface
def run(self):
"""Create the physical output from the meta document.
        It must be implemented in the subclasses. The idea is that different
        subclasses can generate different kinds of output,
        e.g. Print, PDF, PS, PNG (which are currently supported by Cairo).
"""
raise NotImplementedError
def paginate_document(self, layout, page_width, page_height, dpi_x, dpi_y):
"""Paginate the entire document.
"""
while not self.paginate(layout, page_width, page_height, dpi_x, dpi_y):
pass
def paginate(self, layout, page_width, page_height, dpi_x, dpi_y):
"""Paginate the meta document in chunks.
        Only one document-level element is handled per run.
"""
        # if this is the first run then initialize the variables
if not self._elements_to_paginate:
self._elements_to_paginate = self._doc.get_children()[:]
self._pages.append(GtkDocDocument())
self._available_height = page_height
# try to fit the next element to current page, divide it if needed
if not self._elements_to_paginate:
#this is a self._doc where nothing has been added. Empty page.
return True
elem = self._elements_to_paginate.pop(0)
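        # divide() returns ((first_part, remainder), used_height): the part of the
        # element that fits on the current page, the part (if any) that must be
        # carried over to the next page, and the height actually consumed.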
(e1, e2), e1_h = elem.divide(layout,
page_width,
self._available_height,
dpi_x,
dpi_y)
# if (part of) it fits on current page add it
if e1 is not None:
self._pages[len(self._pages) - 1].add_child(e1)
# if elem was divided remember the second half to be processed
if e2 is not None:
self._elements_to_paginate.insert(0, e2)
# calculate how much space left on current page
self._available_height -= e1_h
# start new page if needed
if (e1 is None) or (e2 is not None):
self._pages.append(GtkDocDocument())
self._available_height = page_height
return len(self._elements_to_paginate) == 0
def draw_page(self, page_nr, cr, layout, width, height, dpi_x, dpi_y):
"""Draw a page on a Cairo context.
"""
if DEBUG:
cr.set_line_width(0.1)
cr.set_source_rgb(0, 1.0, 0)
cr.rectangle(0, 0, width, height)
cr.stroke()
self._pages[page_nr].draw(cr, layout, width, dpi_x, dpi_y)
|
gramps-project/gramps
|
gramps/plugins/lib/libcairodoc.py
|
Python
|
gpl-2.0
| 62,883
|
[
"Brian"
] |
6990571cb14bc9c7cbf85f7e5c17d49f9fcfbaee272ebeb1dabf420b904ba8d1
|
#!/usr/bin/env python
from math import sqrt
import gtk
from ase.gui.languages import translate as _
from ase.gui.widgets import pack, help
class Graphs(gtk.Window):
def __init__(self, gui):
gtk.Window.__init__(self)
#self.window.set_position(gtk.WIN_POS_CENTER)
#self.window.connect("destroy", lambda w: gtk.main_quit())
#self.window.connect('delete_event', self.exit)
self.set_title('Graphs')
vbox = gtk.VBox()
self.expr = pack(vbox, [gtk.Entry(40),
help('Help for plot ...')])[0]
self.expr.connect('activate', self.plot)
completion = gtk.EntryCompletion()
self.liststore = gtk.ListStore(str)
for s in ['fmax', 's, e-E[0]', 'i, d(0,1)']:
self.liststore.append([s])
completion.set_model(self.liststore)
self.expr.set_completion(completion)
completion.set_text_column(0)
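        # The entry takes expressions that images.graph() evaluates per frame;
        # the suggestions above hint at the syntax, e.g. 'fmax' (presumably the
        # maximum force), 'i, d(0,1)' (frame number vs. the distance between
        # atoms 0 and 1) and 's, e-E[0]' (energy relative to the first frame).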
button = pack(vbox, [gtk.Button('Plot'),
gtk.Label(' x, y1, y2, ...')])[0]
button.connect('clicked', self.plot, 'xy')
button = pack(vbox, [gtk.Button('Plot'),
gtk.Label(' y1, y2, ...')])[0]
button.connect('clicked', self.plot, 'y')
button = pack(vbox, gtk.Button(_('clear')))
button.connect('clicked', self.clear)
self.add(vbox)
vbox.show()
self.show()
self.gui = gui
def plot(self, button=None, type=None, expr=None):
if expr is None:
expr = self.expr.get_text()
else:
self.expr.set_text(expr)
if expr not in [row[0] for row in self.liststore]:
self.liststore.append([expr])
data = self.gui.images.graph(expr)
import matplotlib
matplotlib.interactive(True)
matplotlib.use('GTK')
#matplotlib.use('GTK', warn=False)# Not avail. in 0.91 (it is in 0.98)
import pylab
pylab.ion()
x = 2.5
self.gui.graphs.append(pylab.figure(figsize=(x * 2.5**0.5, x)))
i = self.gui.frame
m = len(data)
if type is None:
if m == 1:
type = 'y'
else:
type = 'xy'
if type == 'y':
for j in range(m):
pylab.plot(data[j])
pylab.plot([i], [data[j, i]], 'o')
else:
for j in range(1, m):
pylab.plot(data[0], data[j])
pylab.plot([data[0, i]], [data[j, i]], 'o')
pylab.title(expr)
#pylab.show()
python = plot
def clear(self, button):
import pylab
for graph in self.gui.graphs:
pylab.close(graph)
self.gui.graphs = []
|
freephys/python_ase
|
ase/gui/graphs.py
|
Python
|
gpl-3.0
| 2,816
|
[
"ASE"
] |
6ae2a91afc801c1c45f83f5f440a0f2d6fbebc62a06d6a47bd665bb3e157d571
|
import GalaxyCommunication_data_manager
import ProtonCommunication_data_manager
from bioblend.galaxy import GalaxyInstance
import logging
def grou():
return ("this is the init.py script")
|
CARPEM/GalaxyDocker
|
data-manager-hegp/datamanagerpkg/datamanagerpkg/__init__.py
|
Python
|
mit
| 193
|
[
"Galaxy"
] |
76a0632f9abcd9839dca792ac18c75cb910a803c9e32f33cb1596d3e1fa5aa91
|
'''
=================================================================================================
R Cipher Suite
Includes all variants of the R cipher
=================================================================================================
Developed by: ProgramRandom, a division of RandomCorporations
A Page For This Project Will Be Created Soon On lakewood999.github.io
Visit my webpage at: https://lakewood999.github.io -- Note that this is my personal page, not the RandomCorporations page
=================================================================================================
What is the R cipher: This is just a random cipher that I came up with. I will not say this is a good or perfect cipher; it's just something I decided to make. The R cipher is an improved version of the Caesar cipher
Root of the name: R cipher
-Well, cipher is just what it is, and R stands for random, or things being randomly generated
=================================================================================================
License:
You are free to use this script free of charge; however, I am not responsible for any problems caused by this script. By using this program, you agree not to hold me liable for any damages related to this program.
You are free to modify and distribute this software (free of charge), but you are NOT allowed to commercialize (sell) this software. Please attribute this program to me if you are sharing or re-distributing it.
=================================================================================================
Status:
This project is currently a WIP
-Variant "i" of the R cipher coming up
Version: Version 1: The X Update
R Cipher X - Progress: 100%
=================================================================================================
'''
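# How it works, in brief: every letter gets its own random Caesar shift
# (the "prekey"), and the list of shifts becomes the key.
# Illustrative example (assuming the random shifts happen to be 5 and 20):
#   "hi" -> h(8)+5 = 13 -> "m",  i(9)+20 = 29 -> wraps to 3 -> "c"
# giving the ciphertext "mc" and the key "5, 20".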
import random
def letterout(x):
out = ""
x = str(x)
if x == "1":
out = "a"
elif x == "2":
out = "b"
elif x == "3":
out = "c"
elif x == "4":
out = "d"
elif x == "5":
out = "e"
elif x == "6":
out = "f"
elif x == "7":
out = "g"
elif x == "8":
out = "h"
elif x == "9":
out = "i"
elif x == "10":
out = "j"
elif x == "11":
out = "k"
elif x == "12":
out = "l"
elif x == "13":
out = "m"
elif x == "14":
out = "n"
elif x == "15":
out = "o"
elif x == "16":
out = "p"
elif x == "17":
out = "q"
elif x == "18":
out = "r"
elif x == "19":
out = "s"
elif x == "20":
out = "t"
elif x == "21":
out = "u"
elif x == "22":
out = "v"
elif x == "23":
out = "w"
elif x == "24":
out = "x"
elif x == "25":
out = "y"
elif x == "26":
out = "z"
return out
#This script just returns the number depending on the input -- WIP: need to alternate
def numberout(x):
out = ""
if x == "a":
out = "1"
elif x == "":
out = "0"
elif x == "b":
out = "2"
elif x == "c":
out = "3"
elif x == "d":
out = "4"
elif x == "e":
out = "5"
elif x == "f":
out = "6"
elif x == "g":
out = "7"
elif x == "h":
out = "8"
elif x == "i":
out = "9"
elif x == "j":
out = "10"
elif x == "k":
out = "11"
elif x == "l":
out = "12"
elif x == "m":
out = "13"
elif x == "n":
out = "14"
elif x == "o":
out = "15"
elif x == "p":
out = "16"
elif x == "q":
out = "17"
elif x == "r":
out = "18"
elif x == "s":
out = "19"
elif x == "t":
out = "20"
elif x == "u":
out = "21"
elif x == "v":
out = "22"
elif x == "w":
out = "23"
elif x == "x":
out = "24"
elif x == "y":
out = "25"
elif x == "z":
out = "26"
return out
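# Note: the two lookup ladders above are equivalent to simple arithmetic on
# character codes; an illustrative shortcut (not used by this script):
#   letterout(n) == chr(ord('a') + int(n) - 1)    for n in "1".."26"
#   numberout(c) == str(ord(c) - ord('a') + 1)    for c in "a".."z"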
def rcipherx(x):
    #This script just returns the letter depending on the input
#This is the function that encrypts the text
def encrypt(text):
encrypted = ""
key = ""
totalscan = len(text)
scan = 0
while scan < totalscan:
prekey = random.randint(1, 26)
letter = text[scan]
letternum = numberout(letter)
encryptout = ""
if letternum == "":
encryptout = " "
prekey = ""
else:
lettersum = prekey+int(letternum)
                if lettersum > 26:
                    # wrap around the alphabet so the result stays in 1..26
                    lettersum = lettersum - 26
encryptout = letterout(lettersum)
if key != "":
if prekey == "":
key = key
else:
key = key + ", " + str(prekey)
else:
if prekey == "":
key = key
else:
key = key + str(prekey)
encrypted += encryptout
scan += 1
print("Your encrypted message: "+encrypted)
print("Here is your key: "+key)
def decrypt(text):
decrypted = ""
        key = input("What is the key (key numbers must be separated by commas with spaces, e.g. 1, 2, 4): ")
keylist = key.split(', ')
        print("Warning: Your key length must be equal to the number of characters in the text you are trying to decrypt, or this decryption will be unsuccessful")
totalscan = len(text)
scan = 0
keyscan = 0
while scan < totalscan:
letter = text[scan]
letternum = numberout(letter)
decryptout = ""
if letternum == "":
decryptout = " "
scan = scan +1
else:
decryptout = int(letternum) - int(keylist[keyscan])
                if decryptout <= 0:
                    # a zero or negative difference wraps back to the end of the alphabet
decryptout = letterout(26-abs(decryptout))
else:
decryptout = letterout(decryptout)
scan = scan + 1
keyscan = keyscan+1
decrypted += str(decryptout)
        print("Your decrypted message is: "+decrypted)
print("This message was decrypted with a key of: "+key)
if x == "encrypt":
encrypt(input("Please type in the text you would like to encrypt: "))
elif x == "decrypt":
decrypt(input("Please type in the text you would like to decrypt: "))
#encrypt(input("Please type in the text you would like to encrypt: "))
#decrypt(input("Please type in the text you would like to decrypt: "))
#rcipherx()
|
lakewood999/Ciphers
|
rcipherx.py
|
Python
|
mit
| 5,693
|
[
"VisIt"
] |
4fecddce30c1e6ce7319517db521dde1fde99790573597847154bc1fe85e064f
|
# -*- coding: utf-8 -*-
#
# Buildbot documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 10 15:13:31 2010.
#
# This file is exec()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import pkg_resources
import sys
import textwrap
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(1, os.path.dirname(os.path.abspath(__file__)))
try:
from buildbot.util.raml import RamlSpec
from buildbot.reporters.telegram import TelegramContact
except ImportError:
sys.path.insert(2, os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.pardir))
from buildbot.util.raml import RamlSpec
from buildbot.reporters.telegram import TelegramContact
# -- General configuration -----------------------------------------------
try:
import sphinxcontrib.blockdiag
assert sphinxcontrib.blockdiag
except ImportError:
raise RuntimeError("sphinxcontrib.blockdiag is not installed. "
"Please install documentation dependencies with "
"`pip install buildbot[docs]`")
try:
pkg_resources.require('docutils>=0.8')
except pkg_resources.ResolutionError:
raise RuntimeError("docutils is not installed or has incompatible version. "
"Please install documentation dependencies with `pip "
"install buildbot[docs]`")
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.extlinks',
'bbdocs.ext',
'bbdocs.api_index',
'sphinxcontrib.blockdiag',
'sphinxcontrib.jinja',
'sphinx_rtd_theme',
]
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Buildbot'
copyright = u'Buildbot Team Members'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
if 'VERSION' in os.environ:
version = os.environ['VERSION']
else:
gl = {'__file__': '../buildbot/__init__.py'}
with open('../buildbot/__init__.py') as f:
exec(f.read(), gl)
version = gl['version']
# The full version, including alpha/beta/rc tags.
release = version
# blockdiag/seqdiag
blockdiag_html_image_format = 'svg'
blockdiag_transparency = True
# add a loud note about python 2
rst_prolog = textwrap.dedent("""\
.. caution:: Buildbot no longer supports Python 2.7 on the Buildbot master.
""")
# add a loud note for anyone looking at the latest docs
if release == 'latest':
rst_prolog += textwrap.dedent("""\
.. caution:: This page documents the latest, unreleased version of
Buildbot. For documentation for released versions, see
http://docs.buildbot.net/current/.
""")
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'release-notes/*.rst']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
intersphinx_mapping = {
'python': ('https://python.readthedocs.io/en/latest/', None),
'sqlalchemy': ('https://sqlalchemy.readthedocs.io/en/latest/', None),
}
extlinks = {
'pull': ('https://github.com/buildbot/buildbot/pull/%s', 'pull request '),
'issue': ('https://github.com/buildbot/buildbot/issues/%s', 'issue # '),
# deprecated. Use issue instead, and point to Github
'bug': ('http://trac.buildbot.net/ticket/%s', 'bug #'),
# Renders as link with whole url, e.g.
# :src-link:`master`
# renders as
# "https://github.com/buildbot/buildbot/blob/master/master".
# Explicit title can be used for customizing how link looks like:
# :src-link:`master's directory <master>`
'src-link': ('https://github.com/buildbot/buildbot/tree/master/%s', None),
# "pretty" reference that looks like relative path in Buildbot source tree
# by default.
'src': ('https://github.com/buildbot/buildbot/tree/master/%s', ''),
'contrib-src': ('https://github.com/buildbot/buildbot-contrib/tree/master/%s', ''),
}
# Sphinx' link checker.
linkcheck_ignore = [
# Local URLs:
r'^http://localhost.*',
# Available only to logged-in users:
r'^https://github\.com/settings/applications$',
# Sites which uses SSL that Python 2 can't handle:
r'^https://opensource\.org/licenses/gpl-2.0\.php$',
r'^https://docs\.docker\.com/engine/installation/$',
# Looks like server doesn't like user agent:
r'^https://www\.microsoft\.com/en-us/download/details\.aspx\?id=17657$',
# Example domain.
r'^https?://(.+\.)?example\.org',
# Anchor check fails on rendered user files on GitHub, since GitHub uses
# custom prefix for anchors in user generated content.
r'https://github\.com/buildbot/guanlecoja-ui/tree/master#changelog',
r'http://mesosphere.github.io/marathon/docs/rest-api.html#post-v2-apps',
]
linkcheck_timeout = 10
linkcheck_retries = 3
linkcheck_workers = 20
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {'stickysidebar': 'true'}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [
'_themes'
]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = os.path.join('_images', 'full_logo.png')
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large or a png.
html_favicon = os.path.join('_static', 'icon.png')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['searchbox.html', 'localtoc.html', 'relations.html', 'sourcelink.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
html_use_index = True
html_use_modindex = False
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Buildbotdoc'
# -- Options for LaTeX output --------------------------------------------
latex_elements = {}
# The paper size ('letter' or 'a4').
latex_elements['papersize'] = 'a4'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '11pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Buildbot.tex', u'Buildbot Documentation',
u'Brian Warner', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = os.path.join('_images', 'header-text-transparent.png')
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# Three possible values for this option (see sphinx config manual) are:
# 1. 'no' - do not display URLs (default)
# 2. 'footnote' - display URLs in footnotes
# 3. 'inline' - display URLs inline in parentheses
latex_show_urls = 'inline'
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'buildbot', u'Buildbot Documentation',
[u'Brian Warner'], 1)
]
jinja_contexts = {
"data_api": {'raml': RamlSpec()},
"telegram": {'commands': TelegramContact.describe_commands()},
}
raml_spec = RamlSpec()
for raml_typename, raml_type in sorted(raml_spec.types.items()):
jinja_contexts['data_api_' + raml_typename] = {
'raml': raml_spec,
'name': raml_typename,
'type': raml_type,
}
doc_path = 'developer/raml/{}.rst'.format(raml_typename)
if not os.path.exists(doc_path):
raise Exception('File {} for RAML type {} does not exist'.format(doc_path, raml_typename))
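# Each RAML type thus gets its own jinja context named 'data_api_<typename>',
# and a matching developer/raml/<typename>.rst page must exist (e.g. a
# hypothetical type 'build' would need developer/raml/build.rst).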
# Spell checker.
try:
import enchant # noqa # pylint: disable=unused-import
except ImportError as ex:
print("enchant module import failed:\n"
"{0}\n"
"Spell checking disabled.".format(ex),
file=sys.stderr)
else:
extensions.append('sphinxcontrib.spelling')
spelling_show_suggestions = True
|
anish/buildbot
|
master/docs/conf.py
|
Python
|
gpl-2.0
| 12,154
|
[
"Brian"
] |
5a4379da1a0dbe5f4b85b6543c310c7886764d1630f776eda354cb77c6cf8ca4
|
from django.db import IntegrityError
from experiments.models import Enrollment
from experiments.manager import experiment_manager
from experiments.dateutils import now, fix_awareness, datetime_from_timestamp, timestamp_from_datetime
from experiments.signals import user_enrolled
from experiments.experiment_counters import ExperimentCounter
from experiments import conf
from collections import namedtuple
from datetime import timedelta
import collections
import numbers
import logging
import json
logger = logging.getLogger('experiments')
def participant(request=None, session=None, user=None):
    # This caches the experiment user on the request object because creating an
    # AuthenticatedUser can involve database lookups whose results it caches.
    # Signals are attached to login/logout to clear the cache using clear_participant_cache.
if request and hasattr(request, '_experiments_user'):
return request._experiments_user
else:
result = _get_participant(request, session, user)
if request:
request._experiments_user = result
return result
def clear_participant_cache(request):
if hasattr(request, '_experiments_user'):
del request._experiments_user
def _get_participant(request, session, user):
if request and hasattr(request, 'user') and not user:
user = request.user
if request and hasattr(request, 'session') and not session:
session = request.session
if request and conf.BOT_REGEX.search(request.META.get("HTTP_USER_AGENT", "")):
return DummyUser()
elif user and user.is_authenticated():
if getattr(user, 'is_confirmed_human', True):
return AuthenticatedUser(user, request)
else:
return DummyUser()
elif session:
return SessionUser(session, request)
else:
return DummyUser()
EnrollmentData = namedtuple('EnrollmentData', ['experiment', 'alternative', 'enrollment_date', 'last_seen'])
class WebUser(object):
"""Represents a user (either authenticated or session based) which can take part in experiments"""
def __init__(self):
self.experiment_counter = ExperimentCounter()
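    # Illustrative usage of enroll() below (not part of this module):
    #   alt = participant(request).enroll('button_color', ['blue', 'green'])
    # returns the chosen alternative ('blue', 'green' or the control group) and
    # keeps returning the same one for this user on later calls.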
def enroll(self, experiment_name, alternatives, force_alternative=None):
"""
Enroll this user in the experiment if they are not already part of it. Returns the selected alternative
        force_alternative: Optionally force a user into an alternative at enrollment time
"""
chosen_alternative = conf.CONTROL_GROUP
experiment = experiment_manager.get_experiment(experiment_name)
if experiment:
if experiment.is_displaying_alternatives():
if isinstance(alternatives, collections.Mapping):
if conf.CONTROL_GROUP not in alternatives:
experiment.ensure_alternative_exists(conf.CONTROL_GROUP, 1)
for alternative, weight in alternatives.items():
experiment.ensure_alternative_exists(alternative, weight)
else:
alternatives_including_control = alternatives + [conf.CONTROL_GROUP]
for alternative in alternatives_including_control:
experiment.ensure_alternative_exists(alternative)
assigned_alternative = self._get_enrollment(experiment)
if assigned_alternative:
chosen_alternative = assigned_alternative
elif experiment.is_accepting_new_users():
if force_alternative:
chosen_alternative = force_alternative
else:
chosen_alternative = experiment.random_alternative()
self._set_enrollment(experiment, chosen_alternative)
else:
chosen_alternative = experiment.default_alternative
return chosen_alternative
def get_alternative(self, experiment_name):
"""
Get the alternative this user is enrolled in.
"""
experiment = None
try:
# catching the KeyError instead of using .get so that the experiment is auto created if desired
experiment = experiment_manager[experiment_name]
except KeyError:
pass
if experiment:
if experiment.is_displaying_alternatives():
alternative = self._get_enrollment(experiment)
if alternative is not None:
return alternative
else:
return experiment.default_alternative
return conf.CONTROL_GROUP
def set_alternative(self, experiment_name, alternative):
"""Explicitly set the alternative the user is enrolled in for the specified experiment.
        This allows you to change a user between alternatives. The user and goal counts for the new
        alternative will be incremented, but those for the old one will not be decremented. The user will
        be enrolled in the experiment even if the experiment would not normally accept this user."""
experiment = experiment_manager.get_experiment(experiment_name)
if experiment:
self._set_enrollment(experiment, alternative)
def goal(self, goal_name, count=1):
"""Record that this user has performed a particular goal
This will update the goal stats for all experiments the user is enrolled in."""
for enrollment in self._get_all_enrollments():
if enrollment.experiment.is_displaying_alternatives():
self._experiment_goal(enrollment.experiment, enrollment.alternative, goal_name, count)
def confirm_human(self):
"""Mark that this is a real human being (not a bot) and thus results should be counted"""
pass
def incorporate(self, other_user):
"""Incorporate all enrollments and goals performed by the other user
If this user is not enrolled in a given experiment, the results for the
other user are incorporated. For experiments this user is already
enrolled in the results of the other user are discarded.
This takes a relatively large amount of time for each experiment the other
user is enrolled in."""
for enrollment in other_user._get_all_enrollments():
if not self._get_enrollment(enrollment.experiment):
self._set_enrollment(enrollment.experiment, enrollment.alternative, enrollment.enrollment_date, enrollment.last_seen)
goals = self.experiment_counter.participant_goal_frequencies(enrollment.experiment, enrollment.alternative, other_user._participant_identifier())
for goal_name, count in goals:
self.experiment_counter.increment_goal_count(enrollment.experiment, enrollment.alternative, goal_name, self._participant_identifier(), count)
other_user._cancel_enrollment(enrollment.experiment)
def visit(self):
"""Record that the user has visited the site for the purposes of retention tracking"""
for enrollment in self._get_all_enrollments():
if enrollment.experiment.is_displaying_alternatives():
# We have two different goals, VISIT_NOT_PRESENT_COUNT_GOAL and VISIT_PRESENT_COUNT_GOAL.
# VISIT_PRESENT_COUNT_GOAL will avoid firing on the first time we set last_seen as it is assumed that the user is
# on the page and therefore it would automatically trigger and be valueless.
# This should be used for experiments when we enroll the user as part of the pageview,
                # Alternatively we can use the NOT_PRESENT goal, which will increment on the first pageview;
                # this is mainly useful for notification actions when the user isn't initially present.
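                # Example (illustrative): a user seen five minutes ago (within
                # SESSION_LENGTH) records nothing here; a user returning after
                # SESSION_LENGTH hours records both goals and has last_seen updated.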
if not enrollment.last_seen:
self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_NOT_PRESENT_COUNT_GOAL, 1)
self._set_last_seen(enrollment.experiment, now())
elif now() - enrollment.last_seen >= timedelta(hours=conf.SESSION_LENGTH):
self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_NOT_PRESENT_COUNT_GOAL, 1)
self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_PRESENT_COUNT_GOAL, 1)
self._set_last_seen(enrollment.experiment, now())
def _get_enrollment(self, experiment):
"""Get the name of the alternative this user is enrolled in for the specified experiment
`experiment` is an instance of Experiment. If the user is not currently enrolled returns None."""
raise NotImplementedError
def _set_enrollment(self, experiment, alternative, enrollment_date=None, last_seen=None):
"""Explicitly set the alternative the user is enrolled in for the specified experiment.
        This allows you to change a user between alternatives. The user and goal counts for the new
        alternative will be incremented, but those for the old one will not be decremented."""
raise NotImplementedError
def is_enrolled(self, experiment_name, alternative):
"""Enroll this user in the experiment if they are not already part of it. Returns the selected alternative"""
"""Test if the user is enrolled in the supplied alternative for the given experiment.
The supplied alternative will be added to the list of possible alternatives for the
experiment if it is not already there. If the user is not yet enrolled in the supplied
experiment they will be enrolled, and an alternative chosen at random."""
chosen_alternative = self.enroll(experiment_name, [alternative])
return alternative == chosen_alternative
def _participant_identifier(self):
"Unique identifier for this user in the counter store"
raise NotImplementedError
def _get_all_enrollments(self):
"Return experiment, alternative tuples for all experiments the user is enrolled in"
raise NotImplementedError
def _cancel_enrollment(self, experiment):
"Remove the enrollment and any goals the user has against this experiment"
raise NotImplementedError
def _experiment_goal(self, experiment, alternative, goal_name, count):
"Record a goal against a particular experiment and alternative"
raise NotImplementedError
def _set_last_seen(self, experiment, last_seen):
"Set the last time the user was seen associated with this experiment"
raise NotImplementedError
class DummyUser(WebUser):
def _get_enrollment(self, experiment):
return None
def _set_enrollment(self, experiment, alternative, enrollment_date=None, last_seen=None):
pass
def is_enrolled(self, experiment_name, alternative):
return alternative == conf.CONTROL_GROUP
def incorporate(self, other_user):
for enrollment in other_user._get_all_enrollments():
other_user._cancel_enrollment(enrollment.experiment)
def _participant_identifier(self):
return ""
def _get_all_enrollments(self):
return []
def _is_enrolled_in_experiment(self, experiment):
return False
def _cancel_enrollment(self, experiment):
pass
def _get_goal_counts(self, experiment, alternative):
return {}
def _experiment_goal(self, experiment, alternative, goal_name, count):
pass
def _set_last_seen(self, experiment, last_seen):
pass
class AuthenticatedUser(WebUser):
def __init__(self, user, request=None):
self._enrollment_cache = {}
self.user = user
self.request = request
super(AuthenticatedUser, self).__init__()
def _get_enrollment(self, experiment):
if experiment.name not in self._enrollment_cache:
try:
self._enrollment_cache[experiment.name] = Enrollment.objects.get(user=self.user, experiment=experiment).alternative
except Enrollment.DoesNotExist:
self._enrollment_cache[experiment.name] = None
return self._enrollment_cache[experiment.name]
def _set_enrollment(self, experiment, alternative, enrollment_date=None, last_seen=None):
if experiment.name in self._enrollment_cache:
del self._enrollment_cache[experiment.name]
try:
enrollment, _ = Enrollment.objects.get_or_create(user=self.user, experiment=experiment, defaults={'alternative': alternative})
except IntegrityError:
# Already registered (db race condition under high load)
return
# Update alternative if it doesn't match
enrollment_changed = False
if enrollment.alternative != alternative:
enrollment.alternative = alternative
enrollment_changed = True
if enrollment_date:
enrollment.enrollment_date = enrollment_date
enrollment_changed = True
if last_seen:
enrollment.last_seen = last_seen
enrollment_changed = True
if enrollment_changed:
enrollment.save()
self.experiment_counter.increment_participant_count(experiment, alternative, self._participant_identifier())
user_enrolled.send(self, experiment=experiment.name, alternative=alternative, user=self.user, session=None)
def _participant_identifier(self):
return 'user:%d' % (self.user.pk, )
def _get_all_enrollments(self):
enrollments = Enrollment.objects.filter(user=self.user).select_related("experiment")
if enrollments:
for enrollment in enrollments:
yield EnrollmentData(enrollment.experiment, enrollment.alternative, enrollment.enrollment_date, enrollment.last_seen)
def _cancel_enrollment(self, experiment):
try:
enrollment = Enrollment.objects.get(user=self.user, experiment=experiment)
except Enrollment.DoesNotExist:
pass
else:
self.experiment_counter.remove_participant(experiment, enrollment.alternative, self._participant_identifier())
enrollment.delete()
def _experiment_goal(self, experiment, alternative, goal_name, count):
self.experiment_counter.increment_goal_count(experiment, alternative, goal_name, self._participant_identifier(), count)
def _set_last_seen(self, experiment, last_seen):
Enrollment.objects.filter(user=self.user, experiment=experiment).update(last_seen=last_seen)
def _session_enrollment_latest_version(data):
try:
alternative, unused, enrollment_date, last_seen = data
if isinstance(enrollment_date, numbers.Number):
enrollment_date = datetime_from_timestamp(enrollment_date)
if isinstance(last_seen, numbers.Number):
last_seen = datetime_from_timestamp(last_seen)
if last_seen:
last_seen = fix_awareness(last_seen)
except ValueError: # Data from previous version
alternative, unused = data
enrollment_date = None
last_seen = None
return alternative, unused, enrollment_date, last_seen
class SessionUser(WebUser):
def __init__(self, session, request=None):
self.session = session
self.request = request
super(SessionUser, self).__init__()
def _get_enrollment(self, experiment):
enrollments = self.session.get('experiments_enrollments', None)
if enrollments and experiment.name in enrollments:
alternative, _, _, _ = _session_enrollment_latest_version(enrollments[experiment.name])
return alternative
return None
def _set_enrollment(self, experiment, alternative, enrollment_date=None, last_seen=None):
enrollments = self.session.get('experiments_enrollments', {})
enrollments[experiment.name] = (alternative, None, timestamp_from_datetime(enrollment_date or now()), timestamp_from_datetime(last_seen))
self.session['experiments_enrollments'] = enrollments
if self._is_verified_human():
self.experiment_counter.increment_participant_count(experiment, alternative, self._participant_identifier())
else:
logger.info(json.dumps({'type':'participant_unconfirmed', 'experiment': experiment.name, 'alternative': alternative, 'participant': self._participant_identifier()}))
user_enrolled.send(self, experiment=experiment.name, alternative=alternative, user=None, session=self.session)
def confirm_human(self):
self.session[conf.CONFIRM_HUMAN_SESSION_KEY] = True
logger.info(json.dumps({'type': 'confirm_human', 'participant': self._participant_identifier()}))
# Replay enrollments
for enrollment in self._get_all_enrollments():
self.experiment_counter.increment_participant_count(enrollment.experiment, enrollment.alternative, self._participant_identifier())
# Replay goals
if 'experiments_goals' in self.session:
try:
for experiment_name, alternative, goal_name, count in self.session['experiments_goals']:
experiment = experiment_manager.get_experiment(experiment_name)
if experiment:
self.experiment_counter.increment_goal_count(experiment, alternative, goal_name, self._participant_identifier(), count)
except ValueError:
pass # Values from older version
finally:
del self.session['experiments_goals']
def _participant_identifier(self):
if 'experiments_session_key' not in self.session:
if not self.session.session_key:
self.session.save() # Force session key
self.session['experiments_session_key'] = self.session.session_key
return 'session:%s' % (self.session['experiments_session_key'], )
def _is_verified_human(self):
if conf.VERIFY_HUMAN:
return self.session.get(conf.CONFIRM_HUMAN_SESSION_KEY, False)
else:
return True
def _get_all_enrollments(self):
enrollments = self.session.get('experiments_enrollments', None)
if enrollments:
for experiment_name, data in list(enrollments.items()):
alternative, _, enrollment_date, last_seen = _session_enrollment_latest_version(data)
experiment = experiment_manager.get_experiment(experiment_name)
if experiment:
yield EnrollmentData(experiment, alternative, enrollment_date, last_seen)
def _cancel_enrollment(self, experiment):
alternative = self._get_enrollment(experiment)
if alternative:
self.experiment_counter.remove_participant(experiment, alternative, self._participant_identifier())
enrollments = self.session.get('experiments_enrollments', None)
del enrollments[experiment.name]
self.session['experiments_enrollments'] = enrollments
def _experiment_goal(self, experiment, alternative, goal_name, count):
if self._is_verified_human():
self.experiment_counter.increment_goal_count(experiment, alternative, goal_name, self._participant_identifier(), count)
else:
goals = self.session.get('experiments_goals', [])
goals.append((experiment.name, alternative, goal_name, count))
self.session['experiments_goals'] = goals
logger.info(json.dumps({'type': 'goal_hit_unconfirmed', 'goal': goal_name, 'goal_count': count, 'experiment': experiment.name, 'alternative': alternative, 'participant': self._participant_identifier()}))
def _set_last_seen(self, experiment, last_seen):
enrollments = self.session.get('experiments_enrollments', {})
alternative, unused, enrollment_date, _ = _session_enrollment_latest_version(enrollments[experiment.name])
enrollments[experiment.name] = (alternative, unused, timestamp_from_datetime(enrollment_date), timestamp_from_datetime(last_seen))
self.session['experiments_enrollments'] = enrollments
__all__ = ['participant']
|
bjarnoldus/django-experiments
|
experiments/utils.py
|
Python
|
mit
| 20,243
|
[
"VisIt"
] |
5f07cddd33ca3d8ea99c62b7bc8e41be9a7a1601cb2d7286543a1ee6a558523a
|
#! /usr/bin/env python3
# Copyright (c) 2014, HashFast Technologies LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of HashFast Technologies LLC nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL HASHFAST TECHNOLOGIES LLC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
def parse_args():
parser = argparse.ArgumentParser(description='Read and write various settings.')
parser.add_argument('-r', '--revision', dest='revision', type=int, default=3, help='HashFast board major revision number')
parser.add_argument('-d', '--read-die-settings', dest='read_die_settings', action='store_true', help='read die settings')
parser.add_argument('-w', '--write-die-settings', dest='write_die_settings', type=str, nargs=4, metavar=('DIE:mVLT@FRQ'), help='write die settings')
return parser.parse_args()
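# Example invocation (illustrative values only):
#   hftool.py -r 3 -w 0:950@550 1:950@550 2:950@550 3:950@550
# writes 950 mV at frequency setting 550 to dies 0-3, while
#   hftool.py -d
# reads the current die settings back.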
if __name__ == '__main__':
# parse args before other imports
args = parse_args()
import sys
from hf.usb import usbbulk
from hf.load import talkusb
from hf.load.routines import settings
def main(args):
# query device
dev = usbbulk.poll_hf_bulk_device()
print (dev.info())
print (dev.init())
# Fix: talkusb patch
talkusb.epr = dev.epr
talkusb.epw = dev.epw
#talkusb.talkusb(hf.INIT, None, 0)
def printmsg(msg):
print(msg)
setter = settings.SettingsRoutine(talkusb.talkusb, 1, printmsg)
if args.read_die_settings:
setter.global_state = 'read'
while setter.one_cycle():
pass
if args.write_die_settings is not None:
        if args.revision == 3:
setter.set_reference(25)
else:
setter.set_reference(125)
die_settings = args.write_die_settings
for die_setting in die_settings:
die, setting = die_setting.split(':')
vlt, frq = setting.split('@')
setter.setup(int(die), int(frq), int(vlt))
while setter.one_cycle():
pass
if __name__ == "__main__":
main(args)
|
HashFast/hashfast-tools
|
hftool.py
|
Python
|
bsd-3-clause
| 3,207
|
[
"EPW"
] |
b636c93029689978dc06451922848ae82b631ca4170f5fdaa2ed440fee0a3139
|
"""
################################################################################
#
# SOAPpy - Cayce Ullman ([email protected])
# Brian Matthews ([email protected])
# Gregory Warnes ([email protected])
# Christopher Blunck ([email protected])
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
from __future__ import nested_scopes
ident = '$Id: Server.py,v 1.21 2005/02/15 16:32:22 warnes Exp $'
from version import __version__
#import xml.sax
import re
import socket
import sys
import SocketServer
from types import *
import BaseHTTPServer
import thread
# SOAPpy modules
from Parser import parseSOAPRPC
from Config import Config
from Types import faultType, voidType, simplify
from NS import NS
from SOAPBuilder import buildSOAP
from Utilities import debugHeader, debugFooter
ident = '$Id: Server.py,v 1.21 2005/02/15 16:32:22 warnes Exp $'
from version import __version__
################################################################################
# Call context dictionary
################################################################################
_contexts = dict()
def GetSOAPContext():
global _contexts
return _contexts[thread.get_ident()]
################################################################################
# Server
################################################################################
# Method Signature class for adding extra info to registered funcs, right now
# used just to indicate it should be called with keywords, instead of ordered
# params.
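# Illustrative use: registering MethodSig(handler, keywords=1, context=1)
# makes the dispatcher below call handler(**named_args) and pass the current
# SOAPContext in the '_SOAPContext' keyword argument.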
class MethodSig:
def __init__(self, func, keywords=0, context=0):
self.func = func
self.keywords = keywords
self.context = context
self.__name__ = func.__name__
def __call__(self, *args, **kw):
return apply(self.func,args,kw)
class SOAPContext:
def __init__(self, header, body, attrs, xmldata, connection, httpheaders,
soapaction):
self.header = header
self.body = body
self.attrs = attrs
self.xmldata = xmldata
self.connection = connection
self.httpheaders= httpheaders
self.soapaction = soapaction
# A class to describe how header messages are handled
class HeaderHandler:
# Initially fail out if there are any problems.
def __init__(self, header, attrs):
for i in header.__dict__.keys():
if i[0] == "_":
continue
d = getattr(header, i)
try:
fault = int(attrs[id(d)][(NS.ENV, 'mustUnderstand')])
except:
fault = 0
if fault:
raise faultType, ("%s:MustUnderstand" % NS.ENV_T,
"Required Header Misunderstood",
"%s" % i)
################################################################################
# SOAP Server
################################################################################
class SOAPServerBase:
def get_request(self):
sock, addr = SocketServer.TCPServer.get_request(self)
if self.ssl_context:
sock = SSL.Connection(self.ssl_context, sock)
sock._setup_ssl(addr)
if sock.accept_ssl() != 1:
raise socket.error, "Couldn't accept SSL connection"
return sock, addr
def registerObject(self, object, namespace = '', path = ''):
if namespace == '' and path == '': namespace = self.namespace
if namespace == '' and path != '':
namespace = path.replace("/", ":")
if namespace[0] == ":": namespace = namespace[1:]
self.objmap[namespace] = object
def registerFunction(self, function, namespace = '', funcName = None,
path = ''):
if not funcName : funcName = function.__name__
if namespace == '' and path == '': namespace = self.namespace
if namespace == '' and path != '':
namespace = path.replace("/", ":")
if namespace[0] == ":": namespace = namespace[1:]
if self.funcmap.has_key(namespace):
self.funcmap[namespace][funcName] = function
else:
self.funcmap[namespace] = {funcName : function}
def registerKWObject(self, object, namespace = '', path = ''):
if namespace == '' and path == '': namespace = self.namespace
if namespace == '' and path != '':
namespace = path.replace("/", ":")
if namespace[0] == ":": namespace = namespace[1:]
for i in dir(object.__class__):
if i[0] != "_" and callable(getattr(object, i)):
self.registerKWFunction(getattr(object,i), namespace)
# convenience - wraps your func for you.
def registerKWFunction(self, function, namespace = '', funcName = None,
path = ''):
if namespace == '' and path == '': namespace = self.namespace
if namespace == '' and path != '':
namespace = path.replace("/", ":")
if namespace[0] == ":": namespace = namespace[1:]
self.registerFunction(MethodSig(function,keywords=1), namespace,
funcName)
def unregisterObject(self, object, namespace = '', path = ''):
if namespace == '' and path == '': namespace = self.namespace
if namespace == '' and path != '':
namespace = path.replace("/", ":")
if namespace[0] == ":": namespace = namespace[1:]
del self.objmap[namespace]
class SOAPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def version_string(self):
return '<a href="http://pywebsvcs.sf.net">' + \
'SOAPpy ' + __version__ + '</a> (Python ' + \
sys.version.split()[0] + ')'
def date_time_string(self):
self.__last_date_time_string = \
BaseHTTPServer.BaseHTTPRequestHandler.\
date_time_string(self)
return self.__last_date_time_string
def do_POST(self):
global _contexts
status = 500
try:
if self.server.config.dumpHeadersIn:
s = 'Incoming HTTP headers'
debugHeader(s)
print self.raw_requestline.strip()
print "\n".join(map (lambda x: x.strip(),
self.headers.headers))
debugFooter(s)
data = self.rfile.read(int(self.headers["Content-length"]))
if self.server.config.dumpSOAPIn:
s = 'Incoming SOAP'
debugHeader(s)
print data,
if data[-1] != '\n':
print
debugFooter(s)
(r, header, body, attrs) = \
parseSOAPRPC(data, header = 1, body = 1, attrs = 1)
method = r._name
args = r._aslist()
kw = r._asdict()
if Config.simplify_objects:
args = simplify(args)
kw = simplify(kw)
# Handle mixed named and unnamed arguments by assuming
# that all arguments with names of the form "v[0-9]+"
            # are unnamed and should be passed in numeric order, while
            # other arguments are named and should be passed using
            # their names.
            # This is a non-standard extension to the SOAP protocol,
# but is supported by Apache AXIS.
# It is enabled by default. To disable, set
# Config.specialArgs to False.
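            # For example (illustrative): a call carrying the arguments
            #   {'v1': 'a', 'v2': 'b', 'limit': 10}
            # is split into ordered_args {1: 'a', 2: 'b'} and named_args
            # {'limit': 10}, so the method is invoked as f('a', 'b', limit=10).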
if Config.specialArgs:
ordered_args = {}
named_args = {}
for (k,v) in kw.items():
if k[0]=="v":
try:
i = int(k[1:])
ordered_args[i] = v
except ValueError:
named_args[str(k)] = v
else:
named_args[str(k)] = v
# We have to decide namespace precedence
# I'm happy with the following scenario
# if r._ns is specified use it, if not check for
# a path, if it's specified convert it and use it as the
# namespace. If both are specified, use r._ns.
ns = r._ns
if len(self.path) > 1 and not ns:
ns = self.path.replace("/", ":")
if ns[0] == ":": ns = ns[1:]
# authorization method
a = None
keylist = ordered_args.keys()
keylist.sort()
# create list in proper order w/o names
tmp = map( lambda x: ordered_args[x], keylist)
ordered_args = tmp
#print '<-> Argument Matching Yielded:'
#print '<-> Ordered Arguments:' + str(ordered_args)
#print '<-> Named Arguments :' + str(named_args)
resp = ""
# For fault messages
if ns:
nsmethod = "%s:%s" % (ns, method)
else:
nsmethod = method
try:
# First look for registered functions
if self.server.funcmap.has_key(ns) and \
self.server.funcmap[ns].has_key(method):
f = self.server.funcmap[ns][method]
# look for the authorization method
if self.server.config.authMethod != None:
authmethod = self.server.config.authMethod
if self.server.funcmap.has_key(ns) and \
self.server.funcmap[ns].has_key(authmethod):
a = self.server.funcmap[ns][authmethod]
else:
# Now look at registered objects
# Check for nested attributes. This works even if
# there are none, because the split will return
# [method]
f = self.server.objmap[ns]
# Look for the authorization method
if self.server.config.authMethod != None:
authmethod = self.server.config.authMethod
if hasattr(f, authmethod):
a = getattr(f, authmethod)
# then continue looking for the method
l = method.split(".")
for i in l:
f = getattr(f, i)
except:
info = sys.exc_info()
try:
resp = buildSOAP(faultType("%s:Client" % NS.ENV_T,
"Method Not Found",
"%s : %s %s %s" % (nsmethod,
info[0],
info[1],
info[2])),
encoding = self.server.encoding,
config = self.server.config)
finally:
del info
status = 500
else:
try:
if header:
x = HeaderHandler(header, attrs)
fr = 1
                    # call context bookkeeping
                    # We're stuffing the method into the soapaction if there
                    # isn't one. Someday we'll set that on the client and it
                    # won't be necessary here; for now we're doing both.
if "SOAPAction".lower() not in self.headers.keys() or \
self.headers["SOAPAction"] == "\"\"":
self.headers["SOAPAction"] = method
thread_id = thread.get_ident()
_contexts[thread_id] = SOAPContext(header, body,
attrs, data,
self.connection,
self.headers,
self.headers["SOAPAction"])
# Do an authorization check
if a != None:
if not apply(a, (), {"_SOAPContext" :
_contexts[thread_id] }):
raise faultType("%s:Server" % NS.ENV_T,
"Authorization failed.",
"%s" % nsmethod)
# If it's wrapped, some special action may be needed
if isinstance(f, MethodSig):
c = None
if f.context: # retrieve context object
c = _contexts[thread_id]
if Config.specialArgs:
if c:
named_args["_SOAPContext"] = c
fr = apply(f, ordered_args, named_args)
elif f.keywords:
# This is lame, but have to de-unicode
# keywords
strkw = {}
for (k, v) in kw.items():
strkw[str(k)] = v
if c:
strkw["_SOAPContext"] = c
fr = apply(f, (), strkw)
elif c:
fr = apply(f, args, {'_SOAPContext':c})
else:
fr = apply(f, args, {})
else:
if Config.specialArgs:
fr = apply(f, ordered_args, named_args)
else:
fr = apply(f, args, {})
if type(fr) == type(self) and \
isinstance(fr, voidType):
resp = buildSOAP(kw = {'%sResponse' % method: fr},
encoding = self.server.encoding,
config = self.server.config)
else:
resp = buildSOAP(kw =
{'%sResponse' % method: {'Result': fr}},
encoding = self.server.encoding,
config = self.server.config)
# Clean up _contexts
if _contexts.has_key(thread_id):
del _contexts[thread_id]
except Exception, e:
import traceback
info = sys.exc_info()
try:
if self.server.config.dumpFaultInfo:
s = 'Method %s exception' % nsmethod
debugHeader(s)
traceback.print_exception(info[0], info[1],
info[2])
debugFooter(s)
if isinstance(e, faultType):
f = e
else:
f = faultType("%s:Server" % NS.ENV_T,
"Method Failed",
"%s" % nsmethod)
if self.server.config.returnFaultInfo:
f._setDetail("".join(traceback.format_exception(
info[0], info[1], info[2])))
elif not hasattr(f, 'detail'):
f._setDetail("%s %s" % (info[0], info[1]))
finally:
del info
resp = buildSOAP(f, encoding = self.server.encoding,
config = self.server.config)
status = 500
else:
status = 200
except faultType, e:
import traceback
info = sys.exc_info()
try:
if self.server.config.dumpFaultInfo:
s = 'Received fault exception'
debugHeader(s)
traceback.print_exception(info[0], info[1],
info[2])
debugFooter(s)
if self.server.config.returnFaultInfo:
e._setDetail("".join(traceback.format_exception(
info[0], info[1], info[2])))
elif not hasattr(e, 'detail'):
e._setDetail("%s %s" % (info[0], info[1]))
finally:
del info
resp = buildSOAP(e, encoding = self.server.encoding,
config = self.server.config)
status = 500
except Exception, e:
# internal error, report as HTTP server error
if self.server.config.dumpFaultInfo:
s = 'Internal exception %s' % e
import traceback
debugHeader(s)
info = sys.exc_info()
try:
traceback.print_exception(info[0], info[1], info[2])
finally:
del info
debugFooter(s)
self.send_response(500)
self.end_headers()
if self.server.config.dumpHeadersOut and \
self.request_version != 'HTTP/0.9':
s = 'Outgoing HTTP headers'
debugHeader(s)
if self.responses.has_key(status):
s = ' ' + self.responses[status][0]
else:
s = ''
print "%s %d%s" % (self.protocol_version, 500, s)
print "Server:", self.version_string()
print "Date:", self.__last_date_time_string
debugFooter(s)
else:
# got a valid SOAP response
self.send_response(status)
t = 'text/xml';
if self.server.encoding != None:
t += '; charset="%s"' % self.server.encoding
self.send_header("Content-type", t)
self.send_header("Content-length", str(len(resp)))
self.end_headers()
if self.server.config.dumpHeadersOut and \
self.request_version != 'HTTP/0.9':
s = 'Outgoing HTTP headers'
debugHeader(s)
if self.responses.has_key(status):
s = ' ' + self.responses[status][0]
else:
s = ''
print "%s %d%s" % (self.protocol_version, status, s)
print "Server:", self.version_string()
print "Date:", self.__last_date_time_string
print "Content-type:", t
print "Content-length:", len(resp)
debugFooter(s)
if self.server.config.dumpSOAPOut:
s = 'Outgoing SOAP'
debugHeader(s)
print resp,
if resp[-1] != '\n':
print
debugFooter(s)
self.wfile.write(resp)
self.wfile.flush()
# We should be able to shut down both a regular and an SSL
# connection, but under Python 2.1, calling shutdown on an
# SSL connections drops the output, so this work-around.
# This should be investigated more someday.
if self.server.config.SSLserver and \
isinstance(self.connection, SSL.Connection):
self.connection.set_shutdown(SSL.SSL_SENT_SHUTDOWN |
SSL.SSL_RECEIVED_SHUTDOWN)
else:
self.connection.shutdown(1)
def do_GET(self):
#print 'command ', self.command
#print 'path ', self.path
#print 'request_version', self.request_version
#print 'headers'
#print ' type ', self.headers.type
#print ' maintype', self.headers.maintype
#print ' subtype ', self.headers.subtype
#print ' params ', self.headers.plist
path = self.path.lower()
if path.endswith('wsdl'):
method = 'wsdl'
function = namespace = None
if self.server.funcmap.has_key(namespace) \
and self.server.funcmap[namespace].has_key(method):
function = self.server.funcmap[namespace][method]
else:
if namespace in self.server.objmap.keys():
function = self.server.objmap[namespace]
l = method.split(".")
for i in l:
function = getattr(function, i)
if function:
self.send_response(200)
self.send_header("Content-type", 'text/plain')
self.end_headers()
response = apply(function, ())
self.wfile.write(str(response))
return
# return error
self.send_response(200)
self.send_header("Content-type", 'text/html')
self.end_headers()
self.wfile.write('''\
<head>
<title>Error!</title>
</head>
<body>
<h1>Oops!</h1>
<p>
This server supports HTTP GET requests only for the purpose of
obtaining Web Services Description Language (WSDL) for a specific
service.
Either you requested a URL that does not end in "wsdl" or this
server does not implement a wsdl method.
</p>
</body>''')
def log_message(self, format, *args):
if self.server.log:
BaseHTTPServer.BaseHTTPRequestHandler.\
log_message (self, format, *args)
class SOAPServer(SOAPServerBase, SocketServer.TCPServer):
def __init__(self, addr = ('localhost', 8000),
RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
config = Config, namespace = None, ssl_context = None):
# Test the encoding, raising an exception if it's not known
if encoding != None:
''.encode(encoding)
if ssl_context != None and not config.SSLserver:
raise AttributeError, \
"SSL server not supported by this Python installation"
self.namespace = namespace
self.objmap = {}
self.funcmap = {}
self.ssl_context = ssl_context
self.encoding = encoding
self.config = config
self.log = log
self.allow_reuse_address= 1
SocketServer.TCPServer.__init__(self, addr, RequestHandler)
class ThreadingSOAPServer(SOAPServerBase, SocketServer.ThreadingTCPServer):
def __init__(self, addr = ('localhost', 8000),
RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
config = Config, namespace = None, ssl_context = None):
# Test the encoding, raising an exception if it's not known
if encoding != None:
''.encode(encoding)
if ssl_context != None and not config.SSLserver:
raise AttributeError, \
"SSL server not supported by this Python installation"
self.namespace = namespace
self.objmap = {}
self.funcmap = {}
self.ssl_context = ssl_context
self.encoding = encoding
self.config = config
self.log = log
self.allow_reuse_address= 1
SocketServer.ThreadingTCPServer.__init__(self, addr, RequestHandler)
# only define class if Unix domain sockets are available
if hasattr(socket, "AF_UNIX"):
class SOAPUnixSocketServer(SOAPServerBase, SocketServer.UnixStreamServer):
def __init__(self, addr = 8000,
RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
config = Config, namespace = None, ssl_context = None):
# Test the encoding, raising an exception if it's not known
if encoding != None:
''.encode(encoding)
if ssl_context != None and not config.SSLserver:
raise AttributeError, \
"SSL server not supported by this Python installation"
self.namespace = namespace
self.objmap = {}
self.funcmap = {}
self.ssl_context = ssl_context
self.encoding = encoding
self.config = config
self.log = log
self.allow_reuse_address= 1
SocketServer.UnixStreamServer.__init__(self, str(addr), RequestHandler)
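# Hedged usage sketch, not part of the original SOAPpy source: it relies on the
# registerFunction() method that SOAPServerBase defines earlier in this module
# and simply exposes one function over SOAP on localhost:8080.
if __name__ == '__main__':
    def echo(s):
        # return the argument unchanged so a client can verify the round trip
        return s
    demo_server = SOAPServer(('localhost', 8080))
    demo_server.registerFunction(echo)
    demo_server.serve_forever()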
|
krux/adspygoogle
|
adspygoogle/SOAPpy/Server.py
|
Python
|
apache-2.0
| 27,092
|
[
"Brian"
] |
dec22cfa49f0c3aa969ea623a179653b452cf9bd3bb408fcc33c60f8efa4efaf
|
#!/usr/bin/env python
"""
The APIRUS API as an Abstract Base Class
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
class APIRUS:
"""
ABC for the API for Regular, Unstructured and Staggered model output
(APIRUS)
This ABC serves as a way to document the API, and is designed to be
subclassed by py_ugrid, py_sgrid and any other future implementations.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self):
"""
this one is probably going to be very different for each subclass
"""
pass
@classmethod
def load_grid(cls, grid):
"""
load a data set from an arbitrary source
:param grid: the grid you want to load. This could be:
* A string with a full path to a file, which could be any
supported file type -- probably netcdf
* a netcdf4 Dataset object
                     * an OpenDAP URL
                     * possibly other supported APIs.
:returns: returns an appropriate Grid object.
This method will attempt to identify the type of the input grid, and
call the appropriate loading methods.
"""
pass
    @classmethod
    @abstractmethod
def from_netcdf(cls, filename):
"""
load a data set from a netcdf file
"""
pass
@abstractmethod
def subset_rect(self, bounding_box):
"""
subset the grid to the cells contained by the bounding box specified
in lon-lat coordinates
:param bounding_box: The bounding box, specified in lon-lat coords:
( (min_lon, min_lat), (max_lon, max_lat))
:type bounding_box: a 2x2 numpy array of float64, or any sequence that
can be turned into one.
:returns: a new APIRUS object, of the same type as self.
"""
pass
@abstractmethod
def subset_poly(self, polygon):
"""
Subset the grid to the cells contained by an arbitrary polygon,
specified in lon-lat coordinates
:param polygon: The polygon, specified in lon-lat coords:
( (lon1, lat1), (lon2, lat2), ..... )
:type bounding_box: an Nx2 numpy array of float64, or any sequence that
can be turned into one.
:returns: a new APIRUS object, of the same type as self.
        bounds are specified in lon-lat coords
"""
pass
@abstractmethod
def to_ugrid(self):
"""
return the same dataset, reformatted as a ugrid object. No
interpolation or transformation is done.
If self is already a ugrid object, it is returned unaltered
        If self cannot be losslessly converted, a TypeError is raised.
"""
pass
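# Hedged sketch, not part of the original module: a minimal concrete subclass
# showing how an implementation such as py_ugrid or py_sgrid is expected to
# satisfy the ABC. The class name and the trivial method bodies are
# illustrative assumptions only.
class ExampleGrid(APIRUS):
    def __init__(self, data=None):
        self.data = data
    @classmethod
    def from_netcdf(cls, filename):
        # a real implementation would open and parse the netCDF file here
        return cls(data=filename)
    def subset_rect(self, bounding_box):
        # a real implementation would return a new, smaller grid object
        return self
    def subset_poly(self, polygon):
        return self
    def to_ugrid(self):
        return self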
|
ioos/APIRUS
|
apirus_abc.py
|
Python
|
cc0-1.0
| 3,009
|
[
"NetCDF"
] |
1f95c629338da0e2ec54a7a12fed900ba2a72153e369a733847b6e65bcddd14f
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This package implements various grain boundary analyses
"""
|
gVallverdu/pymatgen
|
pymatgen/analysis/gb/__init__.py
|
Python
|
mit
| 174
|
[
"pymatgen"
] |
31d92203566360a61f2612416f2f176d13a1796db50a9215ba649d4bc65f0b4a
|
"""
Test the Hartree-Fock Objective function
1. Hamiltonian expectation
2. The gradient values (local)
3. The Hessian values (local)
The local gradient is the gradient assuming $kappa = 0$. This is the case
in most electronic structure codes because the Hamiltonian (or orbitals)
are always rotated to the $kappa=0$ point after updating the parameters in
$kappa$.
"""
from itertools import product
import cirq
import numpy as np
import scipy as sp
import openfermion as of
from openfermioncirq.experiments.hfvqe.circuits import \
prepare_slater_determinant
from openfermioncirq.experiments.hfvqe.objective import get_matrix_of_eigs
from openfermioncirq.experiments.hfvqe.circuits import rhf_params_to_matrix
from openfermioncirq.experiments.hfvqe.molecular_example import make_h6_1_3
def get_opdm(wf, num_orbitals, transform=of.jordan_wigner):
opdm_hw = np.zeros((num_orbitals, num_orbitals),
dtype=np.complex128)
creation_ops = [
of.get_sparse_operator(transform(of.FermionOperator(((p, 1)))),
n_qubits=num_orbitals)
for p in range(num_orbitals)
]
# not using display style objects
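    # Each entry below is the expectation value <psi| a_p^dagger a_q |psi>,
    # i.e. the (single-spin) one-particle density matrix in this orbital basis.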
for p, q in product(range(num_orbitals), repeat=2):
operator = creation_ops[p] @ creation_ops[q].conj().transpose()
opdm_hw[p, q] = wf.conj().T @ operator @ wf
return opdm_hw
def test_global_gradient_h4():
"""
Test the gradient at the solution given by psi4
"""
# first get molecule
rhf_objective, molecule, params, _, _ = make_h6_1_3()
# molecule = h4_linear_molecule(1.0)
nocc = molecule.n_electrons // 2
occ = list(range(nocc))
virt = list(range(nocc, molecule.n_orbitals))
qubits = cirq.LineQubit.range(molecule.n_orbitals)
u = sp.linalg.expm(rhf_params_to_matrix(params,
len(qubits),
occ=occ,
virt=virt))
circuit = cirq.Circuit(prepare_slater_determinant(qubits, u[:, :nocc].T))
simulator = cirq.Simulator(dtype=np.complex128)
wf = simulator.simulate(circuit).final_state.reshape((-1, 1))
opdm_alpha = get_opdm(wf, molecule.n_orbitals)
opdm = np.zeros((molecule.n_qubits, molecule.n_qubits), dtype=np.complex128)
opdm[::2, ::2] = opdm_alpha
opdm[1::2, 1::2] = opdm_alpha
grad = rhf_objective.global_gradient_opdm(params, opdm_alpha)
# get finite difference gradient
finite_diff_grad = np.zeros(9)
epsilon = 0.0001
for i in range(9):
params_epsilon = params.copy()
params_epsilon[i] += epsilon
u = sp.linalg.expm(rhf_params_to_matrix(params_epsilon, len(qubits),
occ=occ,
virt=virt))
circuit = cirq.Circuit(
prepare_slater_determinant(qubits, u[:, :nocc].T))
wf = simulator.simulate(circuit).final_state.reshape((-1, 1))
opdm_pepsilon = get_opdm(wf, molecule.n_orbitals)
energy_plus_epsilon = rhf_objective.energy_from_opdm(opdm_pepsilon)
params_epsilon[i] -= 2 * epsilon
u = sp.linalg.expm(rhf_params_to_matrix(params_epsilon,
len(qubits),
occ=occ,
virt=virt))
circuit = cirq.Circuit(
prepare_slater_determinant(qubits, u[:, :nocc].T))
wf = simulator.simulate(circuit).final_state.reshape((-1, 1))
opdm_pepsilon = get_opdm(wf, molecule.n_orbitals)
energy_minus_epsilon = rhf_objective.energy_from_opdm(opdm_pepsilon)
finite_diff_grad[i] = (energy_plus_epsilon -
energy_minus_epsilon) / (2 * epsilon)
assert np.allclose(finite_diff_grad, grad, atol=epsilon)
# random parameters now
params = np.random.randn(9)
u = sp.linalg.expm(rhf_params_to_matrix(params,
len(qubits),
occ=occ,
virt=virt))
circuit = cirq.Circuit(prepare_slater_determinant(qubits, u[:, :nocc].T))
simulator = cirq.Simulator(dtype=np.complex128)
wf = simulator.simulate(circuit).final_state.reshape((-1, 1))
opdm_alpha = get_opdm(wf, molecule.n_orbitals)
opdm = np.zeros((molecule.n_qubits, molecule.n_qubits), dtype=np.complex128)
opdm[::2, ::2] = opdm_alpha
opdm[1::2, 1::2] = opdm_alpha
grad = rhf_objective.global_gradient_opdm(params, opdm_alpha)
# get finite difference gradient
finite_diff_grad = np.zeros(9)
epsilon = 0.0001
for i in range(9):
params_epsilon = params.copy()
params_epsilon[i] += epsilon
u = sp.linalg.expm(rhf_params_to_matrix(params_epsilon, len(qubits),
occ=occ,
virt=virt))
circuit = cirq.Circuit(
prepare_slater_determinant(qubits, u[:, :nocc].T))
wf = simulator.simulate(circuit).final_state.reshape((-1, 1))
opdm_pepsilon = get_opdm(wf, molecule.n_orbitals)
energy_plus_epsilon = rhf_objective.energy_from_opdm(opdm_pepsilon)
params_epsilon[i] -= 2 * epsilon
u = sp.linalg.expm(rhf_params_to_matrix(params_epsilon,
len(qubits),
occ=occ,
virt=virt))
circuit = cirq.Circuit(
prepare_slater_determinant(qubits, u[:, :nocc].T))
wf = simulator.simulate(circuit).final_state.reshape((-1, 1))
opdm_pepsilon = get_opdm(wf, molecule.n_orbitals)
energy_minus_epsilon = rhf_objective.energy_from_opdm(opdm_pepsilon)
finite_diff_grad[i] = (energy_plus_epsilon -
energy_minus_epsilon) / (2 * epsilon)
assert np.allclose(finite_diff_grad, grad, atol=epsilon)
def test_get_matrix_of_eigs():
"""
    Generate the matrix of [exp(i(li - lj)) - 1] / (i(li - lj))
:return:
"""
lam_vals = np.random.randn(4) + 1j * np.random.randn(4)
mat_eigs = np.zeros((lam_vals.shape[0],
lam_vals.shape[0]),
dtype=np.complex128)
for i, j in product(range(lam_vals.shape[0]), repeat=2):
if np.isclose(abs(lam_vals[i] - lam_vals[j]), 0):
mat_eigs[i, j] = 1
else:
mat_eigs[i, j] = (np.exp(1j * (lam_vals[i] - lam_vals[j])) - 1) / (
1j * (lam_vals[i] - lam_vals[j]))
test_mat_eigs = get_matrix_of_eigs(lam_vals)
assert np.allclose(test_mat_eigs, mat_eigs)
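# Hedged add-on, not part of the original test module: a tiny standalone spot
# check of get_matrix_of_eigs that can run without pytest, using the same
# closed-form expression as test_get_matrix_of_eigs above.
if __name__ == '__main__':
    lam = np.array([0.0, np.pi / 2])
    mat = get_matrix_of_eigs(lam)
    expected_01 = (np.exp(1j * (lam[0] - lam[1])) - 1) / (1j * (lam[0] - lam[1]))
    assert np.isclose(mat[0, 1], expected_01)
    print('get_matrix_of_eigs spot check passed')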
|
quantumlib/OpenFermion-Cirq
|
openfermioncirq/experiments/hfvqe/objective_test.py
|
Python
|
apache-2.0
| 6,832
|
[
"Psi4"
] |
6bc43f2cb8df2e47980318bc5e2fafb85723833e493ea684795eb51f0671c34b
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Beatport release and track search support to the autotagger
"""
from __future__ import division, absolute_import, print_function
import json
import re
import six
from datetime import datetime, timedelta
from requests_oauthlib import OAuth1Session
from requests_oauthlib.oauth1_session import (TokenRequestDenied, TokenMissing,
VerifierMissing)
import beets
import beets.ui
from beets.autotag.hooks import AlbumInfo, TrackInfo, Distance
from beets.plugins import BeetsPlugin
from beets.util import confit
AUTH_ERRORS = (TokenRequestDenied, TokenMissing, VerifierMissing)
USER_AGENT = u'beets/{0} +http://beets.io/'.format(beets.__version__)
class BeatportAPIError(Exception):
pass
class BeatportObject(object):
def __init__(self, data):
self.beatport_id = data['id']
self.name = six.text_type(data['name'])
if 'releaseDate' in data:
self.release_date = datetime.strptime(data['releaseDate'],
'%Y-%m-%d')
if 'artists' in data:
self.artists = [(x['id'], six.text_type(x['name']))
for x in data['artists']]
if 'genres' in data:
self.genres = [six.text_type(x['name'])
for x in data['genres']]
class BeatportClient(object):
_api_base = 'https://oauth-api.beatport.com'
def __init__(self, c_key, c_secret, auth_key=None, auth_secret=None):
""" Initiate the client with OAuth information.
For the initial authentication with the backend `auth_key` and
`auth_secret` can be `None`. Use `get_authorize_url` and
`get_access_token` to obtain them for subsequent uses of the API.
:param c_key: OAuth1 client key
:param c_secret: OAuth1 client secret
:param auth_key: OAuth1 resource owner key
:param auth_secret: OAuth1 resource owner secret
"""
self.api = OAuth1Session(
client_key=c_key, client_secret=c_secret,
resource_owner_key=auth_key,
resource_owner_secret=auth_secret,
callback_uri='oob')
self.api.headers = {'User-Agent': USER_AGENT}
def get_authorize_url(self):
""" Generate the URL for the user to authorize the application.
Retrieves a request token from the Beatport API and returns the
corresponding authorization URL on their end that the user has
to visit.
This is the first step of the initial authorization process with the
API. Once the user has visited the URL, call
:py:method:`get_access_token` with the displayed data to complete
the process.
:returns: Authorization URL for the user to visit
:rtype: unicode
"""
self.api.fetch_request_token(
self._make_url('/identity/1/oauth/request-token'))
return self.api.authorization_url(
self._make_url('/identity/1/oauth/authorize'))
def get_access_token(self, auth_data):
""" Obtain the final access token and secret for the API.
:param auth_data: URL-encoded authorization data as displayed at
the authorization url (obtained via
:py:meth:`get_authorize_url`) after signing in
:type auth_data: unicode
:returns: OAuth resource owner key and secret
:rtype: (unicode, unicode) tuple
"""
self.api.parse_authorization_response(
"http://beets.io/auth?" + auth_data)
access_data = self.api.fetch_access_token(
self._make_url('/identity/1/oauth/access-token'))
return access_data['oauth_token'], access_data['oauth_token_secret']
def search(self, query, release_type='release', details=True):
""" Perform a search of the Beatport catalogue.
:param query: Query string
:param release_type: Type of releases to search for, can be
'release' or 'track'
:param details: Retrieve additional information about the
search results. Currently this will fetch
the tracklist for releases and do nothing for
tracks
:returns: Search results
:rtype: generator that yields
py:class:`BeatportRelease` or
:py:class:`BeatportTrack`
"""
response = self._get('catalog/3/search',
query=query, perPage=5,
facets=['fieldType:{0}'.format(release_type)])
for item in response:
if release_type == 'release':
if details:
release = self.get_release(item['id'])
else:
release = BeatportRelease(item)
yield release
elif release_type == 'track':
yield BeatportTrack(item)
def get_release(self, beatport_id):
""" Get information about a single release.
:param beatport_id: Beatport ID of the release
:returns: The matching release
:rtype: :py:class:`BeatportRelease`
"""
response = self._get('/catalog/3/releases', id=beatport_id)
release = BeatportRelease(response[0])
release.tracks = self.get_release_tracks(beatport_id)
return release
def get_release_tracks(self, beatport_id):
""" Get all tracks for a given release.
:param beatport_id: Beatport ID of the release
:returns: Tracks in the matching release
:rtype: list of :py:class:`BeatportTrack`
"""
response = self._get('/catalog/3/tracks', releaseId=beatport_id)
return [BeatportTrack(t) for t in response]
def get_track(self, beatport_id):
""" Get information about a single track.
:param beatport_id: Beatport ID of the track
:returns: The matching track
:rtype: :py:class:`BeatportTrack`
"""
response = self._get('/catalog/3/tracks', id=beatport_id)
return BeatportTrack(response[0])
def _make_url(self, endpoint):
""" Get complete URL for a given API endpoint. """
if not endpoint.startswith('/'):
endpoint = '/' + endpoint
return self._api_base + endpoint
def _get(self, endpoint, **kwargs):
""" Perform a GET request on a given API endpoint.
Automatically extracts result data from the response and converts HTTP
exceptions into :py:class:`BeatportAPIError` objects.
"""
try:
response = self.api.get(self._make_url(endpoint), params=kwargs)
except Exception as e:
raise BeatportAPIError("Error connecting to Beatport API: {}"
.format(e.message))
if not response:
raise BeatportAPIError(
"Error {0.status_code} for '{0.request.path_url}"
.format(response))
return response.json()['results']
@six.python_2_unicode_compatible
class BeatportRelease(BeatportObject):
def __str__(self):
if len(self.artists) < 4:
artist_str = ", ".join(x[1] for x in self.artists)
else:
artist_str = "Various Artists"
return u"<BeatportRelease: {0} - {1} ({2})>".format(
artist_str,
self.name,
self.catalog_number,
)
def __repr__(self):
return six.text_type(self).encode('utf8')
def __init__(self, data):
BeatportObject.__init__(self, data)
if 'catalogNumber' in data:
self.catalog_number = data['catalogNumber']
if 'label' in data:
self.label_name = data['label']['name']
if 'category' in data:
self.category = data['category']
if 'slug' in data:
self.url = "http://beatport.com/release/{0}/{1}".format(
data['slug'], data['id'])
@six.python_2_unicode_compatible
class BeatportTrack(BeatportObject):
def __str__(self):
artist_str = ", ".join(x[1] for x in self.artists)
return (u"<BeatportTrack: {0} - {1} ({2})>"
.format(artist_str, self.name, self.mix_name))
def __repr__(self):
return six.text_type(self).encode('utf8')
def __init__(self, data):
BeatportObject.__init__(self, data)
if 'title' in data:
self.title = six.text_type(data['title'])
if 'mixName' in data:
self.mix_name = six.text_type(data['mixName'])
self.length = timedelta(milliseconds=data.get('lengthMs', 0) or 0)
if not self.length:
try:
min, sec = data.get('length', '0:0').split(':')
self.length = timedelta(minutes=int(min), seconds=int(sec))
except ValueError:
pass
if 'slug' in data:
self.url = "http://beatport.com/track/{0}/{1}".format(data['slug'],
data['id'])
self.track_number = data.get('trackNumber')
class BeatportPlugin(BeetsPlugin):
def __init__(self):
super(BeatportPlugin, self).__init__()
self.config.add({
'apikey': '57713c3906af6f5def151b33601389176b37b429',
'apisecret': 'b3fe08c93c80aefd749fe871a16cd2bb32e2b954',
'tokenfile': 'beatport_token.json',
'source_weight': 0.5,
})
self.config['apikey'].redact = True
self.config['apisecret'].redact = True
self.client = None
self.register_listener('import_begin', self.setup)
def setup(self, session=None):
c_key = self.config['apikey'].as_str()
c_secret = self.config['apisecret'].as_str()
# Get the OAuth token from a file or log in.
try:
with open(self._tokenfile()) as f:
tokendata = json.load(f)
except IOError:
# No token yet. Generate one.
token, secret = self.authenticate(c_key, c_secret)
else:
token = tokendata['token']
secret = tokendata['secret']
self.client = BeatportClient(c_key, c_secret, token, secret)
def authenticate(self, c_key, c_secret):
# Get the link for the OAuth page.
auth_client = BeatportClient(c_key, c_secret)
try:
url = auth_client.get_authorize_url()
except AUTH_ERRORS as e:
self._log.debug(u'authentication error: {0}', e)
raise beets.ui.UserError(u'communication with Beatport failed')
beets.ui.print_(u"To authenticate with Beatport, visit:")
beets.ui.print_(url)
# Ask for the verifier data and validate it.
data = beets.ui.input_(u"Enter the string displayed in your browser:")
try:
token, secret = auth_client.get_access_token(data)
except AUTH_ERRORS as e:
self._log.debug(u'authentication error: {0}', e)
raise beets.ui.UserError(u'Beatport token request failed')
# Save the token for later use.
self._log.debug(u'Beatport token {0}, secret {1}', token, secret)
with open(self._tokenfile(), 'w') as f:
json.dump({'token': token, 'secret': secret}, f)
return token, secret
def _tokenfile(self):
"""Get the path to the JSON file for storing the OAuth token.
"""
return self.config['tokenfile'].get(confit.Filename(in_app_dir=True))
def album_distance(self, items, album_info, mapping):
"""Returns the beatport source weight and the maximum source weight
for albums.
"""
dist = Distance()
if album_info.data_source == 'Beatport':
dist.add('source', self.config['source_weight'].as_number())
return dist
def track_distance(self, item, track_info):
"""Returns the beatport source weight and the maximum source weight
for individual tracks.
"""
dist = Distance()
if track_info.data_source == 'Beatport':
dist.add('source', self.config['source_weight'].as_number())
return dist
def candidates(self, items, artist, release, va_likely):
"""Returns a list of AlbumInfo objects for beatport search results
matching release and artist (if not various).
"""
if va_likely:
query = release
else:
query = '%s %s' % (artist, release)
try:
return self._get_releases(query)
except BeatportAPIError as e:
self._log.debug(u'API Error: {0} (query: {1})', e, query)
return []
def item_candidates(self, item, artist, title):
"""Returns a list of TrackInfo objects for beatport search results
matching title and artist.
"""
query = '%s %s' % (artist, title)
try:
return self._get_tracks(query)
except BeatportAPIError as e:
self._log.debug(u'API Error: {0} (query: {1})', e, query)
return []
def album_for_id(self, release_id):
"""Fetches a release by its Beatport ID and returns an AlbumInfo object
or None if the release is not found.
"""
self._log.debug(u'Searching for release {0}', release_id)
match = re.search(r'(^|beatport\.com/release/.+/)(\d+)$', release_id)
if not match:
return None
release = self.client.get_release(match.group(2))
album = self._get_album_info(release)
return album
def track_for_id(self, track_id):
"""Fetches a track by its Beatport ID and returns a TrackInfo object
or None if the track is not found.
"""
self._log.debug(u'Searching for track {0}', track_id)
match = re.search(r'(^|beatport\.com/track/.+/)(\d+)$', track_id)
if not match:
return None
bp_track = self.client.get_track(match.group(2))
track = self._get_track_info(bp_track)
return track
def _get_releases(self, query):
"""Returns a list of AlbumInfo objects for a beatport search query.
"""
# Strip non-word characters from query. Things like "!" and "-" can
# cause a query to return no results, even if they match the artist or
# album title. Use `re.UNICODE` flag to avoid stripping non-english
# word characters.
        query = re.sub(r'\W+', ' ', query, flags=re.UNICODE)
        # Strip medium information from the query. Things like "CD1" and
        # "disc 1" can also negate an otherwise positive result.
        query = re.sub(r'\b(CD|disc)\s*\d+', '', query, flags=re.I)
albums = [self._get_album_info(x)
for x in self.client.search(query)]
return albums
def _get_album_info(self, release):
"""Returns an AlbumInfo object for a Beatport Release object.
"""
va = len(release.artists) > 3
artist, artist_id = self._get_artist(release.artists)
if va:
artist = u"Various Artists"
tracks = [self._get_track_info(x) for x in release.tracks]
return AlbumInfo(album=release.name, album_id=release.beatport_id,
artist=artist, artist_id=artist_id, tracks=tracks,
albumtype=release.category, va=va,
year=release.release_date.year,
month=release.release_date.month,
day=release.release_date.day,
label=release.label_name,
catalognum=release.catalog_number, media=u'Digital',
data_source=u'Beatport', data_url=release.url)
def _get_track_info(self, track):
"""Returns a TrackInfo object for a Beatport Track object.
"""
title = track.name
if track.mix_name != u"Original Mix":
title += u" ({0})".format(track.mix_name)
artist, artist_id = self._get_artist(track.artists)
length = track.length.total_seconds()
return TrackInfo(title=title, track_id=track.beatport_id,
artist=artist, artist_id=artist_id,
length=length, index=track.track_number,
medium_index=track.track_number,
data_source=u'Beatport', data_url=track.url)
def _get_artist(self, artists):
"""Returns an artist string (all artists) and an artist_id (the main
artist) for a list of Beatport release or track artists.
"""
artist_id = None
bits = []
for artist in artists:
if not artist_id:
artist_id = artist[0]
name = artist[1]
# Strip disambiguation number.
name = re.sub(r' \(\d+\)$', '', name)
# Move articles to the front.
name = re.sub(r'^(.*?), (a|an|the)$', r'\2 \1', name, flags=re.I)
bits.append(name)
artist = ', '.join(bits).replace(' ,', ',') or None
return artist, artist_id
def _get_tracks(self, query):
"""Returns a list of TrackInfo objects for a Beatport query.
"""
bp_tracks = self.client.search(query, release_type='track')
tracks = [self._get_track_info(x) for x in bp_tracks]
return tracks
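# Hedged usage sketch, not part of the plugin: the two-step OAuth flow that the
# BeatportClient docstrings describe, run outside of beets. The key and secret
# strings below are placeholders (assumptions), not working credentials.
if __name__ == '__main__':
    client = BeatportClient('MY_BEATPORT_KEY', 'MY_BEATPORT_SECRET')
    print(client.get_authorize_url())  # visit this URL and sign in
    auth_data = beets.ui.input_(u'Enter the string displayed in your browser:')
    token, secret = client.get_access_token(auth_data)
    client = BeatportClient('MY_BEATPORT_KEY', 'MY_BEATPORT_SECRET',
                            auth_key=token, auth_secret=secret)
    for result in client.search('artist name release title'):
        print(result)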
|
jetskijoe/headphones
|
lib/beetsplug/beatport.py
|
Python
|
gpl-3.0
| 18,360
|
[
"VisIt"
] |
6eef24014f58b83340ce93ca00139e0acab082505b7dcbf1fae5941472c894ba
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# Copyright (c), Toshio Kuratomi <[email protected]> 2016
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
BOOLEANS_TRUE = ['y', 'yes', 'on', '1', 'true', 1, True]
BOOLEANS_FALSE = ['n', 'no', 'off', '0', 'false', 0, False]
BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE
SIZE_RANGES = { 'Y': 1<<80, 'Z': 1<<70, 'E': 1<<60, 'P': 1<<50, 'T': 1<<40, 'G': 1<<30, 'M': 1<<20, 'K': 1<<10, 'B': 1 }
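# For reference: each suffix maps to a power of two, e.g. SIZE_RANGES['K'] is
# 1 << 10 == 1024 and SIZE_RANGES['M'] is 1 << 20 == 1048576 bytes.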
FILE_ATTRIBUTES = {
'A': 'noatime',
'a': 'append',
'c': 'compressed',
'C': 'nocow',
'd': 'nodump',
'D': 'dirsync',
'e': 'extents',
'E': 'encrypted',
'h': 'blocksize',
'i': 'immutable',
'I': 'indexed',
'j': 'journalled',
'N': 'inline',
's': 'zero',
'S': 'synchronous',
't': 'notail',
'T': 'blockroot',
'u': 'undelete',
'X': 'compressedraw',
'Z': 'compresseddirty',
}
# ansible modules can be written in any language. To simplify
# development of Python modules, the functions available here can
# be used to do many common tasks
import locale
import os
import re
import pipes
import shlex
import subprocess
import sys
import types
import time
import select
import shutil
import stat
import tempfile
import traceback
import grp
import pwd
import platform
import errno
import datetime
from itertools import repeat, chain
try:
import syslog
HAS_SYSLOG=True
except ImportError:
HAS_SYSLOG=False
try:
from systemd import journal
has_journal = True
except ImportError:
has_journal = False
HAVE_SELINUX=False
try:
import selinux
HAVE_SELINUX=True
except ImportError:
pass
# Python2 & 3 way to get NoneType
NoneType = type(None)
try:
from collections import Sequence, Mapping
except ImportError:
# python2.5
Sequence = (list, tuple)
Mapping = (dict,)
# Note: When getting Sequence from collections, it matches with strings. If
# this matters, make sure to check for strings before checking for sequencetype
try:
from collections.abc import KeysView
SEQUENCETYPE = (Sequence, KeysView)
except:
SEQUENCETYPE = Sequence
try:
import json
    # Detect the old python-json library, which is incompatible with the
    # stdlib json API. Fall back to simplejson if that's the case.
try:
if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
raise ImportError
except AttributeError:
raise ImportError
except ImportError:
try:
import simplejson as json
except ImportError:
print('\n{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
sys.exit(1)
except SyntaxError:
print('\n{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
sys.exit(1)
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
except ImportError:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except ImportError:
pass
from ansible.module_utils.pycompat24 import get_exception, literal_eval
from ansible.module_utils.six import (PY2, PY3, b, binary_type, integer_types,
iteritems, text_type, string_types)
from ansible.module_utils.six.moves import map, reduce
from ansible.module_utils._text import to_native, to_bytes, to_text
_NUMBERTYPES = tuple(list(integer_types) + [float])
# Deprecated compat. Only kept in case another module used these names. Using
# ansible.module_utils.six is preferred.
NUMBERTYPES = _NUMBERTYPES
imap = map
try:
# Python 2
unicode
except NameError:
# Python 3
unicode = text_type
try:
# Python 2.6+
bytes
except NameError:
# Python 2.4
bytes = binary_type
try:
# Python 2
basestring
except NameError:
# Python 3
basestring = string_types
_literal_eval = literal_eval
# End of deprecated names
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
FILE_COMMON_ARGUMENTS=dict(
src = dict(),
mode = dict(type='raw'),
owner = dict(),
group = dict(),
seuser = dict(),
serole = dict(),
selevel = dict(),
setype = dict(),
follow = dict(type='bool', default=False),
# not taken by the file module, but other modules call file so it must ignore them.
content = dict(no_log=True),
backup = dict(),
force = dict(),
remote_src = dict(), # used by assemble
regexp = dict(), # used by assemble
delimiter = dict(), # used by assemble
directory_mode = dict(), # used by copy
unsafe_writes = dict(type='bool'), # should be available to any module using atomic_move
attributes = dict(aliases=['attr']),
)
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
# Can't use 07777 on Python 3, can't use 0o7777 on Python 2.4
PERM_BITS = int('07777', 8) # file mode permission bits
EXEC_PERM_BITS = int('00111', 8) # execute permission bits
DEFAULT_PERM = int('0666', 8) # default file permission bits
def get_platform():
''' what's the platform? example: Linux is a platform. '''
return platform.system()
def get_distribution():
''' return the distribution name '''
if platform.system() == 'Linux':
try:
supported_dists = platform._supported_dists + ('arch','alpine')
distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
if not distribution and os.path.isfile('/etc/system-release'):
distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
if 'Amazon' in distribution:
distribution = 'Amazon'
else:
distribution = 'OtherLinux'
except:
# FIXME: MethodMissing, I assume?
distribution = platform.dist()[0].capitalize()
else:
distribution = None
return distribution
def get_distribution_version():
''' return the distribution version '''
if platform.system() == 'Linux':
try:
distribution_version = platform.linux_distribution()[1]
if not distribution_version and os.path.isfile('/etc/system-release'):
distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
except:
# FIXME: MethodMissing, I assume?
distribution_version = platform.dist()[1]
else:
distribution_version = None
return distribution_version
def get_all_subclasses(cls):
'''
used by modules like Hardware or Network fact classes to retrieve all subclasses of a given class.
    __subclasses__ returns only direct subclasses. This one goes down into the class tree.
'''
# Retrieve direct subclasses
subclasses = cls.__subclasses__()
to_visit = list(subclasses)
# Then visit all subclasses
while to_visit:
for sc in to_visit:
# The current class is now visited, so remove it from list
to_visit.remove(sc)
            # Append each subclass so it gets visited, and record it in the result list
for ssc in sc.__subclasses__():
subclasses.append(ssc)
to_visit.append(ssc)
return subclasses
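# Hedged illustration, not from the original source: for a hierarchy
# A(object), B(A), C(B), get_all_subclasses(A) returns [B, C], i.e. direct and
# indirect subclasses, whereas A.__subclasses__() alone would return only [B].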
def load_platform_subclass(cls, *args, **kwargs):
'''
used by modules like User to have different implementations based on detected platform. See User
module for an example.
'''
this_platform = get_platform()
distribution = get_distribution()
subclass = None
# get the most specific superclass for this platform
if distribution is not None:
for sc in get_all_subclasses(cls):
if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
subclass = sc
if subclass is None:
for sc in get_all_subclasses(cls):
if sc.platform == this_platform and sc.distribution is None:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
def json_dict_unicode_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'):
''' Recursively convert dict keys and values to byte str
    Specialized for json return because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, text_type):
return to_bytes(d, encoding=encoding, errors=errors)
elif isinstance(d, dict):
return dict(map(json_dict_unicode_to_bytes, iteritems(d), repeat(encoding), repeat(errors)))
elif isinstance(d, list):
return list(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
elif isinstance(d, tuple):
return tuple(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
else:
return d
def json_dict_bytes_to_unicode(d, encoding='utf-8', errors='surrogate_or_strict'):
    ''' Recursively convert dict keys and values to text (unicode) str
    Specialized for json return because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, binary_type):
# Warning, can traceback
return to_text(d, encoding=encoding, errors=errors)
elif isinstance(d, dict):
return dict(map(json_dict_bytes_to_unicode, iteritems(d), repeat(encoding), repeat(errors)))
elif isinstance(d, list):
return list(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors)))
elif isinstance(d, tuple):
return tuple(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors)))
else:
return d
def return_values(obj):
""" Return native stringified values from datastructures.
For use with removing sensitive values pre-jsonification."""
if isinstance(obj, (text_type, binary_type)):
if obj:
yield to_native(obj, errors='surrogate_or_strict')
return
elif isinstance(obj, SEQUENCETYPE):
for element in obj:
for subelement in return_values(element):
yield subelement
elif isinstance(obj, Mapping):
for element in obj.items():
for subelement in return_values(element[1]):
yield subelement
elif isinstance(obj, (bool, NoneType)):
# This must come before int because bools are also ints
return
elif isinstance(obj, NUMBERTYPES):
yield to_native(obj, nonstring='simplerepr')
else:
raise TypeError('Unknown parameter type: %s, %s' % (type(obj), obj))
def remove_values(value, no_log_strings):
""" Remove strings in no_log_strings from value. If value is a container
type, then remove a lot more"""
if isinstance(value, (text_type, binary_type)):
# Need native str type
native_str_value = value
if isinstance(value, text_type):
value_is_text = True
if PY2:
native_str_value = to_bytes(value, encoding='utf-8', errors='surrogate_or_strict')
elif isinstance(value, binary_type):
value_is_text = False
if PY3:
native_str_value = to_text(value, encoding='utf-8', errors='surrogate_or_strict')
if native_str_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
native_str_value = native_str_value.replace(omit_me, '*' * 8)
if value_is_text and isinstance(native_str_value, binary_type):
value = to_text(native_str_value, encoding='utf-8', errors='surrogate_or_replace')
elif not value_is_text and isinstance(native_str_value, text_type):
value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_or_replace')
else:
value = native_str_value
elif isinstance(value, SEQUENCETYPE):
return [remove_values(elem, no_log_strings) for elem in value]
elif isinstance(value, Mapping):
return dict((k, remove_values(v, no_log_strings)) for k, v in value.items())
elif isinstance(value, tuple(chain(NUMBERTYPES, (bool, NoneType)))):
stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
if stringy_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
if omit_me in stringy_value:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
elif isinstance(value, datetime.datetime):
value = value.isoformat()
else:
raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
return value
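# Hedged illustration: remove_values('db pass is hunter2', ['hunter2']) returns
# 'db pass is ********', while an exact match such as
# remove_values({'pwd': 'hunter2'}, ['hunter2']) yields
# {'pwd': 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'}; containers are walked recursively.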
def heuristic_log_sanitize(data, no_log_values=None):
''' Remove strings that look like passwords from log messages '''
# Currently filters:
# user:pass@foo/whatever and http://username:pass@wherever/foo
# This code has false positives and consumes parts of logs that are
# not passwds
# begin: start of a passwd containing string
# end: end of a passwd containing string
# sep: char between user and passwd
# prev_begin: where in the overall string to start a search for
# a passwd
# sep_search_end: where in the string to end a search for the sep
data = to_native(data)
output = []
begin = len(data)
prev_begin = begin
sep = 1
while sep:
# Find the potential end of a passwd
try:
end = data.rindex('@', 0, begin)
except ValueError:
# No passwd in the rest of the data
output.insert(0, data[0:begin])
break
# Search for the beginning of a passwd
sep = None
sep_search_end = end
while not sep:
# URL-style username+password
try:
begin = data.rindex('://', 0, sep_search_end)
except ValueError:
# No url style in the data, check for ssh style in the
# rest of the string
begin = 0
# Search for separator
try:
sep = data.index(':', begin + 3, end)
except ValueError:
# No separator; choices:
if begin == 0:
# Searched the whole string so there's no password
# here. Return the remaining data
output.insert(0, data[0:begin])
break
# Search for a different beginning of the password field.
sep_search_end = begin
continue
if sep:
# Password was found; remove it.
output.insert(0, data[end:prev_begin])
output.insert(0, '********')
output.insert(0, data[begin:sep + 1])
prev_begin = begin
output = ''.join(output)
if no_log_values:
output = remove_values(output, no_log_values)
return output
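# Hedged illustration: heuristic_log_sanitize('http://user:secret@host/path')
# returns 'http://user:********@host/path'; the username is kept and only the
# suspected password between ':' and '@' is masked.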
def bytes_to_human(size, isbits=False, unit=None):
base = 'Bytes'
if isbits:
base = 'bits'
suffix = ''
for suffix, limit in sorted(iteritems(SIZE_RANGES), key=lambda item: -item[1]):
if (unit is None and size >= limit) or unit is not None and unit.upper() == suffix[0]:
break
if limit != 1:
suffix += base[0]
else:
suffix = base
return '%.2f %s' % (float(size)/ limit, suffix)
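# Hedged illustration: bytes_to_human(2048) returns '2.00 KB',
# bytes_to_human(2048, isbits=True) returns '2.00 Kb', and forcing a unit with
# bytes_to_human(2048, unit='B') returns '2048.00 Bytes'.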
def human_to_bytes(number, default_unit=None, isbits=False):
'''
Convert number in string format into bytes (ex: '2K' => 2048) or using unit argument
ex:
human_to_bytes('10M') <=> human_to_bytes(10, 'M')
'''
m = re.search('^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE)
if m is None:
raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
try:
num = float(m.group(1))
except:
raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number))
unit = m.group(2)
if unit is None:
unit = default_unit
if unit is None:
''' No unit given, returning raw number '''
return int(round(num))
range_key = unit[0].upper()
try:
limit = SIZE_RANGES[range_key]
except:
raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys())))
# default value
unit_class = 'B'
unit_class_name = 'byte'
# handling bits case
if isbits:
unit_class = 'b'
unit_class_name = 'bit'
# check unit value if more than one character (KB, MB)
if len(unit) > 1:
expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key)
if range_key == 'B':
expect_message = 'expect %s or %s' % (unit_class, unit_class_name)
if unit_class_name in unit.lower():
pass
elif unit[1] != unit_class:
raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message))
return int(round(num * limit))
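# Hedged illustration: human_to_bytes('2K') == 2048,
# human_to_bytes(10, default_unit='M') == 10485760, and
# human_to_bytes('10Mb', isbits=True) == 10485760 (interpreted as bits).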
def is_executable(path):
'''is the given path executable?
Limitations:
* Does not account for FSACLs.
    * Most times we really want to know "Can the current user execute this
      file?" This function does not tell us that, only whether an execute bit is set.
'''
# These are all bitfields so first bitwise-or all the permissions we're
# looking for, then bitwise-and with the file's mode to determine if any
# execute bits are set.
return ((stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & os.stat(path)[stat.ST_MODE])
def _load_params():
''' read the modules parameters and store them globally.
This function may be needed for certain very dynamic custom modules which
    want to process the parameters that are being handed to the module. Since
this is so closely tied to the implementation of modules we cannot
guarantee API stability for it (it may change between versions) however we
will try not to break it gratuitously. It is certainly more future-proof
to call this function and consume its outputs than to implement the logic
inside it as a copy in your own code.
'''
global _ANSIBLE_ARGS
if _ANSIBLE_ARGS is not None:
buffer = _ANSIBLE_ARGS
else:
# debug overrides to read args from file or cmdline
# Avoid tracebacks when locale is non-utf8
# We control the args and we pass them as utf8
if len(sys.argv) > 1:
if os.path.isfile(sys.argv[1]):
fd = open(sys.argv[1], 'rb')
buffer = fd.read()
fd.close()
else:
buffer = sys.argv[1]
if PY3:
buffer = buffer.encode('utf-8', errors='surrogateescape')
# default case, read from stdin
else:
if PY2:
buffer = sys.stdin.read()
else:
buffer = sys.stdin.buffer.read()
_ANSIBLE_ARGS = buffer
try:
params = json.loads(buffer.decode('utf-8'))
except ValueError:
# This helper used too early for fail_json to work.
print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
if PY2:
params = json_dict_unicode_to_bytes(params)
try:
return params['ANSIBLE_MODULE_ARGS']
except KeyError:
# This helper does not have access to fail_json so we have to print
# json output on our own.
print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
def env_fallback(*args, **kwargs):
''' Load value from environment '''
for arg in args:
if arg in os.environ:
return os.environ[arg]
else:
raise AnsibleFallbackNotFound
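# Hedged illustration: env_fallback('ANSIBLE_FOO_TOKEN', 'FOO_TOKEN') returns the
# value of the first of those environment variables that is set, and raises
# AnsibleFallbackNotFound when none of them exist.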
def _lenient_lowercase(lst):
"""Lowercase elements of a list.
If an element is not a string, pass it through untouched.
"""
lowered = []
for value in lst:
try:
lowered.append(value.lower())
except AttributeError:
lowered.append(value)
return lowered
def format_attributes(attributes):
attribute_list = []
for attr in attributes:
if attr in FILE_ATTRIBUTES:
attribute_list.append(FILE_ATTRIBUTES[attr])
return attribute_list
def get_flags_from_attributes(attributes):
flags = []
for key,attr in FILE_ATTRIBUTES.items():
if attr in attributes:
flags.append(key)
return ''.join(flags)
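# Hedged illustration: format_attributes(['a', 'i']) returns
# ['append', 'immutable'], and get_flags_from_attributes(['append', 'immutable'])
# maps back to the flag characters 'a' and 'i' (order follows dict iteration).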
class AnsibleFallbackNotFound(Exception):
pass
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
required_one_of=None, add_file_common_args=False, supports_check_mode=False,
required_if=None):
'''
common code for quickly building an ansible module in Python
(although you can write modules in anything that can return JSON)
see library/* for examples
'''
self._name = os.path.basename(__file__) #initialize name until we can parse from options
self.argument_spec = argument_spec
self.supports_check_mode = supports_check_mode
self.check_mode = False
self.no_log = no_log
self.cleanup_files = []
self._debug = False
self._diff = False
self._verbosity = 0
# May be used to set modifications to the environment for any
# run_command invocation
self.run_command_environ_update = {}
self._warnings = []
self._deprecations = []
self._passthrough = ['warnings', 'deprecations']
self.aliases = {}
self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity', '_ansible_selinux_special_fs', '_ansible_module_name', '_ansible_version', '_ansible_syslog_facility']
if add_file_common_args:
for k, v in FILE_COMMON_ARGUMENTS.items():
if k not in self.argument_spec:
self.argument_spec[k] = v
self._load_params()
self._set_fallbacks()
# append to legal_inputs and then possibly check against them
try:
self.aliases = self._handle_aliases()
except Exception:
e = get_exception()
# Use exceptions here because it isn't safe to call fail_json until no_log is processed
print('\n{"failed": true, "msg": "Module alias error: %s"}' % str(e))
sys.exit(1)
# Save parameter values that should never be logged
self.no_log_values = set()
# Use the argspec to determine which args are no_log
for arg_name, arg_opts in self.argument_spec.items():
if arg_opts.get('no_log', False):
# Find the value for the no_log'd param
no_log_object = self.params.get(arg_name, None)
if no_log_object:
self.no_log_values.update(return_values(no_log_object))
if arg_opts.get('removed_in_version') is not None and arg_name in self.params:
self._deprecations.append({
'msg': "Param '%s' is deprecated. See the module docs for more information" % arg_name,
'version': arg_opts.get('removed_in_version')
})
# check the locale as set by the current environment, and reset to
# a known valid (LANG=C) if it's an invalid/unavailable locale
self._check_locale()
self._check_arguments(check_invalid_arguments)
# check exclusive early
if not bypass_checks:
self._check_mutually_exclusive(mutually_exclusive)
self._set_defaults(pre=True)
self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
'str': self._check_type_str,
'list': self._check_type_list,
'dict': self._check_type_dict,
'bool': self._check_type_bool,
'int': self._check_type_int,
'float': self._check_type_float,
'path': self._check_type_path,
'raw': self._check_type_raw,
'jsonarg': self._check_type_jsonarg,
'json': self._check_type_jsonarg,
'bytes': self._check_type_bytes,
'bits': self._check_type_bits,
}
if not bypass_checks:
self._check_required_arguments()
self._check_argument_types()
self._check_argument_values()
self._check_required_together(required_together)
self._check_required_one_of(required_one_of)
self._check_required_if(required_if)
self._set_defaults(pre=False)
if not self.no_log:
self._log_invocation()
# finally, make sure we're in a sane working dir
self._set_cwd()
def warn(self, warning):
if isinstance(warning, string_types):
self._warnings.append(warning)
self.log('[WARNING] %s' % warning)
else:
raise TypeError("warn requires a string not a %s" % type(warning))
def deprecate(self, msg, version=None):
if isinstance(msg, string_types):
self._deprecations.append({
'msg': msg,
'version': version
})
self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
else:
raise TypeError("deprecate requires a string not a %s" % type(msg))
def load_file_common_arguments(self, params):
'''
        many modules deal with files; this encapsulates common
options that the file module accepts such that it is directly
available to all modules and they can share code.
'''
path = params.get('path', params.get('dest', None))
if path is None:
return {}
else:
path = os.path.expanduser(os.path.expandvars(path))
b_path = to_bytes(path, errors='surrogate_or_strict')
# if the path is a symlink, and we're following links, get
# the target of the link instead for testing
if params.get('follow', False) and os.path.islink(b_path):
b_path = os.path.realpath(b_path)
path = to_native(b_path)
mode = params.get('mode', None)
owner = params.get('owner', None)
group = params.get('group', None)
# selinux related options
seuser = params.get('seuser', None)
serole = params.get('serole', None)
setype = params.get('setype', None)
selevel = params.get('selevel', None)
secontext = [seuser, serole, setype]
if self.selinux_mls_enabled():
secontext.append(selevel)
default_secontext = self.selinux_default_context(path)
for i in range(len(default_secontext)):
if i is not None and secontext[i] == '_default':
secontext[i] = default_secontext[i]
attributes = params.get('attributes', None)
return dict(
path=path, mode=mode, owner=owner, group=group,
seuser=seuser, serole=serole, setype=setype,
selevel=selevel, secontext=secontext, attributes=attributes,
)
    # Detect whether the system is using an SELinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if not HAVE_SELINUX:
return False
if selinux.is_selinux_mls_enabled() == 1:
return True
else:
return False
def selinux_enabled(self):
if not HAVE_SELINUX:
seenabled = self.get_bin_path('selinuxenabled')
if seenabled is not None:
(rc,out,err) = self.run_command(seenabled)
if rc == 0:
self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
return False
if selinux.is_selinux_enabled() == 1:
return True
else:
return False
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
except OSError:
return context
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def selinux_context(self, path):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
except OSError:
e = get_exception()
if e.errno == errno.ENOENT:
self.fail_json(path=path, msg='path %s does not exist' % path)
else:
self.fail_json(path=path, msg='failed to retrieve selinux context')
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def user_and_group(self, filename):
filename = os.path.expanduser(os.path.expandvars(filename))
b_filename = to_bytes(filename, errors='surrogate_or_strict')
st = os.lstat(b_filename)
uid = st.st_uid
gid = st.st_gid
return (uid, gid)
def find_mount_point(self, path):
path = os.path.realpath(os.path.expanduser(os.path.expandvars(path)))
while not os.path.ismount(path):
path = os.path.dirname(path)
return path
def is_special_selinux_path(self, path):
"""
        Returns a tuple containing (True, selinux_context) if the given path is on an
NFS or other 'special' fs mount point, otherwise the return will be (False, None).
"""
try:
f = open('/proc/mounts', 'r')
mount_data = f.readlines()
f.close()
except:
return (False, None)
path_mount_point = self.find_mount_point(path)
for line in mount_data:
(device, mount_point, fstype, options, rest) = line.split(' ', 4)
if path_mount_point == mount_point:
for fs in self._selinux_special_fs:
if fs in fstype:
special_context = self.selinux_context(path_mount_point)
return (True, special_context)
return (False, None)
def set_default_selinux_context(self, path, changed):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
context = self.selinux_default_context(path)
return self.set_context_if_different(path, context, False)
def set_context_if_different(self, path, context, changed, diff=None):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
cur_context = self.selinux_context(path)
new_context = list(cur_context)
# Iterate over the current context instead of the
# argument context, which may have selevel.
(is_special_se, sp_context) = self.is_special_selinux_path(path)
if is_special_se:
new_context = sp_context
else:
for i in range(len(cur_context)):
if len(context) > i:
if context[i] is not None and context[i] != cur_context[i]:
new_context[i] = context[i]
elif context[i] is None:
new_context[i] = cur_context[i]
if cur_context != new_context:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['secontext'] = cur_context
if 'after' not in diff:
diff['after'] = {}
diff['after']['secontext'] = new_context
try:
if self.check_mode:
return True
rc = selinux.lsetfilecon(to_native(path),
str(':'.join(new_context)))
except OSError:
e = get_exception()
self.fail_json(path=path, msg='invalid selinux context: %s' % str(e), new_context=new_context, cur_context=cur_context, input_was=context)
if rc != 0:
self.fail_json(path=path, msg='set selinux context failed')
changed = True
return changed
def set_owner_if_different(self, path, owner, changed, diff=None):
path = os.path.expanduser(os.path.expandvars(path))
b_path = to_bytes(path, errors='surrogate_or_strict')
if owner is None:
return changed
orig_uid, orig_gid = self.user_and_group(path)
try:
uid = int(owner)
except ValueError:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['owner'] = orig_uid
if 'after' not in diff:
diff['after'] = {}
diff['after']['owner'] = uid
if self.check_mode:
return True
try:
os.lchown(b_path, uid, -1)
except OSError:
self.fail_json(path=path, msg='chown failed')
changed = True
return changed
def set_group_if_different(self, path, group, changed, diff=None):
path = os.path.expanduser(os.path.expandvars(path))
b_path = to_bytes(path, errors='surrogate_or_strict')
if group is None:
return changed
orig_uid, orig_gid = self.user_and_group(b_path)
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['group'] = orig_gid
if 'after' not in diff:
diff['after'] = {}
diff['after']['group'] = gid
if self.check_mode:
return True
try:
os.lchown(b_path, -1, gid)
except OSError:
self.fail_json(path=path, msg='chgrp failed')
changed = True
return changed
def set_mode_if_different(self, path, mode, changed, diff=None):
b_path = to_bytes(path, errors='surrogate_or_strict')
b_path = os.path.expanduser(os.path.expandvars(b_path))
path_stat = os.lstat(b_path)
if mode is None:
return changed
if not isinstance(mode, int):
try:
mode = int(mode, 8)
except Exception:
try:
mode = self._symbolic_mode_to_octal(path_stat, mode)
except Exception:
e = get_exception()
self.fail_json(path=path,
msg="mode must be in octal or symbolic form",
details=str(e))
if mode != stat.S_IMODE(mode):
# prevent mode from having extra info or being an invalid long number
self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
prev_mode = stat.S_IMODE(path_stat.st_mode)
if prev_mode != mode:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['mode'] = '0%03o' % prev_mode
if 'after' not in diff:
diff['after'] = {}
diff['after']['mode'] = '0%03o' % mode
if self.check_mode:
return True
# FIXME: comparison against string above will cause this to be executed
# every time
try:
if hasattr(os, 'lchmod'):
os.lchmod(b_path, mode)
else:
if not os.path.islink(b_path):
os.chmod(b_path, mode)
else:
# Attempt to set the perms of the symlink but be
# careful not to change the perms of the underlying
# file while trying
underlying_stat = os.stat(b_path)
os.chmod(b_path, mode)
new_underlying_stat = os.stat(b_path)
if underlying_stat.st_mode != new_underlying_stat.st_mode:
os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
except OSError:
e = get_exception()
if os.path.islink(b_path) and e.errno == errno.EPERM: # Can't set mode on symbolic links
pass
elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
pass
else:
raise e
except Exception:
e = get_exception()
self.fail_json(path=path, msg='chmod failed', details=str(e))
path_stat = os.lstat(b_path)
new_mode = stat.S_IMODE(path_stat.st_mode)
if new_mode != prev_mode:
changed = True
return changed
def set_attributes_if_different(self, path, attributes, changed, diff=None):
if attributes is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
b_path = os.path.expanduser(os.path.expandvars(b_path))
existing = self.get_file_attributes(b_path)
if existing.get('attr_flags','') != attributes:
attrcmd = self.get_bin_path('chattr')
if attrcmd:
attrcmd = [attrcmd, '=%s' % attributes, b_path]
changed = True
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['attributes'] = existing.get('attr_flags')
if 'after' not in diff:
diff['after'] = {}
diff['after']['attributes'] = attributes
if not self.check_mode:
try:
rc, out, err = self.run_command(attrcmd)
if rc != 0 or err:
raise Exception("Error while setting attributes: %s" % (out + err))
except:
e = get_exception()
self.fail_json(path=path, msg='chattr failed', details=str(e))
return changed
def get_file_attributes(self, path):
output = {}
attrcmd = self.get_bin_path('lsattr', False)
if attrcmd:
attrcmd = [attrcmd, '-vd', path]
try:
rc, out, err = self.run_command(attrcmd)
if rc == 0:
res = out.split(' ')[0:2]
output['attr_flags'] = res[1].replace('-','').strip()
output['version'] = res[0].strip()
output['attributes'] = format_attributes(output['attr_flags'])
except:
pass
return output
def _symbolic_mode_to_octal(self, path_stat, symbolic_mode):
new_mode = stat.S_IMODE(path_stat.st_mode)
mode_re = re.compile(r'^(?P<users>[ugoa]+)(?P<operator>[-+=])(?P<perms>[rwxXst-]*|[ugo])$')
for mode in symbolic_mode.split(','):
match = mode_re.match(mode)
if match:
users = match.group('users')
operator = match.group('operator')
perms = match.group('perms')
if users == 'a':
users = 'ugo'
for user in users:
mode_to_apply = self._get_octal_mode_from_symbolic_perms(path_stat, user, perms)
new_mode = self._apply_operation_to_mode(user, operator, mode_to_apply, new_mode)
else:
raise ValueError("bad symbolic permission for mode: %s" % mode)
return new_mode
def _apply_operation_to_mode(self, user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u':
mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g':
mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o':
mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
def _get_octal_mode_from_symbolic_perms(self, path_stat, user, perms):
prev_mode = stat.S_IMODE(path_stat.st_mode)
is_directory = stat.S_ISDIR(path_stat.st_mode)
has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
apply_X_permission = is_directory or has_x_permissions
# Permission bits constants documented at:
# http://docs.python.org/2/library/stat.html#stat.S_ISUID
if apply_X_permission:
X_perms = {
'u': {'X': stat.S_IXUSR},
'g': {'X': stat.S_IXGRP},
'o': {'X': stat.S_IXOTH}
}
else:
X_perms = {
'u': {'X': 0},
'g': {'X': 0},
'o': {'X': 0}
}
user_perms_to_modes = {
'u': {
'r': stat.S_IRUSR,
'w': stat.S_IWUSR,
'x': stat.S_IXUSR,
's': stat.S_ISUID,
't': 0,
'u': prev_mode & stat.S_IRWXU,
'g': (prev_mode & stat.S_IRWXG) << 3,
'o': (prev_mode & stat.S_IRWXO) << 6 },
'g': {
'r': stat.S_IRGRP,
'w': stat.S_IWGRP,
'x': stat.S_IXGRP,
's': stat.S_ISGID,
't': 0,
'u': (prev_mode & stat.S_IRWXU) >> 3,
'g': prev_mode & stat.S_IRWXG,
'o': (prev_mode & stat.S_IRWXO) << 3 },
'o': {
'r': stat.S_IROTH,
'w': stat.S_IWOTH,
'x': stat.S_IXOTH,
's': 0,
't': stat.S_ISVTX,
'u': (prev_mode & stat.S_IRWXU) >> 6,
'g': (prev_mode & stat.S_IRWXG) >> 3,
'o': prev_mode & stat.S_IRWXO }
}
# Insert X_perms into user_perms_to_modes
for key, value in X_perms.items():
user_perms_to_modes[key].update(value)
or_reduce = lambda mode, perm: mode | user_perms_to_modes[user][perm]
return reduce(or_reduce, perms, 0)
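# Worked example (illustrative only, assuming a file whose current mode is 0o666):
# _symbolic_mode_to_octal processes 'u+x,go-w' clause by clause:
#   'u+x'  -> mode_to_apply = S_IXUSR (0o100); '+' ORs it in        -> 0o766
#   'go-w' -> for 'g': S_IWGRP (0o020) is cleared by '-'            -> 0o746
#             for 'o': S_IWOTH (0o002) is cleared by '-'            -> 0o744
# so a hypothetical call such as
#   module._symbolic_mode_to_octal(os.lstat('somefile'), 'u+x,go-w')
# would return 0o744 for that starting mode.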
def set_fs_attributes_if_different(self, file_args, changed, diff=None):
# set modes owners and context as needed
changed = self.set_context_if_different(
file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
file_args['path'], file_args['owner'], changed, diff
)
changed = self.set_group_if_different(
file_args['path'], file_args['group'], changed, diff
)
changed = self.set_mode_if_different(
file_args['path'], file_args['mode'], changed, diff
)
changed = self.set_attributes_if_different(
file_args['path'], file_args['attributes'], changed, diff
)
return changed
def set_directory_attributes_if_different(self, file_args, changed, diff=None):
return self.set_fs_attributes_if_different(file_args, changed, diff)
def set_file_attributes_if_different(self, file_args, changed, diff=None):
return self.set_fs_attributes_if_different(file_args, changed, diff)
def add_path_info(self, kwargs):
'''
for results that are files, supplement the info about the file
in the return path with stats about the file path.
'''
path = kwargs.get('path', kwargs.get('dest', None))
if path is None:
return kwargs
b_path = to_bytes(path, errors='surrogate_or_strict')
if os.path.exists(b_path):
(uid, gid) = self.user_and_group(path)
kwargs['uid'] = uid
kwargs['gid'] = gid
try:
user = pwd.getpwuid(uid)[0]
except KeyError:
user = str(uid)
try:
group = grp.getgrgid(gid)[0]
except KeyError:
group = str(gid)
kwargs['owner'] = user
kwargs['group'] = group
st = os.lstat(b_path)
kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
# secontext not yet supported
if os.path.islink(b_path):
kwargs['state'] = 'link'
elif os.path.isdir(b_path):
kwargs['state'] = 'directory'
elif os.stat(b_path).st_nlink > 1:
kwargs['state'] = 'hard'
else:
kwargs['state'] = 'file'
if HAVE_SELINUX and self.selinux_enabled():
kwargs['secontext'] = ':'.join(self.selinux_context(path))
kwargs['size'] = st[stat.ST_SIZE]
else:
kwargs['state'] = 'absent'
return kwargs
def _check_locale(self):
'''
Uses the locale module to test the currently set locale
(per the LANG and LC_CTYPE environment settings)
'''
try:
# setting the locale to '' uses the default locale
# as it would be returned by locale.getdefaultlocale()
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
# fallback to the 'C' locale, which may cause unicode
# issues but is preferable to simply failing because
# of an unknown locale
locale.setlocale(locale.LC_ALL, 'C')
os.environ['LANG'] = 'C'
os.environ['LC_ALL'] = 'C'
os.environ['LC_MESSAGES'] = 'C'
except Exception:
e = get_exception()
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e)
def _handle_aliases(self):
# this uses exceptions as it happens before we can safely call fail_json
aliases_results = {} #alias:canon
for (k,v) in self.argument_spec.items():
self._legal_inputs.append(k)
aliases = v.get('aliases', None)
default = v.get('default', None)
required = v.get('required', False)
if default is not None and required:
# not alias specific but this is a good place to check this
raise Exception("internal error: required and default are mutually exclusive for %s" % k)
if aliases is None:
continue
if not isinstance(aliases, SEQUENCETYPE) or isinstance(aliases, (binary_type, text_type)):
raise Exception('internal error: aliases must be a list or tuple')
for alias in aliases:
self._legal_inputs.append(alias)
aliases_results[alias] = k
if alias in self.params:
self.params[k] = self.params[alias]
return aliases_results
def _check_arguments(self, check_invalid_arguments):
self._syslog_facility = 'LOG_USER'
unsupported_parameters = set()
for (k,v) in list(self.params.items()):
if k == '_ansible_check_mode' and v:
self.check_mode = True
elif k == '_ansible_no_log':
self.no_log = self.boolean(v)
elif k == '_ansible_debug':
self._debug = self.boolean(v)
elif k == '_ansible_diff':
self._diff = self.boolean(v)
elif k == '_ansible_verbosity':
self._verbosity = v
elif k == '_ansible_selinux_special_fs':
self._selinux_special_fs = v
elif k == '_ansible_syslog_facility':
self._syslog_facility = v
elif k == '_ansible_version':
self.ansible_version = v
elif k == '_ansible_module_name':
self._name = v
elif check_invalid_arguments and k not in self._legal_inputs:
unsupported_parameters.add(k)
#clean up internal params:
if k.startswith('_ansible_'):
del self.params[k]
if unsupported_parameters:
self.fail_json(msg="Unsupported parameters for (%s) module: %s. Supported parameters include: %s" % (self._name,
','.join(sorted(list(unsupported_parameters))),
','.join(sorted(self.argument_spec.keys()))))
if self.check_mode and not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check):
count = 0
for term in check:
if term in self.params:
count += 1
return count
def _check_mutually_exclusive(self, spec):
if spec is None:
return
for check in spec:
count = self._count_terms(check)
if count > 1:
self.fail_json(msg="parameters are mutually exclusive: %s" % (check,))
def _check_required_one_of(self, spec):
if spec is None:
return
for check in spec:
count = self._count_terms(check)
if count == 0:
self.fail_json(msg="one of the following is required: %s" % ','.join(check))
def _check_required_together(self, spec):
if spec is None:
return
for check in spec:
counts = [ self._count_terms([field]) for field in check ]
non_zero = [ c for c in counts if c > 0 ]
if len(non_zero) > 0:
if 0 in counts:
self.fail_json(msg="parameters are required together: %s" % (check,))
def _check_required_arguments(self):
''' ensure all required arguments are present '''
missing = []
for (k,v) in self.argument_spec.items():
required = v.get('required', False)
if required and k not in self.params:
missing.append(k)
if len(missing) > 0:
self.fail_json(msg="missing required arguments: %s" % ",".join(missing))
def _check_required_if(self, spec):
''' ensure that parameters which are conditionally required are present '''
if spec is None:
return
for sp in spec:
missing = []
max_missing_count = 0
is_one_of = False
if len(sp) == 4:
key, val, requirements, is_one_of = sp
else:
key, val, requirements = sp
# if is_one_of is True, at least one requirement should be
# present, else all requirements should be present.
if is_one_of:
max_missing_count = len(requirements)
if key in self.params and self.params[key] == val:
for check in requirements:
count = self._count_terms((check,))
if count == 0:
missing.append(check)
if len(missing) and len(missing) >= max_missing_count:
self.fail_json(msg="%s is %s but the following are missing: %s" % (key, val, ','.join(missing)))
def _check_argument_values(self):
''' ensure all arguments have the requested values, and there are no stray arguments '''
for (k,v) in self.argument_spec.items():
choices = v.get('choices',None)
if choices is None:
continue
if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)):
if k in self.params:
if self.params[k] not in choices:
# PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking
# the value. If we can't figure this out, module author is responsible.
lowered_choices = None
if self.params[k] == 'False':
lowered_choices = _lenient_lowercase(choices)
FALSEY = frozenset(BOOLEANS_FALSE)
overlap = FALSEY.intersection(choices)
if len(overlap) == 1:
# Extract from a set
(self.params[k],) = overlap
if self.params[k] == 'True':
if lowered_choices is None:
lowered_choices = _lenient_lowercase(choices)
TRUTHY = frozenset(BOOLEANS_TRUE)
overlap = TRUTHY.intersection(choices)
if len(overlap) == 1:
(self.params[k],) = overlap
if self.params[k] not in choices:
choices_str=",".join([to_native(c) for c in choices])
msg="value of %s must be one of: %s, got: %s" % (k, choices_str, self.params[k])
self.fail_json(msg=msg)
else:
self.fail_json(msg="internal error: choices for argument %s are not iterable: %s" % (k, choices))
def safe_eval(self, value, locals=None, include_exceptions=False):
# do not allow method calls to modules
if not isinstance(value, string_types):
# already templated to a data structure, perhaps?
if include_exceptions:
return (value, None)
return value
if re.search(r'\w\.\w+\(', value):
if include_exceptions:
return (value, None)
return value
# do not allow imports
if re.search(r'import \w+', value):
if include_exceptions:
return (value, None)
return value
try:
result = literal_eval(value)
if include_exceptions:
return (result, None)
else:
return result
except Exception:
e = get_exception()
if include_exceptions:
return (value, e)
return value
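# Illustrative behaviour sketch (hypothetical values, not exhaustive):
#   module.safe_eval("{'a': 1}")            -> {'a': 1}              (literal_eval succeeds)
#   module.safe_eval("[1, 2, 3]")           -> [1, 2, 3]
#   module.safe_eval("os.system('id')")     -> "os.system('id')"     (method calls are rejected)
#   module.safe_eval("import os")           -> "import os"           (imports are rejected)
# With include_exceptions=True the same calls return (result, None) or, on a
# literal_eval failure, (original_value, exception) tuples instead.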
def _check_type_str(self, value):
if isinstance(value, string_types):
return value
# Note: This could throw a unicode error if value's __str__() method
# returns non-ascii. Have to port utils.to_bytes() if that happens
return str(value)
def _check_type_list(self, value):
if isinstance(value, list):
return value
if isinstance(value, string_types):
return value.split(",")
elif isinstance(value, int) or isinstance(value, float):
return [ str(value) ]
raise TypeError('%s cannot be converted to a list' % type(value))
def _check_type_dict(self, value):
if isinstance(value, dict):
return value
if isinstance(value, string_types):
if value.startswith("{"):
try:
return json.loads(value)
except:
(result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
if exc is not None:
raise TypeError('unable to evaluate string as dictionary')
return result
elif '=' in value:
fields = []
field_buffer = []
in_quote = False
in_escape = False
for c in value.strip():
if in_escape:
field_buffer.append(c)
in_escape = False
elif c == '\\':
in_escape = True
elif not in_quote and c in ('\'', '"'):
in_quote = c
elif in_quote and in_quote == c:
in_quote = False
elif not in_quote and c in (',', ' '):
field = ''.join(field_buffer)
if field:
fields.append(field)
field_buffer = []
else:
field_buffer.append(c)
field = ''.join(field_buffer)
if field:
fields.append(field)
return dict(x.split("=", 1) for x in fields)
else:
raise TypeError("dictionary requested, could not parse JSON or key=value")
raise TypeError('%s cannot be converted to a dict' % type(value))
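# Illustrative conversion sketch (hypothetical inputs):
#   module._check_type_dict('{"a": 1, "b": 2}')         -> {'a': 1, 'b': 2}   (JSON branch)
#   module._check_type_dict('a=1 b="hello world" c=3')  -> {'a': '1', 'b': 'hello world', 'c': '3'}
#   module._check_type_dict('not a mapping')            raises TypeError
# In the key=value form, commas and unquoted spaces separate fields, quotes
# group values containing spaces, and a backslash escapes the next character.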
def _check_type_bool(self, value):
if isinstance(value, bool):
return value
if isinstance(value, string_types) or isinstance(value, int):
return self.boolean(value)
raise TypeError('%s cannot be converted to a bool' % type(value))
def _check_type_int(self, value):
if isinstance(value, int):
return value
if isinstance(value, string_types):
return int(value)
raise TypeError('%s cannot be converted to an int' % type(value))
def _check_type_float(self, value):
if isinstance(value, float):
return value
if isinstance(value, (binary_type, text_type, int)):
return float(value)
raise TypeError('%s cannot be converted to a float' % type(value))
def _check_type_path(self, value):
value = self._check_type_str(value)
return os.path.expanduser(os.path.expandvars(value))
def _check_type_jsonarg(self, value):
# Return a jsonified string. Sometimes the controller turns a json
# string into a dict/list so transform it back into json here
if isinstance(value, (text_type, binary_type)):
return value.strip()
else:
if isinstance(value, (list, tuple, dict)):
return json.dumps(value)
raise TypeError('%s cannot be converted to a json string' % type(value))
def _check_type_raw(self, value):
return value
def _check_type_bytes(self, value):
try:
self.human_to_bytes(value)
except ValueError:
raise TypeError('%s cannot be converted to a Byte value' % type(value))
def _check_type_bits(self, value):
try:
self.human_to_bytes(value, isbits=True)
except ValueError:
raise TypeError('%s cannot be converted to a Bit value' % type(value))
def _check_argument_types(self):
''' ensure all arguments have the requested type '''
for (k, v) in self.argument_spec.items():
wanted = v.get('type', None)
if k not in self.params:
continue
if wanted is None:
# Mostly we want to default to str.
# For values set to None explicitly, return None instead as
# that allows a user to unset a parameter
if self.params[k] is None:
continue
wanted = 'str'
value = self.params[k]
if value is None:
continue
try:
type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
except KeyError:
self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
try:
self.params[k] = type_checker(value)
except (TypeError, ValueError):
e = get_exception()
self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s: %s" % (k, type(value), wanted, e))
def _set_defaults(self, pre=True):
for (k,v) in self.argument_spec.items():
default = v.get('default', None)
if pre == True:
# this prevents setting defaults on required items
if default is not None and k not in self.params:
self.params[k] = default
else:
# make sure things without a default still get set None
if k not in self.params:
self.params[k] = default
def _set_fallbacks(self):
for k,v in self.argument_spec.items():
fallback = v.get('fallback', (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if k not in self.params and fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
self.params[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
except AnsibleFallbackNotFound:
continue
def _load_params(self):
''' read the input and set the params attribute.
This method is for backwards compatibility. The guts of the function
were moved out in 2.1 so that custom modules could read the parameters.
'''
# debug overrides to read args from file or cmdline
self.params = _load_params()
def _log_to_syslog(self, msg):
if HAS_SYSLOG:
module = 'ansible-%s' % self._name
facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
syslog.openlog(str(module), 0, facility)
syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
if self._debug:
self.log('[debug] %s' % msg)
def log(self, msg, log_args=None):
if not self.no_log:
if log_args is None:
log_args = dict()
module = 'ansible-%s' % self._name
if isinstance(module, binary_type):
module = module.decode('utf-8', 'replace')
# 6655 - allow for accented characters
if not isinstance(msg, (binary_type, text_type)):
raise TypeError("msg should be a string (got %s)" % type(msg))
# We want journal to always take text type
# syslog takes bytes on py2, text type on py3
if isinstance(msg, binary_type):
journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
else:
# TODO: surrogateescape is a danger here on Py3
journal_msg = remove_values(msg, self.no_log_values)
if PY3:
syslog_msg = journal_msg
else:
syslog_msg = journal_msg.encode('utf-8', 'replace')
if has_journal:
journal_args = [("MODULE", os.path.basename(__file__))]
for arg in log_args:
journal_args.append((arg.upper(), str(log_args[arg])))
try:
journal.send(u"%s %s" % (module, journal_msg), **dict(journal_args))
except IOError:
# fall back to syslog since logging to journal failed
self._log_to_syslog(syslog_msg)
else:
self._log_to_syslog(syslog_msg)
def _log_invocation(self):
''' log that ansible ran the module '''
# TODO: generalize a separate log function and make log_invocation use it
# Sanitize possible password argument when logging.
log_args = dict()
passwd_keys = ['password', 'login_password']
for param in self.params:
canon = self.aliases.get(param, param)
arg_opts = self.argument_spec.get(canon, {})
no_log = arg_opts.get('no_log', False)
if self.boolean(no_log):
log_args[param] = 'NOT_LOGGING_PARAMETER'
elif param in passwd_keys:
log_args[param] = 'NOT_LOGGING_PASSWORD'
else:
param_val = self.params[param]
if not isinstance(param_val, (text_type, binary_type)):
param_val = str(param_val)
elif isinstance(param_val, text_type):
param_val = param_val.encode('utf-8')
log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
msg = []
for arg in log_args:
arg_val = log_args[arg]
if not isinstance(arg_val, (text_type, binary_type)):
arg_val = str(arg_val)
elif isinstance(arg_val, text_type):
arg_val = arg_val.encode('utf-8')
msg.append('%s=%s' % (arg, arg_val))
if msg:
msg = 'Invoked with %s' % ' '.join(msg)
else:
msg = 'Invoked'
self.log(msg, log_args=log_args)
def _set_cwd(self):
try:
cwd = os.getcwd()
if not os.access(cwd, os.F_OK|os.R_OK):
raise
return cwd
except:
# we don't have access to the cwd, probably because of sudo.
# Try and move to a neutral location to prevent errors
for cwd in [os.path.expandvars('$HOME'), tempfile.gettempdir()]:
try:
if os.access(cwd, os.F_OK|os.R_OK):
os.chdir(cwd)
return cwd
except:
pass
# we won't error here, as it may *not* be a problem,
# and we don't want to break modules unnecessarily
return None
def get_bin_path(self, arg, required=False, opt_dirs=[]):
'''
find system executable in PATH.
Optional arguments:
- required: if executable is not found and required is true, fail_json
- opt_dirs: optional list of directories to search in addition to PATH
if found return full path; otherwise return None
'''
sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
paths = []
for d in opt_dirs:
if d is not None and os.path.exists(d):
paths.append(d)
paths += os.environ.get('PATH', '').split(os.pathsep)
bin_path = None
# mangle PATH to include /sbin dirs
for p in sbin_paths:
if p not in paths and os.path.exists(p):
paths.append(p)
for d in paths:
if not d:
continue
path = os.path.join(d, arg)
if os.path.exists(path) and is_executable(path):
bin_path = path
break
if required and bin_path is None:
self.fail_json(msg='Failed to find required executable %s in paths: %s' % (arg, os.pathsep.join(paths)))
return bin_path
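# Illustrative usage sketch (hypothetical module instance and paths):
#   chattr_path = module.get_bin_path('chattr')                  # e.g. '/usr/bin/chattr' or None
#   mount_path = module.get_bin_path('mount', required=True)     # fail_json() if not found
#   custom = module.get_bin_path('mytool', opt_dirs=['/opt/mytool/bin'])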
def boolean(self, arg):
''' return a bool for the arg '''
if arg is None or isinstance(arg, bool):
return arg
if isinstance(arg, string_types):
arg = arg.lower()
if arg in BOOLEANS_TRUE:
return True
elif arg in BOOLEANS_FALSE:
return False
else:
self.fail_json(msg='%s is not a valid boolean. Valid booleans include: %s' % (to_text(arg), ','.join(['%s' % x for x in BOOLEANS])))
def jsonify(self, data):
for encoding in ("utf-8", "latin-1"):
try:
return json.dumps(data, encoding=encoding)
# Old systems using the old simplejson module do not support the encoding keyword.
except TypeError:
try:
new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
except UnicodeDecodeError:
continue
return json.dumps(new_data)
except UnicodeDecodeError:
continue
self.fail_json(msg='Invalid unicode encoding encountered')
def from_json(self, data):
return json.loads(data)
def add_cleanup_file(self, path):
if path not in self.cleanup_files:
self.cleanup_files.append(path)
def do_cleanup_files(self):
for path in self.cleanup_files:
self.cleanup(path)
def _return_formatted(self, kwargs):
self.add_path_info(kwargs)
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
if 'warnings' in kwargs:
if isinstance(kwargs['warnings'], list):
for w in kwargs['warnings']:
self.warn(w)
else:
self.warn(kwargs['warnings'])
if self._warnings:
kwargs['warnings'] = self._warnings
if 'deprecations' in kwargs:
if isinstance(kwargs['deprecations'], list):
for d in kwargs['deprecations']:
self.warn(d)
else:
self.warn(kwargs['deprecations'])
if self._deprecations:
kwargs['deprecations'] = self._deprecations
kwargs = remove_values(kwargs, self.no_log_values)
print('\n%s' % self.jsonify(kwargs))
def exit_json(self, **kwargs):
''' return from the module, without error '''
if not 'changed' in kwargs:
kwargs['changed'] = False
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(0)
def fail_json(self, **kwargs):
''' return from the module, with an error message '''
assert 'msg' in kwargs, "implementation error -- msg to explain the error is required"
kwargs['failed'] = True
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(1)
def fail_on_missing_params(self, required_params=None):
''' This is for checking for required params when we can not check via argspec because we
need more information than is simply given in the argspec.
'''
if not required_params:
return
missing_params = []
for required_param in required_params:
if not self.params.get(required_param):
missing_params.append(required_param)
if missing_params:
self.fail_json(msg="missing required arguments: %s" % ','.join(missing_params))
def digest_from_file(self, filename, algorithm):
''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
if not os.path.exists(filename):
return None
if os.path.isdir(filename):
self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
# preserve old behaviour where the third parameter was a hash algorithm object
if hasattr(algorithm, 'hexdigest'):
digest_method = algorithm
else:
try:
digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
except KeyError:
self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
(filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
blocksize = 64 * 1024
infile = open(filename, 'rb')
block = infile.read(blocksize)
while block:
digest_method.update(block)
block = infile.read(blocksize)
infile.close()
return digest_method.hexdigest()
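# Illustrative usage sketch (hypothetical file names):
#   checksum = module.digest_from_file('/etc/hosts', 'sha256')       # hex digest, or None if missing
#   legacy = module.digest_from_file('/etc/hosts', hashlib.sha1())   # old-style: pass a hash object
# The file is read in 64 KiB blocks, so large files are hashed without being
# loaded into memory at once.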
def md5(self, filename):
''' Return MD5 hex digest of local file using digest_from_file().
Do not use this function unless you have no other choice for:
1) Optional backwards compatibility
2) Compatibility with a third party protocol
This function will not work on systems complying with FIPS-140-2.
Most uses of this function can use the module.sha1 function instead.
'''
if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
''' Return SHA1 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
''' Return SHA-256 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha256')
def backup_local(self, fn):
'''make a date-marked backup of the specified file, return the name of the backup file (empty string if the source file does not exist)'''
backupdest = ''
if os.path.exists(fn):
# backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
try:
shutil.copy2(fn, backupdest)
except (shutil.Error, IOError):
e = get_exception()
self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e))
return backupdest
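# Illustrative result sketch (hypothetical pid and timestamp): backing up
# '/etc/ssh/sshd_config' would produce a copy named something like
# '/etc/ssh/sshd_config.12345.2016-08-16@13:58:54~' and return that path;
# an empty string is returned when the source file does not exist.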
def cleanup(self, tmpfile):
if os.path.exists(tmpfile):
try:
os.unlink(tmpfile)
except OSError:
e = get_exception()
sys.stderr.write("could not cleanup %s: %s" % (tmpfile, e))
def atomic_move(self, src, dest, unsafe_writes=False):
'''atomically move src to dest, copying attributes from dest.
os.rename is used because it is an atomic operation; the rest of the function
works around limitations and corner cases and preserves the selinux context if possible'''
context = None
dest_stat = None
b_src = to_bytes(src, errors='surrogate_or_strict')
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.exists(b_dest):
try:
dest_stat = os.stat(b_dest)
# copy mode and ownership
os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)
# try to copy flags if possible
if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
try:
os.chflags(b_src, dest_stat.st_flags)
except OSError:
e = get_exception()
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and e.errno == getattr(errno, err):
break
else:
raise
except OSError:
e = get_exception()
if e.errno != errno.EPERM:
raise
if self.selinux_enabled():
context = self.selinux_context(dest)
else:
if self.selinux_enabled():
context = self.selinux_default_context(dest)
creating = not os.path.exists(b_dest)
try:
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(b_src, b_dest)
except (IOError, OSError):
e = get_exception()
if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
# only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied)
# and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e), exception=traceback.format_exc())
else:
b_dest_dir = os.path.dirname(b_dest)
# Use bytes here. In the shippable CI, this fails with
# a UnicodeError with surrogateescape'd strings for an unknown
# reason (doesn't happen in a local Ubuntu16.04 VM)
native_dest_dir = b_dest_dir
native_suffix = os.path.basename(b_dest)
native_prefix = b('.ansible_tmp')
try:
tmp_dest_fd, tmp_dest_name = tempfile.mkstemp( prefix=native_prefix, dir=native_dest_dir, suffix=native_suffix)
except (OSError, IOError):
e = get_exception()
self.fail_json(msg='The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), e))
except TypeError:
# We expect that this is happening because python3.4.x and
# below can't handle byte strings in mkstemp(). Traceback
# would end in something like:
# file = _os.path.join(dir, pre + name + suf)
# TypeError: can't concat bytes to str
self.fail_json(msg='Failed creating temp file for atomic move. This usually happens when using a Python3 version older than 3.5. Please use Python2.x or Python3.5 or greater.', exception=traceback.format_exc())
b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
try:
try:
# close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
os.close(tmp_dest_fd)
# leaves tmp file behind when sudo and not root
try:
shutil.move(b_src, b_tmp_dest_name)
except OSError:
# cleanup will happen by 'rm' of tempdir
# copy2 will preserve some metadata
shutil.copy2(b_src, b_tmp_dest_name)
if self.selinux_enabled():
self.set_context_if_different(
b_tmp_dest_name, context, False)
try:
tmp_stat = os.stat(b_tmp_dest_name)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
except OSError:
e = get_exception()
if e.errno != errno.EPERM:
raise
try:
os.rename(b_tmp_dest_name, b_dest)
except (shutil.Error, OSError, IOError):
e = get_exception()
if unsafe_writes and e.errno == errno.EBUSY:
self._unsafe_writes(b_tmp_dest_name, b_dest)
else:
self.fail_json(msg='Unable to rename file: %s to %s: %s' % (src, dest, e), exception=traceback.format_exc())
except (shutil.Error, OSError, IOError):
e = get_exception()
self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, e), exception=traceback.format_exc())
finally:
self.cleanup(b_tmp_dest_name)
if creating:
# make sure the file has the correct permissions
# based on the current value of umask
umask = os.umask(0)
os.umask(umask)
os.chmod(b_dest, DEFAULT_PERM & ~umask)
try:
os.chown(b_dest, os.geteuid(), os.getegid())
except OSError:
# We're okay with trying our best here. If the user is not
# root (or old Unices) they won't be able to chown.
pass
if self.selinux_enabled():
# rename might not preserve context
self.set_context_if_different(dest, context, False)
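# Illustrative usage sketch (hypothetical paths): a module would typically
# write new content to a temporary file and then promote it in one step:
#   fd, tmpfile = tempfile.mkstemp()
#   os.write(fd, b'new contents\n')
#   os.close(fd)
#   module.atomic_move(tmpfile, '/etc/myservice.conf')
# If a plain os.rename() is not possible (cross-device move, permissions,
# busy text file), the fallback path above copies via a temp file created in
# the destination directory; unsafe_writes=True additionally allows an
# in-place, non-atomic overwrite when even the rename of that temp file
# reports EBUSY.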
def _unsafe_writes(self, src, dest):
# sadly there are some situations where we cannot ensure atomicity; only if
# the user insists and we get the appropriate error do we update the file unsafely
try:
try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
finally: # assuring closed files in 2.4 compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError):
e = get_exception()
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, e), exception=traceback.format_exc())
def _read_from_pipes(self, rpipes, rfds, file_descriptor):
data = b('')
if file_descriptor in rfds:
data = os.read(file_descriptor.fileno(), 9000)
if data == b(''):
rpipes.remove(file_descriptor)
return data
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict'):
'''
Execute a command, returns rc, stdout, and stderr.
:arg args: is the command to run
* If args is a list, the command will be run with shell=False.
* If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
* If args is a string and use_unsafe_shell=True it runs with shell=True.
:kw check_rc: Whether to call fail_json in case of non zero RC.
Default False
:kw close_fds: See documentation for subprocess.Popen(). Default True
:kw executable: See documentation for subprocess.Popen(). Default None
:kw data: If given, information to write to the stdin of the command
:kw binary_data: If False, append a newline to the data. Default False
:kw path_prefix: If given, additional path to find the command in.
This adds to the PATH environment variable so helper commands in
the same directory can also be found
:kw cwd: If given, working directory to run the command inside
:kw use_unsafe_shell: See `args` parameter. Default False
:kw prompt_regex: Regex string (not a compiled regex) which can be
used to detect prompts in the stdout which would otherwise cause
the execution to hang (especially if no input data is specified)
:kw environ_update: dictionary to *update* os.environ with
:kw umask: Umask to be used when running the command. Default None
:kw encoding: Since we return native strings, on python3 we need to
know the encoding to use to transform from bytes to text. If you
want to always get bytes back, use encoding=None. The default is
"utf-8". This does not affect transformation of strings given as
args.
:kw errors: Since we return native strings, on python3 we need to
transform stdout and stderr from bytes to text. If the bytes are
undecodable in the ``encoding`` specified, then use this error
handler to deal with them. The default is ``surrogate_or_strict``
which means that the bytes will be decoded using the
surrogateescape error handler if available (available on all
python3 versions we support) otherwise a UnicodeError traceback
will be raised. This does not affect transformations of strings
given as args.
:returns: A 3-tuple of return code (integer), stdout (native string),
and stderr (native string). On python2, stdout and stderr are both
byte strings. On python3, stdout and stderr are text strings converted
according to the encoding and errors parameters. If you want byte
strings on python3, use encoding=None to turn decoding to text off.
'''
shell = False
if isinstance(args, list):
if use_unsafe_shell:
args = " ".join([pipes.quote(x) for x in args])
shell = True
elif isinstance(args, (binary_type, text_type)) and use_unsafe_shell:
shell = True
elif isinstance(args, (binary_type, text_type)):
# On python2.6 and below, shlex has problems with text type
# On python3, shlex needs a text type.
if PY2:
args = to_bytes(args, errors='surrogate_or_strict')
elif PY3:
args = to_text(args, errors='surrogateescape')
args = shlex.split(args)
else:
msg = "Argument 'args' to run_command must be list or string"
self.fail_json(rc=257, cmd=args, msg=msg)
prompt_re = None
if prompt_regex:
if isinstance(prompt_regex, text_type):
if PY3:
prompt_regex = to_bytes(prompt_regex, errors='surrogateescape')
elif PY2:
prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict')
try:
prompt_re = re.compile(prompt_regex, re.MULTILINE)
except re.error:
self.fail_json(msg="invalid prompt regular expression given to run_command")
# expand things like $HOME and ~
if not shell:
args = [ os.path.expanduser(os.path.expandvars(x)) for x in args if x is not None ]
rc = 0
msg = None
st_in = None
# Manipulate the environ we'll send to the new process
old_env_vals = {}
# We can set this from both an attribute and per call
for key, val in self.run_command_environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if environ_update:
for key, val in environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if path_prefix:
old_env_vals['PATH'] = os.environ['PATH']
os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
# If using test-module and explode, the remote lib path will resemble ...
# /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
# If using ansible or ansible-playbook with a remote system ...
# /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
# Clean out python paths set by ansiballz
if 'PYTHONPATH' in os.environ:
pypaths = os.environ['PYTHONPATH'].split(':')
pypaths = [x for x in pypaths \
if not x.endswith('/ansible_modlib.zip') \
and not x.endswith('/debug_dir')]
os.environ['PYTHONPATH'] = ':'.join(pypaths)
if not os.environ['PYTHONPATH']:
del os.environ['PYTHONPATH']
# create a printable version of the command for use
# in reporting later, which strips out things like
# passwords from the args list
to_clean_args = args
if PY2:
if isinstance(args, text_type):
to_clean_args = to_bytes(args)
else:
if isinstance(args, binary_type):
to_clean_args = to_text(args)
if isinstance(args, (text_type, binary_type)):
to_clean_args = shlex.split(to_clean_args)
clean_args = []
is_passwd = False
for arg in to_clean_args:
if is_passwd:
is_passwd = False
clean_args.append('********')
continue
if PASSWD_ARG_RE.match(arg):
sep_idx = arg.find('=')
if sep_idx > -1:
clean_args.append('%s=********' % arg[:sep_idx])
continue
else:
is_passwd = True
arg = heuristic_log_sanitize(arg, self.no_log_values)
clean_args.append(arg)
clean_args = ' '.join(pipes.quote(arg) for arg in clean_args)
if data:
st_in = subprocess.PIPE
kwargs = dict(
executable=executable,
shell=shell,
close_fds=close_fds,
stdin=st_in,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# store the pwd
prev_dir = os.getcwd()
# make sure we're in the right working directory
if cwd and os.path.isdir(cwd):
cwd = os.path.abspath(os.path.expanduser(cwd))
kwargs['cwd'] = cwd
try:
os.chdir(cwd)
except (OSError, IOError):
e = get_exception()
self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, str(e)))
old_umask = None
if umask:
old_umask = os.umask(umask)
try:
if self._debug:
self.log('Executing: ' + clean_args)
cmd = subprocess.Popen(args, **kwargs)
# the communication logic here is essentially taken from that
# of the _communicate() function in ssh.py
stdout = b('')
stderr = b('')
rpipes = [cmd.stdout, cmd.stderr]
if data:
if not binary_data:
data += '\n'
if isinstance(data, text_type):
data = to_bytes(data)
cmd.stdin.write(data)
cmd.stdin.close()
while True:
rfds, wfds, efds = select.select(rpipes, [], rpipes, 1)
stdout += self._read_from_pipes(rpipes, rfds, cmd.stdout)
stderr += self._read_from_pipes(rpipes, rfds, cmd.stderr)
# if we're checking for prompts, do it now
if prompt_re:
if prompt_re.search(stdout) and not data:
if encoding:
stdout = to_native(stdout, encoding=encoding, errors=errors)
else:
stdout = stdout
return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
# only break out if no pipes are left to read or
# the pipes are completely read and
# the process is terminated
if (not rpipes or not rfds) and cmd.poll() is not None:
break
# No pipes are left to read but process is not yet terminated
# Only then it is safe to wait for the process to be finished
# NOTE: Actually cmd.poll() is always None here if rpipes is empty
elif not rpipes and cmd.poll() is None:
cmd.wait()
# The process is terminated. Since no pipes to read from are
# left, there is no need to call select() again.
break
cmd.stdout.close()
cmd.stderr.close()
rc = cmd.returncode
except (OSError, IOError):
e = get_exception()
self.fail_json(rc=e.errno, msg=to_native(e), cmd=clean_args)
except Exception:
e = get_exception()
self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=clean_args)
# Restore env settings
for key, val in old_env_vals.items():
if val is None:
del os.environ[key]
else:
os.environ[key] = val
if old_umask:
os.umask(old_umask)
if rc != 0 and check_rc:
msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg)
# reset the pwd
os.chdir(prev_dir)
if encoding is not None:
return (rc, to_native(stdout, encoding=encoding, errors=errors),
to_native(stderr, encoding=encoding, errors=errors))
return (rc, stdout, stderr)
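# Illustrative usage sketch (hypothetical commands):
#   rc, out, err = module.run_command(['ls', '-l', '/tmp'])              # list form, shell=False
#   rc, out, err = module.run_command('ls -l /tmp', check_rc=True)       # string form is split for you
#   rc, out, err = module.run_command('cat | wc -l', use_unsafe_shell=True, data='a\nb\nc')
# With check_rc=True a non-zero return code calls fail_json(); with
# encoding=None the stdout/stderr values are returned as bytes instead of
# native strings.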
def append_to_file(self, filename, str):
filename = os.path.expandvars(os.path.expanduser(filename))
fh = open(filename, 'a')
fh.write(str)
fh.close()
def bytes_to_human(self, size):
return bytes_to_human(size)
# for backwards compatibility
pretty_bytes = bytes_to_human
def human_to_bytes(self, number, isbits=False):
return human_to_bytes(number, isbits)
#
# Backwards compat
#
# In 2.0, moved from inside the module to the toplevel
is_executable = is_executable
def get_module_path():
return os.path.dirname(os.path.realpath(__file__))
|
emersonsoftware/ansiblefork
|
lib/ansible/module_utils/basic.py
|
Python
|
gpl-3.0
| 98,105
|
[
"VisIt"
] |
fa23e4b59b4bc33c9051499a217e8f0080ba249274df128b288f019ac6d0a957
|
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import copy
import itertools
import textwrap
from traits import trait_base
from mpl_toolkits.axes_grid1 import make_axes_locatable
import warnings
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import hyperspy as hs
from distutils.version import LooseVersion
import logging
from hyperspy.defaults_parser import preferences
_logger = logging.getLogger(__name__)
def contrast_stretching(data, saturated_pixels):
"""Calculate bounds that leaves out a given percentage of the data.
Parameters
----------
data: numpy array
saturated_pixels: scalar
The percentage of pixels that are left out of the bounds. For example,
the low and high bounds of a value of 1 are the 0.5% and 99.5%
percentiles. It must be in the [0, 100] range.
Returns
-------
vmin, vmax: scalar
The low and high bounds
Raises
------
ValueError if the value of `saturated_pixels` is out of the valid range.
"""
# Sanity check
if not 0 <= saturated_pixels <= 100:
raise ValueError(
"saturated_pixels must be a scalar in the range[0, 100]")
nans = np.isnan(data)
if nans.any():
data = data[~nans]
vmin = np.percentile(data, saturated_pixels / 2.)
vmax = np.percentile(data, 100 - saturated_pixels / 2.)
return vmin, vmax
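# Illustrative sketch (hypothetical data): with saturated_pixels=1 the bounds
# leave out 0.5% of the data at each end, e.g.
#   data = np.arange(1000.)
#   vmin, vmax = contrast_stretching(data, 1)   # approximately (5.0, 994.0)
# NaN values are ignored when computing the percentiles.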
MPL_DIVERGING_COLORMAPS = [
"BrBG",
"bwr",
"coolwarm",
"PiYG",
"PRGn",
"PuOr",
"RdBu",
"RdGy",
"RdYIBu",
"RdYIGn",
"seismic",
"Spectral", ]
# Add reversed colormaps
MPL_DIVERGING_COLORMAPS += [cmap + "_r" for cmap in MPL_DIVERGING_COLORMAPS]
def centre_colormap_values(vmin, vmax):
"""Calculate vmin and vmax to set the colormap midpoint to zero.
Parameters
----------
vmin, vmax : scalar
The range of data to display.
Returns
-------
cvmin, cvmax : scalar
The values to obtain a centre colormap.
"""
absmax = max(abs(vmin), abs(vmax))
return -absmax, absmax
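# Illustrative sketch: an asymmetric range is widened symmetrically so that
# zero sits in the middle of the colormap, e.g.
#   centre_colormap_values(-1, 3)   -> (-3, 3)
#   centre_colormap_values(-5, 2)   -> (-5, 5)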
def create_figure(window_title=None,
_on_figure_window_close=None,
**kwargs):
"""Create a matplotlib figure.
This function adds the possibility to execute another function
when the figure is closed and to easily set the window title. Any
keyword argument is passed to the plt.figure function
Parameters
----------
window_title : string
_on_figure_window_close : function
Returns
-------
fig : plt.figure
"""
fig = plt.figure(**kwargs)
if window_title is not None:
fig.canvas.set_window_title(window_title)
if _on_figure_window_close is not None:
on_figure_window_close(fig, _on_figure_window_close)
return fig
def on_figure_window_close(figure, function):
"""Connects a close figure signal to a given function.
Parameters
----------
figure : mpl figure instance
function : function
"""
def function_wrapper(evt):
function()
figure.canvas.mpl_connect('close_event', function_wrapper)
def plot_RGB_map(im_list, normalization='single', dont_plot=False):
"""Plot 2 or 3 maps in RGB.
Parameters
----------
im_list : list of Signal2D instances
normalization : {'single', 'global'}
dont_plot : bool
Returns
-------
array: RGB matrix
"""
# from widgets import cursors
height, width = im_list[0].data.shape[:2]
rgb = np.zeros((height, width, 3))
rgb[:, :, 0] = im_list[0].data.squeeze()
rgb[:, :, 1] = im_list[1].data.squeeze()
if len(im_list) == 3:
rgb[:, :, 2] = im_list[2].data.squeeze()
if normalization == 'single':
for i in range(len(im_list)):
rgb[:, :, i] /= rgb[:, :, i].max()
elif normalization == 'global':
rgb /= rgb.max()
rgb = rgb.clip(0, rgb.max())
if not dont_plot:
figure = plt.figure()
ax = figure.add_subplot(111)
ax.frameon = False
ax.set_axis_off()
ax.imshow(rgb, interpolation='nearest')
# cursors.set_mpl_ax(ax)
figure.canvas.draw_idle()
else:
return rgb
def subplot_parameters(fig):
"""Returns a list of the subplot parameters of a mpl figure.
Parameters
----------
fig : mpl figure
Returns
-------
tuple : (left, bottom, right, top, wspace, hspace)
"""
wspace = fig.subplotpars.wspace
hspace = fig.subplotpars.hspace
left = fig.subplotpars.left
right = fig.subplotpars.right
top = fig.subplotpars.top
bottom = fig.subplotpars.bottom
return left, bottom, right, top, wspace, hspace
class ColorCycle:
_color_cycle = [mpl.colors.colorConverter.to_rgba(color) for color
in ('b', 'g', 'r', 'c', 'm', 'y', 'k')]
def __init__(self):
self.color_cycle = copy.copy(self._color_cycle)
def __call__(self):
if not self.color_cycle:
self.color_cycle = copy.copy(self._color_cycle)
return self.color_cycle.pop(0)
def plot_signals(signal_list, sync=True, navigator="auto",
navigator_list=None, **kwargs):
"""Plot several signals at the same time.
Parameters
----------
signal_list : list of BaseSignal instances
If sync is set to True, the signals must have the
same navigation shape, but not necessarily the same signal shape.
sync : True or False, default "True"
If True: the signals will share navigation, all the signals
must have the same navigation shape for this to work, but not
necessarily the same signal shape.
navigator : {"auto", None, "spectrum", "slider", BaseSignal}, default "auto"
See signal.plot docstring for full description
navigator_list : {List of navigator arguments, None}, default None
Set different navigator options for the signals. Must use valid
navigator arguments: "auto", None, "spectrum", "slider", or a
hyperspy Signal. The list must have the same size as signal_list.
If None, the argument specified in navigator will be used.
**kwargs
Any extra keyword arguments are passed to each signal `plot` method.
Example
-------
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> hs.plot.plot_signals([s_cl, s_ll])
Specifying the navigator:
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> hs.plot.plot_signals([s_cl, s_ll], navigator="slider")
Specifying the navigator for each signal:
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> s_edx = hs.load("edx.dm3")
>>> s_adf = hs.load("adf.dm3")
>>> hs.plot.plot_signals(
[s_cl, s_ll, s_edx], navigator_list=["slider",None,s_adf])
"""
import hyperspy.signal
if navigator_list:
if not (len(signal_list) == len(navigator_list)):
raise ValueError(
"signal_list and navigator_list must"
" have the same size")
if sync:
axes_manager_list = []
for signal in signal_list:
axes_manager_list.append(signal.axes_manager)
if not navigator_list:
navigator_list = []
if navigator is None:
navigator_list.extend([None] * len(signal_list))
elif navigator is "slider":
navigator_list.append("slider")
navigator_list.extend([None] * (len(signal_list) - 1))
elif isinstance(navigator, hyperspy.signal.BaseSignal):
navigator_list.append(navigator)
navigator_list.extend([None] * (len(signal_list) - 1))
elif navigator is "spectrum":
navigator_list.extend(["spectrum"] * len(signal_list))
elif navigator is "auto":
navigator_list.extend(["auto"] * len(signal_list))
else:
raise ValueError(
"navigator must be one of \"spectrum\",\"auto\","
" \"slider\", None, a Signal instance")
# Check to see if the spectra have the same navigational shapes
temp_shape_first = axes_manager_list[0].navigation_shape
for i, axes_manager in enumerate(axes_manager_list):
temp_shape = axes_manager.navigation_shape
if not (temp_shape_first == temp_shape):
raise ValueError(
"The spectra does not have the same navigation shape")
axes_manager_list[i] = axes_manager.deepcopy()
if i > 0:
for axis0, axisn in zip(axes_manager_list[0].navigation_axes,
axes_manager_list[i].navigation_axes):
axes_manager_list[i]._axes[axisn.index_in_array] = axis0
del axes_manager
for signal, navigator, axes_manager in zip(signal_list,
navigator_list,
axes_manager_list):
signal.plot(axes_manager=axes_manager,
navigator=navigator,
**kwargs)
# If sync is False
else:
if not navigator_list:
navigator_list = []
navigator_list.extend([navigator] * len(signal_list))
for signal, navigator in zip(signal_list, navigator_list):
signal.plot(navigator=navigator,
**kwargs)
def _make_heatmap_subplot(spectra):
from hyperspy._signals.signal2d import Signal2D
im = Signal2D(spectra.data, axes=spectra.axes_manager._get_axes_dicts())
im.metadata.General.title = spectra.metadata.General.title
im.plot()
return im._plot.signal_plot.ax
def _make_overlap_plot(spectra, ax, color="blue", line_style='-'):
if isinstance(color, str):
color = [color] * len(spectra)
if isinstance(line_style, str):
line_style = [line_style] * len(spectra)
for spectrum_index, (spectrum, color, line_style) in enumerate(
zip(spectra, color, line_style)):
x_axis = spectrum.axes_manager.signal_axes[0]
ax.plot(x_axis.axis, spectrum.data, color=color, ls=line_style)
_set_spectrum_xlabel(spectra if isinstance(spectra, hs.signals.BaseSignal)
else spectra[-1], ax)
ax.set_ylabel('Intensity')
ax.autoscale(tight=True)
def _make_cascade_subplot(
spectra, ax, color="blue", line_style='-', padding=1):
max_value = 0
for spectrum in spectra:
spectrum_yrange = (np.nanmax(spectrum.data) -
np.nanmin(spectrum.data))
if spectrum_yrange > max_value:
max_value = spectrum_yrange
if isinstance(color, str):
color = [color] * len(spectra)
if isinstance(line_style, str):
line_style = [line_style] * len(spectra)
for spectrum_index, (spectrum, color, line_style) in enumerate(
zip(spectra, color, line_style)):
x_axis = spectrum.axes_manager.signal_axes[0]
data_to_plot = ((spectrum.data - spectrum.data.min()) /
float(max_value) + spectrum_index * padding)
ax.plot(x_axis.axis, data_to_plot, color=color, ls=line_style)
_set_spectrum_xlabel(spectra if isinstance(spectra, hs.signals.BaseSignal)
else spectra[-1], ax)
ax.set_yticks([])
ax.autoscale(tight=True)
def _plot_spectrum(spectrum, ax, color="blue", line_style='-'):
x_axis = spectrum.axes_manager.signal_axes[0]
ax.plot(x_axis.axis, spectrum.data, color=color, ls=line_style)
def _set_spectrum_xlabel(spectrum, ax):
x_axis = spectrum.axes_manager.signal_axes[0]
ax.set_xlabel("%s (%s)" % (x_axis.name, x_axis.units))
def plot_images(images,
cmap=None,
no_nans=False,
per_row=3,
label='auto',
labelwrap=30,
suptitle=None,
suptitle_fontsize=18,
colorbar='multi',
centre_colormap="auto",
saturated_pixels=0,
scalebar=None,
scalebar_color='white',
axes_decor='all',
padding=None,
tight_layout=False,
aspect='auto',
min_asp=0.1,
namefrac_thresh=0.4,
fig=None,
vmin=None,
vmax=None,
*args,
**kwargs):
"""Plot multiple images as sub-images in one figure.
Parameters
----------
images : list
`images` should be a list of Signals (Images) to plot
If any signal is not an image, a ValueError will be raised
multi-dimensional images will have each plane plotted as a separate
image
cmap : matplotlib colormap, optional
The colormap used for the images, by default read from pyplot
no_nans : bool, optional
If True, set nans to zero for plotting.
per_row : int, optional
The number of plots in each row
label : None, str, or list of str, optional
Control the title labeling of the plotted images.
If None, no titles will be shown.
If 'auto' (default), function will try to determine suitable titles
using Signal2D titles, falling back to the 'titles' option if no good
short titles are detected.
Works best if all images to be plotted have the same beginning
to their titles.
If 'titles', the title from each image's metadata.General.title
will be used.
If any other single str, images will be labeled in sequence using
that str as a prefix.
If a list of str, the list elements will be used to determine the
labels (repeated, if necessary).
labelwrap : int, optional
integer specifying the number of characters that will be used on
one line
If the function returns an unexpected blank figure, lower this
value to reduce overlap of the labels between each figure
suptitle : str, optional
Title to use at the top of the figure. If called with label='auto',
this parameter will override the automatically determined title.
suptitle_fontsize : int, optional
Font size to use for super title at top of figure
colorbar : {'multi', None, 'single'}
Controls the type of colorbars that are plotted.
If None, no colorbar is plotted.
If 'multi' (default), individual colorbars are plotted for each
(non-RGB) image
If 'single', all (non-RGB) images are plotted on the same scale,
and one colorbar is shown for all
centre_colormap : {"auto", True, False}
If True the centre of the color scheme is set to zero. This is
specially useful when using diverging color schemes. If "auto"
(default), diverging color schemes are automatically centred.
saturated_pixels: scalar
The percentage of pixels that are left out of the bounds. For
example, the low and high bounds of a value of 1 are the 0.5% and
99.5% percentiles. It must be in the [0, 100] range.
scalebar : {None, 'all', list of ints}, optional
If None (or False), no scalebars will be added to the images.
If 'all', scalebars will be added to all images.
If list of ints, scalebars will be added to each image specified.
scalebar_color : str, optional
A valid MPL color string; will be used as the scalebar color
axes_decor : {'all', 'ticks', 'off', None}, optional
Controls how the axes are displayed on each image; default is 'all'
If 'all', both ticks and axis labels will be shown
If 'ticks', no axis labels will be shown, but ticks/labels will
If 'off', all decorations and frame will be disabled
If None, no axis decorations will be shown, but ticks/frame will
padding : None or dict, optional
This parameter controls the spacing between images.
If None, default options will be used
Otherwise, supply a dictionary with the spacing options as
keywords and desired values as values
Values should be supplied as used in pyplot.subplots_adjust(),
and can be:
'left', 'bottom', 'right', 'top', 'wspace' (width),
and 'hspace' (height)
tight_layout : bool, optional
If true, hyperspy will attempt to improve image placement in
figure using matplotlib's tight_layout
If false, repositioning images inside the figure will be left as
an exercise for the user.
aspect : str or numeric, optional
If 'auto', aspect ratio is auto determined, subject to min_asp.
If 'square', image will be forced onto square display.
If 'equal', aspect ratio of 1 will be enforced.
If float (or int/long), given value will be used.
min_asp : float, optional
Minimum aspect ratio to be used when plotting images
namefrac_thresh : float, optional
Threshold to use for auto-labeling. This parameter controls how
much of the titles must be the same for the auto-shortening of
labels to activate. Can vary from 0 to 1. Smaller values
encourage shortening of titles by auto-labeling, while larger
values will require more overlap in titles before activating the
auto-label code.
fig : mpl figure, optional
If set, the images will be plotted to an existing MPL figure
vmin, vmax : scalar or list of scalar, optional, default: None
If list of scalar, the length should match the number of images to
show.
A list of scalar is not compatible with a single colorbar.
See vmin, vmax of matplotlib.imshow() for more details.
*args, **kwargs, optional
Additional arguments passed to matplotlib.imshow()
Returns
-------
axes_list : list
a list of subplot axes that hold the images
See Also
--------
plot_spectra : Plotting of multiple spectra
plot_signals : Plotting of multiple signals
plot_histograms : Compare signal histograms
Notes
-----
`interpolation` is a useful parameter to provide as a keyword
argument to control how the space between pixels is interpolated. A
value of ``'nearest'`` will cause no interpolation between pixels.
`tight_layout` is known to be quite brittle, so an option is provided
to disable it. Turn this option off if output is not as expected,
or try adjusting `label`, `labelwrap`, or `per_row`
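Example
-------
A minimal illustrative call (``imgs`` stands for a list of 2D signals
and is not defined here):
>>> axes = hs.plot.plot_images(imgs, per_row=2, colorbar='single',
...                            scalebar='all', axes_decor='off')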
"""
from hyperspy.drawing.widgets import ScaleBar
from hyperspy.misc import rgb_tools
from hyperspy.signal import BaseSignal
if isinstance(images, BaseSignal) and len(images) == 1:
images.plot()
ax = plt.gca()
return ax
elif not isinstance(images, (list, tuple, BaseSignal)):
raise ValueError("images must be a list of image signals or "
"multi-dimensional signal."
" " + repr(type(images)) + " was given.")
# Get default colormap from pyplot:
if cmap is None:
cmap = plt.get_cmap().name
elif isinstance(cmap, mpl.colors.Colormap):
cmap = cmap.name
if centre_colormap == "auto":
if cmap in MPL_DIVERGING_COLORMAPS:
centre_colormap = True
else:
centre_colormap = False
# If input is >= 1D signal (e.g. for multi-dimensional plotting),
# copy it and put it in a list so labeling works out as (x,y) when plotting
if isinstance(images,
BaseSignal) and images.axes_manager.navigation_dimension > 0:
images = [images._deepcopy_with_new_data(images.data)]
n = 0
for i, sig in enumerate(images):
if sig.axes_manager.signal_dimension != 2:
raise ValueError("This method only plots signals that are images. "
"The signal dimension must be equal to 2. "
"The signal at position " + repr(i) +
" was " + repr(sig) + ".")
# increment n by the navigation size, or by 1 if the navigation size is
# <= 0
n += (sig.axes_manager.navigation_size
if sig.axes_manager.navigation_size > 0
else 1)
if isinstance(vmin, list):
if len(vmin) != n:
_logger.warning('The provided vmin values are ignored because the '
'length of the list does not match the number of '
'images')
vmin = [None] * n
else:
vmin = [vmin] * n
if isinstance(vmax, list):
if len(vmax) != n:
_logger.warning('The provided vmax values are ignored because the '
'length of the list does not match the number of '
'images')
vmax = [None] * n
else:
vmax = [vmax] * n
# Sort out the labeling:
div_num = 0
all_match = False
shared_titles = False
user_labels = False
if label is None:
pass
elif label == 'auto':
# Use some heuristics to try to get base string of similar titles
label_list = [x.metadata.General.title for x in images]
# Find the shortest common string between the image titles
# and pull that out as the base title for the sequence of images
# array in which to store arrays
res = np.zeros((len(label_list), len(label_list[0]) + 1))
res[:, 0] = 1
# j iterates the strings
for j in range(len(label_list)):
# i iterates length of substring test
for i in range(1, len(label_list[0]) + 1):
# stores whether or not characters in title match
res[j, i] = label_list[0][:i] in label_list[j]
# sum up the results (1 is True, 0 is False) and create
# a substring based on the minimum value (this will be
# the "smallest common string" between all the titles
if res.all():
basename = label_list[0]
div_num = len(label_list[0])
all_match = True
else:
div_num = int(min(np.sum(res, 1)))
basename = label_list[0][:div_num - 1]
all_match = False
# trim off any '(' or ' ' characters at end of basename
if div_num > 1:
while True:
if basename[len(basename) - 1] == '(':
basename = basename[:-1]
elif basename[len(basename) - 1] == ' ':
basename = basename[:-1]
else:
break
# namefrac is ratio of length of basename to the image name
# if it is high (e.g. over 0.5), we can assume that all images
# share the same base
if len(label_list[0]) > 0:
namefrac = float(len(basename)) / len(label_list[0])
else:
# If label_list[0] is empty, it means there was probably no
# title set originally, so nothing to share
namefrac = 0
if namefrac > namefrac_thresh:
# there was a significant overlap of label beginnings
shared_titles = True
# only use new suptitle if one isn't specified already
if suptitle is None:
suptitle = basename
else:
# there was not much overlap, so default back to 'titles' mode
shared_titles = False
label = 'titles'
div_num = 0
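# Worked example of the heuristic above (illustrative titles): for
# ['EDS map Fe', 'EDS map Ni'] the longest shared prefix is 'EDS map ',
# so basename becomes 'EDS map', namefrac = 7/10 = 0.7 > namefrac_thresh,
# shared_titles is True, the suptitle becomes 'EDS map' and the individual
# panels are titled with the remainders 'Fe' and 'Ni'.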
elif label == 'titles':
# Set label_list to each image's pre-defined title
label_list = [x.metadata.General.title for x in images]
elif isinstance(label, str):
# Set label_list to an indexed list, based off of label
label_list = [label + " " + repr(num) for num in range(n)]
elif isinstance(label, list) and all(
isinstance(x, str) for x in label):
label_list = label
user_labels = True
# If list of labels is longer than the number of images, just use the
# first n elements
if len(label_list) > n:
del label_list[n:]
if len(label_list) < n:
label_list *= (n // len(label_list)) + 1
del label_list[n:]
else:
raise ValueError("Did not understand input of labels.")
# Determine appropriate number of images per row
rows = int(np.ceil(n / float(per_row)))
if n < per_row:
per_row = n
# Set overall figure size and define figure (if not pre-existing)
if fig is None:
k = max(plt.rcParams['figure.figsize']) / max(per_row, rows)
f = plt.figure(figsize=(tuple(k * i for i in (per_row, rows))))
else:
f = fig
# Initialize list to hold subplot axes
axes_list = []
# Initialize list of rgb tags
isrgb = [False] * len(images)
# Check to see if there are any rgb images in list
# and tag them using the isrgb list
for i, img in enumerate(images):
if rgb_tools.is_rgbx(img.data):
isrgb[i] = True
# Determine how many non-rgb Images there are
non_rgb = list(itertools.compress(images, [not j for j in isrgb]))
if len(non_rgb) == 0 and colorbar is not None:
colorbar = None
warnings.warn("Sorry, colorbar is not implemented for RGB images.")
# Find global min and max values of all the non-rgb images for use with
# 'single' scalebar
if colorbar == 'single':
g_vmin, g_vmax = contrast_stretching(np.concatenate(
[i.data.flatten() for i in non_rgb]), saturated_pixels)
if isinstance(vmin, list):
_logger.warning('vmin has to be a scalar to be compatible with a '
'single colorbar')
else:
g_vmin = vmin if vmin is not None else g_vmin
if isinstance(vmax, list):
_logger.warning('vmax has to be a scalar to be compatible with a '
'single colorbar')
else:
g_vmax = vmax if vmax is not None else g_vmax
if centre_colormap:
g_vmin, g_vmax = centre_colormap_values(g_vmin, g_vmax)
# Check if we need to add a scalebar for some of the images
if isinstance(scalebar, list) and all(isinstance(x, int)
for x in scalebar):
scalelist = True
else:
scalelist = False
idx = 0
ax_im_list = [0] * len(isrgb)
# Loop through each image, adding subplot for each one
for i, ims in enumerate(images):
# Get handles for the signal axes and axes_manager
axes_manager = ims.axes_manager
if axes_manager.navigation_dimension > 0:
ims = ims._deepcopy_with_new_data(ims.data)
for j, im in enumerate(ims):
idx += 1
ax = f.add_subplot(rows, per_row, idx)
axes_list.append(ax)
data = im.data
# Enable RGB plotting
if rgb_tools.is_rgbx(data):
data = rgb_tools.rgbx2regular_array(data, plot_friendly=True)
l_vmin, l_vmax = None, None
else:
data = im.data
# Find min and max for contrast
l_vmin, l_vmax = contrast_stretching(data, saturated_pixels)
l_vmin = vmin[idx - 1] if vmin[idx - 1] is not None else l_vmin
l_vmax = vmax[idx - 1] if vmax[idx - 1] is not None else l_vmax
if centre_colormap:
l_vmin, l_vmax = centre_colormap_values(l_vmin, l_vmax)
# Remove NaNs (if requested)
if no_nans:
data = np.nan_to_num(data)
# Get handles for the signal axes and axes_manager
axes_manager = im.axes_manager
axes = axes_manager.signal_axes
# Set dimensions of images
xaxis = axes[0]
yaxis = axes[1]
extent = (
xaxis.low_value,
xaxis.high_value,
yaxis.high_value,
yaxis.low_value,
)
if not isinstance(aspect, (int, float)) and aspect not in [
'auto', 'square', 'equal']:
print('Did not understand aspect ratio input. '
'Using \'auto\' as default.')
aspect = 'auto'
if aspect == 'auto':
if float(yaxis.size) / xaxis.size < min_asp:
factor = min_asp * float(xaxis.size) / yaxis.size
elif float(yaxis.size) / xaxis.size > min_asp ** -1:
factor = min_asp ** -1 * float(xaxis.size) / yaxis.size
else:
factor = 1
asp = np.abs(factor * float(xaxis.scale) / yaxis.scale)
elif aspect == 'square':
asp = abs(extent[1] - extent[0]) / abs(extent[3] - extent[2])
elif aspect == 'equal':
asp = 1
elif isinstance(aspect, (int, float)):
asp = aspect
if 'interpolation' not in kwargs.keys():
kwargs['interpolation'] = 'nearest'
# Plot image data, using vmin and vmax to set bounds,
# or allowing them to be set automatically if using individual
# colorbars
if colorbar == 'single' and not isrgb[i]:
axes_im = ax.imshow(data,
cmap=cmap, extent=extent,
vmin=g_vmin, vmax=g_vmax,
aspect=asp,
*args, **kwargs)
ax_im_list[i] = axes_im
else:
axes_im = ax.imshow(data,
cmap=cmap,
extent=extent,
vmin=l_vmin,
vmax=l_vmax,
aspect=asp,
*args, **kwargs)
ax_im_list[i] = axes_im
# If an axis trait is undefined, fall back to tick-only decorations:
if isinstance(xaxis.units, trait_base._Undefined) or \
isinstance(yaxis.units, trait_base._Undefined) or \
isinstance(xaxis.name, trait_base._Undefined) or \
isinstance(yaxis.name, trait_base._Undefined):
if axes_decor == 'all':
warnings.warn(
'Axes labels were requested, but one '
'or both of the '
'axes units and/or name are undefined. '
'Axes decorations have been set to '
'\'ticks\' instead.')
axes_decor = 'ticks'
# If all traits are defined, set labels as appropriate:
else:
ax.set_xlabel(axes[0].name + " axis (" + axes[0].units + ")")
ax.set_ylabel(axes[1].name + " axis (" + axes[1].units + ")")
if label:
if all_match:
title = ''
elif shared_titles:
title = label_list[i][div_num - 1:]
else:
if len(ims) == n:
# This is true if we are plotting just 1
# multi-dimensional Signal2D
title = label_list[idx - 1]
elif user_labels:
title = label_list[idx - 1]
else:
title = label_list[i]
if ims.axes_manager.navigation_size > 1 and not user_labels:
title += " %s" % str(ims.axes_manager.indices)
ax.set_title(textwrap.fill(title, labelwrap))
# Set axes decorations based on user input
set_axes_decor(ax, axes_decor)
# If using independent colorbars, add them
if colorbar == 'multi' and not isrgb[i]:
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.05)
plt.colorbar(axes_im, cax=cax)
# Add scalebars as necessary
if (scalelist and idx - 1 in scalebar) or scalebar == 'all':
ax.scalebar = ScaleBar(
ax=ax,
units=axes[0].units,
color=scalebar_color,
)
# If using a single colorbar, add it, and do tight_layout, ensuring that
# a colorbar is only added based off of non-rgb Images:
if colorbar == 'single':
foundim = None
for i in range(len(isrgb)):
if (not isrgb[i]) and foundim is None:
foundim = i
if foundim is not None:
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([0.9, 0.1, 0.03, 0.8])
f.colorbar(ax_im_list[foundim], cax=cbar_ax)
if tight_layout:
# tight_layout, leaving room for the colorbar
plt.tight_layout(rect=[0, 0, 0.9, 1])
elif tight_layout:
plt.tight_layout()
elif tight_layout:
plt.tight_layout()
# Set top bounds for shared titles and add suptitle
if suptitle:
f.subplots_adjust(top=0.85)
f.suptitle(suptitle, fontsize=suptitle_fontsize)
# If we want to plot scalebars, loop through the list of axes and add them
if scalebar is None or scalebar is False:
# Do nothing if no scalebars are called for
pass
elif scalebar == 'all':
# scalebars were taken care of in the plotting loop
pass
elif scalelist:
# scalebars were taken care of in the plotting loop
pass
else:
raise ValueError("Did not understand scalebar input. Must be None, "
"\'all\', or list of ints.")
# Adjust subplot spacing according to user's specification
if padding is not None:
plt.subplots_adjust(**padding)
return axes_list
def set_axes_decor(ax, axes_decor):
if axes_decor == 'off':
ax.axis('off')
elif axes_decor == 'ticks':
ax.set_xlabel('')
ax.set_ylabel('')
elif axes_decor == 'all':
pass
elif axes_decor is None:
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_xticklabels([])
ax.set_yticklabels([])
def plot_spectra(
spectra,
style='overlap',
color=None,
line_style=None,
padding=1.,
legend=None,
legend_picking=True,
legend_loc='upper right',
fig=None,
ax=None,
**kwargs):
"""Plot several spectra in the same figure.
Extra keyword arguments are passed to `matplotlib.figure`.
Parameters
----------
spectra : iterable object
Ordered spectra list to plot. If `style` is "cascade" or "mosaic"
the spectra can have different size and axes.
style : {'overlap', 'cascade', 'mosaic', 'heatmap'}
The style of the plot.
color : matplotlib color or a list of them or `None`
Sets the color of the lines of the plots (no action on 'heatmap').
If a list, if its length is less than the number of spectra to plot,
the colors will be cycled. If `None`, use default matplotlib color
cycle.
line_style : matplotlib line style or a list of them or `None`
Sets the line style of the plots (no action on 'heatmap').
The main line styles are '-', '--', 'steps', '-.' and ':'.
If a list whose length is less than the number of
spectra to plot, line_style will be cycled.
If `None`, continuous lines ('-') are used.
padding : float, optional, default 1.0
Option for "cascade". 1 guarantees that there is no overlapping.
However, in many cases a value between 0 and 1 can produce a tighter
plot without overlapping. Negative values have the same effect but
reverse the order of the spectra without reversing the order of the
colors.
legend : None or list of str or 'auto'
If a list of strings, the legend for "cascade" or the title for "mosaic" is
displayed. If 'auto', the title of each spectrum (metadata.General.title)
is used.
legend_picking : bool
If True, a spectrum can be toggled on and off by clicking on
its legend line.
legend_loc : str or int
This parameter controls where the legend is placed on the figure;
see the pyplot.legend docstring for valid values
fig : matplotlib figure or None
If None, a default figure will be created. Specifying fig will
not work for the 'heatmap' style.
ax : matplotlib ax (subplot) or None
If None, a default ax will be created. Will not work for 'mosaic'
or 'heatmap' style.
**kwargs
remaining keyword arguments are passed to matplotlib.figure() or
matplotlib.subplots(). Has no effect on 'heatmap' style.
Example
-------
>>> s = hs.load("some_spectra")
>>> hs.plot.plot_spectra(s, style='cascade', color='red', padding=0.5)
To save the plot as a png-file
>>> hs.plot.plot_spectra(s).figure.savefig("test.png")
Returns
-------
ax: matplotlib axes or list of matplotlib axes
An array is returned when `style` is "mosaic".
"""
import hyperspy.signal
def _reverse_legend(ax_, legend_loc_):
"""
Reverse the ordering of a matplotlib legend (to be more consistent
with the default ordering of plots in the 'cascade' and 'overlap'
styles
Parameters
----------
ax_: matplotlib axes
legend_loc_: str or int
This parameter controls where the legend is placed on the
figure; see the pyplot.legend docstring for valid values
"""
l = ax_.get_legend()
labels = [lb.get_text() for lb in list(l.get_texts())]
handles = l.legendHandles
ax_.legend(handles[::-1], labels[::-1], loc=legend_loc_)
# Before v1.3 the default was read from the preferences.
if style == "default":
style = "overlap"
if color is not None:
if isinstance(color, str):
color = itertools.cycle([color])
elif hasattr(color, "__iter__"):
color = itertools.cycle(color)
else:
raise ValueError("Color must be None, a valid matplotlib color "
"string or a list of valid matplotlib colors.")
else:
if LooseVersion(mpl.__version__) >= "1.5.3":
color = itertools.cycle(
plt.rcParams['axes.prop_cycle'].by_key()["color"])
else:
color = itertools.cycle(plt.rcParams['axes.color_cycle'])
if line_style is not None:
if isinstance(line_style, str):
line_style = itertools.cycle([line_style])
elif hasattr(line_style, "__iter__"):
line_style = itertools.cycle(line_style)
else:
raise ValueError("line_style must be None, a valid matplotlib"
" line_style string or a list of valid matplotlib"
" line_style.")
else:
line_style = ['-'] * len(spectra)
if legend is not None:
if isinstance(legend, str):
if legend == 'auto':
legend = [spec.metadata.General.title for spec in spectra]
else:
raise ValueError("legend must be None, 'auto' or a list of"
" string")
elif hasattr(legend, "__iter__"):
legend = itertools.cycle(legend)
if style == 'overlap':
if fig is None:
fig = plt.figure(**kwargs)
if ax is None:
ax = fig.add_subplot(111)
_make_overlap_plot(spectra,
ax,
color=color,
line_style=line_style,)
if legend is not None:
plt.legend(legend, loc=legend_loc)
_reverse_legend(ax, legend_loc)
if legend_picking is True:
animate_legend(figure=fig)
elif style == 'cascade':
if fig is None:
fig = plt.figure(**kwargs)
if ax is None:
ax = fig.add_subplot(111)
_make_cascade_subplot(spectra,
ax,
color=color,
line_style=line_style,
padding=padding)
if legend is not None:
plt.legend(legend, loc=legend_loc)
_reverse_legend(ax, legend_loc)
elif style == 'mosaic':
default_fsize = plt.rcParams["figure.figsize"]
figsize = (default_fsize[0], default_fsize[1] * len(spectra))
fig, subplots = plt.subplots(
len(spectra), 1, figsize=figsize, **kwargs)
if legend is None:
legend = [legend] * len(spectra)
for spectrum, ax, color, line_style, legend in zip(
spectra, subplots, color, line_style, legend):
_plot_spectrum(spectrum, ax, color=color, line_style=line_style)
ax.set_ylabel('Intensity')
if legend is not None:
ax.set_title(legend)
if not isinstance(spectra, hyperspy.signal.BaseSignal):
_set_spectrum_xlabel(spectrum, ax)
if isinstance(spectra, hyperspy.signal.BaseSignal):
_set_spectrum_xlabel(spectrum, ax)
fig.tight_layout()
elif style == 'heatmap':
if not isinstance(spectra, hyperspy.signal.BaseSignal):
import hyperspy.utils
spectra = hyperspy.utils.stack(spectra)
with spectra.unfolded():
ax = _make_heatmap_subplot(spectra)
ax.set_ylabel('Spectra')
ax = ax if style != "mosaic" else subplots
return ax
def animate_legend(figure='last'):
"""Animate the legend of a figure.
A spectrum can be toggled on and off by clicking on its legend line.
Parameters
----------
figure: 'last' | matplotlib.figure
If 'last' pick the last figure
Note
----
Code inspired from legend_picking.py in the matplotlib gallery
"""
if figure == 'last':
figure = plt.gcf()
ax = plt.gca()
else:
ax = figure.axes[0]
lines = ax.lines
lined = dict()
leg = ax.get_legend()
for legline, origline in zip(leg.get_lines(), lines):
legline.set_picker(5) # 5 pts tolerance
lined[legline] = origline
def onpick(event):
# on the pick event, find the orig line corresponding to the
# legend proxy line, and toggle the visibility
legline = event.artist
origline = lined[legline]
vis = not origline.get_visible()
origline.set_visible(vis)
# Change the alpha on the line in the legend so we can see what lines
# have been toggled
if vis:
legline.set_alpha(1.0)
else:
legline.set_alpha(0.2)
figure.canvas.draw_idle()
figure.canvas.mpl_connect('pick_event', onpick)
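# Illustrative usage sketch (assumes `s1` and `s2` are 1D signals; they are
# not defined here): plot with a static legend, then enable picking on the
# current figure so clicking a legend entry toggles the matching line.
#   ax = plot_spectra([s1, s2], legend=['first', 'second'],
#                     legend_picking=False)
#   animate_legend()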
def plot_histograms(signal_list,
bins='freedman',
range_bins=None,
color=None,
line_style=None,
legend='auto',
fig=None,
**kwargs):
"""Plot the histogram of every signal in the list in the same figure.
This function creates a histogram for each signal and plots the list with
the `utils.plot.plot_spectra` function.
Parameters
----------
signal_list : iterable
Ordered spectra list to plot. If `style` is "cascade" or "mosaic"
the spectra can have different size and axes.
bins : int or list or str, optional
If bins is a string, then it must be one of:
'knuth' : use Knuth's rule to determine bins
'scotts' : use Scott's rule to determine bins
'freedman' : use the Freedman-Diaconis rule to determine bins
'blocks' : use bayesian blocks for dynamic bin widths
range_bins : tuple or None, optional.
the minimum and maximum range for the histogram. If not specified,
it will be (x.min(), x.max())
color : valid matplotlib color or a list of them or `None`, optional.
Sets the color of the lines of the plots. If a list whose length is
less than the number of spectra to plot, the colors will be cycled.
If `None`, the default matplotlib color cycle is used.
line_style : valid matplotlib line style or a list of them or `None`,
optional.
The main line styles are '-', '--', 'steps', '-.' and ':'.
If a list whose length is less than the number of
spectra to plot, line_style will be cycled.
If `None`, 'steps' is used.
legend : None or list of str or 'auto', optional.
Display a legend. If 'auto', the title of each spectrum
(metadata.General.title) is used.
legend_picking : bool, optional.
If True, a spectrum can be toggled on and off by clicking on
its legend line.
fig : matplotlib figure or None, optional.
If None, a default figure will be created.
**kwargs
other keyword arguments (weights and density) are described in
np.histogram().
Example
-------
Histograms of two random chi-square distributions
>>> img = hs.signals.Signal2D(np.random.chisquare(1,[10,10,100]))
>>> img2 = hs.signals.Signal2D(np.random.chisquare(2,[10,10,100]))
>>> hs.plot.plot_histograms([img,img2],legend=['hist1','hist2'])
Returns
-------
ax: matplotlib axes or list of matplotlib axes
An array is returned when `style` is "mosaic".
"""
hists = []
for obj in signal_list:
hists.append(obj.get_histogram(bins=bins,
range_bins=range_bins, **kwargs))
if line_style is None:
line_style = 'steps'
return plot_spectra(hists, style='overlap', color=color,
line_style=line_style, legend=legend, fig=fig)
|
CodeMonkeyJan/hyperspy
|
hyperspy/drawing/utils.py
|
Python
|
gpl-3.0
| 47,617
|
[
"ADF"
] |
def92fb30484063d82f37011b97be8798820476e8ef25e8752a3bd54efbdf669
|
# $Id$
#
# Copyright (C) 2003-2006 Greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" class definitions for similarity screening
See _SimilarityScreener_ for overview of required API
"""
from rdkit import DataStructs
from rdkit import six
from rdkit.DataStructs import TopNContainer
class SimilarityScreener(object):
""" base class
important attributes:
probe: the probe fingerprint against which we screen.
metric: a function that takes two arguments and returns a similarity
measure between them
dataSource: the source pool from which to draw, needs to support
a next() method
fingerprinter: a function that takes a molecule and returns a
fingerprint of the appropriate format
**Notes**
subclasses must support either an iterator interface
or __len__ and __getitem__
"""
def __init__(self, probe=None, metric=None, dataSource=None, fingerprinter=None):
self.metric = metric
self.dataSource = dataSource
self.fingerprinter = fingerprinter
self.probe = probe
def Reset(self):
""" used to reset screeners that behave as iterators """
pass
# FIX: add setters/getters for attributes
def SetProbe(self, probeFingerprint):
""" sets our probe fingerprint """
self.probe = probeFingerprint
def GetSingleFingerprint(self, probe):
""" returns a fingerprint for a single probe object
This is potentially useful in initializing our internal
probe object.
"""
return self.fingerprinter(probe)
class ThresholdScreener(SimilarityScreener):
""" Used to return all compounds that have a similarity
to the probe beyond a threshold value
**Notes**:
- This is as lazy as possible, so the data source isn't
queried until the client asks for a hit.
- In addition to being lazy, this class is as thin as possible.
(Who'd have thought it was possible!)
Hits are *not* stored locally, so if a client resets
the iteration and starts over, the same amount of work must
be done to retrieve the hits.
- The thinness and laziness forces us to support only forward
iteration (not random access)
"""
def __init__(self, threshold, **kwargs):
SimilarityScreener.__init__(self, **kwargs)
self.threshold = threshold
self.dataIter = iter(self.dataSource)
# FIX: add setters/getters for attributes
def _nextMatch(self):
""" *Internal use only* """
done = 0
res = None
sim = 0
while not done:
# this is going to crap out when the data source iterator finishes,
# that's how we stop when no match is found
obj = six.next(self.dataIter)
fp = self.fingerprinter(obj)
sim = DataStructs.FingerprintSimilarity(fp, self.probe, self.metric)
if sim >= self.threshold:
res = obj
done = 1
return sim, res
def Reset(self):
""" used to reset our internal state so that iteration
starts again from the beginning
"""
self.dataSource.reset()
self.dataIter = iter(self.dataSource)
def __iter__(self):
""" returns an iterator for this screener
"""
self.Reset()
return self
def next(self):
""" required part of iterator interface """
return self._nextMatch()
__next__ = next
class TopNScreener(SimilarityScreener):
""" A screener that only returns the top N hits found
**Notes**
- supports forward iteration and getitem
"""
def __init__(self, num, **kwargs):
SimilarityScreener.__init__(self, **kwargs)
self.numToGet = num
self.topN = None
self._pos = 0
def Reset(self):
self._pos = 0
def __iter__(self):
if self.topN is None:
self._initTopN()
self.Reset()
return self
def next(self):
if self._pos >= self.numToGet:
raise StopIteration
else:
res = self.topN[self._pos]
self._pos += 1
return res
__next__ = next
def _initTopN(self):
self.topN = TopNContainer.TopNContainer(self.numToGet)
for obj in self.dataSource:
fp = self.fingerprinter(obj)
sim = DataStructs.FingerprintSimilarity(fp, self.probe, self.metric)
self.topN.Insert(sim, obj)
def __len__(self):
if self.topN is None:
self._initTopN()
return self.numToGet
def __getitem__(self, idx):
if self.topN is None:
self._initTopN()
return self.topN[idx]
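# Illustrative usage sketch, not part of the original module. Assumptions:
# `mols` is a plain list of RDKit molecules (any iterable data source works
# for TopNScreener), and RDKFingerprint/TanimotoSimilarity are one possible
# fingerprinter/metric pair; nothing above mandates them.
#
#   from rdkit import Chem
#   probe = Chem.RDKFingerprint(Chem.MolFromSmiles('c1ccccc1O'))
#   screener = TopNScreener(5, probe=probe,
#                           metric=DataStructs.TanimotoSimilarity,
#                           dataSource=mols,
#                           fingerprinter=Chem.RDKFingerprint)
#   for entry in screener:   # entries come back from the TopNContainer
#       print(entry)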
|
rvianello/rdkit
|
rdkit/Chem/Fingerprints/SimilarityScreener.py
|
Python
|
bsd-3-clause
| 4,634
|
[
"RDKit"
] |
4525d318ceef042af50170b5f170ac9bdb2da0f74f00bd44d16769a194f1b9fb
|
"""
blast module
Requirements:
- getSequence uses BLAST's fastacmd
"""
import os, sys, copy, re
from mungoCore import *
import fasta, sequence
from useful import smartopen
def sign_to_strand(x):
if x<0:
return '-'
else:
return '+'
def test_to_strand(test):
if test:
return '+'
else:
return '-'
class HSP(AbstractFeature):
"""HSP feature"""
attributes = ['queryId','subjectId','pcId','alignLength',
'matches','mismatches','qStart','qEnd','sStart','sEnd','eValue',
'bitScore']
converters = [
('pcId', float),
('alignLength', int),
('matches', int),
('mismatches', int),
('qStart', int),
('qEnd', int),
('sStart', int),
('sEnd', int),
('eValue', float),
('bitScore', float)
]
format = attributesToFormat(attributes)
def __init__(self, *args, **kw):
super(HSP,self).__init__(*args,**kw)
if self.sStart>self.sEnd:
self.sStart,self.sEnd = self.sEnd,self.sStart
self._strand = '-'
else:
self._strand = '+'
def __repr__(self):
self.forcePlusStrand()
output = self.format % self.__dict__
self.forceCorrectStrand()
return output
def __eq__(self,x):
if self.__dict__==x.__dict__:
return True
else:
return False
def __hash__(self):
return hash("%(queryId)s %(subjectId)s:%(sStart)i-%(sEnd)i" % self.__dict__)
def addField(self, field, default=None):
if not field in self.attributes:
self.attributes.append(field)
self.format = self.format + '\t%%(%s)s' % field
self.__dict__[field] = default
def strand(self):
strand = test_to_strand(
self._strand==sign_to_strand(self.sEnd-self.sStart))
return strand
def swapStartEnd(self):
self.sStart,self.sEnd = self.sEnd,self.sStart
self._strand = {'+': '-', '-': '+'}[self._strand]
def forcePlusStrand(self):
if self._strand=='-':
self.sStart,self.sEnd = self.sEnd,self.sStart
self._strand = '+'
def forceCorrectStrand(self):
if self._strand=='+' and self.sStart<self.sEnd:
self.sStart,self.sEnd = self.sEnd,self.sStart
self._strand = '-'
def convertBlockToGenomeCoords(self):
"""Convert block to genome coordinates."""
try:
for delimiter in [':', '.', '-']:
self.subjectId = self.subjectId.replace(delimiter, ' ')
tokens = self.subjectId.split()
self.subjectId = tokens[0]
self.sStart += int(tokens[1])-1
self.sEnd += int(tokens[1])-1
except Exception, e:
print e
def convertGenomeToBlockCoords(self, L, blockSize=5000000):
"""Convert genome to block coordinates.
@param L: Length of chromosome
@param blockSize: Length of block (Default = 5000000)
"""
self.subjectId,self.sStart,self.sEnd = fasta.genomeToBlock(
self.subjectId, self.sStart, self.sEnd, L, blockSize=blockSize)
def getSequence(self, blastDb, accessionConverter=lambda x: x,
padFivePrime=0, padThreePrime=0, getAll=True):
if getAll:
start = 0
end = 0
else:
start = max(1,self.sStart-padFivePrime)
end = self.sEnd+padThreePrime
accession = accessionConverter(self.subjectId)
h,s = getSequence(blastDb, accession, start, end, self.strand())
return h,s
def BlastFile(iFileHandle, multi=False, **kw):
"""Factory function for Reader and Writer classes
@param iFileHandle: BLAST file name or object
"""
if multi:
return MultiBlastReader(iFileHandle, **kw)
else:
return BlastReader(iFileHandle, **kw)
class BlastReader(AbstractDataReader):
def __init__(self, iFileHandle, eValueCutoff=None, **kw):
"""Constructor
@param iFileHandle: Input file or name
"""
super(BlastReader, self).__init__(iFileHandle)
self.eValueCutoff = eValueCutoff
def iterBestHits(self):
oldQueryId = None
for hsp in self:
if hsp.queryId!=oldQueryId:
yield hsp
oldQueryId = copy.copy(hsp.queryId)
def bestHits(self):
data = []
for bh in self.iterBestHits():
data.append(bh)
return data
def firstHit(self):
for hsp in self:
return hsp
def _generator(self):
for line in self.iFile:
line = line.strip()
if line and line[0]!='#':
tokens = line.split('\t')
hsp = HSP(tokens)
if not self.eValueCutoff or hsp.eValue<=self.eValueCutoff:
yield HSP(tokens)
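# Illustrative usage sketch (the file name is hypothetical): HSP expects the
# 12 tab-separated columns of legacy "-m 8" / tabular BLAST output, in the
# order listed in HSP.attributes.
#   reader = BlastFile('hits.blast', eValueCutoff=1e-10)
#   for hsp in reader.iterBestHits():
#       print hsp.queryId, hsp.subjectId, hsp.eValue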
class MultiBlastReader(BlastReader):
"""
Subclass of BlastReader for parsing the results of aligning
a multi-fasta file to a blast database.
"""
def bestHits(self):
hsps = []
oldQueryId = None
for hsp in self:
if hsp.queryId!=oldQueryId and not oldQueryId is None:
if len(hsps)>1:
yield hsps[0], hsps[1], len(hsps)
else:
yield hsps[0], None, len(hsps)
hsps = []
hsps.append(hsp)
oldQueryId = copy.copy(hsp.queryId)
if len(hsps)>1:
yield hsps[0], hsps[1], len(hsps)
elif len(hsps)==1:
yield hsps[0], None, len(hsps)
def groupByQuerySeq(self):
hsps = []
oldQueryId = None
for hsp in self:
if hsp.queryId!=oldQueryId and not oldQueryId is None:
yield hsps
hsps = []
hsps.append(hsp)
oldQueryId = copy.copy(hsp.queryId)
yield hsps
class NotFoundException(Exception):
pass
def getSequence(blastDb, accession, start=0, end=0, strand='+', padding=0, debug=False):
"""Load a sequence from a BLAST database.
@param blastDb: BLAST database
@param accession: Accession name
@param start: Start coordinate (Default: 0, extract from start of sequence)
@param end: End coordinate (Default: 0, extract to the end of sequence)
@param strand: Strand (Default: '+')
@param padding: Sequence padding (Default: 0)
@returns: (header,seq)
"""
if start>end: start,end = end,start
cmd = 'fastacmd -d %s -s "%s" -L %i,%i' % (blastDb,accession,start,end)
if debug: print cmd
p = os.popen(cmd)
header = p.readline()[1:].strip()
if not header:
raise Exception('BLAST failure')
seq = []
for line in p:
seq.append(line.strip())
seq = ''.join(seq)
if not seq:
print blastDb, accession
raise NotFoundException()
if strand=='-':
seq = sequence.reverseComplement(seq)
return header,seq
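# Worked example of the command getSequence builds (database name and
# accession are hypothetical):
#   getSequence('refdb', 'chr1', 100, 200, strand='-')
# runs: fastacmd -d refdb -s "chr1" -L 100,200
# and returns the FASTA header plus the reverse-complemented sequence.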
class TooManyBlocks(Exception):
def __init__(self, chrom, start, end):
self.chrom = chrom
self.start = start
self.end = end
print chrom,start,end
def genomeToBlock(chrom, start, end, L=1000000000, blockSize=5000000, delimiter='.'):
"""Transform genomic to block coordinates.
@param chrom: Chromosome
@param start: Start coordinate
@param end: End coordinate
@param L: Chromosome length
@param blockSize: Block size (Default: 5000000)
@returns: (block, relStart, relEnd)
"""
strand = '+'
if start>end:
strand = '-'
start,end = end,start
blockStart = 1 + blockSize*int(start/blockSize)
blockEnd = min(L, blockStart+blockSize-1)
block = '%s%s%i-%i' % (chrom, delimiter, blockStart, blockEnd)
relStart = start % blockSize
relEnd = end % blockSize
if strand=='-':
relStart,relEnd = relEnd,relStart
return block,relStart,relEnd
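# Worked example (blockSize=5000000): genomeToBlock('chr1', 7000000, 7000500)
# falls in the second block, so it returns
#   ('chr1.5000001-10000000', 2000000, 2000500)
# i.e. the block label plus the start/end relative to that block.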
def genomeToBlocks(chrom, start, end, L, blockSize=5000000):
"""Transform genomic to block coordinates
(chrom.blockStart-blockEnd:start-end).
@param chrom: Chromosome
@param start: Start coordinate
@param end: End coordinate
@param L: Chromosome length
@param blockSize: Block size (Default: 5000000)
@returns: a tuple of lists (blocks,startInBlocks,endInBlocks).
"""
strand = '+'
if start>end:
strand = '-'
start,end = end,start
startBlock = 1 + blockSize*int(start/blockSize)
endBlock = min(L, blockSize*(1+int(end/blockSize)))
blocks = []
i = 0 ; imax = 200
s = startBlock
while s<endBlock:
e = min(L, s+blockSize-1)
blocks.append('%s.%i-%i' % (chrom,s,e))
s += blockSize
i += 1
if i>imax:
raise TooManyBlocks(chrom,start,end)
startInBlocks = [start % blockSize]
endInBlocks = [end % blockSize]
for i in xrange(len(blocks)-1):
startInBlocks.append(0)
endInBlocks.insert(0,0)
if strand=='-':
startInBlocks,endInBlocks = endInBlocks,startInBlocks
return blocks,startInBlocks,endInBlocks
def blockToGenome(block, startInBlock, endInBlock):
"""Transform block to genomic coordinates.
@param block: Block (chrom.blockStart-blockEnd)
@param startInBlock: Start coordinate in block
@param endInBlock: End coordinate in block
@returns: (chrom, gStart, gEnd)
"""
prog = re.compile('[\.|:]|-')
chrom,blockStart,blockEnd = prog.split(block)
gStart = int(blockStart)+startInBlock-1
gEnd = int(blockStart)+endInBlock-1
return chrom,gStart,gEnd
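# Worked example (inverse of the genomeToBlock example above):
#   blockToGenome('chr1.5000001-10000000', 2000000, 2000500)
# returns ('chr1', 7000000, 7000500).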
|
PapenfussLab/Mungo
|
mungo/blast.py
|
Python
|
artistic-2.0
| 9,823
|
[
"BLAST"
] |
12eb3ad45f5f17ff71ee7bdc76f72f79e510481b506ee3552fc1cd56ea467ab8
|
from galaxy import web, util
from galaxy.web.base.controller import BaseAPIController
from galaxy.web.framework.helpers import is_true
def get_id( base, format ):
if format:
return "%s.%s" % ( base, format )
else:
return base
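# Worked example: get_id('hg19', 'json') returns 'hg19.json', while
# get_id('hg19', None) returns 'hg19' unchanged; show() uses this to fold an
# optional 'format' keyword into the genome id it looks up.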
class GenomesController( BaseAPIController ):
"""
RESTful controller for interactions with genome data.
"""
@web.expose_api_anonymous
def index( self, trans, **kwd ):
"""
GET /api/genomes: returns a list of installed genomes
"""
return self.app.genomes.get_dbkeys( trans, **kwd )
@web.json
def show( self, trans, id, num=None, chrom=None, low=None, high=None, **kwd ):
"""
GET /api/genomes/{id}
Returns information about build <id>
"""
# Process kwds.
id = get_id( id, kwd.get( 'format', None ) )
reference = is_true( kwd.get( 'reference', False ) )
# Return info.
rval = None
if reference:
region = self.app.genomes.reference( trans, dbkey=id, chrom=chrom, low=low, high=high )
rval = { 'dataset_type': 'refseq', 'data': region.sequence }
else:
rval = self.app.genomes.chroms( trans, dbkey=id, num=num, chrom=chrom, low=low )
return rval
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/webapps/galaxy/api/genomes.py
|
Python
|
gpl-3.0
| 1,289
|
[
"Galaxy"
] |
9ddb07cbf0d55b4d07bdae7be9a463b041590bc49abba2674cb37f2761f42ba6
|
"""
Displays Agg images in the browser, with interactivity
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# The WebAgg backend is divided into two modules:
#
# - `backend_webagg_core.py` contains code necessary to embed a WebAgg
# plot inside of a web application, and communicate in an abstract
# way over a web socket.
#
# - `backend_webagg.py` contains a concrete implementation of a basic
# application, implemented with tornado.
import six
import datetime
import errno
import io
import json
import os
import random
import socket
import threading
try:
import tornado
except ImportError:
raise RuntimeError("The WebAgg backend requires Tornado.")
import tornado.web
import tornado.ioloop
import tornado.websocket
import matplotlib
from matplotlib import rcParams
from matplotlib import backend_bases
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
from . import backend_webagg_core as core
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasWebAgg(figure)
manager = core.FigureManagerWebAgg(canvas, num)
return manager
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
class Show(backend_bases.ShowBase):
def mainloop(self):
WebAggApplication.initialize()
url = "http://127.0.0.1:{port}{prefix}".format(
port=WebAggApplication.port,
prefix=WebAggApplication.url_prefix)
if rcParams['webagg.open_in_browser']:
import webbrowser
webbrowser.open(url)
else:
print("To view figure, visit {0}".format(url))
WebAggApplication.start()
show = Show().mainloop
class ServerThread(threading.Thread):
def run(self):
tornado.ioloop.IOLoop.instance().start()
webagg_server_thread = ServerThread()
class TimerTornado(backend_bases.TimerBase):
def _timer_start(self):
self._timer_stop()
if self._single:
ioloop = tornado.ioloop.IOLoop.instance()
self._timer = ioloop.add_timeout(
datetime.timedelta(milliseconds=self.interval),
self._on_timer)
else:
self._timer = tornado.ioloop.PeriodicCallback(
self._on_timer,
self.interval)
self._timer.start()
def _timer_stop(self):
if self._timer is not None:
self._timer.stop()
self._timer = None
def _timer_set_interval(self):
# Only stop and restart it if the timer has already been started
if self._timer is not None:
self._timer_stop()
self._timer_start()
class FigureCanvasWebAgg(core.FigureCanvasWebAggCore):
def show(self):
# show the figure window
show()
def new_timer(self, *args, **kwargs):
return TimerTornado(*args, **kwargs)
def start_event_loop(self, timeout):
backend_bases.FigureCanvasBase.start_event_loop_default(
self, timeout)
start_event_loop.__doc__ = \
backend_bases.FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
backend_bases.FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__ = \
backend_bases.FigureCanvasBase.stop_event_loop_default.__doc__
class WebAggApplication(tornado.web.Application):
initialized = False
started = False
class FavIcon(tornado.web.RequestHandler):
def get(self):
image_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'mpl-data', 'images')
self.set_header('Content-Type', 'image/png')
with open(os.path.join(image_path,
'matplotlib.png'), 'rb') as fd:
self.write(fd.read())
class SingleFigurePage(tornado.web.RequestHandler):
def __init__(self, application, request, **kwargs):
self.url_prefix = kwargs.pop('url_prefix', '')
return tornado.web.RequestHandler.__init__(self, application,
request, **kwargs)
def get(self, fignum):
fignum = int(fignum)
manager = Gcf.get_fig_manager(fignum)
ws_uri = 'ws://{req.host}{prefix}/'.format(req=self.request,
prefix=self.url_prefix)
self.render(
"single_figure.html",
prefix=self.url_prefix,
ws_uri=ws_uri,
fig_id=fignum,
toolitems=core.NavigationToolbar2WebAgg.toolitems,
canvas=manager.canvas)
class AllFiguresPage(tornado.web.RequestHandler):
def __init__(self, application, request, **kwargs):
self.url_prefix = kwargs.pop('url_prefix', '')
return tornado.web.RequestHandler.__init__(self, application,
request, **kwargs)
def get(self):
ws_uri = 'ws://{req.host}{prefix}/'.format(req=self.request,
prefix=self.url_prefix)
self.render(
"all_figures.html",
prefix=self.url_prefix,
ws_uri=ws_uri,
figures=sorted(
list(Gcf.figs.items()), key=lambda item: item[0]),
toolitems=core.NavigationToolbar2WebAgg.toolitems)
class MplJs(tornado.web.RequestHandler):
def get(self):
self.set_header('Content-Type', 'application/javascript')
js_content = core.FigureManagerWebAgg.get_javascript()
self.write(js_content)
class Download(tornado.web.RequestHandler):
def get(self, fignum, fmt):
fignum = int(fignum)
manager = Gcf.get_fig_manager(fignum)
# TODO: Move this to a central location
mimetypes = {
'ps': 'application/postscript',
'eps': 'application/postscript',
'pdf': 'application/pdf',
'svg': 'image/svg+xml',
'png': 'image/png',
'jpeg': 'image/jpeg',
'tif': 'image/tiff',
'emf': 'application/emf'
}
self.set_header('Content-Type', mimetypes.get(fmt, 'binary'))
buff = io.BytesIO()
manager.canvas.print_figure(buff, format=fmt)
self.write(buff.getvalue())
class WebSocket(tornado.websocket.WebSocketHandler):
supports_binary = True
def open(self, fignum):
self.fignum = int(fignum)
self.manager = Gcf.get_fig_manager(self.fignum)
self.manager.add_web_socket(self)
if hasattr(self, 'set_nodelay'):
self.set_nodelay(True)
def on_close(self):
self.manager.remove_web_socket(self)
def on_message(self, message):
message = json.loads(message)
# The 'supports_binary' message is on a client-by-client
# basis. The others affect the (shared) canvas as a
# whole.
if message['type'] == 'supports_binary':
self.supports_binary = message['value']
else:
manager = Gcf.get_fig_manager(self.fignum)
# It is possible for a figure to be closed,
# but a stale figure UI is still sending messages
# from the browser.
if manager is not None:
manager.handle_json(message)
def send_json(self, content):
self.write_message(json.dumps(content))
def send_binary(self, blob):
if self.supports_binary:
self.write_message(blob, binary=True)
else:
data_uri = "data:image/png;base64,{0}".format(
blob.encode('base64').replace('\n', ''))
self.write_message(data_uri)
def __init__(self, url_prefix=''):
if url_prefix:
assert url_prefix[0] == '/' and url_prefix[-1] != '/', \
'url_prefix must start with a "/" and not end with one.'
super(WebAggApplication, self).__init__(
[
# Static files for the CSS and JS
(url_prefix + r'/_static/(.*)',
tornado.web.StaticFileHandler,
{'path': core.FigureManagerWebAgg.get_static_file_path()}),
# An MPL favicon
(url_prefix + r'/favicon.ico', self.FavIcon),
# The page that contains all of the pieces
(url_prefix + r'/([0-9]+)', self.SingleFigurePage,
{'url_prefix': url_prefix}),
# The page that contains all of the figures
(url_prefix + r'/?', self.AllFiguresPage,
{'url_prefix': url_prefix}),
(url_prefix + r'/mpl.js', self.MplJs),
# Sends images and events to the browser, and receives
# events from the browser
(url_prefix + r'/([0-9]+)/ws', self.WebSocket),
# Handles the downloading (i.e., saving) of static images
(url_prefix + r'/([0-9]+)/download.([a-z0-9.]+)',
self.Download),
],
template_path=core.FigureManagerWebAgg.get_static_file_path())
@classmethod
def initialize(cls, url_prefix='', port=None):
if cls.initialized:
return
# Create the class instance
app = cls(url_prefix=url_prefix)
cls.url_prefix = url_prefix
# This port selection algorithm is borrowed, more or less
# verbatim, from IPython.
def random_ports(port, n):
"""
Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
"""
for i in range(min(5, n)):
yield port + i
for i in range(n - 5):
yield port + random.randint(-2 * n, 2 * n)
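# Worked example (illustrative numbers): random_ports(8988, 50) yields
# 8988..8992 first, then 45 more ports drawn uniformly from
# [8988 - 100, 8988 + 100], matching the docstring above.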
success = None
cls.port = rcParams['webagg.port']
for port in random_ports(cls.port, rcParams['webagg.port_retries']):
try:
app.listen(port)
except socket.error as e:
if e.errno != errno.EADDRINUSE:
raise
else:
cls.port = port
success = True
break
if not success:
raise SystemExit(
"The webagg server could not be started because an available "
"port could not be found")
cls.initialized = True
@classmethod
def start(cls):
if cls.started:
return
# Set the flag to True *before* blocking on IOLoop.instance().start()
cls.started = True
"""
IOLoop.running() was removed as of Tornado 2.4; see for example
https://groups.google.com/forum/#!topic/python-tornado/QLMzkpQBGOY
Thus there is no correct way to check if the loop has already been
launched. We may end up with two concurrently running loops in that
unlucky case with all the expected consequences.
"""
print("Press Ctrl+C to stop server")
try:
tornado.ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
print("Server stopped")
finally:
cls.started = False
def ipython_inline_display(figure):
import tornado.template
WebAggApplication.initialize()
if not webagg_server_thread.is_alive():
webagg_server_thread.start()
with open(os.path.join(
core.FigureManagerWebAgg.get_static_file_path(),
'ipython_inline_figure.html')) as fd:
tpl = fd.read()
fignum = figure.number
t = tornado.template.Template(tpl)
return t.generate(
prefix=WebAggApplication.url_prefix,
fig_id=fignum,
toolitems=core.NavigationToolbar2WebAgg.toolitems,
canvas=figure.canvas,
port=WebAggApplication.port).decode('utf-8')
FigureCanvas = FigureCanvasWebAgg
|
yavalvas/yav_com
|
build/matplotlib/lib/matplotlib/backends/backend_webagg.py
|
Python
|
mit
| 12,895
|
[
"VisIt"
] |
75c30758425d37ca29ff51d25670bd31b6aeb564066dcbca84c419b8c22b562f
|