hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d99a20277c32bb1e28312f42ab6d732f38323169
| 241
|
py
|
Python
|
quick_search/admin.py
|
naman1901/django-quick-search
|
7b93554ed9fa4721e52372f9fd1a395d94cc04a7
|
[
"MIT"
] | null | null | null |
quick_search/admin.py
|
naman1901/django-quick-search
|
7b93554ed9fa4721e52372f9fd1a395d94cc04a7
|
[
"MIT"
] | 2
|
2020-02-11T23:28:22.000Z
|
2020-06-05T19:27:40.000Z
|
quick_search/admin.py
|
HereWithoutPermission/django-quick-search
|
7b93554ed9fa4721e52372f9fd1a395d94cc04a7
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import SearchResult
# Register your models here.
class SearchResultAdmin(admin.ModelAdmin):
fields = ["query", "heading", "url", "text"]
admin.site.register(SearchResult, SearchResultAdmin)
| 30.125
| 52
| 0.771784
| 27
| 241
| 6.888889
| 0.703704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116183
| 241
| 8
| 52
| 30.125
| 0.873239
| 0.107884
| 0
| 0
| 0
| 0
| 0.088785
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 3
|
d9a88e74a4ac032ae6e8218d9ec1ed42e6092d32
| 375
|
py
|
Python
|
app/views/web/homestack.py
|
geudrik/hautomation
|
0baae29e85cd68658a0f8578de2e36e42945053f
|
[
"MIT"
] | null | null | null |
app/views/web/homestack.py
|
geudrik/hautomation
|
0baae29e85cd68658a0f8578de2e36e42945053f
|
[
"MIT"
] | null | null | null |
app/views/web/homestack.py
|
geudrik/hautomation
|
0baae29e85cd68658a0f8578de2e36e42945053f
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python2.7
# -*- coding: latin-1 -*-
from flask import Blueprint
from flask import current_app
from flask import render_template
from flask_login import login_required
homestack = Blueprint("homestack", __name__, url_prefix="/homestack")
@homestack.route("/", methods=["GET"])
@login_required
def home():
return render_template("homestack/home.html")
| 22.058824
| 69
| 0.749333
| 49
| 375
| 5.510204
| 0.591837
| 0.133333
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009091
| 0.12
| 375
| 16
| 70
| 23.4375
| 0.809091
| 0.128
| 0
| 0
| 0
| 0
| 0.129231
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.444444
| 0.111111
| 0.666667
| 0.222222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 3
|
d9b55a7ee025f94a0ef3f125fa9c30f974dd7d6e
| 211
|
py
|
Python
|
abc/abc165/abc165e.py
|
c-yan/atcoder
|
940e49d576e6a2d734288fadaf368e486480a948
|
[
"MIT"
] | 1
|
2019-08-21T00:49:34.000Z
|
2019-08-21T00:49:34.000Z
|
abc/abc165/abc165e.py
|
c-yan/atcoder
|
940e49d576e6a2d734288fadaf368e486480a948
|
[
"MIT"
] | null | null | null |
abc/abc165/abc165e.py
|
c-yan/atcoder
|
940e49d576e6a2d734288fadaf368e486480a948
|
[
"MIT"
] | null | null | null |
N, M = map(int, input().split())
for i in range(1, M + 1):
if i % 2 == 1:
j = (i - 1) // 2
print(1 + j, M + 1 - j)
else:
j = (i - 2) // 2
print(M + 2 + j, 2 * M + 1 - j)
| 21.1
| 39
| 0.336493
| 40
| 211
| 1.775
| 0.4
| 0.112676
| 0.084507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0.445498
| 211
| 9
| 40
| 23.444444
| 0.495727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
d9b8d42e905cba910e6a30f7d6f38e82d05ab46c
| 2,110
|
py
|
Python
|
graphdb/transformer.py
|
muggat0n/graphdb
|
56dfd5ef8a3321abc6a919faee47494bbe059080
|
[
"MIT"
] | 2
|
2020-08-28T13:42:38.000Z
|
2020-09-05T03:13:45.000Z
|
graphdb/transformer.py
|
muggat0n/graphdb
|
56dfd5ef8a3321abc6a919faee47494bbe059080
|
[
"MIT"
] | null | null | null |
graphdb/transformer.py
|
muggat0n/graphdb
|
56dfd5ef8a3321abc6a919faee47494bbe059080
|
[
"MIT"
] | null | null | null |
"""
A query transformer is a function that accepts a program and returns a program, plus a priority level.
Higher priority transformers are placed closer to the front of the list. We’re ensuring is a function,
because we’re going to evaluate it later 31 .
We’ll assume there won’t be an enormous number of transformer additions,
and walk the list linearly to add a new one.
We’ll leave a note in case this assumption turns out to be false —
a binary search is much more time-optimal for long lists,
but adds a little complexity and doesn’t really speed up short lists.
"""
class Transformer:
def __init__(self):
self.T = []
def transform(self, program):
return program
"""
Dagoba.T = [] # transformers (more than meets the eye)
"""
"""
Dagoba.addTransformer = function(fun, priority) {
if(typeof fun != 'function')
return Dagoba.error('Invalid transformer function')
for(var i = 0; i < Dagoba.T.length; i++) # OPT: binary search
if(priority > Dagoba.T[i].priority) break
Dagoba.T.splice(i, 0, {priority: priority, fun: fun})
}
"""
"""
Dagoba.transform = function(program) {
return Dagoba.T.reduce(function(acc, transformer) {
return transformer.fun(acc)
}, program)
}
"""
"""
Dagoba.addAlias = function(newname, oldname, defaults) {
defaults = defaults || [] # default arguments for the alias
Dagoba.addPipetype(newname, function() {}) # because there's no method catchall in js
Dagoba.addTransformer(function(program) {
return program.map(function(step) {
if(step[0] != newname) return step
return [oldname, Dagoba.extend(step[1], defaults)]
})
}, 100) # these need to run early, so they get a high priority
}
"""
"""
Dagoba.extend = function(list, defaults) {
return Object.keys(defaults).reduce(function(acc, key) {
if(typeof list[key] != 'undefined') return acc
acc[key] = defaults[key]
return acc
}, list)
}
"""
| 30.57971
| 120
| 0.627962
| 273
| 2,110
| 4.842491
| 0.47619
| 0.026475
| 0.016641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005799
| 0.264455
| 2,110
| 68
| 121
| 31.029412
| 0.845361
| 0.267299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 3
|
d9b9563b7aae9c46b0fbd98073d96eeedfaec4aa
| 91
|
py
|
Python
|
Courses/1 month/2 week/day 6/Formula.py
|
emir-naiz/first_git_lesson
|
1fecf712290f6da3ef03deff518870d91638eb69
|
[
"MIT"
] | null | null | null |
Courses/1 month/2 week/day 6/Formula.py
|
emir-naiz/first_git_lesson
|
1fecf712290f6da3ef03deff518870d91638eb69
|
[
"MIT"
] | null | null | null |
Courses/1 month/2 week/day 6/Formula.py
|
emir-naiz/first_git_lesson
|
1fecf712290f6da3ef03deff518870d91638eb69
|
[
"MIT"
] | null | null | null |
summary = 0
i = 0
while i < 5:
summary = summary + i
print(summary)
i = i + 1
| 11.375
| 25
| 0.516484
| 15
| 91
| 3.133333
| 0.466667
| 0.340426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070175
| 0.373626
| 91
| 7
| 26
| 13
| 0.754386
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
d9b9af3bd25b0d2f9357446b0ff43e3ab614b141
| 243
|
py
|
Python
|
tests/image_saver/image_saver_7.py
|
Vicken-Ghoubiguian/Imtreat
|
1f8e8406dc48af3b1e8e0c138a09aa1faee0b8a0
|
[
"MIT"
] | null | null | null |
tests/image_saver/image_saver_7.py
|
Vicken-Ghoubiguian/Imtreat
|
1f8e8406dc48af3b1e8e0c138a09aa1faee0b8a0
|
[
"MIT"
] | null | null | null |
tests/image_saver/image_saver_7.py
|
Vicken-Ghoubiguian/Imtreat
|
1f8e8406dc48af3b1e8e0c138a09aa1faee0b8a0
|
[
"MIT"
] | null | null | null |
import imtreat
img = imtreat.imageManagerClass.openImageFunction("../images/soleil.png", 0)
img = imtreat.definedModesClass.detailEnhanceFunction(img)
imtreat.imageManagerClass.saveImageFunction("/Téléchargements/", "image_1", ".png", img)
| 30.375
| 88
| 0.794239
| 23
| 243
| 8.347826
| 0.652174
| 0.15625
| 0.28125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008811
| 0.065844
| 243
| 7
| 89
| 34.714286
| 0.837004
| 0
| 0
| 0
| 0
| 0
| 0.197531
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
d9c389b63a2c9720abef56190237f31a2306da19
| 1,972
|
py
|
Python
|
src/biotite/copyable.py
|
danijoo/biotite
|
22072e64676e4e917236eac8493eed4c6a22cc33
|
[
"BSD-3-Clause"
] | 208
|
2018-04-20T15:59:42.000Z
|
2022-03-22T07:47:12.000Z
|
src/biotite/copyable.py
|
danielmuthama/biotite
|
cb238a8d8d7dc82b3bcea274d7d91d5c876badcd
|
[
"BSD-3-Clause"
] | 121
|
2017-11-15T14:52:07.000Z
|
2022-03-30T16:31:41.000Z
|
src/biotite/copyable.py
|
danielmuthama/biotite
|
cb238a8d8d7dc82b3bcea274d7d91d5c876badcd
|
[
"BSD-3-Clause"
] | 49
|
2018-07-19T09:06:24.000Z
|
2022-03-23T17:21:34.000Z
|
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite"
__author__ = "Patrick Kunzmann"
__all__ = ["Copyable"]
import abc
class Copyable(metaclass=abc.ABCMeta):
"""
Base class for all objects, that should be copyable.
The public method `copy()` first creates a fresh instance of the
class of the instance, that is copied via the `__copy_create__()`
method. All variables, that could not be set via the constructor,
are then copied via `__copy_fill__()`, starting with the method in
the uppermost base class and ending with the class of the instance
to be copied.
This approach solves the problem of encapsulated variables in
superclasses.
"""
def copy(self):
"""
Create a deep copy of this object.
Returns
-------
copy
A copy of this object.
"""
clone = self.__copy_create__()
self.__copy_fill__(clone)
return clone
def __copy_create__(self):
"""
Instantiate a new object of this class.
Only the constructor should be called in this method.
All further attributes, that need to be copied are handled
in `__copy_fill__()`
Do not call the `super()` method here.
This method must be overridden, if the constructor takes
parameters.
Returns
-------
copy
A freshly instantiated copy of *self*.
"""
return type(self)()
def __copy_fill__(self, clone):
"""
Copy all necessary attributes to the new object.
Always call the `super()` method as first statement.
Parameters
----------
clone
The freshly instantiated copy of *self*.
"""
pass
| 27.774648
| 70
| 0.59432
| 235
| 1,972
| 4.787234
| 0.421277
| 0.017778
| 0.017778
| 0.023111
| 0.088889
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00076
| 0.333164
| 1,972
| 71
| 71
| 27.774648
| 0.854753
| 0.626775
| 0
| 0
| 0
| 0
| 0.077114
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.230769
| false
| 0.076923
| 0.076923
| 0
| 0.538462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 3
|
d9f1f15178cb9e26d9b4f91695b333a07eaa59d6
| 74,778
|
py
|
Python
|
sqlova/model/nl2sql/wikisql_models.py
|
guotong1988/Rule-SQL
|
e826c0d659c8b35a72b64aa2b50d4d943fdd70f1
|
[
"Apache-2.0"
] | 15
|
2019-07-25T12:13:31.000Z
|
2020-10-17T13:42:58.000Z
|
sqlova/model/nl2sql/wikisql_models.py
|
guotong1988/Rule-SQL
|
e826c0d659c8b35a72b64aa2b50d4d943fdd70f1
|
[
"Apache-2.0"
] | 1
|
2020-01-07T05:49:15.000Z
|
2020-04-22T01:22:00.000Z
|
sqlova/model/nl2sql/wikisql_models.py
|
guotong1988/Rule-SQL
|
e826c0d659c8b35a72b64aa2b50d4d943fdd70f1
|
[
"Apache-2.0"
] | 3
|
2019-10-01T09:14:35.000Z
|
2020-07-18T08:39:48.000Z
|
# Copyright 2019-present NAVER Corp.
# Apache License v2.0
# Wonseok Hwang
import os, json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from sqlova.utils.utils import topk_multi_dim
from sqlova.utils.utils_wikisql import *
class Seq2SQL_v1(nn.Module):
def __init__(self, input_size, hidden_size, num_layer, dropout,
number_cond_ops, number_agg_ops, old=False):
super(Seq2SQL_v1, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layer = num_layer
self.dropout = dropout
self.max_where_number = 4
self.number_cond_ops = number_cond_ops
self.number_agg_ops = number_agg_ops
self.select_column_predict = SelectColumnPredict(input_size, hidden_size, num_layer, dropout)
self.select_agg_predict = SelectAggPredict(input_size, hidden_size, num_layer, dropout, number_agg_ops, old=old)
self.where_number_predict = WhereNumberPredict(input_size, hidden_size, num_layer, dropout)
self.wcp = WhereColumnPredict(input_size, hidden_size, num_layer, dropout)
self.wop = WhereOpPredict(input_size, hidden_size, num_layer, dropout, number_cond_ops)
self.wvp = WhereValuePredict_startend(input_size, hidden_size, num_layer, dropout, number_cond_ops, old=old) # start-end-search-discriminative model
# emb_question, [16,26,1536]
# len_question, [16]
# emb_header, [102,12,1536]
# len_header_token, [102]
# number_header, [16]
def forward(self, emb_question, len_question, emb_header, len_header_token, number_header,
g_sc=None, g_sa=None, g_wn=None, g_wc=None, g_wo=None, g_wvi=None,
show_p_sc=False, show_p_sa=False,
show_p_wn=False, show_p_wc=False, show_p_wo=False, show_p_wv=False):
# sc
s_sc,s_sc_softmax = self.select_column_predict(emb_question, len_question, emb_header, len_header_token, number_header, show_p_sc=show_p_sc)
if g_sc:
pr_sc = g_sc
else:
pr_sc = pred_sc(s_sc)
# sa
s_sa,s_sa_softmax = self.select_agg_predict(emb_question, len_question, emb_header, len_header_token, number_header, pr_sc, show_p_sa=show_p_sa)
if g_sa:
# it's not necessary though.
pr_sa = g_sa
else:
pr_sa = pred_sa(s_sa)
# wn
s_wn,s_wn_softmax = self.where_number_predict(emb_question, len_question, emb_header, len_header_token, number_header, show_p_wn=show_p_wn)
if g_wn:
pr_wn = g_wn
else:
pr_wn = pred_wn(s_wn)
# wc
s_wc,s_wc_softmax = self.wcp(emb_question, len_question, emb_header, len_header_token, number_header, show_p_wc=show_p_wc, penalty=True)
if g_wc:
pr_wc = g_wc
else:
pr_wc = pred_wherecolumn(pr_wn, s_wc)
# wo
s_wo,s_wo_softmax = self.wop(emb_question, len_question, emb_header, len_header_token, number_header, wn=pr_wn, wc=pr_wc, show_p_wo=show_p_wo)
if g_wo:
pr_wo = g_wo
else:
pr_wo = pred_wo(pr_wn, s_wo)
# wv
s_wv,s_wv_softmax = self.wvp(emb_question, len_question, emb_header, len_header_token, number_header, wn=pr_wn, wc=pr_wc, wo=pr_wo, show_p_wv=show_p_wv)
return s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, s_sc_softmax, s_sa_softmax, s_wn_softmax, s_wc_softmax, s_wo_softmax, s_wv_softmax
def beam_forward(self, emb_question, len_question, emb_header, len_header_token, l_header, engine, tb,
nlu_t, nlu_wp_t, wp_to_wh_index, nlu,
beam_size=4,
show_p_sc=False, show_p_sa=False,
show_p_wn=False, show_p_wc=False, show_p_wo=False, show_p_wv=False):
"""
Execution-guided beam decoding.
"""
# sc
s_sc,_ = self.select_column_predict(emb_question, len_question, emb_header, len_header_token, l_header, show_p_sc=show_p_sc)
prob_sc = F.softmax(s_sc, dim=-1)
bS, mcL = s_sc.shape
# minimum_header_length = min(l_header)
# beam_size = minimum_header_length if beam_size > minimum_header_length else beam_size
# sa
# Construct all possible sc_sa_score
prob_sc_sa = torch.zeros([bS, beam_size, self.number_agg_ops]).to(device)
prob_sca = torch.zeros_like(prob_sc_sa).to(device)
# get the top-k indices. pr_sc_beam = [B, beam_size]
pr_sc_beam = pred_sc_beam(s_sc, beam_size)
# calculate and predict s_sa.
for i_beam in range(beam_size):
pr_sc = list( array(pr_sc_beam)[:,i_beam] )
s_sa,_ = self.select_agg_predict(emb_question, len_question, emb_header, len_header_token, l_header, pr_sc, show_p_sa=show_p_sa)
prob_sa = F.softmax(s_sa, dim=-1)
prob_sc_sa[:, i_beam, :] = prob_sa
prob_sc_selected = prob_sc[range(bS), pr_sc] # [B]
prob_sca[:,i_beam,:] = (prob_sa.t() * prob_sc_selected).t()
# [mcL, B] * [B] -> [mcL, B] (element-wise multiplication)
# [mcL, B] -> [B, mcL]
# Calculate the dimension of tensor
# tot_dim = len(prob_sca.shape)
# First flatten to 1-d
idxs = topk_multi_dim(torch.tensor(prob_sca), n_topk=beam_size, batch_exist=True)
# Now as sc_idx is already sorted, re-map them properly.
idxs = remap_sc_idx(idxs, pr_sc_beam) # [sc_beam_idx, sa_idx] -> [sc_idx, sa_idx]
idxs_arr = array(idxs)
# [B, beam_size, remainig dim]
# idxs[b][0] gives first probable [sc_idx, sa_idx] pairs.
# idxs[b][1] gives of second.
# Calculate prob_sca, a joint probability
beam_idx_sca = [0] * bS
beam_meet_the_final = [False] * bS
while True:
pr_sc = idxs_arr[range(bS),beam_idx_sca,0]
pr_sa = idxs_arr[range(bS),beam_idx_sca,1]
# map index properly
check = check_sc_sa_pairs(tb, pr_sc, pr_sa)
if sum(check) == bS:
break
else:
for b, check1 in enumerate(check):
if not check1: # wrong pair
beam_idx_sca[b] += 1
if beam_idx_sca[b] >= beam_size:
beam_meet_the_final[b] = True
beam_idx_sca[b] -= 1
else:
beam_meet_the_final[b] = True
if sum(beam_meet_the_final) == bS:
break
# Now pr_sc, pr_sa are properly predicted.
pr_sc_best = list(pr_sc)
pr_sa_best = list(pr_sa)
# Now, Where-clause beam search.
s_wn,_ = self.where_number_predict(emb_question, len_question, emb_header, len_header_token, l_header, show_p_wn=show_p_wn)
prob_wn = F.softmax(s_wn, dim=-1).detach().to('cpu').numpy()
# Found "executable" most likely 4(=max_num_of_conditions) where-clauses.
# wc
s_wc,_ = self.wcp(emb_question, len_question, emb_header, len_header_token, l_header, show_p_wc=show_p_wc, penalty=True)
prob_wc = F.sigmoid(s_wc).detach().to('cpu').numpy()
# pr_wc_sorted_by_prob = pred_wc_sorted_by_prob(s_wc)
# get max_wn # of most probable columns & their prob.
pr_wn_max = [self.max_where_number] * bS
pr_wc_max = pred_wherecolumn(pr_wn_max, s_wc) # if some column do not have executable where-claouse, omit that column
prob_wc_max = zeros([bS, self.max_where_number])
for b, pr_wc_max1 in enumerate(pr_wc_max):
prob_wc_max[b,:] = prob_wc[b,pr_wc_max1]
# get most probable max_wn where-clouses
# wo
s_wo_max,_ = self.wop(emb_question, len_question, emb_header, len_header_token, l_header, wn=pr_wn_max, wc=pr_wc_max, show_p_wo=show_p_wo)
prob_wo_max = F.softmax(s_wo_max, dim=-1).detach().to('cpu').numpy()
# [B, max_wn, n_cond_op]
pr_wvi_beam_op_list = []
prob_wvi_beam_op_list = []
for i_op in range(self.number_cond_ops - 1):
pr_wo_temp = [[i_op] * self.max_where_number] * bS
# wv
s_wv,_ = self.wvp(emb_question, len_question, emb_header, len_header_token, l_header, wn=pr_wn_max, wc=pr_wc_max, wo=pr_wo_temp, show_p_wv=show_p_wv)
prob_wv = F.softmax(s_wv, dim=-2).detach().to('cpu').numpy()
# prob_wv
pr_wvi_beam, prob_wvi_beam = pred_wvi_se_beam(self.max_where_number, s_wv, beam_size)
pr_wvi_beam_op_list.append(pr_wvi_beam)
prob_wvi_beam_op_list.append(prob_wvi_beam)
# pr_wvi_beam = [B, max_wn, k_logit**2 [st, ed] paris]
# pred_wv_beam
# Calculate joint probability of where-clause
# prob_w = [batch, wc, wo, wv] = [B, max_wn, n_cond_op, n_pairs]
n_wv_beam_pairs = prob_wvi_beam.shape[2]
prob_w = zeros([bS, self.max_where_number, self.number_cond_ops - 1, n_wv_beam_pairs])
for b in range(bS):
for i_wn in range(self.max_where_number):
for i_op in range(self.number_cond_ops - 1): # do not use final one
for i_wv_beam in range(n_wv_beam_pairs):
# i_wc = pr_wc_max[b][i_wn] # already done
p_wc = prob_wc_max[b, i_wn]
p_wo = prob_wo_max[b, i_wn, i_op]
p_wv = prob_wvi_beam_op_list[i_op][b, i_wn, i_wv_beam]
prob_w[b, i_wn, i_op, i_wv_beam] = p_wc * p_wo * p_wv
# Perform execution guided decoding
conds_max = []
prob_conds_max = []
# while len(conds_max) < self.max_wn:
idxs = topk_multi_dim(torch.tensor(prob_w), n_topk=beam_size, batch_exist=True)
# idxs = [B, i_wc_beam, i_op, i_wv_pairs]
# Construct conds1
for b, idxs1 in enumerate(idxs):
conds_max1 = []
prob_conds_max1 = []
for i_wn, idxs11 in enumerate(idxs1):
i_wc = pr_wc_max[b][idxs11[0]]
i_op = idxs11[1]
wvi = pr_wvi_beam_op_list[i_op][b][idxs11[0]][idxs11[2]]
# get wv_str
temp_pr_wv_str, _ = convert_pred_wvi_to_string([[wvi]], [nlu_t[b]], [nlu_wp_t[b]], [wp_to_wh_index[b]], [nlu[b]])
merged_wv11 = merge_wv_t1_eng(temp_pr_wv_str[0][0], nlu[b])
conds11 = [i_wc, i_op, merged_wv11]
prob_conds11 = prob_w[b, idxs11[0], idxs11[1], idxs11[2] ]
# test execution
# print(nlu[b])
# print(tb[b]['id'], tb[b]['types'], pr_sc[b], pr_sa[b], [conds11])
pr_ans = engine.execute(tb[b]['id'], pr_sc[b], pr_sa[b], [conds11])
if bool(pr_ans):
# pr_ans is not empty!
conds_max1.append(conds11)
prob_conds_max1.append(prob_conds11)
conds_max.append(conds_max1)
prob_conds_max.append(prob_conds_max1)
# May need to do more exhuastive search?
# i.e. up to.. getting all executable cases.
# Calculate total probability to decide the number of where-clauses
pr_sql_i = []
prob_wn_w = []
pr_wn_based_on_prob = []
for b, prob_wn1 in enumerate(prob_wn):
max_executable_wn1 = len( conds_max[b] )
prob_wn_w1 = []
prob_wn_w1.append(prob_wn1[0]) # wn=0 case.
for i_wn in range(max_executable_wn1):
prob_wn_w11 = prob_wn1[i_wn+1] * prob_conds_max[b][i_wn]
prob_wn_w1.append(prob_wn_w11)
pr_wn_based_on_prob.append(argmax(prob_wn_w1))
prob_wn_w.append(prob_wn_w1)
pr_sql_i1 = {'agg': pr_sa_best[b], 'sel': pr_sc_best[b], 'conds': conds_max[b][:pr_wn_based_on_prob[b]]}
pr_sql_i.append(pr_sql_i1)
# s_wv = [B, max_wn, max_nlu_tokens, 2]
return prob_sca, prob_w, prob_wn_w, pr_sc_best, pr_sa_best, pr_wn_based_on_prob, pr_sql_i
class SelectColumnPredict(nn.Module):
def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3):
super(SelectColumnPredict, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layer = num_layer
self.dropout = dropout
self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.W_att = nn.Linear(hidden_size, hidden_size)
self.W_c = nn.Linear(hidden_size, hidden_size)
self.W_header = nn.Linear(hidden_size, hidden_size)
self.sc_out = nn.Sequential(nn.Tanh(), nn.Linear(2 * hidden_size, 1))
self.softmax_dim1 = nn.Softmax(dim=1)
self.softmax_dim2 = nn.Softmax(dim=2)
self.softmax_dim_1 = nn.Softmax(dim=-1)
# emb_question, [16,26,1536]
# len_question, [16]
# emb_header, [102,12,1536]
# len_header_token, [102]
# number_header, [16]
def forward(self, emb_question, len_question, emb_header, len_header_token, number_header, show_p_sc=False):
# Encode
encoded_question = encode(self.enc_n, emb_question, len_question,
return_hidden=False,
hc0=None,
last_only=False) # [b, n, dim]
encoded_header = encode_header(self.enc_h, emb_header, len_header_token, number_header) # [b, header, dim]
bS = len(number_header)
mL_n = max(len_question)
# [bS, max_len_header, 100] * [bS, 100, mL_n] -> [bS, max_len_header, mL_n]
att_h = torch.bmm(encoded_header, self.W_att(encoded_question).transpose(1, 2))
# Penalty on blank parts
for b, l_n1 in enumerate(len_question):
if l_n1 < mL_n:
att_h[b, :, l_n1:] = -10000000000
p_n = self.softmax_dim2(att_h)
if show_p_sc:
# p = [b, header, n]
if p_n.shape[0] != 1:
raise Exception("Batch size should be 1.")
fig=figure(2001, figsize=(12,3.5))
# subplot(6,2,7)
subplot2grid((7,2), (3, 0), rowspan=2)
cla()
_color='rgbkcm'
_symbol='.......'
for i_h in range(number_header[0]):
color_idx = i_h % len(_color)
plot(p_n[0][i_h][:].data.numpy() - i_h, '--'+_symbol[color_idx]+_color[color_idx], ms=7)
title('sc: p_n for each h')
grid(True)
fig.tight_layout()
fig.canvas.draw()
show()
# p_n [ bS, max_len_header, mL_n] -> [ bS, max_len_header, mL_n, 1]
# wenc_n [ bS, mL_n, 100] -> [ bS, 1, mL_n, 100]
# -> [bS, max_len_header, mL_n, 100] -> [bS, max_len_header, 100]
c_n = torch.mul(p_n.unsqueeze(3), encoded_question.unsqueeze(1)).sum(dim=2)
vec = torch.cat([self.W_c(c_n), self.W_header(encoded_header)], dim=2)
score_select_column = self.sc_out(vec).squeeze(2) # [bS, max_len_header, 1] -> [bS, max_len_header]
score_select_column_softmax = self.softmax_dim_1(score_select_column)
# Penalty
max_len_header = max(number_header)
for b, l_header1 in enumerate(number_header):
if l_header1 < max_len_header:
score_select_column[b, l_header1:] = -10000000000
for b, l_header1 in enumerate(number_header):
if l_header1 < max_len_header:
score_select_column_softmax[b, l_header1:] = 0
return score_select_column,score_select_column_softmax
class SelectAggPredict(nn.Module):
def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3, n_agg_ops=-1, old=False):
super(SelectAggPredict, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layer = num_layer
self.dropout = dropout
self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.W_att = nn.Linear(hidden_size, hidden_size)
self.sa_out = nn.Sequential(nn.Linear(hidden_size, hidden_size),
nn.Tanh(),
nn.Linear(hidden_size, n_agg_ops)) # Fixed number of aggregation operator.
self.softmax_dim1 = nn.Softmax(dim=1)
self.softmax_dim2 = nn.Softmax(dim=2)
self.softmax_dim_1 = nn.Softmax(dim=-1)
if old:
# for backwoard compatibility
self.W_c = nn.Linear(hidden_size, hidden_size)
self.W_header = nn.Linear(hidden_size, hidden_size)
def forward(self, emb_question, len_question, emb_header, len_header_token, l_header, pr_sc, show_p_sa=False):
# Encode
encoded_question = encode(self.enc_n, emb_question, len_question,
return_hidden=False,
hc0=None,
last_only=False) # [b, n, dim]
encoded_header = encode_header(self.enc_h, emb_header, len_header_token, l_header) # [b, header, dim]
bS = len(l_header)
mL_n = max(len_question)
wenc_header_ob = encoded_header[list(range(bS)), pr_sc] # list, so one sample for each batch.
# [bS, question_len, 100] * [bS, 100, 1] -> [bS, question_len]
att = torch.bmm(self.W_att(encoded_question), wenc_header_ob.unsqueeze(2)).squeeze(2)
# Penalty on blank parts
for b, l_n1 in enumerate(len_question):
if l_n1 < mL_n:
att[b, l_n1:] = -10000000000
# [bS, question_len]
p = self.softmax_dim1(att)
if show_p_sa:
if p.shape[0] != 1:
raise Exception("Batch size should be 1.")
fig=figure(2001);
subplot(7,2,3)
cla()
plot(p[0].data.numpy(), '--rs', ms=7)
title('sa: nlu_weight')
grid(True)
fig.tight_layout()
fig.canvas.draw()
show()
# [bS, question_len, 100] * ( [bS, question_len, 1] -> [bS, question_len, 100])
# -> [bS, question_len, 100] -> [bS, 100]
c_n = torch.mul(encoded_question, p.unsqueeze(2).expand_as(encoded_question)).sum(dim=1)
s_sa = self.sa_out(c_n)
s_sa_softmax = self.softmax_dim_1(s_sa)
return s_sa,s_sa_softmax
class WhereNumberPredict(nn.Module):
def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3, ):
super(WhereNumberPredict, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layer = num_layer
self.dropout = dropout
self.mL_w = 4 # max where condition number
self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.W_att_h = nn.Linear(hidden_size, 1)
self.W_hidden = nn.Linear(hidden_size, num_layer * hidden_size)
self.W_cell = nn.Linear(hidden_size, num_layer * hidden_size)
self.W_att_n = nn.Linear(hidden_size, 1)
self.wn_out = nn.Sequential(nn.Linear(hidden_size, hidden_size),
nn.Tanh(),
nn.Linear(hidden_size, self.mL_w + 1)) # max number (4 + 1)
self.softmax_dim1 = nn.Softmax(dim=1)
self.softmax_dim2 = nn.Softmax(dim=2)
self.softmax_dim_1 = nn.Softmax(dim=-1)
def forward(self, emb_question, len_question, emb_header, len_header_token, l_header, show_p_wn=False):
# Encode
encoded_header = encode_header(self.enc_h, emb_header, len_header_token, l_header) # [b, max_len_header, dim]
bS = len(l_header)
max_len_question = max(len_question)
max_len_header = max(l_header)
# mL_h = max(len_header_token)
# (self-attention?) column Embedding?
# [B, max_len_header, 100] -> [B, max_len_header, 1] -> [B, max_len_header]
att_h = self.W_att_h(encoded_header).squeeze(2)
# Penalty
for b, l_header1 in enumerate(l_header):
if l_header1 < max_len_header:
att_h[b, l_header1:] = -10000000000
p_h = self.softmax_dim1(att_h)
if show_p_wn:
if p_h.shape[0] != 1:
raise Exception("Batch size should be 1.")
fig=figure(2001);
subplot(7,2,5)
cla()
plot(p_h[0].data.numpy(), '--rs', ms=7)
title('wn: header_weight')
grid(True)
fig.canvas.draw()
show()
# input('Type Eenter to continue.')
# [B, max_len_header, 100] * [ B, max_len_header, 1] -> [B, max_len_header, 100] -> [B, 100]
c_header = torch.mul(encoded_header, p_h.unsqueeze(2)).sum(1)
# [B, 100] --> [B, 2*100] Enlarge because there are two layers.
hidden = self.W_hidden(c_header) # [B, 4, 200/2]
hidden = hidden.view(bS, self.num_layer * 2, int(
self.hidden_size / 2)) # [4, B, 100/2] # number_of_layer_layer * (bi-direction) # lstm input convention.
hidden = hidden.transpose(0, 1).contiguous()
cell = self.W_cell(c_header) # [B, 4, 100/2]
cell = cell.view(bS, self.num_layer * 2, int(self.hidden_size / 2)) # [4, B, 100/2]
cell = cell.transpose(0, 1).contiguous()
wenc_n = encode(self.enc_n, emb_question, len_question,
return_hidden=False,
hc0=(hidden, cell),
last_only=False) # [b, n, dim]
att_n = self.W_att_n(wenc_n).squeeze(2) # [B, max_len, 100] -> [B, max_len, 1] -> [B, max_len]
# Penalty
for b, l_n1 in enumerate(len_question):
if l_n1 < max_len_question:
att_n[b, l_n1:] = -10000000000
p_n = self.softmax_dim1(att_n)
if show_p_wn:
if p_n.shape[0] != 1:
raise Exception("Batch size should be 1.")
fig=figure(2001);
subplot(7,2,6)
cla()
plot(p_n[0].data.numpy(), '--rs', ms=7)
title('wn: nlu_weight')
grid(True)
fig.canvas.draw()
show()
# input('Type Enter to continue.')
# [B, mL_n, 100] *([B, mL_n] -> [B, mL_n, 1] -> [B, mL_n, 100] ) -> [B, 100]
c_n = torch.mul(wenc_n, p_n.unsqueeze(2).expand_as(wenc_n)).sum(dim=1)
s_wn = self.wn_out(c_n)
s_wn_softmax = self.softmax_dim_1(s_wn)
return s_wn,s_wn_softmax
# where column predict
class WhereColumnPredict(nn.Module):
def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3):
super(WhereColumnPredict, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layer = num_layer
self.dropout = dropout
self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.W_att = nn.Linear(hidden_size, hidden_size)
self.W_c = nn.Linear(hidden_size, hidden_size)
self.W_header = nn.Linear(hidden_size, hidden_size)
self.W_out = nn.Sequential(
nn.Tanh(), nn.Linear(2 * hidden_size, 1)
)
self.softmax_dim1 = nn.Softmax(dim=1)
self.softmax_dim2 = nn.Softmax(dim=2)
self.softmax_dim_1 = nn.Softmax(dim=-1)
def forward(self, emb_question, len_question, emb_header, len_header_token,
l_header, show_p_wc, penalty=True):
# Encode
encoded_question = encode(self.enc_n, emb_question, len_question,
return_hidden=False,
hc0=None,
last_only=False) # [b, n, dim]
encoded_header = encode_header(self.enc_h, emb_header, len_header_token, l_header) # [b, header, dim]
# attention
# wenc = [bS, mL, hidden_size]
# att = [bS, max_len_header, mL_n]
# att[b, i_h, j_n] = p(j_n| i_h)
att = torch.bmm(encoded_header, self.W_att(encoded_question).transpose(1, 2))
# penalty to blank part.
mL_n = max(len_question)
for b_n, l_n1 in enumerate(len_question):
if l_n1 < mL_n:
att[b_n, :, l_n1:] = -10000000000
# make p(j_n | i_h)
p = self.softmax_dim2(att)
if show_p_wc:
# p = [b, header, n]
if p.shape[0] != 1:
raise Exception("Batch size should be 1.")
fig=figure(2001);
# subplot(6,2,7)
subplot2grid((7,2), (3, 1), rowspan=2)
cla()
_color='rgbkcm'
_symbol='.......'
for i_h in range(l_header[0]):
color_idx = i_h % len(_color)
plot(p[0][i_h][:].data.numpy() - i_h, '--'+_symbol[color_idx]+_color[color_idx], ms=7)
title('wc: p_n for each h')
grid(True)
fig.tight_layout()
fig.canvas.draw()
show()
# max nlu context vectors
# [bS, max_len_header, mL_n]*[bS, max_len_header, mL_n]
encoded_question = encoded_question.unsqueeze(1) # [ b, n, dim] -> [b, 1, n, dim]
p = p.unsqueeze(3) # [b, header, n] -> [b, header, n, 1]
c_n = torch.mul(encoded_question, p).sum(2) # -> [b, header, dim], c_n for each header.
y = torch.cat([self.W_c(c_n), self.W_header(encoded_header)], dim=2) # [b, header, 2*dim]
score = self.W_out(y).squeeze(2) # [b, header]
score[torch.isnan(score)] = 0
score_softmax = self.softmax_dim_1(score)
if penalty:
for b, l_header1 in enumerate(l_header):
score[b, l_header1:] = -1e+10
for b, l_header1 in enumerate(l_header):
score_softmax[b, l_header1:] = 0
return score,score_softmax
# where op predict
class WhereOpPredict(nn.Module):
def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3, n_cond_ops=3):
super(WhereOpPredict, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layer = num_layer
self.dropout = dropout
self.mL_w = 4 # max where condition number
self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.W_att = nn.Linear(hidden_size, hidden_size)
self.W_c = nn.Linear(hidden_size, hidden_size)
self.W_header = nn.Linear(hidden_size, hidden_size)
self.wo_out = nn.Sequential(
nn.Linear(2*hidden_size, hidden_size),
nn.Tanh(),
nn.Linear(hidden_size, n_cond_ops)
)
self.softmax_dim1 = nn.Softmax(dim=1)
self.softmax_dim2 = nn.Softmax(dim=2)
self.softmax_dim_1 = nn.Softmax(dim=-1)
def forward(self, emb_question, len_question, emb_header, len_header_token,
l_header, wn, wc, wenc_n=None, show_p_wo=False):
# Encode
if not wenc_n:
wenc_n = encode(self.enc_n, emb_question, len_question,
return_hidden=False,
hc0=None,
last_only=False) # [b, n, dim]
encoded_header = encode_header(self.enc_h, emb_header, len_header_token, l_header) # [b, header, dim]
bS = len(l_header)
# wn
wenc_header_ob = [] # observed header
for b in range(bS):
# [[...], [...]]
# Pad list to maximum number of selections
real = [encoded_header[b, col] for col in wc[b]]
pad = (self.mL_w - wn[b]) * [encoded_header[b, 0]] # this padding could be wrong. Test with zero padding later.
wenc_header_ob1 = torch.stack(real + pad) # It is not used in the loss function.
wenc_header_ob.append(wenc_header_ob1)
# list to [B, 4, dim] tensor.
wenc_header_ob = torch.stack(wenc_header_ob) # list to tensor.
wenc_header_ob = wenc_header_ob.to(device)
# [B, 1, mL_n, dim] * [B, 4, dim, 1]
# -> [B, 4, mL_n, 1] -> [B, 4, mL_n]
# multiplication bewteen NLq-tokens and selected column
att = torch.matmul(self.W_att(wenc_n).unsqueeze(1),
wenc_header_ob.unsqueeze(3)
).squeeze(3)
# Penalty for blank part.
mL_n = max(len_question)
for b, l_n1 in enumerate(len_question):
if l_n1 < mL_n:
att[b, :, l_n1:] = -10000000000
p = self.softmax_dim2(att) # p( n| selected_col )
if show_p_wo:
# p = [b, header, n]
if p.shape[0] != 1:
raise Exception("Batch size should be 1.")
fig=figure(2001)
# subplot(6,2,7)
subplot2grid((7,2), (5, 0), rowspan=2)
cla()
_color='rgbkcm'
_symbol='.......'
for i_wn in range(self.mL_w):
color_idx = i_wn % len(_color)
plot(p[0][i_wn][:].data.numpy() - i_wn, '--'+_symbol[color_idx]+_color[color_idx], ms=7)
title('wo: p_n for selected h')
grid(True)
fig.tight_layout()
fig.canvas.draw()
show()
# [B, 1, mL_n, dim] * [B, 4, mL_n, 1]
# --> [B, 4, mL_n, dim]
# --> [B, 4, dim]
c_n = torch.mul(wenc_n.unsqueeze(1), p.unsqueeze(3)).sum(dim=2)
# [bS, 5-1, dim] -> [bS, 5-1, 3]
vec = torch.cat([self.W_c(c_n), self.W_header(wenc_header_ob)], dim=2)
s_wo = self.wo_out(vec)
s_wo_softmax = self.softmax_dim_1(s_wo)
return s_wo,s_wo_softmax
class WhereValuePredict_startend(nn.Module):
"""
Discriminative model
Get start and end.
Here, classifier for [ [투수], [팀1], [팀2], [연도], ...]
Input: Encoded nlu & selected column.
Algorithm: Encoded nlu & selected column. -> classifier -> mask scores -> ...
"""
def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3, n_cond_ops=4, old=False):
super(WhereValuePredict_startend, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layer = num_layer
self.dropout = dropout
self.n_cond_ops = n_cond_ops
self.mL_w = 4 # max where condition number
self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.W_att = nn.Linear(hidden_size, hidden_size)
self.W_c = nn.Linear(hidden_size, hidden_size)
self.W_header = nn.Linear(hidden_size, hidden_size)
self.W_op = nn.Linear(n_cond_ops, hidden_size)
# self.W_n = nn.Linear(hidden_size, hidden_size)
if old:
self.wv_out = nn.Sequential(
nn.Linear(4 * hidden_size, 2)
)
else:
self.wv_out = nn.Sequential(
nn.Linear(4 * hidden_size, hidden_size),
nn.Tanh(),
nn.Linear(hidden_size, 2)
)
# self.wv_out = nn.Sequential(
# nn.Linear(3 * hidden_size, hidden_size),
# nn.Tanh(),
# nn.Linear(hidden_size, self.gdkL)
# )
self.softmax_dim1 = nn.Softmax(dim=1)
self.softmax_dim2 = nn.Softmax(dim=2)
self.softmax_dim_1 = nn.Softmax(dim=-1)
def forward(self, emb_question, len_question, emb_header, len_header_token, l_header, wn, wc, wo, wenc_n=None, show_p_wv=False):
# Encode
if not wenc_n:
wenc_n, hout, cout = encode(self.enc_n, emb_question, len_question,
return_hidden=True,
hc0=None,
last_only=False) # [b, n, dim]
encoded_header = encode_header(self.enc_h, emb_header, len_header_token, l_header) # [b, header, dim]
bS = len(l_header)
wenc_header_ob = [] # observed header
for b in range(bS):
# [[...], [...]]
# Pad list to maximum number of selections
real = [encoded_header[b, col] for col in wc[b]]
pad = (self.mL_w - wn[b]) * [encoded_header[b, 0]] # this padding could be wrong. Test with zero padding later.
wenc_header_ob1 = torch.stack(real + pad) # It is not used in the loss function.
wenc_header_ob.append(wenc_header_ob1)
# list to [B, 4, dim] tensor.
wenc_header_ob = torch.stack(wenc_header_ob) # list to tensor.
wenc_header_ob = wenc_header_ob.to(device)
# Column attention
# [B, 1, mL_n, dim] * [B, 4, dim, 1]
# -> [B, 4, mL_n, 1] -> [B, 4, mL_n]
# multiplication bewteen NLq-tokens and selected column
att = torch.matmul(self.W_att(wenc_n).unsqueeze(1),
wenc_header_ob.unsqueeze(3)
).squeeze(3)
# Penalty for blank part.
mL_n = max(len_question)
for b, l_n1 in enumerate(len_question):
if l_n1 < mL_n:
att[b, :, l_n1:] = -10000000000
p = self.softmax_dim2(att) # p( n| selected_col )
if show_p_wv:
# p = [b, header, n]
if p.shape[0] != 1:
raise Exception("Batch size should be 1.")
fig=figure(2001)
# subplot(6,2,7)
subplot2grid((7,2), (5, 1), rowspan=2)
cla()
_color='rgbkcm'
_symbol='.......'
for i_wn in range(self.mL_w):
color_idx = i_wn % len(_color)
plot(p[0][i_wn][:].data.numpy() - i_wn, '--'+_symbol[color_idx]+_color[color_idx], ms=7)
title('wv: p_n for selected h')
grid(True)
fig.tight_layout()
fig.canvas.draw()
show()
# [B, 1, mL_n, dim] * [B, 4, mL_n, 1]
# --> [B, 4, mL_n, dim]
# --> [B, 4, dim]
c_n = torch.mul(wenc_n.unsqueeze(1), p.unsqueeze(3)).sum(dim=2)
# Select observed headers only.
# Also generate one_hot vector encoding info of the operator
# [B, 4, dim]
wenc_op = []
for b in range(bS):
# [[...], [...]]
# Pad list to maximum number of selections
wenc_op1 = torch.zeros(self.mL_w, self.n_cond_ops)
wo1 = wo[b]
idx_scatter = []
l_wo1 = len(wo1)
for i_wo11 in range(self.mL_w):
if i_wo11 < l_wo1:
wo11 = wo1[i_wo11]
idx_scatter.append([int(wo11)])
else:
idx_scatter.append([0]) # not used anyway
wenc_op1 = wenc_op1.scatter(1, torch.tensor(idx_scatter), 1)
wenc_op.append(wenc_op1)
# list to [B, 4, dim] tensor.
wenc_op = torch.stack(wenc_op) # list to tensor.
wenc_op = wenc_op.to(device)
# Now after concat, calculate logits for each token
# [bS, 5-1, 3*hidden_size] = [bS, 4, 300]
vec = torch.cat([self.W_c(c_n), self.W_header(wenc_header_ob), self.W_op(wenc_op)], dim=2)
# Make extended vector based on encoded nl token containing column and operator information.
# wenc_n = [bS, mL, 100]
# vec2 = [bS, 4, mL, 400]
vec1e = vec.unsqueeze(2).expand(-1,-1, mL_n, -1) # [bS, 4, 1, 300] -> [bS, 4, mL, 300]
wenc_ne = wenc_n.unsqueeze(1).expand(-1, 4, -1, -1) # [bS, 1, mL, 100] -> [bS, 4, mL, 100]
vec2 = torch.cat( [vec1e, wenc_ne], dim=3)
# now make logits
s_wv = self.wv_out(vec2) # [bS, 4, mL, 400] -> [bS, 4, mL, 2]
s_wv_softmax = self.softmax_dim_1(s_wv)
# penalty for spurious tokens
for b, l_n1 in enumerate(len_question):
if l_n1 < mL_n:
s_wv[b, :, l_n1:, :] = -10000000000
for b, l_n1 in enumerate(len_question):
if l_n1 < mL_n:
s_wv_softmax[b, :, l_n1:, :] = 0
return s_wv,s_wv_softmax
def Loss_selectwhere_startend_v2(score_select_column, s_sa, s_wn, s_wc, s_wo,
s_wv, ground_truth_select_column, g_sa, g_wn, g_wc, g_wo, g_wvi):
"""
:param s_wv: score [ B, n_conds, T, score]
:param g_wn: [ B ]
:param g_wvi: [B, conds, pnt], e.g. [[[0, 6, 7, 8, 15], [0, 1, 2, 3, 4, 15]], [[0, 1, 2, 3, 16], [0, 7, 8, 9, 16]]]
:return:
"""
loss = 0
# loss += Loss_sc(score_select_column, ground_truth_select_column)
# loss += Loss_sa(s_sa, g_sa)
# loss += Loss_wn(s_wn, g_wn)
# loss += Loss_wc(s_wc, g_wc)
# loss += Loss_wo(s_wo, g_wn, g_wo)
# loss += Loss_wv_se(s_wv, g_wn, g_wvi)
return loss
def Loss_sw_se(score_select_column, s_sa, s_wn, s_wc, s_wo,
s_wv, ground_truth_select_column, g_sa, g_wn, g_wc, g_wo, g_wvi):
"""
:param s_wv: score [ B, n_conds, T, score]
:param g_wn: [ B ]
:param g_wvi: [B, conds, pnt], e.g. [[[0, 6, 7, 8, 15], [0, 1, 2, 3, 4, 15]], [[0, 1, 2, 3, 16], [0, 7, 8, 9, 16]]]
:return:
"""
loss = 0
loss += Loss_sc(score_select_column, ground_truth_select_column)
loss += Loss_sa(s_sa, g_sa)
loss += Loss_wn(s_wn, g_wn)
loss += Loss_wc(s_wc, g_wc)
loss += Loss_wo(s_wo, g_wn, g_wo)
loss += Loss_wv_se(s_wv, g_wn, g_wvi)
return loss
def Loss_sc(s_sc, g_sc):
loss = F.cross_entropy(s_sc, torch.tensor(g_sc).to(device))
return loss
def Loss_sa(s_sa, g_sa):
loss = F.cross_entropy(s_sa, torch.tensor(g_sa).to(device))
return loss
def Loss_wn(s_wn, g_wn):
loss = F.cross_entropy(s_wn, torch.tensor(g_wn).to(device))
return loss
def Loss_wc(s_wc, g_wc):
# Construct index matrix
bS, max_h_len = s_wc.shape
im = torch.zeros([bS, max_h_len]).to(device)
for b, g_wc1 in enumerate(g_wc):
for g_wc11 in g_wc1:
im[b, g_wc11] = 1.0
# Construct prob.
p = F.sigmoid(s_wc)
loss = F.binary_cross_entropy(p, im)
return loss
def Loss_wo(s_wo, g_wn, g_wo):
# Construct index matrix
loss = 0
for b, g_wn1 in enumerate(g_wn):
if g_wn1 == 0:
continue
g_wo1 = g_wo[b]
s_wo1 = s_wo[b]
loss += F.cross_entropy(s_wo1[:g_wn1], torch.tensor(g_wo1).to(device))
return loss
def Loss_wv_se(s_wv, g_wn, g_wvi):
"""
s_wv: [bS, 4, mL, 2], 4 stands for maximum # of condition, 2 tands for start & end logits.
g_wvi: [ [1, 3, 2], [4,3] ] (when B=2, wn(b=1) = 3, wn(b=2) = 2).
"""
loss = 0
# g_wvi = torch.tensor(g_wvi).to(device)
for b, g_wvi1 in enumerate(g_wvi):
# for i_wn, g_wvi11 in enumerate(g_wvi1):
g_wn1 = len(g_wvi1) # 有改动
# g_wn1 = g_wn[b] # 有改动
if g_wn1 == 0:
continue
g_wvi1 = torch.tensor(g_wvi1)[:g_wn1].to(device) # 有改动
g_st1 = g_wvi1[:,0]
g_ed1 = g_wvi1[:,1]
# loss from the start position
loss += F.cross_entropy(s_wv[b,:g_wn1,:,0], g_st1)
# print("st_login: ", s_wv[b,:g_wn1,:,0], g_st1, loss)
# loss from the end position
loss += F.cross_entropy(s_wv[b,:g_wn1,:,1], g_ed1)
# print("ed_login: ", s_wv[b,:g_wn1,:,1], g_ed1, loss)
return loss
# ========= Decoder-Layer ===========
class FT_s2s_1(nn.Module):
""" Decoder-Layer """
def __init__(self, input_size, hidden_size, num_layer, dropout, max_seq_length, n_cond_ops, n_agg_ops, old=False):
super(FT_s2s_1, self).__init__()
self.input_size = input_size # input_size
self.hidden_size = hidden_size # hidden_size
self.ls = num_layer
self.dropout = dropout
self.n_cond_ops = n_cond_ops
self.n_agg_ops = n_agg_ops
self.n_where_num = 4
self.decoder_s2s = Decoder_s2s(input_size, hidden_size, num_layer, dropout, max_seq_length)
def forward(self, wenc_s2s, l_input, cls_vec, pnt_start_tok, g_pnt_idxs=None):
score = self.decoder_s2s(wenc_s2s, l_input, cls_vec, pnt_start_tok, g_pnt_idxs)
return score
def EG_forward(self, wenc_s2s, l_input, cls_vec,
pnt_start_tok, pnt_end_tok,
i_sql_vocab, i_nlu, i_hds, # for EG
tokens, nlu, nlu_t, hds, tt_to_t_idx, # for EG
tb, engine,
beam_size=4, beam_only=True):
""" EG-guided beam-search """
score = self.decoder_s2s.EG_forward(wenc_s2s, l_input, cls_vec,
pnt_start_tok, pnt_end_tok,
i_sql_vocab, i_nlu, i_hds, # for EG
tokens, nlu, nlu_t, hds, tt_to_t_idx, # for EG
tb, engine,
beam_size, beam_only)
return score
class Decoder_s2s(nn.Module):
def __init__(self, input_size=300, hidden_size=100, num_layer=2, dropout=0.3, max_seq_length=222, n_cond_ops=3):
super(Decoder_s2s, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layer = num_layer
self.dropout = dropout
self.mL = max_seq_length
self.Tmax = 200
self.enc_h = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.enc_n = nn.LSTM(input_size=input_size, hidden_size=int(hidden_size / 2),
num_layers=num_layer, batch_first=True,
dropout=dropout, bidirectional=True)
self.decode_pn = nn.LSTM(input_size=max_seq_length, hidden_size=hidden_size,
num_layers=num_layer, batch_first=True,
dropout=dropout)
self.W_s2s = nn.Linear(input_size, hidden_size)
self.W_pnt = nn.Linear(hidden_size, hidden_size)
self.wv_out = nn.Sequential(nn.Tanh(), nn.Linear(hidden_size, 1))
def forward(self, wenc_s2s, l_input, cls_vec, pnt_start_tok, g_pnt_idxs=None,):
# Encode
bS, mL_input, input_size = wenc_s2s.shape
# Now, pointer network.
ipnt = wenc_s2s.new_zeros(bS, 1, mL_input).to(device) # [B, 1, 200]
ipnt[:, 0, pnt_start_tok] = 1 # 27 is of start token under current tokenization scheme
# initial (current) pointer
cpnt = ipnt
# reshape wenc_s2s to incorporate T later
wenc_s2s = wenc_s2s.unsqueeze(1)
# h_0 and c_0 from cls_vec
# They are not bidirectional.
h_0 = torch.zeros([self.num_layer, bS, self.hidden_size]).to(device)
c_0 = torch.zeros([self.num_layer, bS, self.hidden_size]).to(device)
for i_layer in range(self.num_layer):
h_st = (2*i_layer)*self.hidden_size
h_ed = h_st + self.hidden_size
c_st = (2*i_layer+1)*self.hidden_size
c_ed = c_st + self.hidden_size
h_0[i_layer] = cls_vec[:, h_st:h_ed] # [ # of layers, batch, dim]
c_0[i_layer] = cls_vec[:, c_st:c_ed] # [ # of layers, batch, dim]
if g_pnt_idxs:
pnt_n = torch.zeros(bS, self.Tmax, mL_input).to(device) # one hot
# assign index
for b, g_pnt_idxs1 in enumerate(g_pnt_idxs):
for t, g_pnt_idx in enumerate(g_pnt_idxs1):
pnt_n[b, t, g_pnt_idx] = 1
# Encode
dec_pn, _ = self.decode_pn(pnt_n, (h_0, c_0))
dec_pn = dec_pn.contiguous()
# [bS, T, input_size]
dec_pn = dec_pn.unsqueeze(2)
# Calculate score
s_wv = self.wv_out(
self.W_s2s(wenc_s2s)
+ self.W_pnt(dec_pn)
).squeeze(3) # [B, T, mL_input, dim] -> [B, T, mL_input, 1] -> [B, T, mL_input]
# s_wv = [B, 4, T, mL_n] = [batch, conds, token idx, score]
# penalty
for b, l_input1 in enumerate(l_input):
if l_input1 < mL_input:
s_wv[b, :, l_input1:] = -10000000000
else:
t = 0
s_wv_list = []
cpnt_h = (h_0, c_0)
while t < self.Tmax:
dec_pn, cpnt_h = self.decode_pn(cpnt, cpnt_h) # lstm
# [B, 1, 100] -> [B, 1, 1, 100]
dec_pn = dec_pn.unsqueeze(2)
# [bS, T, input_size]
# get score
s_wv1 = self.wv_out(
self.W_s2s(wenc_s2s) # [B, 1, mL_input, dim]
+ self.W_pnt(dec_pn) # [B, T=1, 1, dim] Now, T=1
).squeeze(3)
# s_wv = [B, 4, 1, mL_n, 1] = [batch, conds, token idx, score]
# -> [B, 4, mL_n]
# Masking --
for b, l_input1 in enumerate(l_input):
if l_input1 < mL_input:
s_wv1[b, :, l_input1:] = -10000000000
# Collect score--
s_wv_list.append(s_wv1)
# [B, 1, mL_input] -> [B, mL_n] -> [bS*(5-1)]
# (max_val, max_indices)
_val, pnt_n = s_wv1.view(bS, -1).max(dim=1)
# formatting pnt_n as a one-hot input.
cpnt = torch.zeros(bS, mL_input).to(device)
# cpnt = cpnt.scatter_(dim=1, index=pnt_n.unsqueeze(1), src=1).to(device)
cpnt = cpnt.scatter_(1, pnt_n.unsqueeze(1), 1)
cpnt = cpnt.unsqueeze(1) # --> [B * 4, 1, 200]
t += 1
s_wv = torch.stack(s_wv_list, 1) # [B,
s_wv = s_wv.squeeze(2) #
# # Following lines seems to be unnecessary.
# # Penalty to blank parts
# for b, l_input1 in enumerate(l_input):
# if l_input1 < mL_input:
# s_wv[b, :, l_input1:] = -10000000000
return s_wv
def EG_forward(self, wenc_s2s, l_input, cls_vec,
pnt_start_tok, pnt_end_tok,
i_sql_vocab, i_nlu, i_hds, # for EG
tokens, nlu, nlu_t, hds, tt_to_t_idx, # for EG
tb, engine,
beam_size, beam_only=True):
# Encode
bS, mL_input, input_size = wenc_s2s.shape
# reshape wenc_s2s to incorperate T later
wenc_s2s = wenc_s2s.unsqueeze(1)
# h_0 and c_0 from cls_vec
# They are not bidirectional.
h_0 = torch.zeros([self.num_layer, bS, self.hidden_size]).to(device)
c_0 = torch.zeros([self.num_layer, bS, self.hidden_size]).to(device)
for i_layer in range(self.num_layer):
h_st = (2*i_layer)*self.hidden_size
h_ed = h_st + self.hidden_size
c_st = (2*i_layer+1)*self.hidden_size
c_ed = c_st + self.hidden_size
h_0[i_layer] = cls_vec[:, h_st:h_ed] # [ # of layers, batch, dim]
c_0[i_layer] = cls_vec[:, c_st:c_ed] # [ # of layers, batch, dim]
# initial (current) pointer
pnt_list_beam = []
cpnt_beam = []
cpnt_h_beam = []
for i_beam in range(beam_size):
pnt_list_beam1 = []
for b in range(bS):
pnt_list_beam1.append( [ [pnt_start_tok], 0] )
pnt_list_beam.append(pnt_list_beam1)
# initisl cpnt
# Now, initialize pointer network.
ipnt = wenc_s2s.new_zeros(bS, 1, mL_input).to(device) # [B, 1, 200]
# Distort ipnt by i_bam on purpose to avoid initial duplication of beam-search
ipnt[:, 0, pnt_start_tok] = 1 # 27 is of start token under current tokenization scheme
cpnt_beam.append(ipnt)
cpnt_h_beam.append( (h_0, c_0) )
t = 0
while t < self.Tmax:
# s_wv1_beam = []
candidates = [ [] for b in range(bS) ] # [bS]
# Generate beam
for i_beam, cpnt in enumerate(cpnt_beam):
cpnt_h = cpnt_h_beam[i_beam]
pnt_list_beam1 = pnt_list_beam[i_beam]
dec_pn, cpnt_h = self.decode_pn(cpnt, cpnt_h) # lstm
cpnt_h_beam[i_beam] = cpnt_h
# [B, 1, 100] -> [B, 1, 1, 100]
dec_pn = dec_pn.unsqueeze(2)
# [bS, T, input_size]
# get score
s_wv1 = self.wv_out(
self.W_s2s(wenc_s2s) # [B, 1, mL_input, dim]
+ self.W_pnt(dec_pn) # [B, T=1, 1, dim] Now, T=1
).squeeze(3)
# s_wv = [B, 4, 1, mL_n, 1] = [batch, conds, token idx, score]
# -> [B, 4, mL_n]
# Masking --
for b, l_input1 in enumerate(l_input):
if l_input1 < mL_input:
s_wv1[b, :, l_input1:] = -10000000000
# Get the candidates only among the input space.
prob, idxs = F.softmax(s_wv1.view(bS, -1), dim=1).topk(dim=1, k=max(l_input))
log_prob = torch.log(prob) # [bS, beam_size]
for b, log_prob1 in enumerate(log_prob):
pnt_list11, score = pnt_list_beam1[b]
for i_can, log_prob11 in enumerate(log_prob1):
# no update if last token was the end-token
previous_pnt = pnt_list11[-1]
if previous_pnt== pnt_end_tok:
new_seq = pnt_list11
new_score = score
else:
new_seq = pnt_list11 + [idxs[b][i_can].item()]
new_score = score + log_prob11.item()
_candidate = [new_seq, new_score]
candidates[b].append(_candidate)
# Execution-guided beam filtering
for b, candidates1 in enumerate(candidates):
new_pnt_list_batch1 = sorted(candidates1, key=lambda list1: list1[-1], reverse=True)
count = 0
selected_candidates1 = []
for new_pnt_list_batch11 in new_pnt_list_batch1:
if new_pnt_list_batch11 not in selected_candidates1:
if beam_only:
selected_candidates1.append(new_pnt_list_batch11)
pnt_list_beam[count][b] = new_pnt_list_batch11
count +=1
else:
# Need to be modified here.
executable = False
testable = False
pr_i_vg_list, pr_i_vg_sub_list = gen_i_vg_from_pnt_idxs([new_pnt_list_batch11[0]], [i_sql_vocab[b]], [i_nlu[b]],
[i_hds[b]])
pr_sql_q_s2s, pr_sql_i = gen_sql_q_from_i_vg([tokens[b]], [nlu[b]], [nlu_t[b]], [hds[b]], [tt_to_t_idx[b]],
pnt_start_tok, pnt_end_tok,
[new_pnt_list_batch11[0]], pr_i_vg_list, pr_i_vg_sub_list)
# check testability from select-clause
try:
# check whether basic elements presents in pr_sql_i
# If so, it is testable.
idx_agg = pr_sql_i[0]["agg"]
idx_sel = pr_sql_i[0]["sel"]
testable = True
except:
testable = False
pass
# check the presence of conds
if testable:
try:
conds = pr_sql_i[0]["conds"]
except:
conds = []
try:
pr_ans1 = engine.execute(tb[b]['id'], idx_sel, idx_agg, conds)
executable = bool(pr_ans1)
except:
executable = False
#
if testable:
if executable:
add_candidate = True
else:
add_candidate = False
else:
add_candidate = True
if add_candidate:
selected_candidates1.append(new_pnt_list_batch11)
pnt_list_beam[count][b] = new_pnt_list_batch11
count += 1
if count == beam_size:
break
if count < beam_size:
# not executable at all..
# add junk sequence.
for i_junk in range(count, beam_size):
pnt_list_beam[i_junk][b] = [[pnt_end_tok],-9999999]
# generate cpnt
# formatting pnt_n as a one-hot input.
for i_beam in range(beam_size):
cpnt = torch.zeros(bS, mL_input).to(device)
# cpnt = cpnt.scatter_(dim=1, index=pnt_n.unsqueeze(1), src=1).to(device)
idx_batch = [seq_score[0][-1] for seq_score in pnt_list_beam[i_beam]]
pnt_n = torch.tensor(idx_batch).to(device)
cpnt = cpnt.scatter_(1, pnt_n.unsqueeze(1), 1)
cpnt = cpnt.unsqueeze(1) # --> [B, t=1, mL_input]
cpnt_beam[i_beam] = cpnt
t += 1
# Generate best pr_pnt_list, p_tot
pr_pnt_idxs = []
p_list = []
for b in range(bS):
pnt_list_beam_best = pnt_list_beam[0]
pr_pnt_idxs.append(pnt_list_beam_best[b][0])
p_list.append( pnt_list_beam_best[b][1])
return pr_pnt_idxs, p_list, pnt_list_beam
# ============= Shallow-Layer ===============
class FT_Scalar_1(nn.Module):
""" Shallow-Layer """
def __init__(self, input_size, hidden_size, num_layer, dropout, n_cond_ops, n_agg_ops, old=False):
super(FT_Scalar_1, self).__init__()
self.input_size = input_size # input_size
self.hidden_size = hidden_size
self.num_layer = num_layer
self.dropout = dropout
self.n_cond_ops = n_cond_ops
self.n_agg_ops = n_agg_ops
self.n_where_num = 4
def scp(self, wemb_h, l_header):
bS, max_header_len, _ = wemb_h.shape
# s_sc
s_sc = torch.zeros(bS, max_header_len).to(device)
s_sc[:, :] = wemb_h[:, :, 0] # s_sc = [B, max_header length, 1]
# s_sc[:,:] = F.tanh(wemb_h[:,:,0]) # s_sc = [B, max_header length, 1]
# s_sc = s_sc.squeeze(2)
# masking
# print(f"s_sc {s_sc}")
for b, l_header1 in enumerate(l_header):
s_sc[b, l_header1:] = -9999999999.0
return s_sc
def sap(self, wemb_h, pr_sc, idx_st, idx_ed):
bS, max_header_len, _ = wemb_h.shape
# select of aggregation operator
s_sa = torch.zeros([bS, self.n_agg_ops]).to(device)
for b, pr_sc1 in enumerate(pr_sc):
s_sa[b,:] = wemb_h[b,pr_sc1,idx_st:idx_ed]
return s_sa
def wnp(self, cls_vec):
bS = cls_vec.shape[0]
# [B,hidden_size] -> [B, n_where_num+1]
s_wn = torch.zeros(bS, (self.n_where_num + 1)).to(device)
s_wn[:, :] = cls_vec[:, 0:(self.n_where_num + 1)]
return s_wn
def wcp(self, wemb_h, l_header, idx_st, idx_ed):
bS, max_header_len, _ = wemb_h.shape
s_wc = torch.zeros(bS, max_header_len, 1).to(device)
s_wc[:, :, :] = wemb_h[:, :, idx_st:idx_ed]
s_wc = s_wc.squeeze(2) # [B, max_header_length]
# masking
for b, l_header1 in enumerate(l_header):
s_wc[b, l_header1:] = -99999999999.0
return s_wc
def wop(self, wemb_h, pr_wc, idx_st, idx_ed):
bS, max_header_len, _ = wemb_h.shape
s_wo = torch.zeros([bS, self.n_where_num, self.n_cond_ops]).to(device)
for b, pr_wc1 in enumerate(pr_wc):
if len(pr_wc1) > 0:
s_wo[b, 0:len(pr_wc1), :] = wemb_h[b, pr_wc1, idx_st:idx_ed]
else:
pass
return s_wo
def wvp(self, emb_question, len_question, pr_wc):
bS, _, _ = emb_question.shape
s_wv = torch.zeros([bS, self.n_where_num, max(len_question), 2]).to(device)
for b, pr_wc1 in enumerate(pr_wc):
if len(pr_wc1) > 0:
# start logit
s_wv[b, 0:len(pr_wc1), :, 0] = emb_question[b, :, pr_wc1].transpose(0, 1)
# end logit
s_wv[b, 0:len(pr_wc1), :, 1] = emb_question[b, :, [pr_wc11 + 100 for pr_wc11 in pr_wc1]].transpose(0, 1)
else:
pass
# masking
# penalty for spurious tokens
for b, l_n1 in enumerate(len_question):
if l_n1 < max(len_question):
s_wv[b, :, l_n1:, :] = -1e+11
return s_wv
def forward(self, emb_question, len_question, wemb_h, l_header, cls_vec,
g_sc=None, g_sa=None, g_wn=None, g_wc=None, g_wo=None, g_wvi=None,
show_p_sc=False, show_p_sa=False,
show_p_wn=False, show_p_wc=False, show_p_wo=False, show_p_wv=False):
# emb_question = [B, max_nlu_token_length, hidden_size] # here, # of target_layer is fixed to 1.
# wemb_h = [B, max_header #, hidden_size]
s_sc = self.scp(wemb_h, l_header)
if g_sc:
pr_sc = g_sc
else:
pr_sc = pred_sc(s_sc)
# s_sa
idx_st = 1
idx_ed = 1 + self.n_agg_ops
s_sa = self.sap(wemb_h, pr_sc, idx_st, idx_ed)
if g_sa:
pr_sa = g_sa
else:
pr_sa = pred_sa(s_sa)
# where_number
s_wn = self.wnp(cls_vec)
if g_wn:
pr_wn = g_wn
else:
pr_wn = pred_wn(s_wn)
# wc
idx_st = idx_ed+1
idx_ed = idx_st+1
s_wc = self.wcp(wemb_h, l_header, idx_st, idx_ed)
if g_wc:
pr_wc = g_wc
else:
pr_wc = pred_wherecolumn(pr_wn, s_wc)
# wo
idx_st = idx_ed+1
idx_ed = idx_st + self.n_cond_ops
s_wo = self.wop(wemb_h, pr_wc, idx_st, idx_ed)
if g_wo:
pr_wo = g_wo
else:
pr_wo = pred_wo(pr_wn, s_wo)
# wv
# s_wv = [bS, 4, mL, 2]
s_wv = self.wvp(emb_question, len_question, pr_wc)
# print(s_wv)
# s_wv = F.tanh(s_wv)
return s_sc, s_sa, s_wn, s_wc, s_wo, s_wv
def forward_EG(self, emb_question, len_question, wemb_h, l_header, cls_vec, engine, tb,
nlu_t, nlu_tt, tt_to_t_idx, nlu,
beam_size=4):
"""
Execution-guided beam decoding.
Essentially identical with that of NL2SQL Layer.
"""
# Select-clause
prob_sca, pr_sc_best, pr_sa_best, \
p_sc_best, p_sa_best, p_select \
= self.EG_decoding_select(wemb_h, l_header, tb, beam_size=beam_size)
# Where-clause
prob_w, prob_wn_w, pr_wn_based_on_prob, pr_sql_i, pr_wvi_best, \
p_where, p_wn_best, p_wc_best, p_wo_best, p_wvi_best \
= self.EG_decoding_where(emb_question, len_question, wemb_h, l_header, cls_vec, engine, tb,
nlu_t, nlu_tt, tt_to_t_idx, nlu,
pr_sc_best, pr_sa_best,
beam_size=4)
p_tot = cal_prob_tot(p_select, p_where)
return pr_sc_best, pr_sa_best, pr_wn_based_on_prob, pr_wvi_best, \
pr_sql_i, p_tot, p_select, p_where, p_sc_best, p_sa_best, \
p_wn_best, p_wc_best, p_wo_best, p_wvi_best
def EG_decoding_select(self, wemb_h, l_header, tb,
beam_size=4, show_p_sc=False, show_p_sa=False):
# sc
s_sc = self.scp(wemb_h, l_header)
prob_sc = F.softmax(s_sc, dim=-1)
bS, mcL = s_sc.shape
# minimum_header_length = min(l_header)
# beam_size = minimum_header_length if beam_size > minimum_header_length else beam_size
# sa
# Construct all possible sc_sa_score
prob_sc_sa = torch.zeros([bS, beam_size, self.n_agg_ops]).to(device)
score_sc_sa = torch.zeros([bS, beam_size, self.n_agg_ops]).to(device)
prob_sca = torch.zeros_like(prob_sc_sa).to(device)
# get the top-k indices. pr_sc_beam = [B, beam_size]
pr_sc_beam = pred_sc_beam(s_sc, beam_size)
# calculate and predict s_sa.
idx_st = 1
idx_ed = 1 + self.n_agg_ops
for i_beam in range(beam_size):
pr_sc = list(array(pr_sc_beam)[:, i_beam])
s_sa = self.sap(wemb_h, pr_sc, idx_st, idx_ed)
prob_sa = F.softmax(s_sa, dim=-1)
prob_sc_sa[:, i_beam, :] = prob_sa
score_sc_sa[:, i_beam, :] = s_sa
prob_sc_selected = prob_sc[range(bS), pr_sc] # [B]
prob_sca[:, i_beam, :] = (prob_sa.t() * prob_sc_selected).t()
# [mcL, B] * [B] -> [mcL, B] (element-wise multiplication)
# [mcL, B] -> [B, mcL]
# Calculate the dimension of tensor
# tot_dim = len(prob_sca.shape)
idxs = topk_multi_dim(torch.tensor(prob_sca), n_topk=beam_size, batch_exist=True)
# Now as sc_idx is already sorted, re-map them properly.
idxs = remap_sc_idx(idxs, pr_sc_beam) # [sc_beam_idx, sa_idx] -> [sc_idx, sa_idx]
idxs_arr = array(idxs)
# [B, beam_size, remainig dim]
# idxs[b][0] gives first probable [sc_idx, sa_idx] pairs.
# idxs[b][1] gives of second.
# Calculate prob_sca, a joint probability
beam_idx_sca = [0] * bS
beam_meet_the_final = [False] * bS
while True:
pr_sc = idxs_arr[range(bS), beam_idx_sca, 0]
pr_sa = idxs_arr[range(bS), beam_idx_sca, 1]
# map index properly
check = check_sc_sa_pairs(tb, pr_sc, pr_sa)
if sum(check) == bS:
break
else:
for b, check1 in enumerate(check):
if not check1: # wrong pair
beam_idx_sca[b] += 1
if beam_idx_sca[b] >= beam_size:
beam_meet_the_final[b] = True
beam_idx_sca[b] -= 1
else:
beam_meet_the_final[b] = True
if sum(beam_meet_the_final) == bS:
break
# Now pr_sc, pr_sa are properly predicted.
pr_sc_best = list(pr_sc)
pr_sa_best = list(pr_sa)
# output for later analysis.
p_sc_best = cal_prob_sc(s_sc, pr_sc_best)
p_sa_best = cal_prob_sa(score_sc_sa[range(bS), beam_idx_sca, :].squeeze(1), pr_sa_best)
p_select = cal_prob_select(p_sc_best, p_sa_best)
# p_select = prob_sca[range(bS),beam_idx_sca,pr_sa_best].detach().to('cpu').numpy()
return prob_sca, pr_sc_best, pr_sa_best, p_sc_best, p_sa_best, p_select
def EG_decoding_where(self, emb_question, len_question, wemb_h, l_header, cls_vec, engine, tb,
nlu_t, nlu_wp_t, tt_to_t_idx, nlu,
pr_sc_best, pr_sa_best,
beam_size=4, show_p_wn=False, show_p_wc=False, show_p_wo=False, show_p_wv=False):
bS, max_header_len, _ = wemb_h.shape
# Now, Where-clause beam search.
idx_st = 1
idx_ed = 1 + self.n_agg_ops
s_wn = self.wnp(cls_vec)
prob_wn = F.softmax(s_wn, dim=-1).detach().to('cpu').numpy()
# Found "executable" most likely 4(=max_num_of_conditions) where-clauses.
# wc
idx_st = idx_ed + 1
idx_ed = idx_st + 1
s_wc = self.wcp(wemb_h, l_header, idx_st, idx_ed)
prob_wc = torch.sigmoid(s_wc).detach().to('cpu').numpy()
# pr_wc_sorted_by_prob = pred_wc_sorted_by_prob(s_wc)
# get max_wn # of most probable columns & their prob.
pr_wn_max = [self.n_where_num] * bS
pr_wc_max = pred_wherecolumn(pr_wn_max, s_wc) # if some column do not have executable where-claouse, omit that column
prob_wc_max = zeros([bS, self.n_where_num])
for b, pr_wc_max1 in enumerate(pr_wc_max):
prob_wc_max[b, :] = prob_wc[b, pr_wc_max1]
# get most probable n_where_num where-clouses
# wo
idx_st = idx_ed + 1
idx_ed = idx_st + self.n_cond_ops
s_wo_max = self.wop(wemb_h, pr_wc_max, idx_st, idx_ed)
prob_wo_max = F.softmax(s_wo_max, dim=-1).detach().to('cpu').numpy()
# [B, n_where_num, n_cond_op]
pr_wvi_beam_op_list = []
prob_wvi_beam_op_list = []
prob_wvi_beam_st_op_list = []
prob_wvi_beam_ed_op_list = []
# To re-use code, repeat the calculation unnecessarily.
for i_op in range(self.n_cond_ops - 1):
pr_wo_temp = [[i_op] * self.n_where_num] * bS
# wv
s_wv = self.wvp(emb_question, len_question, pr_wc_max)
prob_wv = F.softmax(s_wv, dim=-2).detach().to('cpu').numpy()
# prob_wv
pr_wvi_beam, prob_wvi_beam, prob_wvi_beam_st, prob_wvi_beam_ed = pred_wvi_se_beam(self.n_where_num, s_wv, beam_size)
pr_wvi_beam_op_list.append(pr_wvi_beam)
prob_wvi_beam_op_list.append(prob_wvi_beam)
prob_wvi_beam_st_op_list.append(prob_wvi_beam_st)
prob_wvi_beam_ed_op_list.append(prob_wvi_beam_ed)
# pr_wvi_beam = [B, n_where_num, k_logit**2 [st, ed] paris]
# pred_wv_beam
# Calculate joint probability of where-clause
# prob_w = [batch, wc, wo, wv] = [B, n_where_num, n_cond_op, n_pairs]
n_wv_beam_pairs = prob_wvi_beam.shape[2]
prob_w = zeros([bS, self.n_where_num, self.n_cond_ops - 1, n_wv_beam_pairs])
prob_wc_dupl = zeros([bS, self.n_where_num, self.n_cond_ops - 1, n_wv_beam_pairs])
prob_wo_dupl = zeros([bS, self.n_where_num, self.n_cond_ops - 1, n_wv_beam_pairs])
prob_wvi_st_dupl = zeros([bS, self.n_where_num, self.n_cond_ops - 1, n_wv_beam_pairs])
prob_wvi_ed_dupl = zeros([bS, self.n_where_num, self.n_cond_ops - 1, n_wv_beam_pairs])
for b in range(bS):
for i_wn in range(self.n_where_num):
for i_op in range(self.n_cond_ops - 1): # do not use final one
p_wc = prob_wc_max[b, i_wn]
for i_wv_beam in range(n_wv_beam_pairs):
# i_wc = pr_wc_max[b][i_wn] # already done
p_wo = prob_wo_max[b, i_wn, i_op]
p_wv = prob_wvi_beam_op_list[i_op][b, i_wn, i_wv_beam]
prob_w[b, i_wn, i_op, i_wv_beam] = p_wc * p_wo * p_wv
prob_wc_dupl[b, i_wn, i_op, i_wv_beam] = p_wc
prob_wo_dupl[b, i_wn, i_op, i_wv_beam] = p_wo
p_wv_st = prob_wvi_beam_st_op_list[i_op][b, i_wn, i_wv_beam]
p_wv_ed = prob_wvi_beam_ed_op_list[i_op][b, i_wn, i_wv_beam]
prob_wvi_st_dupl[b, i_wn, i_op, i_wv_beam] = p_wv_st
prob_wvi_ed_dupl[b, i_wn, i_op, i_wv_beam] = p_wv_ed
# Perform execution guided decoding
conds_max = []
prob_conds_max = []
# while len(conds_max) < self.n_where_num:
idxs = topk_multi_dim(torch.tensor(prob_w), n_topk=beam_size, batch_exist=True)
# idxs = [B, i_wc_beam, i_op, i_wv_pairs]
# Construct conds1. Collect only executable one. It is descending order of the probability.
pr_wvi_max = []
p_wc_max = []
p_wo_max = []
p_wvi_max = []
for b, idxs1 in enumerate(idxs):
conds_max1 = []
prob_conds_max1 = []
pr_wvi1_max = []
p_wc1_max = []
p_wo1_max = []
p_wvi1_max = []
for i_wn, idxs11 in enumerate(idxs1):
i_wc = pr_wc_max[b][idxs11[0]]
i_op = idxs11[1]
wvi = pr_wvi_beam_op_list[i_op][b][idxs11[0]][idxs11[2]]
# idx11[0]
# get wv_str
temp_pr_wv_str, _ = convert_pred_wvi_to_string([[wvi]], [nlu_t[b]], [nlu_wp_t[b]], [tt_to_t_idx[b]],
[nlu[b]])
merged_wv11 = merge_wv_t1_eng(temp_pr_wv_str[0][0], nlu[b])
conds11 = [i_wc, i_op, merged_wv11]
prob_conds11 = prob_w[b, idxs11[0], idxs11[1], idxs11[2]]
p_wc11_max = prob_wc_dupl[b, idxs11[0], idxs11[1], idxs11[2]]
p_wo11_max = prob_wo_dupl[b, idxs11[0], idxs11[1], idxs11[2]]
p_wvi11_max = [ prob_wvi_st_dupl[b, idxs11[0], idxs11[1], idxs11[2]],
prob_wvi_ed_dupl[b, idxs11[0], idxs11[1], idxs11[2]] ]
# test execution
# print(nlu[b])
# print(tb[b]['id'], tb[b]['types'], pr_sc[b], pr_sa[b], [conds11])
pr_ans = engine.execute(tb[b]['id'], pr_sc_best[b], pr_sa_best[b], [conds11])
if bool(pr_ans):
# pr_ans is not empty!
conds_max1.append(conds11)
prob_conds_max1.append(prob_conds11)
pr_wvi1_max.append(wvi)
p_wc1_max.append(p_wc11_max)
p_wo1_max.append(p_wo11_max)
p_wvi1_max.append(p_wvi11_max)
conds_max.append(conds_max1)
prob_conds_max.append(prob_conds_max1)
pr_wvi_max.append(pr_wvi1_max)
p_wc_max.append(p_wc1_max)
p_wo_max.append(p_wo1_max)
p_wvi_max.append(p_wvi1_max)
# May need to do more exhuastive search?
# i.e. up to.. getting all executable cases.
# Calculate total probability to decide the number of where-clauses
pr_sql_i = []
prob_wn_w = [] # total where-clause probability
pr_wn_based_on_prob = []
pr_wvi_best = []
p_wc = []
p_wo = []
p_wvi = []
for b, prob_wn1 in enumerate(prob_wn):
max_executable_wn1 = len(conds_max[b])
prob_wn_w1 = []
prob_wn_w1.append(prob_wn1[0]) # wn=0 case.
for i_wn in range(max_executable_wn1):
prob_wn_w11 = prob_wn1[i_wn + 1] * prob_conds_max[b][i_wn]
prob_wn_w1.append(prob_wn_w11)
pr_wn_based_on_prob.append(argmax(prob_wn_w1))
prob_wn_w.append(prob_wn_w1)
pr_sql_i1 = {'agg': pr_sa_best[b], 'sel': pr_sc_best[b], 'conds': conds_max[b][:pr_wn_based_on_prob[b]]}
pr_wvi_best1 = pr_wvi_max[b][:pr_wn_based_on_prob[b]]
pr_sql_i.append(pr_sql_i1)
pr_wvi_best.append(pr_wvi_best1)
p_wc.append( p_wc_max[b][:pr_wn_based_on_prob[b]] )
p_wo.append( p_wo_max[b][:pr_wn_based_on_prob[b]] )
p_wvi.append( p_wvi_max[b][:pr_wn_based_on_prob[b]] )
# s_wv = [B, n_where_num, max_nlu_tokens, 2]
p_wn = cal_prob_wn(s_wn, pr_wn_based_on_prob)
p_where = cal_prob_where(p_wn, p_wc, p_wo, p_wvi)
return prob_w, prob_wn_w, pr_wn_based_on_prob, pr_sql_i, pr_wvi_best, \
p_where, p_wn, p_wc, p_wo, p_wvi
def Loss_s2s(score, g_pnt_idxs):
"""
score = [B, T, max_seq_length]
"""
# WHERE string part
loss = 0
for b, g_pnt_idxs1 in enumerate(g_pnt_idxs):
ed = len(g_pnt_idxs1) - 1
score_part = score[b, :ed]
loss += F.cross_entropy(score_part, torch.tensor(g_pnt_idxs1[1:]).to(device)) # +1 shift.
return loss
| 39.419083
| 161
| 0.555163
| 10,973
| 74,778
| 3.441903
| 0.05313
| 0.039187
| 0.02187
| 0.018005
| 0.766522
| 0.732075
| 0.704671
| 0.683595
| 0.661115
| 0.641284
| 0
| 0.030277
| 0.333948
| 74,778
| 1,896
| 162
| 39.439873
| 0.728025
| 0.168378
| 0
| 0.597066
| 0
| 0
| 0.006951
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035375
| false
| 0.002588
| 0.006903
| 0
| 0.077653
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
d9f32d2b9e677d6893c7269bf23bcedaa4e7f68a
| 363
|
py
|
Python
|
chia/components/sample_transformers/__init__.py
|
cabrust/chia
|
3eaf815b261dc8a85d64fd698e0079515ec0dde9
|
[
"BSD-3-Clause"
] | null | null | null |
chia/components/sample_transformers/__init__.py
|
cabrust/chia
|
3eaf815b261dc8a85d64fd698e0079515ec0dde9
|
[
"BSD-3-Clause"
] | 2
|
2021-10-06T13:19:09.000Z
|
2021-10-20T17:32:36.000Z
|
chia/components/sample_transformers/__init__.py
|
cabrust/chia
|
3eaf815b261dc8a85d64fd698e0079515ec0dde9
|
[
"BSD-3-Clause"
] | null | null | null |
from chia import components
from chia.components.sample_transformers import identity
from chia.components.sample_transformers.sample_transformer import SampleTransformer
class SampleTransformerFactory(components.Factory):
name_to_class_mapping = {"identity": identity.IdentitySampleTransformer}
__all__ = ["SampleTransformer", "SampleTransformerFactory"]
| 33
| 84
| 0.85124
| 34
| 363
| 8.794118
| 0.5
| 0.080268
| 0.120401
| 0.160535
| 0.240803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082645
| 363
| 10
| 85
| 36.3
| 0.897898
| 0
| 0
| 0
| 0
| 0
| 0.134986
| 0.066116
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 3
|
d9f3cb72d610ec30e4ecf05d60ba2025dc849112
| 416
|
py
|
Python
|
3/3.6/add_guest.py
|
singi2016cn/python-scaffold
|
274e508d1919da67e599aa73be139800c043bce4
|
[
"MIT"
] | null | null | null |
3/3.6/add_guest.py
|
singi2016cn/python-scaffold
|
274e508d1919da67e599aa73be139800c043bce4
|
[
"MIT"
] | null | null | null |
3/3.6/add_guest.py
|
singi2016cn/python-scaffold
|
274e508d1919da67e599aa73be139800c043bce4
|
[
"MIT"
] | null | null | null |
# 添加嘉宾
names = []
names.append('singi')
names.append('lily')
names.append('sam')
print('I find a big dining-table,I can invite more friends.')
names.insert(0, 'xiaoling')
names.insert(2, 'fangsi')
names.append('zhangqing')
greets = ',would you like to have dinner with me ?'
print(names[0]+greets)
print(names[1]+greets)
print(names[2]+greets)
print(names[3]+greets)
print(names[4]+greets)
print(names[5]+greets)
| 20.8
| 61
| 0.711538
| 67
| 416
| 4.41791
| 0.537313
| 0.202703
| 0.27027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02139
| 0.100962
| 416
| 20
| 62
| 20.8
| 0.770053
| 0.009615
| 0
| 0
| 0
| 0
| 0.309002
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.466667
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 3
|
8a03248b6fead646cb68e7a6a935435de664969c
| 14,492
|
py
|
Python
|
anaconda-mode/0.1.13/jedi-0.15.1-py3.7.egg/jedi/evaluate/base_context.py
|
space-scl/emacs.d
|
6285c38714023b72a023fe24cbcb5e4fcdcdb949
|
[
"Apache-2.0"
] | 4
|
2019-07-26T11:32:22.000Z
|
2019-09-11T05:34:59.000Z
|
anaconda-mode/0.1.13/jedi-0.15.1-py3.7.egg/jedi/evaluate/base_context.py
|
space-scl/emacs.d
|
6285c38714023b72a023fe24cbcb5e4fcdcdb949
|
[
"Apache-2.0"
] | 10
|
2020-05-11T20:29:28.000Z
|
2022-01-13T01:41:27.000Z
|
anaconda-mode/0.1.13/jedi-0.15.1-py3.7.egg/jedi/evaluate/base_context.py
|
space-scl/emacs.d
|
6285c38714023b72a023fe24cbcb5e4fcdcdb949
|
[
"Apache-2.0"
] | 2
|
2019-08-28T14:57:54.000Z
|
2019-11-26T16:18:30.000Z
|
"""
Contexts are the "values" that Python would return. However Contexts are at the
same time also the "contexts" that a user is currently sitting in.
A ContextSet is typically used to specify the return of a function or any other
static analysis operation. In jedi there are always multiple returns and not
just one.
"""
from functools import reduce
from operator import add
from parso.python.tree import ExprStmt, SyncCompFor
from jedi import debug
from jedi._compatibility import zip_longest, unicode
from jedi.parser_utils import clean_scope_docstring
from jedi.common import BaseContextSet, BaseContext
from jedi.evaluate.helpers import SimpleGetItemNotFound
from jedi.evaluate.utils import safe_property
from jedi.evaluate.cache import evaluator_as_method_param_cache
from jedi.cache import memoize_method
_sentinel = object()
class HelperContextMixin(object):
def get_root_context(self):
context = self
while True:
if context.parent_context is None:
return context
context = context.parent_context
@classmethod
@evaluator_as_method_param_cache()
def create_cached(cls, *args, **kwargs):
return cls(*args, **kwargs)
def execute(self, arguments):
return self.evaluator.execute(self, arguments=arguments)
def execute_evaluated(self, *value_list):
from jedi.evaluate.arguments import ValuesArguments
arguments = ValuesArguments([ContextSet([value]) for value in value_list])
return self.evaluator.execute(self, arguments)
def execute_annotation(self):
return self.execute_evaluated()
def gather_annotation_classes(self):
return ContextSet([self])
def merge_types_of_iterate(self, contextualized_node=None, is_async=False):
return ContextSet.from_sets(
lazy_context.infer()
for lazy_context in self.iterate(contextualized_node, is_async)
)
def py__getattribute__(self, name_or_str, name_context=None, position=None,
search_global=False, is_goto=False,
analysis_errors=True):
"""
:param position: Position of the last statement -> tuple of line, column
"""
if name_context is None:
name_context = self
from jedi.evaluate import finder
f = finder.NameFinder(self.evaluator, self, name_context, name_or_str,
position, analysis_errors=analysis_errors)
filters = f.get_filters(search_global)
if is_goto:
return f.filter_name(filters)
return f.find(filters, attribute_lookup=not search_global)
def py__await__(self):
await_context_set = self.py__getattribute__(u"__await__")
if not await_context_set:
debug.warning('Tried to run __await__ on context %s', self)
return await_context_set.execute_evaluated()
def eval_node(self, node):
return self.evaluator.eval_element(self, node)
def create_context(self, node, node_is_context=False, node_is_object=False):
return self.evaluator.create_context(self, node, node_is_context, node_is_object)
def iterate(self, contextualized_node=None, is_async=False):
debug.dbg('iterate %s', self)
if is_async:
from jedi.evaluate.lazy_context import LazyKnownContexts
# TODO if no __aiter__ contexts are there, error should be:
# TypeError: 'async for' requires an object with __aiter__ method, got int
return iter([
LazyKnownContexts(
self.py__getattribute__('__aiter__').execute_evaluated()
.py__getattribute__('__anext__').execute_evaluated()
.py__getattribute__('__await__').execute_evaluated()
.py__stop_iteration_returns()
) # noqa
])
return self.py__iter__(contextualized_node)
def is_sub_class_of(self, class_context):
for cls in self.py__mro__():
if cls.is_same_class(class_context):
return True
return False
def is_same_class(self, class2):
# Class matching should prefer comparisons that are not this function.
if type(class2).is_same_class != HelperContextMixin.is_same_class:
return class2.is_same_class(self)
return self == class2
class Context(HelperContextMixin, BaseContext):
"""
Should be defined, otherwise the API returns empty types.
"""
predefined_names = {}
"""
To be defined by subclasses.
"""
tree_node = None
@property
def api_type(self):
# By default just lower name of the class. Can and should be
# overwritten.
return self.__class__.__name__.lower()
def py__getitem__(self, index_context_set, contextualized_node):
from jedi.evaluate import analysis
# TODO this context is probably not right.
analysis.add(
contextualized_node.context,
'type-error-not-subscriptable',
contextualized_node.node,
message="TypeError: '%s' object is not subscriptable" % self
)
return NO_CONTEXTS
def py__iter__(self, contextualized_node=None):
if contextualized_node is not None:
from jedi.evaluate import analysis
analysis.add(
contextualized_node.context,
'type-error-not-iterable',
contextualized_node.node,
message="TypeError: '%s' object is not iterable" % self)
return iter([])
def get_signatures(self):
return []
def is_class(self):
return False
def is_instance(self):
return False
def is_function(self):
return False
def is_module(self):
return False
def is_namespace(self):
return False
def is_compiled(self):
return False
def is_bound_method(self):
return False
def py__bool__(self):
"""
Since Wrapper is a super class for classes, functions and modules,
the return value will always be true.
"""
return True
def py__doc__(self):
try:
self.tree_node.get_doc_node
except AttributeError:
return ''
else:
return clean_scope_docstring(self.tree_node)
return None
def get_safe_value(self, default=_sentinel):
if default is _sentinel:
raise ValueError("There exists no safe value for context %s" % self)
return default
def py__call__(self, arguments):
debug.warning("no execution possible %s", self)
return NO_CONTEXTS
def py__stop_iteration_returns(self):
debug.warning("Not possible to return the stop iterations of %s", self)
return NO_CONTEXTS
def get_qualified_names(self):
# Returns Optional[Tuple[str, ...]]
return None
def is_stub(self):
# The root context knows if it's a stub or not.
return self.parent_context.is_stub()
def iterate_contexts(contexts, contextualized_node=None, is_async=False):
"""
Calls `iterate`, on all contexts but ignores the ordering and just returns
all contexts that the iterate functions yield.
"""
return ContextSet.from_sets(
lazy_context.infer()
for lazy_context in contexts.iterate(contextualized_node, is_async=is_async)
)
class _ContextWrapperBase(HelperContextMixin):
predefined_names = {}
@safe_property
def name(self):
from jedi.evaluate.names import ContextName
wrapped_name = self._wrapped_context.name
if wrapped_name.tree_name is not None:
return ContextName(self, wrapped_name.tree_name)
else:
from jedi.evaluate.compiled import CompiledContextName
return CompiledContextName(self, wrapped_name.string_name)
@classmethod
@evaluator_as_method_param_cache()
def create_cached(cls, evaluator, *args, **kwargs):
return cls(*args, **kwargs)
def __getattr__(self, name):
assert name != '_wrapped_context', 'Problem with _get_wrapped_context'
return getattr(self._wrapped_context, name)
class LazyContextWrapper(_ContextWrapperBase):
@safe_property
@memoize_method
def _wrapped_context(self):
with debug.increase_indent_cm('Resolve lazy context wrapper'):
return self._get_wrapped_context()
def __repr__(self):
return '<%s>' % (self.__class__.__name__)
def _get_wrapped_context(self):
raise NotImplementedError
class ContextWrapper(_ContextWrapperBase):
def __init__(self, wrapped_context):
self._wrapped_context = wrapped_context
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self._wrapped_context)
class TreeContext(Context):
def __init__(self, evaluator, parent_context, tree_node):
super(TreeContext, self).__init__(evaluator, parent_context)
self.predefined_names = {}
self.tree_node = tree_node
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.tree_node)
class ContextualizedNode(object):
def __init__(self, context, node):
self.context = context
self.node = node
def get_root_context(self):
return self.context.get_root_context()
def infer(self):
return self.context.eval_node(self.node)
def __repr__(self):
return '<%s: %s in %s>' % (self.__class__.__name__, self.node, self.context)
class ContextualizedName(ContextualizedNode):
# TODO merge with TreeNameDefinition?!
@property
def name(self):
return self.node
def assignment_indexes(self):
"""
Returns an array of tuple(int, node) of the indexes that are used in
tuple assignments.
For example if the name is ``y`` in the following code::
x, (y, z) = 2, ''
would result in ``[(1, xyz_node), (0, yz_node)]``.
When searching for b in the case ``a, *b, c = [...]`` it will return::
[(slice(1, -1), abc_node)]
"""
indexes = []
is_star_expr = False
node = self.node.parent
compare = self.node
while node is not None:
if node.type in ('testlist', 'testlist_comp', 'testlist_star_expr', 'exprlist'):
for i, child in enumerate(node.children):
if child == compare:
index = int(i / 2)
if is_star_expr:
from_end = int((len(node.children) - i) / 2)
index = slice(index, -from_end)
indexes.insert(0, (index, node))
break
else:
raise LookupError("Couldn't find the assignment.")
is_star_expr = False
elif node.type == 'star_expr':
is_star_expr = True
elif isinstance(node, (ExprStmt, SyncCompFor)):
break
compare = node
node = node.parent
return indexes
def _getitem(context, index_contexts, contextualized_node):
from jedi.evaluate.context.iterable import Slice
# The actual getitem call.
simple_getitem = getattr(context, 'py__simple_getitem__', None)
result = NO_CONTEXTS
unused_contexts = set()
for index_context in index_contexts:
if simple_getitem is not None:
index = index_context
if isinstance(index_context, Slice):
index = index.obj
try:
method = index.get_safe_value
except AttributeError:
pass
else:
index = method(default=None)
if type(index) in (float, int, str, unicode, slice, bytes):
try:
result |= simple_getitem(index)
continue
except SimpleGetItemNotFound:
pass
unused_contexts.add(index_context)
# The index was somehow not good enough or simply a wrong type.
# Therefore we now iterate through all the contexts and just take
# all results.
if unused_contexts or not index_contexts:
result |= context.py__getitem__(
ContextSet(unused_contexts),
contextualized_node
)
debug.dbg('py__getitem__ result: %s', result)
return result
class ContextSet(BaseContextSet):
def py__class__(self):
return ContextSet(c.py__class__() for c in self._set)
def iterate(self, contextualized_node=None, is_async=False):
from jedi.evaluate.lazy_context import get_merged_lazy_context
type_iters = [c.iterate(contextualized_node, is_async=is_async) for c in self._set]
for lazy_contexts in zip_longest(*type_iters):
yield get_merged_lazy_context(
[l for l in lazy_contexts if l is not None]
)
def execute(self, arguments):
return ContextSet.from_sets(c.evaluator.execute(c, arguments) for c in self._set)
def execute_evaluated(self, *args, **kwargs):
return ContextSet.from_sets(c.execute_evaluated(*args, **kwargs) for c in self._set)
def py__getattribute__(self, *args, **kwargs):
if kwargs.get('is_goto'):
return reduce(add, [c.py__getattribute__(*args, **kwargs) for c in self._set], [])
return ContextSet.from_sets(c.py__getattribute__(*args, **kwargs) for c in self._set)
def get_item(self, *args, **kwargs):
return ContextSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set)
def try_merge(self, function_name):
context_set = self.__class__([])
for c in self._set:
try:
method = getattr(c, function_name)
except AttributeError:
pass
else:
context_set |= method()
return context_set
def gather_annotation_classes(self):
return ContextSet.from_sets([c.gather_annotation_classes() for c in self._set])
def get_signatures(self):
return [sig for c in self._set for sig in c.get_signatures()]
NO_CONTEXTS = ContextSet([])
def iterator_to_context_set(func):
def wrapper(*args, **kwargs):
return ContextSet(func(*args, **kwargs))
return wrapper
| 33.162471
| 94
| 0.637524
| 1,718
| 14,492
| 5.089639
| 0.185681
| 0.030878
| 0.021958
| 0.011436
| 0.237077
| 0.180009
| 0.141468
| 0.087374
| 0.064959
| 0.042543
| 0
| 0.001151
| 0.280638
| 14,492
| 436
| 95
| 33.238532
| 0.837602
| 0.110199
| 0
| 0.248322
| 0
| 0
| 0.045142
| 0.004025
| 0
| 0
| 0
| 0.006881
| 0.003356
| 1
| 0.204698
| false
| 0.010067
| 0.067114
| 0.110738
| 0.526846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 3
|
8a13575cd76b03c2660c0f973dca2598509c1205
| 34,179
|
py
|
Python
|
sdk/lusid/models/lusid_instrument.py
|
rizwansaeed/lusid-sdk-python-preview
|
52d092d6d4099b8526f0318f3fe1ddc0b943da6a
|
[
"MIT"
] | null | null | null |
sdk/lusid/models/lusid_instrument.py
|
rizwansaeed/lusid-sdk-python-preview
|
52d092d6d4099b8526f0318f3fe1ddc0b943da6a
|
[
"MIT"
] | null | null | null |
sdk/lusid/models/lusid_instrument.py
|
rizwansaeed/lusid-sdk-python-preview
|
52d092d6d4099b8526f0318f3fe1ddc0b943da6a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
LUSID API
# Introduction This page documents the [LUSID APIs](https://www.lusid.com/api/swagger), which allows authorised clients to query and update their data within the LUSID platform. SDKs to interact with the LUSID APIs are available in the following languages : * [C#](https://github.com/finbourne/lusid-sdk-csharp) * [Java](https://github.com/finbourne/lusid-sdk-java) * [JavaScript](https://github.com/finbourne/lusid-sdk-js) * [Python](https://github.com/finbourne/lusid-sdk-python) # Data Model The LUSID API has a relatively lightweight but extremely powerful data model. One of the goals of LUSID was not to enforce on clients a single rigid data model but rather to provide a flexible foundation onto which clients can map their own data models. The core entities in LUSID provide a minimal structure and set of relationships, and the data model can be extended using Properties. The LUSID data model is exposed through the LUSID APIs. The APIs provide access to both business objects and the meta data used to configure the systems behaviours. The key business entities are: - * **Portfolios** A portfolio is a container for transactions and holdings (a **Transaction Portfolio**) or constituents (a **Reference Portfolio**). * **Derived Portfolios**. Derived Portfolios allow Portfolios to be created based on other Portfolios, by overriding or adding specific items. * **Holdings** A Holding is a quantity of an Instrument or a balance of cash within a Portfolio. Holdings can only be adjusted via Transactions. * **Transactions** A Transaction is an economic event that occurs in a Portfolio, causing its holdings to change. * **Corporate Actions** A corporate action is a market event which occurs to an Instrument and thus applies to all portfolios which holding the instrument. Examples are stock splits or mergers. * **Constituents** A constituent is a record in a Reference Portfolio containing an Instrument and an associated weight. * **Instruments** An instrument represents a currency, tradable instrument or OTC contract that is attached to a transaction and a holding. * **Properties** All major entities allow additional user defined properties to be associated with them. For example, a Portfolio manager may be associated with a portfolio. Meta data includes: - * **Transaction Types** Transactions are booked with a specific transaction type. The types are client defined and are used to map the Transaction to a series of movements which update the portfolio holdings. * **Properties Types** Types of user defined properties used within the system. ## Scope All data in LUSID is segregated at the client level. Entities in LUSID are identifiable by a unique code. Every entity lives within a logical data partition known as a Scope. Scope is an identity namespace allowing two entities with the same unique code to co-exist within individual address spaces. For example, prices for equities from different vendors may be uploaded into different scopes such as `client/vendor1` and `client/vendor2`. A portfolio may then be valued using either of the price sources by referencing the appropriate scope. LUSID Clients cannot access scopes of other clients. ## Instruments LUSID has its own built-in instrument master which you can use to master your own instrument universe. Every instrument must be created with one or more unique market identifiers, such as [FIGI](https://openfigi.com/). For any non-listed instruments (eg OTCs), you can upload an instrument against a custom ID of your choosing. In addition, LUSID will allocate each instrument a unique 'LUSID instrument identifier'. The LUSID instrument identifier is what is used when uploading transactions, holdings, prices, etc. The API exposes an `instrument/lookup` endpoint which can be used to lookup these LUSID identifiers using their market identifiers. Cash can be referenced using the ISO currency code prefixed with \"`CCY_`\" e.g. `CCY_GBP` ## Instrument Data Instrument data can be uploaded to the system using the [Instrument Properties](#tag/InstrumentProperties) endpoint. | Field|Type|Description | | ---|---|--- | | Key|propertykey|The key of the property. This takes the format {domain}/{scope}/{code} e.g. 'Instrument/system/Name' or 'Transaction/strategy/quantsignal'. | | Value|string|The value of the property. | | EffectiveFrom|datetimeoffset|The effective datetime from which the property is valid. | | EffectiveUntil|datetimeoffset|The effective datetime until which the property is valid. If not supplied this will be valid indefinitely, potentially overwriting values with EffectiveFrom's in the future. | ## Transaction Portfolios Portfolios are the top-level entity containers within LUSID, containing transactions, corporate actions and holdings. The transactions build up the portfolio holdings on which valuations, analytics profit & loss and risk can be calculated. Properties can be associated with Portfolios to add in additional data. Portfolio properties can be changed over time, for example to allow a Portfolio Manager to be linked with a Portfolio. Additionally, portfolios can be securitised and held by other portfolios, allowing LUSID to perform \"drill-through\" into underlying fund holdings ### Derived Portfolios LUSID also allows for a portfolio to be composed of another portfolio via derived portfolios. A derived portfolio can contain its own transactions and also inherits any transactions from its parent portfolio. Any changes made to the parent portfolio are automatically reflected in derived portfolio. Derived portfolios in conjunction with scopes are a powerful construct. For example, to do pre-trade what-if analysis, a derived portfolio could be created a new namespace linked to the underlying live (parent) portfolio. Analysis can then be undertaken on the derived portfolio without affecting the live portfolio. ### Transactions A transaction represents an economic activity against a Portfolio. Transactions are processed according to a configuration. This will tell the LUSID engine how to interpret the transaction and correctly update the holdings. LUSID comes with a set of transaction types you can use out of the box, or you can configure your own set(s) of transactions. For more details see the [LUSID Getting Started Guide for transaction configuration.](https://support.lusid.com/configuring-transaction-types) | Field|Type|Description | | ---|---|--- | | TransactionId|string|The unique identifier for the transaction. | | Type|string|The type of the transaction e.g. 'Buy', 'Sell'. The transaction type should have been pre-configured via the System Configuration API endpoint. If it hasn't been pre-configured the transaction will still be updated or inserted however you will be unable to generate the resultant holdings for the portfolio that contains this transaction as LUSID does not know how to process it. | | InstrumentIdentifiers|map|A set of instrument identifiers to use to resolve the transaction to a unique instrument. | | TransactionDate|dateorcutlabel|The date of the transaction. | | SettlementDate|dateorcutlabel|The settlement date of the transaction. | | Units|decimal|The number of units transacted in the associated instrument. | | TransactionPrice|transactionprice|The price for each unit of the transacted instrument in the transaction currency. | | TotalConsideration|currencyandamount|The total value of the transaction in the settlement currency. | | ExchangeRate|decimal|The exchange rate between the transaction and settlement currency. For example if the transaction currency is in USD and the settlement currency is in GBP this this the USD/GBP rate. | | TransactionCurrency|currency|The transaction currency. | | Properties|map|Set of unique transaction properties and associated values to store with the transaction. Each property must be from the 'Transaction' domain. | | CounterpartyId|string|The identifier for the counterparty of the transaction. | | Source|string|The source of the transaction. This is used to look up the appropriate transaction group set in the transaction type configuration. | From these fields, the following values can be calculated * **Transaction value in Transaction currency**: TotalConsideration / ExchangeRate * **Transaction value in Portfolio currency**: Transaction value in Transaction currency * TradeToPortfolioRate #### Example Transactions ##### A Common Purchase Example Three example transactions are shown in the table below. They represent a purchase of USD denominated IBM shares within a Sterling denominated portfolio. * The first two transactions are for separate buy and fx trades * Buying 500 IBM shares for $71,480.00 * A spot foreign exchange conversion to fund the IBM purchase. (Buy $71,480.00 for £54,846.60) * The third transaction is an alternate version of the above trades. Buying 500 IBM shares and settling directly in Sterling. | Column | Buy Trade | Fx Trade | Buy Trade with foreign Settlement | | ----- | ----- | ----- | ----- | | TransactionId | FBN00001 | FBN00002 | FBN00003 | | Type | Buy | FxBuy | Buy | | InstrumentIdentifiers | { \"figi\", \"BBG000BLNNH6\" } | { \"CCY\", \"CCY_USD\" } | { \"figi\", \"BBG000BLNNH6\" } | | TransactionDate | 2018-08-02 | 2018-08-02 | 2018-08-02 | | SettlementDate | 2018-08-06 | 2018-08-06 | 2018-08-06 | | Units | 500 | 71480 | 500 | | TransactionPrice | 142.96 | 1 | 142.96 | | TradeCurrency | USD | USD | USD | | ExchangeRate | 1 | 0.7673 | 0.7673 | | TotalConsideration.Amount | 71480.00 | 54846.60 | 54846.60 | | TotalConsideration.Currency | USD | GBP | GBP | | Trade/default/TradeToPortfolioRate* | 0.7673 | 0.7673 | 0.7673 | [* This is a property field] ##### A Forward FX Example LUSID has a flexible transaction modelling system, meaning there are a number of different ways of modelling forward fx trades. The default LUSID transaction types are FwdFxBuy and FwdFxSell. Using these transaction types, LUSID will generate two holdings for each Forward FX trade, one for each currency in the trade. An example Forward Fx trade to sell GBP for USD in a JPY-denominated portfolio is shown below: | Column | Forward 'Sell' Trade | Notes | | ----- | ----- | ---- | | TransactionId | FBN00004 | | | Type | FwdFxSell | | | InstrumentIdentifiers | { \"Instrument/default/Currency\", \"GBP\" } | | | TransactionDate | 2018-08-02 | | | SettlementDate | 2019-02-06 | Six month forward | | Units | 10000.00 | Units of GBP | | TransactionPrice | 1 | | | TradeCurrency | GBP | Currency being sold | | ExchangeRate | 1.3142 | Agreed rate between GBP and USD | | TotalConsideration.Amount | 13142.00 | Amount in the settlement currency, USD | | TotalConsideration.Currency | USD | Settlement currency | | Trade/default/TradeToPortfolioRate | 142.88 | Rate between trade currency, GBP and portfolio base currency, JPY | Please note that exactly the same economic behaviour could be modelled using the FwdFxBuy Transaction Type with the amounts and rates reversed. ### Holdings A holding represents a position in an instrument or cash on a given date. | Field|Type|Description | | ---|---|--- | | InstrumentUid|string|The unqiue Lusid Instrument Id (LUID) of the instrument that the holding is in. | | SubHoldingKeys|map|The sub-holding properties which identify the holding. Each property will be from the 'Transaction' domain. These are configured when a transaction portfolio is created. | | Properties|map|The properties which have been requested to be decorated onto the holding. These will be from the 'Instrument' or 'Holding' domain. | | HoldingType|string|The type of the holding e.g. Position, Balance, CashCommitment, Receivable, ForwardFX etc. | | Units|decimal|The total number of units of the holding. | | SettledUnits|decimal|The total number of settled units of the holding. | | Cost|currencyandamount|The total cost of the holding in the transaction currency. | | CostPortfolioCcy|currencyandamount|The total cost of the holding in the portfolio currency. | | Transaction|transaction|The transaction associated with an unsettled holding. | ## Corporate Actions Corporate actions are represented within LUSID in terms of a set of instrument-specific 'transitions'. These transitions are used to specify the participants of the corporate action, and the effect that the corporate action will have on holdings in those participants. ### Corporate Action | Field|Type|Description | | ---|---|--- | | CorporateActionCode|code|The unique identifier of this corporate action | | Description|string| | | AnnouncementDate|datetimeoffset|The announcement date of the corporate action | | ExDate|datetimeoffset|The ex date of the corporate action | | RecordDate|datetimeoffset|The record date of the corporate action | | PaymentDate|datetimeoffset|The payment date of the corporate action | | Transitions|corporateactiontransition[]|The transitions that result from this corporate action | ### Transition | Field|Type|Description | | ---|---|--- | | InputTransition|corporateactiontransitioncomponent|Indicating the basis of the corporate action - which security and how many units | | OutputTransitions|corporateactiontransitioncomponent[]|What will be generated relative to the input transition | ### Example Corporate Action Transitions #### A Dividend Action Transition In this example, for each share of IBM, 0.20 units (or 20 pence) of GBP are generated. | Column | Input Transition | Output Transition | | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"ccy\" : \"CCY_GBP\" } | | Units Factor | 1 | 0.20 | | Cost Factor | 1 | 0 | #### A Split Action Transition In this example, for each share of IBM, we end up with 2 units (2 shares) of IBM, with total value unchanged. | Column | Input Transition | Output Transition | | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000BLNNH6\" } | | Units Factor | 1 | 2 | | Cost Factor | 1 | 1 | #### A Spinoff Action Transition In this example, for each share of IBM, we end up with 1 unit (1 share) of IBM and 3 units (3 shares) of Celestica, with 85% of the value remaining on the IBM share, and 5% in each Celestica share (15% total). | Column | Input Transition | Output Transition 1 | Output Transition 2 | | ----- | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000HBGRF3\" } | | Units Factor | 1 | 1 | 3 | | Cost Factor | 1 | 0.85 | 0.15 | ## Reference Portfolios Reference portfolios are portfolios that contain constituents with weights. They are designed to represent entities such as indices and benchmarks. ### Constituents | Field|Type|Description | | ---|---|--- | | InstrumentIdentifiers|map|Unique instrument identifiers | | InstrumentUid|string|LUSID's internal unique instrument identifier, resolved from the instrument identifiers | | Currency|decimal| | | Weight|decimal| | | FloatingWeight|decimal| | ## Portfolio Groups Portfolio groups allow the construction of a hierarchy from portfolios and groups. Portfolio operations on the group are executed on an aggregated set of portfolios in the hierarchy. For example: * Global Portfolios _(group)_ * APAC _(group)_ * Hong Kong _(portfolio)_ * Japan _(portfolio)_ * Europe _(group)_ * France _(portfolio)_ * Germany _(portfolio)_ * UK _(portfolio)_ In this example **Global Portfolios** is a group that consists of an aggregate of **Hong Kong**, **Japan**, **France**, **Germany** and **UK** portfolios. ## Properties Properties are key-value pairs that can be applied to any entity within a domain (where a domain is `trade`, `portfolio`, `security` etc). Properties must be defined before use with a `PropertyDefinition` and can then subsequently be added to entities. ## Schema A detailed description of the entities used by the API and parameters for endpoints which take a JSON document can be retrieved via the `schema` endpoint. ## Meta data The following headers are returned on all responses from LUSID | Name | Purpose | | --- | --- | | lusid-meta-duration | Duration of the request | | lusid-meta-success | Whether or not LUSID considered the request to be successful | | lusid-meta-requestId | The unique identifier for the request | | lusid-schema-url | Url of the schema for the data being returned | | lusid-property-schema-url | Url of the schema for any properties | # Error Codes | Code|Name|Description | | ---|---|--- | | <a name=\"-10\">-10</a>|Server Configuration Error| | | <a name=\"-1\">-1</a>|Unknown error|An unexpected error was encountered on our side. | | <a name=\"102\">102</a>|Version Not Found| | | <a name=\"103\">103</a>|Api Rate Limit Violation| | | <a name=\"104\">104</a>|Instrument Not Found| | | <a name=\"105\">105</a>|Property Not Found| | | <a name=\"106\">106</a>|Portfolio Recursion Depth| | | <a name=\"108\">108</a>|Group Not Found| | | <a name=\"109\">109</a>|Portfolio Not Found| | | <a name=\"110\">110</a>|Property Schema Not Found| | | <a name=\"111\">111</a>|Portfolio Ancestry Not Found| | | <a name=\"112\">112</a>|Portfolio With Id Already Exists| | | <a name=\"113\">113</a>|Orphaned Portfolio| | | <a name=\"119\">119</a>|Missing Base Claims| | | <a name=\"121\">121</a>|Property Not Defined| | | <a name=\"122\">122</a>|Cannot Delete System Property| | | <a name=\"123\">123</a>|Cannot Modify Immutable Property Field| | | <a name=\"124\">124</a>|Property Already Exists| | | <a name=\"125\">125</a>|Invalid Property Life Time| | | <a name=\"126\">126</a>|Property Constraint Style Excludes Properties| | | <a name=\"127\">127</a>|Cannot Modify Default Data Type| | | <a name=\"128\">128</a>|Group Already Exists| | | <a name=\"129\">129</a>|No Such Data Type| | | <a name=\"130\">130</a>|Undefined Value For Data Type| | | <a name=\"131\">131</a>|Unsupported Value Type Defined On Data Type| | | <a name=\"132\">132</a>|Validation Error| | | <a name=\"133\">133</a>|Loop Detected In Group Hierarchy| | | <a name=\"134\">134</a>|Undefined Acceptable Values| | | <a name=\"135\">135</a>|Sub Group Already Exists| | | <a name=\"138\">138</a>|Price Source Not Found| | | <a name=\"139\">139</a>|Analytic Store Not Found| | | <a name=\"141\">141</a>|Analytic Store Already Exists| | | <a name=\"143\">143</a>|Client Instrument Already Exists| | | <a name=\"144\">144</a>|Duplicate In Parameter Set| | | <a name=\"147\">147</a>|Results Not Found| | | <a name=\"148\">148</a>|Order Field Not In Result Set| | | <a name=\"149\">149</a>|Operation Failed| | | <a name=\"150\">150</a>|Elastic Search Error| | | <a name=\"151\">151</a>|Invalid Parameter Value| | | <a name=\"153\">153</a>|Command Processing Failure| | | <a name=\"154\">154</a>|Entity State Construction Failure| | | <a name=\"155\">155</a>|Entity Timeline Does Not Exist| | | <a name=\"156\">156</a>|Concurrency Conflict Failure| | | <a name=\"157\">157</a>|Invalid Request| | | <a name=\"158\">158</a>|Event Publish Unknown| | | <a name=\"159\">159</a>|Event Query Failure| | | <a name=\"160\">160</a>|Blob Did Not Exist| | | <a name=\"162\">162</a>|Sub System Request Failure| | | <a name=\"163\">163</a>|Sub System Configuration Failure| | | <a name=\"165\">165</a>|Failed To Delete| | | <a name=\"166\">166</a>|Upsert Client Instrument Failure| | | <a name=\"167\">167</a>|Illegal As At Interval| | | <a name=\"168\">168</a>|Illegal Bitemporal Query| | | <a name=\"169\">169</a>|Invalid Alternate Id| | | <a name=\"170\">170</a>|Cannot Add Source Portfolio Property Explicitly| | | <a name=\"171\">171</a>|Entity Already Exists In Group| | | <a name=\"173\">173</a>|Entity With Id Already Exists| | | <a name=\"174\">174</a>|Derived Portfolio Details Do Not Exist| | | <a name=\"176\">176</a>|Portfolio With Name Already Exists| | | <a name=\"177\">177</a>|Invalid Transactions| | | <a name=\"178\">178</a>|Reference Portfolio Not Found| | | <a name=\"179\">179</a>|Duplicate Id| | | <a name=\"180\">180</a>|Command Retrieval Failure| | | <a name=\"181\">181</a>|Data Filter Application Failure| | | <a name=\"182\">182</a>|Search Failed| | | <a name=\"183\">183</a>|Movements Engine Configuration Key Failure| | | <a name=\"184\">184</a>|Fx Rate Source Not Found| | | <a name=\"185\">185</a>|Accrual Source Not Found| | | <a name=\"186\">186</a>|Access Denied| | | <a name=\"187\">187</a>|Invalid Identity Token| | | <a name=\"188\">188</a>|Invalid Request Headers| | | <a name=\"189\">189</a>|Price Not Found| | | <a name=\"190\">190</a>|Invalid Sub Holding Keys Provided| | | <a name=\"191\">191</a>|Duplicate Sub Holding Keys Provided| | | <a name=\"192\">192</a>|Cut Definition Not Found| | | <a name=\"193\">193</a>|Cut Definition Invalid| | | <a name=\"194\">194</a>|Time Variant Property Deletion Date Unspecified| | | <a name=\"195\">195</a>|Perpetual Property Deletion Date Specified| | | <a name=\"196\">196</a>|Time Variant Property Upsert Date Unspecified| | | <a name=\"197\">197</a>|Perpetual Property Upsert Date Specified| | | <a name=\"200\">200</a>|Invalid Unit For Data Type| | | <a name=\"201\">201</a>|Invalid Type For Data Type| | | <a name=\"202\">202</a>|Invalid Value For Data Type| | | <a name=\"203\">203</a>|Unit Not Defined For Data Type| | | <a name=\"204\">204</a>|Units Not Supported On Data Type| | | <a name=\"205\">205</a>|Cannot Specify Units On Data Type| | | <a name=\"206\">206</a>|Unit Schema Inconsistent With Data Type| | | <a name=\"207\">207</a>|Unit Definition Not Specified| | | <a name=\"208\">208</a>|Duplicate Unit Definitions Specified| | | <a name=\"209\">209</a>|Invalid Units Definition| | | <a name=\"210\">210</a>|Invalid Instrument Identifier Unit| | | <a name=\"211\">211</a>|Holdings Adjustment Does Not Exist| | | <a name=\"212\">212</a>|Could Not Build Excel Url| | | <a name=\"213\">213</a>|Could Not Get Excel Version| | | <a name=\"214\">214</a>|Instrument By Code Not Found| | | <a name=\"215\">215</a>|Entity Schema Does Not Exist| | | <a name=\"216\">216</a>|Feature Not Supported On Portfolio Type| | | <a name=\"217\">217</a>|Quote Not Found| | | <a name=\"218\">218</a>|Invalid Quote Identifier| | | <a name=\"219\">219</a>|Invalid Metric For Data Type| | | <a name=\"220\">220</a>|Invalid Instrument Definition| | | <a name=\"221\">221</a>|Instrument Upsert Failure| | | <a name=\"222\">222</a>|Reference Portfolio Request Not Supported| | | <a name=\"223\">223</a>|Transaction Portfolio Request Not Supported| | | <a name=\"224\">224</a>|Invalid Property Value Assignment| | | <a name=\"230\">230</a>|Transaction Type Not Found| | | <a name=\"231\">231</a>|Transaction Type Duplication| | | <a name=\"232\">232</a>|Portfolio Does Not Exist At Given Date| | | <a name=\"233\">233</a>|Query Parser Failure| | | <a name=\"234\">234</a>|Duplicate Constituent| | | <a name=\"235\">235</a>|Unresolved Instrument Constituent| | | <a name=\"236\">236</a>|Unresolved Instrument In Transition| | | <a name=\"237\">237</a>|Missing Side Definitions| | | <a name=\"299\">299</a>|Invalid Recipe| | | <a name=\"300\">300</a>|Missing Recipe| | | <a name=\"301\">301</a>|Dependencies| | | <a name=\"304\">304</a>|Portfolio Preprocess Failure| | | <a name=\"310\">310</a>|Valuation Engine Failure| | | <a name=\"311\">311</a>|Task Factory Failure| | | <a name=\"312\">312</a>|Task Evaluation Failure| | | <a name=\"313\">313</a>|Task Generation Failure| | | <a name=\"314\">314</a>|Engine Configuration Failure| | | <a name=\"315\">315</a>|Model Specification Failure| | | <a name=\"320\">320</a>|Market Data Key Failure| | | <a name=\"321\">321</a>|Market Resolver Failure| | | <a name=\"322\">322</a>|Market Data Failure| | | <a name=\"330\">330</a>|Curve Failure| | | <a name=\"331\">331</a>|Volatility Surface Failure| | | <a name=\"332\">332</a>|Volatility Cube Failure| | | <a name=\"350\">350</a>|Instrument Failure| | | <a name=\"351\">351</a>|Cash Flows Failure| | | <a name=\"352\">352</a>|Reference Data Failure| | | <a name=\"360\">360</a>|Aggregation Failure| | | <a name=\"361\">361</a>|Aggregation Measure Failure| | | <a name=\"370\">370</a>|Result Retrieval Failure| | | <a name=\"371\">371</a>|Result Processing Failure| | | <a name=\"372\">372</a>|Vendor Result Processing Failure| | | <a name=\"373\">373</a>|Vendor Result Mapping Failure| | | <a name=\"374\">374</a>|Vendor Library Unauthorised| | | <a name=\"375\">375</a>|Vendor Connectivity Error| | | <a name=\"376\">376</a>|Vendor Interface Error| | | <a name=\"377\">377</a>|Vendor Pricing Failure| | | <a name=\"378\">378</a>|Vendor Translation Failure| | | <a name=\"379\">379</a>|Vendor Key Mapping Failure| | | <a name=\"380\">380</a>|Vendor Reflection Failure| | | <a name=\"390\">390</a>|Attempt To Upsert Duplicate Quotes| | | <a name=\"391\">391</a>|Corporate Action Source Does Not Exist| | | <a name=\"392\">392</a>|Corporate Action Source Already Exists| | | <a name=\"393\">393</a>|Instrument Identifier Already In Use| | | <a name=\"394\">394</a>|Properties Not Found| | | <a name=\"395\">395</a>|Batch Operation Aborted| | | <a name=\"400\">400</a>|Invalid Iso4217 Currency Code| | | <a name=\"401\">401</a>|Cannot Assign Instrument Identifier To Currency| | | <a name=\"402\">402</a>|Cannot Assign Currency Identifier To Non Currency| | | <a name=\"403\">403</a>|Currency Instrument Cannot Be Deleted| | | <a name=\"404\">404</a>|Currency Instrument Cannot Have Economic Definition| | | <a name=\"405\">405</a>|Currency Instrument Cannot Have Lookthrough Portfolio| | | <a name=\"406\">406</a>|Cannot Create Currency Instrument With Multiple Identifiers| | | <a name=\"407\">407</a>|Specified Currency Is Undefined| | | <a name=\"410\">410</a>|Index Does Not Exist| | | <a name=\"411\">411</a>|Sort Field Does Not Exist| | | <a name=\"413\">413</a>|Negative Pagination Parameters| | | <a name=\"414\">414</a>|Invalid Search Syntax| | | <a name=\"415\">415</a>|Filter Execution Timeout| | | <a name=\"420\">420</a>|Side Definition Inconsistent| | | <a name=\"450\">450</a>|Invalid Quote Access Metadata Rule| | | <a name=\"451\">451</a>|Access Metadata Not Found| | | <a name=\"452\">452</a>|Invalid Access Metadata Identifier| | | <a name=\"460\">460</a>|Standard Resource Not Found| | | <a name=\"461\">461</a>|Standard Resource Conflict| | | <a name=\"462\">462</a>|Calendar Not Found| | | <a name=\"463\">463</a>|Date In A Calendar Not Found| | | <a name=\"464\">464</a>|Invalid Date Source Data| | | <a name=\"465\">465</a>|Invalid Timezone| | | <a name=\"601\">601</a>|Person Identifier Already In Use| | | <a name=\"602\">602</a>|Person Not Found| | | <a name=\"603\">603</a>|Cannot Set Identifier| | | <a name=\"617\">617</a>|Invalid Recipe Specification In Request| | | <a name=\"618\">618</a>|Inline Recipe Deserialisation Failure| | | <a name=\"619\">619</a>|Identifier Types Not Set For Entity| | | <a name=\"620\">620</a>|Cannot Delete All Client Defined Identifiers| | | <a name=\"650\">650</a>|The Order requested was not found.| | | <a name=\"654\">654</a>|The Allocation requested was not found.| | | <a name=\"655\">655</a>|Cannot build the fx forward target with the given holdings.| | | <a name=\"656\">656</a>|Group does not contain expected entities.| | | <a name=\"667\">667</a>|Relation definition already exists| | | <a name=\"673\">673</a>|Missing entitlements for entities in Group| | | <a name=\"674\">674</a>|Next Best Action not found| | | <a name=\"676\">676</a>|Relation definition not defined| | | <a name=\"677\">677</a>|Invalid entity identifier for relation| | | <a name=\"681\">681</a>|Sorting by specified field not supported|One or more of the provided fields to order by were either invalid or not supported. | | <a name=\"682\">682</a>|Too many fields to sort by|The number of fields to sort the data by exceeds the number allowed by the endpoint | | <a name=\"684\">684</a>|Sequence Not Found| | | <a name=\"685\">685</a>|Sequence Already Exists| | | <a name=\"686\">686</a>|Non-cycling sequence has been exhausted| | | <a name=\"687\">687</a>|Legal Entity Identifier Already In Use| | | <a name=\"688\">688</a>|Legal Entity Not Found| | | <a name=\"689\">689</a>|The supplied pagination token is invalid| | | <a name=\"690\">690</a>|Property Type Is Not Supported| | | <a name=\"691\">691</a>|Multiple Tax-lots For Currency Type Is Not Supported| | # noqa: E501
The version of the OpenAPI document: 0.11.2275
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class LusidInstrument(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'instrument_type': 'str'
}
attribute_map = {
'instrument_type': 'instrumentType'
}
required_map = {
'instrument_type': 'required'
}
discriminator_value_class_map = {
'EquityOption': 'EquityOption',
'InstrumentLeg': 'InstrumentLeg',
'InterestRateSwaption': 'InterestRateSwaption',
'FxForward': 'FxForward',
'InterestRateSwap': 'InterestRateSwap',
'ExoticInstrument': 'ExoticInstrument',
'FxOption': 'FxOption',
'Bond': 'Bond',
'TermDeposit': 'TermDeposit',
'CreditDefaultSwap': 'CreditDefaultSwap',
'Future': 'Future'
}
def __init__(self, instrument_type=None): # noqa: E501
"""
LusidInstrument - a model defined in OpenAPI
:param instrument_type: The available values are: QuotedSecurity, InterestRateSwap, FxForward, Future, ExoticInstrument, FxOption, CreditDefaultSwap, InterestRateSwaption, Bond, EquityOption, FixedLeg, FloatingLeg, BespokeCashflowLeg, Unknown, TermDeposit (required)
:type instrument_type: str
""" # noqa: E501
self._instrument_type = None
self.discriminator = 'instrument_type'
self.instrument_type = instrument_type
@property
def instrument_type(self):
"""Gets the instrument_type of this LusidInstrument. # noqa: E501
The available values are: QuotedSecurity, InterestRateSwap, FxForward, Future, ExoticInstrument, FxOption, CreditDefaultSwap, InterestRateSwaption, Bond, EquityOption, FixedLeg, FloatingLeg, BespokeCashflowLeg, Unknown, TermDeposit # noqa: E501
:return: The instrument_type of this LusidInstrument. # noqa: E501
:rtype: str
"""
return self._instrument_type
@instrument_type.setter
def instrument_type(self, instrument_type):
"""Sets the instrument_type of this LusidInstrument.
The available values are: QuotedSecurity, InterestRateSwap, FxForward, Future, ExoticInstrument, FxOption, CreditDefaultSwap, InterestRateSwaption, Bond, EquityOption, FixedLeg, FloatingLeg, BespokeCashflowLeg, Unknown, TermDeposit # noqa: E501
:param instrument_type: The instrument_type of this LusidInstrument. # noqa: E501
:type: str
"""
if instrument_type is None:
raise ValueError("Invalid value for `instrument_type`, must not be `None`") # noqa: E501
allowed_values = ["QuotedSecurity", "InterestRateSwap", "FxForward", "Future", "ExoticInstrument", "FxOption", "CreditDefaultSwap", "InterestRateSwaption", "Bond", "EquityOption", "FixedLeg", "FloatingLeg", "BespokeCashflowLeg", "Unknown", "TermDeposit"] # noqa: E501
if instrument_type not in allowed_values:
raise ValueError(
"Invalid value for `instrument_type` ({0}), must be one of {1}" # noqa: E501
.format(instrument_type, allowed_values)
)
self._instrument_type = instrument_type
def get_real_child_model(self, data):
"""Returns the real base class specified by the discriminator"""
discriminator_key = self.attribute_map[self.discriminator]
discriminator_value = data[discriminator_key]
return self.discriminator_value_class_map.get(discriminator_value)
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, LusidInstrument):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 221.941558
| 28,647
| 0.692179
| 4,703
| 34,179
| 5.007229
| 0.218371
| 0.042465
| 0.019873
| 0.016009
| 0.145569
| 0.0989
| 0.070704
| 0.064759
| 0.055841
| 0.04565
| 0
| 0.053849
| 0.170339
| 34,179
| 153
| 28,648
| 223.392157
| 0.776598
| 0.875128
| 0
| 0.025641
| 0
| 0
| 0.205901
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0
| 0.038462
| 0
| 0.320513
| 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
8a1c71c22813d34b18261a3c040c83b4a288d938
| 1,272
|
py
|
Python
|
caravan_search_engine/test/test_task.py
|
crest-cassia/caravan
|
0a8e606e31d2d36a9379bdc00fafe55cf9144da6
|
[
"MIT"
] | 4
|
2017-12-27T06:04:46.000Z
|
2018-04-27T04:07:49.000Z
|
caravan_search_engine/test/test_task.py
|
crest-cassia/caravan
|
0a8e606e31d2d36a9379bdc00fafe55cf9144da6
|
[
"MIT"
] | null | null | null |
caravan_search_engine/test/test_task.py
|
crest-cassia/caravan
|
0a8e606e31d2d36a9379bdc00fafe55cf9144da6
|
[
"MIT"
] | null | null | null |
import unittest
from caravan.task import Task
from caravan.tables import Tables
class TestRun(unittest.TestCase):
def setUp(self):
self.t = Tables.get()
self.t.clear()
def test_task(self):
t = Task(1234, "echo hello world")
self.assertEqual(t.id(), 1234)
self.assertEqual(t.is_finished(), False)
self.assertEqual(t.command(), "echo hello world")
t._store_result([1.0, 2.0, 3.0], 0, 3, 111, 222)
self.assertTrue(t.is_finished())
self.assertEqual(t.rc(), 0)
self.assertEqual(t.rank(), 3)
self.assertEqual(t.start_at(), 111)
self.assertEqual(t.finish_at(), 222)
def test_create(self):
for i in range(10):
t = Task.create("echo %d" % i)
self.assertEqual(t.id(), i)
self.assertEqual(t.is_finished(), False)
self.assertEqual(len(Task.all()), 10)
def test_all(self):
tasks = [Task.create("echo %d" % i) for i in range(10)]
self.assertEqual(Task.all(), tasks)
def test_find(self):
tasks = [Task.create("echo %d" % i) for i in range(10)]
self.assertEqual(Task.find(5).id(), 5)
self.assertEqual(Task.find(5), tasks[5])
if __name__ == '__main__':
unittest.main()
| 30.285714
| 63
| 0.589623
| 180
| 1,272
| 4.061111
| 0.305556
| 0.266758
| 0.19699
| 0.045144
| 0.361149
| 0.281806
| 0.281806
| 0.281806
| 0.155951
| 0.155951
| 0
| 0.044257
| 0.253931
| 1,272
| 41
| 64
| 31.02439
| 0.726027
| 0
| 0
| 0.121212
| 0
| 0
| 0.047956
| 0
| 0
| 0
| 0
| 0
| 0.424242
| 1
| 0.151515
| false
| 0
| 0.090909
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
8a2561d549e0edb64456facf130fd386d46356d5
| 96,782
|
py
|
Python
|
.infra/setup/playbooks/roles/ansible.kubernetes-modules/library/openshift_v1_build_config_list.py
|
cvicens/lab-knative
|
ef98aa111e566c6d33fd72c61f9c0d93a2c05b2f
|
[
"Apache-2.0"
] | null | null | null |
.infra/setup/playbooks/roles/ansible.kubernetes-modules/library/openshift_v1_build_config_list.py
|
cvicens/lab-knative
|
ef98aa111e566c6d33fd72c61f9c0d93a2c05b2f
|
[
"Apache-2.0"
] | null | null | null |
.infra/setup/playbooks/roles/ansible.kubernetes-modules/library/openshift_v1_build_config_list.py
|
cvicens/lab-knative
|
ef98aa111e566c6d33fd72c61f9c0d93a2c05b2f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from ansible.module_utils.openshift_common import OpenShiftAnsibleModule, OpenShiftAnsibleException
DOCUMENTATION = '''
module: openshift_v1_build_config_list
short_description: OpenShift BuildConfigList
description:
- Retrieve a list of build_configs. List operations provide a snapshot read of the
underlying objects, returning a resource_version representing a consistent version
of the listed objects.
version_added: 2.3.0
author: OpenShift (@openshift)
options:
api_key:
description:
- Token used to connect to the API.
cert_file:
description:
- Path to a certificate used to authenticate with the API.
type: path
context:
description:
- The name of a context found in the Kubernetes config file.
debug:
description:
- Enable debug output from the OpenShift helper. Logging info is written to KubeObjHelper.log
default: false
type: bool
force:
description:
- If set to C(True), and I(state) is C(present), an existing object will updated,
and lists will be replaced, rather than merged.
default: false
type: bool
host:
description:
- Provide a URL for acessing the Kubernetes API.
key_file:
description:
- Path to a key file used to authenticate with the API.
type: path
kubeconfig:
description:
- Path to an existing Kubernetes config file. If not provided, and no other connection
options are provided, the openshift client will attempt to load the default
configuration file from I(~/.kube/config.json).
type: path
password:
description:
- Provide a password for connecting to the API. Use in conjunction with I(username).
resource_definition:
description:
- Provide the YAML definition for the object, bypassing any modules parameters
intended to define object attributes.
type: dict
src:
description:
- Provide a path to a file containing the YAML definition of the object. Mutually
exclusive with I(resource_definition).
type: path
ssl_ca_cert:
description:
- Path to a CA certificate used to authenticate with the API.
type: path
state:
description:
- Determines if an object should be created, patched, or deleted. When set to
C(present), the object will be created, if it does not exist, or patched, if
parameter values differ from the existing object's attributes, and deleted,
if set to C(absent). A patch operation results in merging lists and updating
dictionaries, with lists being merged into a unique set of values. If a list
contains a dictionary with a I(name) or I(type) attribute, a strategic merge
is performed, where individual elements with a matching I(name_) or I(type)
are merged. To force the replacement of lists, set the I(force) option to C(True).
default: present
choices:
- present
- absent
username:
description:
- Provide a username for connecting to the API.
verify_ssl:
description:
- Whether or not to verify the API server's SSL certificates.
type: bool
requirements:
- openshift == 0.3.3
'''
EXAMPLES = '''
'''
RETURN = '''
api_version:
type: string
description: Requested API version
build_config_list:
type: complex
returned: when I(state) = C(present)
contains:
api_version:
description:
- APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
type: str
items:
description:
- items is a list of build configs
type: list
contains:
api_version:
description:
- APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value,
and may reject unrecognized values.
type: str
kind:
description:
- Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated. In CamelCase.
type: str
metadata:
description:
- metadata for BuildConfig.
type: complex
contains:
annotations:
description:
- Annotations is an unstructured key value map stored with a resource
that may be set by external tools to store and retrieve arbitrary
metadata. They are not queryable and should be preserved when modifying
objects.
type: complex
contains: str, str
cluster_name:
description:
- The name of the cluster which the object belongs to. This is used
to distinguish resources with same name and namespace in different
clusters. This field is not set anywhere right now and apiserver is
going to ignore it if set in create or update request.
type: str
creation_timestamp:
description:
- CreationTimestamp is a timestamp representing the server time when
this object was created. It is not guaranteed to be set in happens-before
order across separate operations. Clients may not set this value.
It is represented in RFC3339 form and is in UTC. Populated by the
system. Read-only. Null for lists.
type: complex
contains: {}
deletion_grace_period_seconds:
description:
- Number of seconds allowed for this object to gracefully terminate
before it will be removed from the system. Only set when deletionTimestamp
is also set. May only be shortened. Read-only.
type: int
deletion_timestamp:
description:
- DeletionTimestamp is RFC 3339 date and time at which this resource
will be deleted. This field is set by the server when a graceful deletion
is requested by the user, and is not directly settable by a client.
The resource is expected to be deleted (no longer visible from resource
lists, and not reachable by name) after the time in this field. Once
set, this value may not be unset or be set further into the future,
although it may be shortened or the resource may be deleted prior
to this time. For example, a user may request that a pod is deleted
in 30 seconds. The Kubelet will react by sending a graceful termination
signal to the containers in the pod. After that 30 seconds, the Kubelet
will send a hard termination signal (SIGKILL) to the container and
after cleanup, remove the pod from the API. In the presence of network
partitions, this object may still exist after this timestamp, until
an administrator or automated process can determine the resource is
fully terminated. If not set, graceful deletion of the object has
not been requested. Populated by the system when a graceful deletion
is requested. Read-only.
type: complex
contains: {}
finalizers:
description:
- Must be empty before the object is deleted from the registry. Each
entry is an identifier for the responsible component that will remove
the entry from the list. If the deletionTimestamp of the object is
non-nil, entries in this list can only be removed.
type: list
contains: str
generate_name:
description:
- GenerateName is an optional prefix, used by the server, to generate
a unique name ONLY IF the Name field has not been provided. If this
field is used, the name returned to the client will be different than
the name passed. This value will also be combined with a unique suffix.
The provided value has the same validation rules as the Name field,
and may be truncated by the length of the suffix required to make
the value unique on the server. If this field is specified and the
generated name exists, the server will NOT return a 409 - instead,
it will either return 201 Created or 500 with Reason ServerTimeout
indicating a unique name could not be found in the time allotted,
and the client should retry (optionally after the time indicated in
the Retry-After header). Applied only if Name is not specified.
type: str
generation:
description:
- A sequence number representing a specific generation of the desired
state. Populated by the system. Read-only.
type: int
initializers:
description:
- An initializer is a controller which enforces some system invariant
at object creation time. This field is a list of initializers that
have not yet acted on this object. If nil or empty, this object has
been completely initialized. Otherwise, the object is considered uninitialized
and is hidden (in list/watch and get calls) from clients that haven't
explicitly asked to observe uninitialized objects. When an object
is created, the system will populate this list with the current set
of initializers. Only privileged users may set or modify this list.
Once it is empty, it may not be modified further by any user.
type: complex
contains:
pending:
description:
- Pending is a list of initializers that must execute in order before
this object is visible. When the last pending initializer is removed,
and no failing result is set, the initializers struct will be
set to nil and the object is considered as initialized and visible
to all clients.
type: list
contains:
name:
description:
- name of the process that is responsible for initializing this
object.
type: str
result:
description:
- If result is set with the Failure field, the object will be persisted
to storage and then deleted, ensuring that other clients can observe
the deletion.
type: complex
contains:
api_version:
description:
- APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to
the latest internal value, and may reject unrecognized values.
type: str
code:
description:
- Suggested HTTP return code for this status, 0 if not set.
type: int
details:
description:
- Extended data associated with the reason. Each reason may
define its own extended details. This field is optional and
the data returned is not guaranteed to conform to any schema
except that defined by the reason type.
type: complex
contains:
causes:
description:
- The Causes array includes more details associated with
the StatusReason failure. Not all StatusReasons may provide
detailed causes.
type: list
contains:
field:
description:
- 'The field of the resource that has caused this error,
as named by its JSON serialization. May include dot
and postfix notation for nested attributes. Arrays
are zero-indexed. Fields may appear more than once
in an array of causes due to fields having multiple
errors. Optional. Examples: "name" - the field "name"
on the current resource "items[0].name" - the field
"name" on the first array entry in "items"'
type: str
message:
description:
- A human-readable description of the cause of the error.
This field may be presented as-is to a reader.
type: str
reason:
description:
- A machine-readable description of the cause of the
error. If this value is empty there is no information
available.
type: str
group:
description:
- The group attribute of the resource associated with the
status StatusReason.
type: str
kind:
description:
- The kind attribute of the resource associated with the
status StatusReason. On some operations may differ from
the requested resource Kind.
type: str
name:
description:
- The name attribute of the resource associated with the
status StatusReason (when there is a single name which
can be described).
type: str
retry_after_seconds:
description:
- If specified, the time in seconds before the operation
should be retried.
type: int
uid:
description:
- UID of the resource. (when there is a single resource
which can be described).
type: str
kind:
description:
- Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint
the client submits requests to. Cannot be updated. In CamelCase.
type: str
message:
description:
- A human-readable description of the status of this operation.
type: str
metadata:
description:
- Standard list metadata.
type: complex
contains:
resource_version:
description:
- String that identifies the server's internal version of
this object that can be used by clients to determine when
objects have changed. Value must be treated as opaque
by clients and passed unmodified back to the server. Populated
by the system. Read-only.
type: str
self_link:
description:
- SelfLink is a URL representing this object. Populated
by the system. Read-only.
type: str
reason:
description:
- A machine-readable description of why this operation is in
the "Failure" status. If this value is empty there is no information
available. A Reason clarifies an HTTP status code but does
not override it.
type: str
status:
description:
- 'Status of the operation. One of: "Success" or "Failure".'
type: str
labels:
description:
- Map of string keys and values that can be used to organize and categorize
(scope and select) objects. May match selectors of replication controllers
and services.
type: complex
contains: str, str
name:
description:
- Name must be unique within a namespace. Is required when creating
resources, although some resources may allow a client to request the
generation of an appropriate name automatically. Name is primarily
intended for creation idempotence and configuration definition. Cannot
be updated.
type: str
namespace:
description:
- Namespace defines the space within each name must be unique. An empty
namespace is equivalent to the "default" namespace, but "default"
is the canonical representation. Not all objects are required to be
scoped to a namespace - the value of this field for those objects
will be empty. Must be a DNS_LABEL. Cannot be updated.
type: str
owner_references:
description:
- List of objects depended by this object. If ALL objects in the list
have been deleted, this object will be garbage collected. If this
object is managed by a controller, then an entry in this list will
point to this controller, with the controller field set to true. There
cannot be more than one managing controller.
type: list
contains:
api_version:
description:
- API version of the referent.
type: str
block_owner_deletion:
description:
- If true, AND if the owner has the "foregroundDeletion" finalizer,
then the owner cannot be deleted from the key-value store until
this reference is removed. Defaults to false. To set this field,
a user needs "delete" permission of the owner, otherwise 422 (Unprocessable
Entity) will be returned.
type: bool
controller:
description:
- If true, this reference points to the managing controller.
type: bool
kind:
description:
- Kind of the referent.
type: str
name:
description:
- Name of the referent.
type: str
uid:
description:
- UID of the referent.
type: str
resource_version:
description:
- An opaque value that represents the internal version of this object
that can be used by clients to determine when objects have changed.
May be used for optimistic concurrency, change detection, and the
watch operation on a resource or set of resources. Clients must treat
these values as opaque and passed unmodified back to the server. They
may only be valid for a particular resource or set of resources. Populated
by the system. Read-only. Value must be treated as opaque by clients
and .
type: str
self_link:
description:
- SelfLink is a URL representing this object. Populated by the system.
Read-only.
type: str
uid:
description:
- UID is the unique in time and space value for this object. It is typically
generated by the server on successful creation of a resource and is
not allowed to change on PUT operations. Populated by the system.
Read-only.
type: str
spec:
description:
- spec holds all the input necessary to produce a new build, and the conditions
when to trigger them.
type: complex
contains:
completion_deadline_seconds:
description:
- completionDeadlineSeconds is an optional duration in seconds, counted
from the time when a build pod gets scheduled in the system, that
the build may be active on a node before the system actively tries
to terminate the build; value must be positive integer
type: int
failed_builds_history_limit:
description:
- failedBuildsHistoryLimit is the number of old failed builds to retain.
If not specified, all failed builds are retained.
type: int
node_selector:
description:
- nodeSelector is a selector which must be true for the build pod to
fit on a node If nil, it can be overridden by default build nodeselector
values for the cluster. If set to an empty map or a map with any values,
default build nodeselector values are ignored.
type: complex
contains: str, str
output:
description:
- output describes the Docker image the Strategy should produce.
type: complex
contains:
image_labels:
description:
- imageLabels define a list of labels that are applied to the resulting
image. If there are multiple labels with the same name then the
last one in the list is used.
type: list
contains:
name:
description:
- name defines the name of the label. It must have non-zero
length.
type: str
value:
description:
- value defines the literal value of the label.
type: str
push_secret:
description:
- PushSecret is the name of a Secret that would be used for setting
up the authentication for executing the Docker push to authentication
enabled Docker Registry (or Docker Hub).
type: complex
contains:
name:
description:
- Name of the referent.
type: str
to:
description:
- to defines an optional location to push the output of this build
to. Kind must be one of 'ImageStreamTag' or 'DockerImage'. This
value will be used to look up a Docker image repository to push
to. In the case of an ImageStreamTag, the ImageStreamTag will
be looked for in the namespace of the build unless Namespace is
specified.
type: complex
contains:
api_version:
description:
- API version of the referent.
type: str
field_path:
description:
- 'If referring to a piece of an object instead of an entire
object, this string should contain a valid JSON/Go field access
statement, such as desiredState.manifest.containers[2]. For
example, if the object reference is to a container within
a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax is chosen
only to have some well-defined way of referencing a part of
an object.'
type: str
kind:
description:
- Kind of the referent.
type: str
name:
description:
- Name of the referent.
type: str
namespace:
description:
- Namespace of the referent.
type: str
resource_version:
description:
- Specific resourceVersion to which this reference is made,
if any.
type: str
uid:
description:
- UID of the referent.
type: str
post_commit:
description:
- postCommit is a build hook executed after the build output image is
committed, before it is pushed to a registry.
type: complex
contains:
args:
description:
- args is a list of arguments that are provided to either Command,
Script or the Docker image's default entrypoint. The arguments
are placed immediately after the command to be run.
type: list
contains: str
command:
description:
- command is the command to run. It may not be specified with Script.
This might be needed if the image doesn't have `/bin/sh`, or if
you do not want to use a shell. In all other cases, using Script
might be more convenient.
type: list
contains: str
script:
description:
- script is a shell script to be run with `/bin/sh -ic`. It may
not be specified with Command. Use Script when a shell script
is appropriate to execute the post build hook, for example for
running unit tests with `rake test`. If you need control over
the image entrypoint, or if the image does not have `/bin/sh`,
use Command and/or Args. The `-i` flag is needed to support CentOS
and RHEL images that use Software Collections (SCL), in order
to have the appropriate collections enabled in the shell. E.g.,
in the Ruby image, this is necessary to make `ruby`, `bundle`
and other binaries available in the PATH.
type: str
resources:
description:
- resources computes resource requirements to execute the build.
type: complex
contains:
limits:
description:
- Limits describes the maximum amount of compute resources allowed.
type: complex
contains: str, str
requests:
description:
- Requests describes the minimum amount of compute resources required.
If Requests is omitted for a container, it defaults to Limits
if that is explicitly specified, otherwise to an implementation-defined
value.
type: complex
contains: str, str
revision:
description:
- revision is the information from the source for a specific repo snapshot.
This is optional.
type: complex
contains:
git:
description:
- Git contains information about git-based build source
type: complex
contains:
author:
description:
- author is the author of a specific commit
type: complex
contains:
email:
description:
- email of the source control user
type: str
name:
description:
- name of the source control user
type: str
commit:
description:
- commit is the commit hash identifying a specific commit
type: str
committer:
description:
- committer is the committer of a specific commit
type: complex
contains:
email:
description:
- email of the source control user
type: str
name:
description:
- name of the source control user
type: str
message:
description:
- message is the description of a specific commit
type: str
type:
description:
- type of the build source, may be one of 'Source', 'Dockerfile',
'Binary', or 'Images'
type: str
run_policy:
description:
- RunPolicy describes how the new build created from this build configuration
will be scheduled for execution. This is optional, if not specified
we default to "Serial".
type: str
service_account:
description:
- serviceAccount is the name of the ServiceAccount to use to run the
pod created by this build. The pod will be allowed to use secrets
referenced by the ServiceAccount
type: str
source:
description:
- source describes the SCM in use.
type: complex
contains:
binary:
description:
- binary builds accept a binary as their input. The binary is generally
assumed to be a tar, gzipped tar, or zip file depending on the
strategy. For Docker builds, this is the build context and an
optional Dockerfile may be specified to override any Dockerfile
in the build context. For Source builds, this is assumed to be
an archive as described above. For Source and Docker builds, if
binary.asFile is set the build will receive a directory with a
single file. contextDir may be used when an archive is provided.
Custom builds will receive this binary as input on STDIN.
type: complex
contains:
as_file:
description:
- asFile indicates that the provided binary input should be
considered a single file within the build input. For example,
specifying "webapp.war" would place the provided binary as
`/webapp.war` for the builder. If left empty, the Docker and
Source build strategies assume this file is a zip, tar, or
tar.gz file and extract it as the source. The custom strategy
receives this binary as standard input. This filename may
not contain slashes or be '..' or '.'.
type: str
context_dir:
description:
- contextDir specifies the sub-directory where the source code for
the application exists. This allows to have buildable sources
in directory other than root of repository.
type: str
dockerfile:
description:
- dockerfile is the raw contents of a Dockerfile which should be
built. When this option is specified, the FROM may be modified
based on your strategy base image and additional ENV stanzas from
your strategy environment will be added after the FROM, but before
the rest of your Dockerfile stanzas. The Dockerfile source type
may be used with other options like git - in those cases the Git
repo will have any innate Dockerfile replaced in the context dir.
type: str
git:
description:
- git contains optional information about git build source
type: complex
contains:
http_proxy:
description:
- httpProxy is a proxy used to reach the git repository over
http
type: str
https_proxy:
description:
- httpsProxy is a proxy used to reach the git repository over
https
type: str
no_proxy:
description:
- noProxy is the list of domains for which the proxy should
not be used
type: str
ref:
description:
- ref is the branch/tag/ref to build.
type: str
uri:
description:
- uri points to the source that will be built. The structure
of the source will depend on the type of build to run
type: str
images:
description:
- images describes a set of images to be used to provide source
for the build
type: list
contains:
_from:
description:
- from is a reference to an ImageStreamTag, ImageStreamImage,
or DockerImage to copy source from.
type: complex
contains:
api_version:
description:
- API version of the referent.
type: str
field_path:
description:
- 'If referring to a piece of an object instead of an entire
object, this string should contain a valid JSON/Go field
access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container
within a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that
triggered the event) or if no container name is specified
"spec.containers[2]" (container with index 2 in this pod).
This syntax is chosen only to have some well-defined way
of referencing a part of an object.'
type: str
kind:
description:
- Kind of the referent.
type: str
name:
description:
- Name of the referent.
type: str
namespace:
description:
- Namespace of the referent.
type: str
resource_version:
description:
- Specific resourceVersion to which this reference is made,
if any.
type: str
uid:
description:
- UID of the referent.
type: str
paths:
description:
- paths is a list of source and destination paths to copy from
the image.
type: list
contains:
destination_dir:
description:
- destinationDir is the relative directory within the build
directory where files copied from the image are placed.
type: str
source_path:
description:
- sourcePath is the absolute path of the file or directory
inside the image to copy to the build directory. If the
source path ends in /. then the content of the directory
will be copied, but the directory itself will not be created
at the destination.
type: str
pull_secret:
description:
- pullSecret is a reference to a secret to be used to pull the
image from a registry If the image is pulled from the OpenShift
registry, this field does not need to be set.
type: complex
contains:
name:
description:
- Name of the referent.
type: str
secrets:
description:
- secrets represents a list of secrets and their destinations that
will be used only for the build.
type: list
contains:
destination_dir:
description:
- destinationDir is the directory where the files from the secret
should be available for the build time. For the Source build
strategy, these will be injected into a container where the
assemble script runs. Later, when the script finishes, all
files injected will be truncated to zero length. For the Docker
build strategy, these will be copied into the build directory,
where the Dockerfile is located, so users can ADD or COPY
them during docker build.
type: str
secret:
description:
- secret is a reference to an existing secret that you want
to use in your build.
type: complex
contains:
name:
description:
- Name of the referent.
type: str
source_secret:
description:
- "sourceSecret is the name of a Secret that would be used for setting\
\ up the authentication for cloning private repository. The secret\
\ contains valid credentials for remote repository, where the\
\ data's key represent the authentication method to be used and\
\ value is the base64 encoded credentials. Supported auth methods\
\ are: ssh-privatekey."
type: complex
contains:
name:
description:
- Name of the referent.
type: str
type:
description:
- type of build input to accept
type: str
strategy:
description:
- strategy defines how to perform a build.
type: complex
contains:
custom_strategy:
description:
- customStrategy holds the parameters to the Custom build strategy
type: complex
contains:
_from:
description:
- from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage
from which the docker image should be pulled
type: complex
contains:
api_version:
description:
- API version of the referent.
type: str
field_path:
description:
- 'If referring to a piece of an object instead of an entire
object, this string should contain a valid JSON/Go field
access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container
within a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that
triggered the event) or if no container name is specified
"spec.containers[2]" (container with index 2 in this pod).
This syntax is chosen only to have some well-defined way
of referencing a part of an object.'
type: str
kind:
description:
- Kind of the referent.
type: str
name:
description:
- Name of the referent.
type: str
namespace:
description:
- Namespace of the referent.
type: str
resource_version:
description:
- Specific resourceVersion to which this reference is made,
if any.
type: str
uid:
description:
- UID of the referent.
type: str
build_api_version:
description:
- buildAPIVersion is the requested API version for the Build
object serialized and passed to the custom builder
type: str
env:
description:
- env contains additional environment variables you want to
pass into a builder container.
type: list
contains:
name:
description:
- Name of the environment variable. Must be a C_IDENTIFIER.
type: str
value:
description:
- 'Variable references $(VAR_NAME) are expanded using the
previous defined environment variables in the container
and any service environment variables. If a variable cannot
be resolved, the reference in the input string will be
unchanged. The $(VAR_NAME) syntax can be escaped with
a double $$, ie: $$(VAR_NAME). Escaped references will
never be expanded, regardless of whether the variable
exists or not. Defaults to "".'
type: str
value_from:
description:
- Source for the environment variable's value. Cannot be
used if value is not empty.
type: complex
contains:
config_map_key_ref:
description:
- Selects a key of a ConfigMap.
type: complex
contains:
key:
description:
- The key to select.
type: str
name:
description:
- Name of the referent.
type: str
optional:
description:
- Specify whether the ConfigMap or it's key must
be defined
type: bool
field_ref:
description:
- 'Selects a field of the pod: supports metadata.name,
metadata.namespace, metadata.labels, metadata.annotations,
spec.nodeName, spec.serviceAccountName, status.hostIP,
status.podIP.'
type: complex
contains:
api_version:
description:
- Version of the schema the FieldPath is written
in terms of, defaults to "v1".
type: str
field_path:
description:
- Path of the field to select in the specified API
version.
type: str
resource_field_ref:
description:
- 'Selects a resource of the container: only resources
limits and requests (limits.cpu, limits.memory, requests.cpu
and requests.memory) are currently supported.'
type: complex
contains:
container_name:
description:
- 'Container name: required for volumes, optional
for env vars'
type: str
divisor:
description:
- Specifies the output format of the exposed resources,
defaults to "1"
type: str
resource:
description:
- 'Required: resource to select'
type: str
secret_key_ref:
description:
- Selects a key of a secret in the pod's namespace
type: complex
contains:
key:
description:
- The key of the secret to select from. Must be
a valid secret key.
type: str
name:
description:
- Name of the referent.
type: str
optional:
description:
- Specify whether the Secret or it's key must be
defined
type: bool
expose_docker_socket:
description:
- exposeDockerSocket will allow running Docker commands (and
build Docker images) from inside the Docker container.
type: bool
force_pull:
description:
- forcePull describes if the controller should configure the
build pod to always pull the images for the builder or only
pull if it is not present locally
type: bool
pull_secret:
description:
- pullSecret is the name of a Secret that would be used for
setting up the authentication for pulling the Docker images
from the private Docker registries
type: complex
contains:
name:
description:
- Name of the referent.
type: str
secrets:
description:
- secrets is a list of additional secrets that will be included
in the build pod
type: list
contains:
mount_path:
description:
- mountPath is the path at which to mount the secret
type: str
secret_source:
description:
- secretSource is a reference to the secret
type: complex
contains:
name:
description:
- Name of the referent.
type: str
docker_strategy:
description:
- dockerStrategy holds the parameters to the Docker build strategy.
type: complex
contains:
_from:
description:
- from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage
from which the docker image should be pulled the resulting
image will be used in the FROM line of the Dockerfile for
this build.
type: complex
contains:
api_version:
description:
- API version of the referent.
type: str
field_path:
description:
- 'If referring to a piece of an object instead of an entire
object, this string should contain a valid JSON/Go field
access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container
within a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that
triggered the event) or if no container name is specified
"spec.containers[2]" (container with index 2 in this pod).
This syntax is chosen only to have some well-defined way
of referencing a part of an object.'
type: str
kind:
description:
- Kind of the referent.
type: str
name:
description:
- Name of the referent.
type: str
namespace:
description:
- Namespace of the referent.
type: str
resource_version:
description:
- Specific resourceVersion to which this reference is made,
if any.
type: str
uid:
description:
- UID of the referent.
type: str
build_args:
description:
- buildArgs contains build arguments that will be resolved in
the Dockerfile. See
type: list
contains:
name:
description:
- Name of the environment variable. Must be a C_IDENTIFIER.
type: str
value:
description:
- 'Variable references $(VAR_NAME) are expanded using the
previous defined environment variables in the container
and any service environment variables. If a variable cannot
be resolved, the reference in the input string will be
unchanged. The $(VAR_NAME) syntax can be escaped with
a double $$, ie: $$(VAR_NAME). Escaped references will
never be expanded, regardless of whether the variable
exists or not. Defaults to "".'
type: str
value_from:
description:
- Source for the environment variable's value. Cannot be
used if value is not empty.
type: complex
contains:
config_map_key_ref:
description:
- Selects a key of a ConfigMap.
type: complex
contains:
key:
description:
- The key to select.
type: str
name:
description:
- Name of the referent.
type: str
optional:
description:
- Specify whether the ConfigMap or it's key must
be defined
type: bool
field_ref:
description:
- 'Selects a field of the pod: supports metadata.name,
metadata.namespace, metadata.labels, metadata.annotations,
spec.nodeName, spec.serviceAccountName, status.hostIP,
status.podIP.'
type: complex
contains:
api_version:
description:
- Version of the schema the FieldPath is written
in terms of, defaults to "v1".
type: str
field_path:
description:
- Path of the field to select in the specified API
version.
type: str
resource_field_ref:
description:
- 'Selects a resource of the container: only resources
limits and requests (limits.cpu, limits.memory, requests.cpu
and requests.memory) are currently supported.'
type: complex
contains:
container_name:
description:
- 'Container name: required for volumes, optional
for env vars'
type: str
divisor:
description:
- Specifies the output format of the exposed resources,
defaults to "1"
type: str
resource:
description:
- 'Required: resource to select'
type: str
secret_key_ref:
description:
- Selects a key of a secret in the pod's namespace
type: complex
contains:
key:
description:
- The key of the secret to select from. Must be
a valid secret key.
type: str
name:
description:
- Name of the referent.
type: str
optional:
description:
- Specify whether the Secret or it's key must be
defined
type: bool
dockerfile_path:
description:
- dockerfilePath is the path of the Dockerfile that will be
used to build the Docker image, relative to the root of the
context (contextDir).
type: str
env:
description:
- env contains additional environment variables you want to
pass into a builder container.
type: list
contains:
name:
description:
- Name of the environment variable. Must be a C_IDENTIFIER.
type: str
value:
description:
- 'Variable references $(VAR_NAME) are expanded using the
previous defined environment variables in the container
and any service environment variables. If a variable cannot
be resolved, the reference in the input string will be
unchanged. The $(VAR_NAME) syntax can be escaped with
a double $$, ie: $$(VAR_NAME). Escaped references will
never be expanded, regardless of whether the variable
exists or not. Defaults to "".'
type: str
value_from:
description:
- Source for the environment variable's value. Cannot be
used if value is not empty.
type: complex
contains:
config_map_key_ref:
description:
- Selects a key of a ConfigMap.
type: complex
contains:
key:
description:
- The key to select.
type: str
name:
description:
- Name of the referent.
type: str
optional:
description:
- Specify whether the ConfigMap or it's key must
be defined
type: bool
field_ref:
description:
- 'Selects a field of the pod: supports metadata.name,
metadata.namespace, metadata.labels, metadata.annotations,
spec.nodeName, spec.serviceAccountName, status.hostIP,
status.podIP.'
type: complex
contains:
api_version:
description:
- Version of the schema the FieldPath is written
in terms of, defaults to "v1".
type: str
field_path:
description:
- Path of the field to select in the specified API
version.
type: str
resource_field_ref:
description:
- 'Selects a resource of the container: only resources
limits and requests (limits.cpu, limits.memory, requests.cpu
and requests.memory) are currently supported.'
type: complex
contains:
container_name:
description:
- 'Container name: required for volumes, optional
for env vars'
type: str
divisor:
description:
- Specifies the output format of the exposed resources,
defaults to "1"
type: str
resource:
description:
- 'Required: resource to select'
type: str
secret_key_ref:
description:
- Selects a key of a secret in the pod's namespace
type: complex
contains:
key:
description:
- The key of the secret to select from. Must be
a valid secret key.
type: str
name:
description:
- Name of the referent.
type: str
optional:
description:
- Specify whether the Secret or it's key must be
defined
type: bool
force_pull:
description:
- forcePull describes if the builder should pull the images
from registry prior to building.
type: bool
image_optimization_policy:
description:
- imageOptimizationPolicy describes what optimizations the system
can use when building images to reduce the final size or time
spent building the image. The default policy is 'None' which
means the final build image will be equivalent to an image
created by the Docker build API. The experimental policy 'SkipLayers'
will avoid commiting new layers in between each image step,
and will fail if the Dockerfile cannot provide compatibility
with the 'None' policy. An additional experimental policy
'SkipLayersAndWarn' is the same as 'SkipLayers' but simply
warns if compatibility cannot be preserved.
type: str
no_cache:
description:
- noCache if set to true indicates that the docker build must
be executed with the --no-cache=true flag
type: bool
pull_secret:
description:
- pullSecret is the name of a Secret that would be used for
setting up the authentication for pulling the Docker images
from the private Docker registries
type: complex
contains:
name:
description:
- Name of the referent.
type: str
jenkins_pipeline_strategy:
description:
- JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline
build strategy. This strategy is in tech preview.
type: complex
contains:
env:
description:
- env contains additional environment variables you want to
pass into a build pipeline.
type: list
contains:
name:
description:
- Name of the environment variable. Must be a C_IDENTIFIER.
type: str
value:
description:
- 'Variable references $(VAR_NAME) are expanded using the
previous defined environment variables in the container
and any service environment variables. If a variable cannot
be resolved, the reference in the input string will be
unchanged. The $(VAR_NAME) syntax can be escaped with
a double $$, ie: $$(VAR_NAME). Escaped references will
never be expanded, regardless of whether the variable
exists or not. Defaults to "".'
type: str
value_from:
description:
- Source for the environment variable's value. Cannot be
used if value is not empty.
type: complex
contains:
config_map_key_ref:
description:
- Selects a key of a ConfigMap.
type: complex
contains:
key:
description:
- The key to select.
type: str
name:
description:
- Name of the referent.
type: str
optional:
description:
- Specify whether the ConfigMap or it's key must
be defined
type: bool
field_ref:
description:
- 'Selects a field of the pod: supports metadata.name,
metadata.namespace, metadata.labels, metadata.annotations,
spec.nodeName, spec.serviceAccountName, status.hostIP,
status.podIP.'
type: complex
contains:
api_version:
description:
- Version of the schema the FieldPath is written
in terms of, defaults to "v1".
type: str
field_path:
description:
- Path of the field to select in the specified API
version.
type: str
resource_field_ref:
description:
- 'Selects a resource of the container: only resources
limits and requests (limits.cpu, limits.memory, requests.cpu
and requests.memory) are currently supported.'
type: complex
contains:
container_name:
description:
- 'Container name: required for volumes, optional
for env vars'
type: str
divisor:
description:
- Specifies the output format of the exposed resources,
defaults to "1"
type: str
resource:
description:
- 'Required: resource to select'
type: str
secret_key_ref:
description:
- Selects a key of a secret in the pod's namespace
type: complex
contains:
key:
description:
- The key of the secret to select from. Must be
a valid secret key.
type: str
name:
description:
- Name of the referent.
type: str
optional:
description:
- Specify whether the Secret or it's key must be
defined
type: bool
jenkinsfile:
description:
- Jenkinsfile defines the optional raw contents of a Jenkinsfile
which defines a Jenkins pipeline build.
type: str
jenkinsfile_path:
description:
- JenkinsfilePath is the optional path of the Jenkinsfile that
will be used to configure the pipeline relative to the root
of the context (contextDir). If both JenkinsfilePath & Jenkinsfile
are both not specified, this defaults to Jenkinsfile in the
root of the specified contextDir.
type: str
source_strategy:
description:
- sourceStrategy holds the parameters to the Source build strategy.
type: complex
contains:
_from:
description:
- from is reference to an DockerImage, ImageStreamTag, or ImageStreamImage
from which the docker image should be pulled
type: complex
contains:
api_version:
description:
- API version of the referent.
type: str
field_path:
description:
- 'If referring to a piece of an object instead of an entire
object, this string should contain a valid JSON/Go field
access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container
within a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that
triggered the event) or if no container name is specified
"spec.containers[2]" (container with index 2 in this pod).
This syntax is chosen only to have some well-defined way
of referencing a part of an object.'
type: str
kind:
description:
- Kind of the referent.
type: str
name:
description:
- Name of the referent.
type: str
namespace:
description:
- Namespace of the referent.
type: str
resource_version:
description:
- Specific resourceVersion to which this reference is made,
if any.
type: str
uid:
description:
- UID of the referent.
type: str
env:
description:
- env contains additional environment variables you want to
pass into a builder container.
type: list
contains:
name:
description:
- Name of the environment variable. Must be a C_IDENTIFIER.
type: str
value:
description:
- 'Variable references $(VAR_NAME) are expanded using the
previous defined environment variables in the container
and any service environment variables. If a variable cannot
be resolved, the reference in the input string will be
unchanged. The $(VAR_NAME) syntax can be escaped with
a double $$, ie: $$(VAR_NAME). Escaped references will
never be expanded, regardless of whether the variable
exists or not. Defaults to "".'
type: str
value_from:
description:
- Source for the environment variable's value. Cannot be
used if value is not empty.
type: complex
contains:
config_map_key_ref:
description:
- Selects a key of a ConfigMap.
type: complex
contains:
key:
description:
- The key to select.
type: str
name:
description:
- Name of the referent.
type: str
optional:
description:
- Specify whether the ConfigMap or it's key must
be defined
type: bool
field_ref:
description:
- 'Selects a field of the pod: supports metadata.name,
metadata.namespace, metadata.labels, metadata.annotations,
spec.nodeName, spec.serviceAccountName, status.hostIP,
status.podIP.'
type: complex
contains:
api_version:
description:
- Version of the schema the FieldPath is written
in terms of, defaults to "v1".
type: str
field_path:
description:
- Path of the field to select in the specified API
version.
type: str
resource_field_ref:
description:
- 'Selects a resource of the container: only resources
limits and requests (limits.cpu, limits.memory, requests.cpu
and requests.memory) are currently supported.'
type: complex
contains:
container_name:
description:
- 'Container name: required for volumes, optional
for env vars'
type: str
divisor:
description:
- Specifies the output format of the exposed resources,
defaults to "1"
type: str
resource:
description:
- 'Required: resource to select'
type: str
secret_key_ref:
description:
- Selects a key of a secret in the pod's namespace
type: complex
contains:
key:
description:
- The key of the secret to select from. Must be
a valid secret key.
type: str
name:
description:
- Name of the referent.
type: str
optional:
description:
- Specify whether the Secret or it's key must be
defined
type: bool
force_pull:
description:
- forcePull describes if the builder should pull the images
from registry prior to building.
type: bool
incremental:
description:
- incremental flag forces the Source build to do incremental
builds if true.
type: bool
pull_secret:
description:
- pullSecret is the name of a Secret that would be used for
setting up the authentication for pulling the Docker images
from the private Docker registries
type: complex
contains:
name:
description:
- Name of the referent.
type: str
runtime_artifacts:
description:
- 'runtimeArtifacts specifies a list of source/destination pairs
that will be copied from the builder to the runtime image.
sourcePath can be a file or directory. destinationDir must
be a directory. destinationDir can also be empty or equal
to ".", in this case it just refers to the root of WORKDIR.
Deprecated: This feature will be removed in a future release.
Use ImageSource to copy binary artifacts created from one
build into a separate runtime image.'
type: list
contains:
destination_dir:
description:
- destinationDir is the relative directory within the build
directory where files copied from the image are placed.
type: str
source_path:
description:
- sourcePath is the absolute path of the file or directory
inside the image to copy to the build directory. If the
source path ends in /. then the content of the directory
will be copied, but the directory itself will not be created
at the destination.
type: str
runtime_image:
description:
- 'runtimeImage is an optional image that is used to run an
application without unneeded dependencies installed. The building
of the application is still done in the builder image but,
post build, you can copy the needed artifacts in the runtime
image for use. Deprecated: This feature will be removed in
a future release. Use ImageSource to copy binary artifacts
created from one build into a separate runtime image.'
type: complex
contains:
api_version:
description:
- API version of the referent.
type: str
field_path:
description:
- 'If referring to a piece of an object instead of an entire
object, this string should contain a valid JSON/Go field
access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container
within a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that
triggered the event) or if no container name is specified
"spec.containers[2]" (container with index 2 in this pod).
This syntax is chosen only to have some well-defined way
of referencing a part of an object.'
type: str
kind:
description:
- Kind of the referent.
type: str
name:
description:
- Name of the referent.
type: str
namespace:
description:
- Namespace of the referent.
type: str
resource_version:
description:
- Specific resourceVersion to which this reference is made,
if any.
type: str
uid:
description:
- UID of the referent.
type: str
scripts:
description:
- scripts is the location of Source scripts
type: str
type:
description:
- type is the kind of build strategy.
type: str
successful_builds_history_limit:
description:
- successfulBuildsHistoryLimit is the number of old successful builds
to retain. If not specified, all successful builds are retained.
type: int
triggers:
description:
- triggers determine how new Builds can be launched from a BuildConfig.
If no triggers are defined, a new build can only occur as a result
of an explicit client build creation.
type: list
contains:
bitbucket:
description:
- BitbucketWebHook contains the parameters for a Bitbucket webhook
type of trigger
type: complex
contains:
allow_env:
description:
- allowEnv determines whether the webhook can set environment
variables; can only be set to true for GenericWebHook.
type: bool
secret:
description:
- secret used to validate requests.
type: str
generic:
description:
- generic contains the parameters for a Generic webhook type of
trigger
type: complex
contains:
allow_env:
description:
- allowEnv determines whether the webhook can set environment
variables; can only be set to true for GenericWebHook.
type: bool
secret:
description:
- secret used to validate requests.
type: str
github:
description:
- github contains the parameters for a GitHub webhook type of trigger
type: complex
contains:
allow_env:
description:
- allowEnv determines whether the webhook can set environment
variables; can only be set to true for GenericWebHook.
type: bool
secret:
description:
- secret used to validate requests.
type: str
gitlab:
description:
- GitLabWebHook contains the parameters for a GitLab webhook type
of trigger
type: complex
contains:
allow_env:
description:
- allowEnv determines whether the webhook can set environment
variables; can only be set to true for GenericWebHook.
type: bool
secret:
description:
- secret used to validate requests.
type: str
image_change:
description:
- imageChange contains parameters for an ImageChange type of trigger
type: complex
contains:
_from:
description:
- from is a reference to an ImageStreamTag that will trigger
a build when updated It is optional. If no From is specified,
the From image from the build strategy will be used. Only
one ImageChangeTrigger with an empty From reference is allowed
in a build configuration.
type: complex
contains:
api_version:
description:
- API version of the referent.
type: str
field_path:
description:
- 'If referring to a piece of an object instead of an entire
object, this string should contain a valid JSON/Go field
access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container
within a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that
triggered the event) or if no container name is specified
"spec.containers[2]" (container with index 2 in this pod).
This syntax is chosen only to have some well-defined way
of referencing a part of an object.'
type: str
kind:
description:
- Kind of the referent.
type: str
name:
description:
- Name of the referent.
type: str
namespace:
description:
- Namespace of the referent.
type: str
resource_version:
description:
- Specific resourceVersion to which this reference is made,
if any.
type: str
uid:
description:
- UID of the referent.
type: str
last_triggered_image_id:
description:
- lastTriggeredImageID is used internally by the ImageChangeController
to save last used image ID for build
type: str
type:
description:
- type is the type of build trigger
type: str
status:
description:
- status holds any relevant information about a build config
type: complex
contains:
last_version:
description:
- lastVersion is used to inform about number of last triggered build.
type: int
kind:
description:
- Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to. Cannot
be updated. In CamelCase.
type: str
metadata:
description:
- metadata for BuildConfigList.
type: complex
contains:
resource_version:
description:
- String that identifies the server's internal version of this object that
can be used by clients to determine when objects have changed. Value must
be treated as opaque by clients and passed unmodified back to the server.
Populated by the system. Read-only.
type: str
self_link:
description:
- SelfLink is a URL representing this object. Populated by the system. Read-only.
type: str
'''
def main():
try:
module = OpenShiftAnsibleModule('build_config_list', 'v1')
except OpenShiftAnsibleException as exc:
# The helper failed to init, so there is no module object. All we can do is raise the error.
raise Exception(exc.message)
try:
module.execute_module()
except OpenShiftAnsibleException as exc:
module.fail_json(msg="Module failed!", error=str(exc))
if __name__ == '__main__':
main()
| 50.486176
| 100
| 0.443027
| 8,403
| 96,782
| 5.076044
| 0.113293
| 0.030196
| 0.033408
| 0.022718
| 0.555352
| 0.53015
| 0.519435
| 0.514723
| 0.508909
| 0.496882
| 0
| 0.001486
| 0.527257
| 96,782
| 1,916
| 101
| 50.512526
| 0.930781
| 0.001333
| 0
| 0.706408
| 0
| 0.00105
| 0.994899
| 0.011547
| 0
| 0
| 0
| 0
| 0
| 1
| 0.000525
| false
| 0.006303
| 0.000525
| 0
| 0.00105
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
8a6c803544f7e0d285bc37ff4aefd197349a5940
| 456
|
py
|
Python
|
src/trw/reporting/__init__.py
|
civodlu/trw
|
b9a1cf045f61d6df9c65c014ef63b4048972dcdc
|
[
"MIT"
] | 3
|
2019-07-04T01:20:41.000Z
|
2020-01-27T02:36:12.000Z
|
src/trw/reporting/__init__.py
|
civodlu/trw
|
b9a1cf045f61d6df9c65c014ef63b4048972dcdc
|
[
"MIT"
] | null | null | null |
src/trw/reporting/__init__.py
|
civodlu/trw
|
b9a1cf045f61d6df9c65c014ef63b4048972dcdc
|
[
"MIT"
] | 2
|
2020-10-19T13:46:06.000Z
|
2021-12-27T02:18:10.000Z
|
#from trw.utils import collect_hierarchical_module_name, collect_hierarchical_parameter_name, get_batch_n, to_value, \
# safe_lookup, len_batch
from .export import as_image_ui8, as_rgb_image, export_image, export_sample, export_as_image
from .table_sqlite import TableStream, SQLITE_TYPE_PATTERN, get_table_number_of_rows
from .reporting_bokeh import report, create_default_reporting_options
from .reporting_bokeh_samples import PanelDataSamplesTabular
| 65.142857
| 118
| 0.875
| 65
| 456
| 5.646154
| 0.6
| 0.103542
| 0.098093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002387
| 0.08114
| 456
| 6
| 119
| 76
| 0.873508
| 0.313596
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 3
|
8a6d637336ee5d703603ebc196b3672612c215ab
| 1,976
|
py
|
Python
|
src/learndash/api_resources/user.py
|
MarkMacDon/learndash-python
|
a3fbfc45567a524b80c732d735f2ae101119f2e4
|
[
"MIT"
] | null | null | null |
src/learndash/api_resources/user.py
|
MarkMacDon/learndash-python
|
a3fbfc45567a524b80c732d735f2ae101119f2e4
|
[
"MIT"
] | 1
|
2021-05-06T19:01:24.000Z
|
2021-05-06T19:01:24.000Z
|
src/learndash/api_resources/user.py
|
MarkMacDon/learndash-python
|
a3fbfc45567a524b80c732d735f2ae101119f2e4
|
[
"MIT"
] | 2
|
2021-05-05T22:45:04.000Z
|
2021-07-24T08:47:02.000Z
|
import learndash
from learndash.api_resources.abstract import ListableAPIResource
from learndash.api_resources.abstract import RetrievableAPIResource
from learndash.api_resources.abstract import UpdateableAPIResource
from learndash.api_resources.abstract import NestedAPIResource
from learndash.api_resources.typing import UserDict
from learndash.api_resources.typing import UserCourseProgressDict
from learndash.api_resources.typing import UserCourseDict
from learndash.api_resources.typing import UserGroupDict
from learndash.api_resources.typing import UserQuizProgressDict
class User(RetrievableAPIResource[UserDict], ListableAPIResource[UserDict]):
api_path = learndash.path_users
def course_progress(self, id=None):
return UserCourseProgress(id, parent=self)
def courses(self, id=None):
return UserCourse(id, parent=self)
def groups(self, id=None):
return UserGroup(id, parent=self)
def quiz_progress(self, id=None):
return UserQuizProgress(id, parent=self)
class UserCourseProgress(ListableAPIResource[UserCourseProgressDict], NestedAPIResource):
api_path = learndash.path_user_course_progress
# class UserCourseProgressSteps(ListableAPIResource, NestedAPIResource):
class UserCourse(ListableAPIResource[UserCourseDict], UpdateableAPIResource, NestedAPIResource): # also deletable
api_path = learndash.path_user_courses
def instance_url(self):
# This endpoint accepts updates and deletions at it's base endpoint
return self.class_url()
class UserGroup(ListableAPIResource[UserGroupDict], UpdateableAPIResource, NestedAPIResource): # also deleteable
api_path = learndash.path_user_groups
def instance_url(self):
# This endpoint accepts updates and deletions at it's base endpoint
return self.class_url()
class UserQuizProgress(ListableAPIResource[UserQuizProgressDict], NestedAPIResource):
api_path = learndash.path_user_quiz_progress
| 35.927273
| 113
| 0.802632
| 212
| 1,976
| 7.334906
| 0.240566
| 0.075241
| 0.092605
| 0.144695
| 0.457235
| 0.395498
| 0.123473
| 0.123473
| 0.123473
| 0.123473
| 0
| 0
| 0.136134
| 1,976
| 54
| 114
| 36.592593
| 0.910955
| 0.117915
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1875
| false
| 0
| 0.3125
| 0.1875
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 3
|
8a80483513e593a3c49ee46795ac3b8d601f6b9a
| 416
|
py
|
Python
|
main/SimulationSettings/ScreenshotsSteppable/Simulation/screenshots_steppables.py
|
JulianoGianlupi/nh-cc3d-4x-base-tool
|
c0f4aceebd4c5bf3ec39e831ef851e419b161259
|
[
"CC0-1.0"
] | null | null | null |
main/SimulationSettings/ScreenshotsSteppable/Simulation/screenshots_steppables.py
|
JulianoGianlupi/nh-cc3d-4x-base-tool
|
c0f4aceebd4c5bf3ec39e831ef851e419b161259
|
[
"CC0-1.0"
] | null | null | null |
main/SimulationSettings/ScreenshotsSteppable/Simulation/screenshots_steppables.py
|
JulianoGianlupi/nh-cc3d-4x-base-tool
|
c0f4aceebd4c5bf3ec39e831ef851e419b161259
|
[
"CC0-1.0"
] | 1
|
2021-02-26T21:50:29.000Z
|
2021-02-26T21:50:29.000Z
|
from cc3d.core.PySteppables import *
from cc3d import CompuCellSetup
from random import random
class ScreenshotSteppable(SteppableBasePy):
def __init__(self, frequency=10):
SteppableBasePy.__init__(self, frequency)
def step(self, mcs):
if mcs in [3, 5, 19,20, 23, 29, 31]:
self.request_screenshot(mcs=mcs, screenshot_label='Cell_Field_CellField_2D_XY_0')
| 27.733333
| 93
| 0.6875
| 52
| 416
| 5.211538
| 0.673077
| 0.059041
| 0.125461
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055901
| 0.225962
| 416
| 15
| 94
| 27.733333
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0.067146
| 0.067146
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 3
|
8a894222f80aae1db1ccdaaadeb6288f55d6b62f
| 267
|
py
|
Python
|
compliance_suite/exceptions/user_config_exception.py
|
alextsaihi/rnaget-compliance-suite
|
a3accae431b9e4f7791dfa5ae867e70da2dd6278
|
[
"Apache-2.0"
] | 1
|
2019-09-18T14:38:55.000Z
|
2019-09-18T14:38:55.000Z
|
compliance_suite/exceptions/user_config_exception.py
|
alextsaihi/rnaget-compliance-suite
|
a3accae431b9e4f7791dfa5ae867e70da2dd6278
|
[
"Apache-2.0"
] | 14
|
2019-05-24T18:55:23.000Z
|
2022-02-25T16:56:28.000Z
|
compliance_suite/exceptions/user_config_exception.py
|
alextsaihi/rnaget-compliance-suite
|
a3accae431b9e4f7791dfa5ae867e70da2dd6278
|
[
"Apache-2.0"
] | 8
|
2019-04-08T14:48:35.000Z
|
2022-02-04T16:59:59.000Z
|
# -*- coding: utf-8 -*-
"""Module compliance_suite.exceptions.user_config_exception.py
This module contains class definition for user config file exceptions.
"""
class UserConfigException(Exception):
"""Exception for user config file-related errors"""
pass
| 26.7
| 70
| 0.752809
| 32
| 267
| 6.1875
| 0.65625
| 0.151515
| 0.131313
| 0.171717
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004348
| 0.138577
| 267
| 10
| 71
| 26.7
| 0.856522
| 0.749064
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 3
|
8a928ed1a44855a651b9670429234df930921f0a
| 125
|
py
|
Python
|
api/services/http.py
|
takos22/API-1
|
261ecd34648d610169caf27b3712256f757b100d
|
[
"MIT"
] | null | null | null |
api/services/http.py
|
takos22/API-1
|
261ecd34648d610169caf27b3712256f757b100d
|
[
"MIT"
] | null | null | null |
api/services/http.py
|
takos22/API-1
|
261ecd34648d610169caf27b3712256f757b100d
|
[
"MIT"
] | null | null | null |
from aiohttp import ClientSession
from typing import Optional
session: Optional[ClientSession] = None
__all__ = (session,)
| 17.857143
| 39
| 0.8
| 14
| 125
| 6.857143
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136
| 125
| 6
| 40
| 20.833333
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 3
|
8aad8de20813d57dc973493fe2b63ad495089392
| 549
|
py
|
Python
|
setup.py
|
swfrench/nginx-access-tailer
|
5e060396ca749935c622e8e9c50b659b39e3675b
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
swfrench/nginx-access-tailer
|
5e060396ca749935c622e8e9c50b659b39e3675b
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
swfrench/nginx-access-tailer
|
5e060396ca749935c622e8e9c50b659b39e3675b
|
[
"BSD-3-Clause"
] | null | null | null |
"""TODO."""
from setuptools import setup
setup(
name='nginx-access-tailer',
version='0.1',
author='swfrench',
url='https://github.com/swfrench/nginx-tailer',
packages=['nginx_access_tailer',],
license='BSD three-clause license',
entry_points={
'console_scripts': ['nginx-access-tailer = nginx_access_tailer.__main__:main'],
},
install_requires=[
'python-gflags >= 3.1.1',
'google-cloud-monitoring >= 0.25.0',
],
test_suite='nose.collector',
tests_require=['nose', 'mock'],
)
| 24.954545
| 87
| 0.626594
| 64
| 549
| 5.171875
| 0.6875
| 0.132931
| 0.205438
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020316
| 0.193078
| 549
| 21
| 88
| 26.142857
| 0.726862
| 0.009107
| 0
| 0
| 0
| 0
| 0.483271
| 0.104089
| 0
| 0
| 0
| 0.047619
| 0
| 1
| 0
| true
| 0
| 0.055556
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
8ab7c4d71edafc2000970ee8f5e485db6a4fa978
| 872
|
py
|
Python
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/super/super_with_arguments.py
|
ciskoinch8/vimrc
|
5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8
|
[
"MIT"
] | 463
|
2015-01-15T08:17:42.000Z
|
2022-03-28T15:10:20.000Z
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/super/super_with_arguments.py
|
ciskoinch8/vimrc
|
5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8
|
[
"MIT"
] | 52
|
2015-01-06T02:43:59.000Z
|
2022-03-14T11:15:21.000Z
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/super/super_with_arguments.py
|
ciskoinch8/vimrc
|
5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8
|
[
"MIT"
] | 249
|
2015-01-07T22:49:49.000Z
|
2022-03-18T02:32:06.000Z
|
class Foo:
pass
class Bar(Foo):
def __init__(self):
super(Bar, self).__init__() # [super-with-arguments]
class Baz(Foo):
def __init__(self):
super().__init__()
class Qux(Foo):
def __init__(self):
super(Bar, self).__init__()
class NotSuperCall(Foo):
def __init__(self):
super.test(Bar, self).__init__()
class InvalidSuperCall(Foo):
def __init__(self):
super(InvalidSuperCall.__class__, self).__init__()
def method_accepting_cls(cls, self):
# Using plain `super()` is not valid here, since there's no `__class__` cell found
# (Exact exception would be 'RuntimeError: super(): __class__ cell not found')
# Instead, we expect to *not* see a warning about `super-with-arguments`.
# Explicitly passing `cls`, and `self` to `super()` is what's required.
super(cls, self).__init__()
| 24.222222
| 86
| 0.65711
| 113
| 872
| 4.557522
| 0.424779
| 0.058252
| 0.097087
| 0.135922
| 0.227184
| 0.116505
| 0.116505
| 0.116505
| 0
| 0
| 0
| 0
| 0.209862
| 872
| 35
| 87
| 24.914286
| 0.74746
| 0.369266
| 0
| 0.368421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.315789
| false
| 0.052632
| 0
| 0
| 0.631579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 3
|
8abd39aa48321431318051d54854247571fa2704
| 311
|
py
|
Python
|
betterloader/standard_transforms.py
|
BinItAI/BetterLoader
|
29ebcc22b53db6417a4b14d95f0a1e7f5afe7af8
|
[
"MIT"
] | 39
|
2020-08-11T09:58:08.000Z
|
2022-02-24T19:22:42.000Z
|
betterloader/standard_transforms.py
|
BinItAI/BetterLoader
|
29ebcc22b53db6417a4b14d95f0a1e7f5afe7af8
|
[
"MIT"
] | 21
|
2020-08-11T09:58:46.000Z
|
2021-05-10T12:50:12.000Z
|
betterloader/standard_transforms.py
|
BinItAI/BetterLoader
|
29ebcc22b53db6417a4b14d95f0a1e7f5afe7af8
|
[
"MIT"
] | 2
|
2020-10-29T14:51:01.000Z
|
2021-01-08T09:40:34.000Z
|
import numpy as np
from torchvision import transforms
np.random.seed(1)
class TransformWhileSampling(object):
def __init__(self, transform):
self.transform = transform
def __call__(self, sample):
x1 = self.transform(sample)
x2 = self.transform(sample)
return x1, x2
| 19.4375
| 37
| 0.678457
| 37
| 311
| 5.486486
| 0.594595
| 0.256158
| 0.187192
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021097
| 0.237942
| 311
| 16
| 38
| 19.4375
| 0.835443
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 3
|
8aca0af3be9ee2ea88050772027c439546656c4a
| 3,651
|
py
|
Python
|
tests/test_EdiblesSpectrum.py
|
jancami/edibles
|
51263b24c5e8aef786692011289b906a810ad2f7
|
[
"MIT"
] | 8
|
2020-04-15T10:44:48.000Z
|
2021-06-21T15:58:19.000Z
|
tests/test_EdiblesSpectrum.py
|
jancami/edibles
|
51263b24c5e8aef786692011289b906a810ad2f7
|
[
"MIT"
] | 100
|
2020-05-08T13:20:41.000Z
|
2022-01-11T20:04:52.000Z
|
tests/test_EdiblesSpectrum.py
|
jancami/edibles
|
51263b24c5e8aef786692011289b906a810ad2f7
|
[
"MIT"
] | 8
|
2020-05-27T00:39:39.000Z
|
2021-06-23T14:07:16.000Z
|
import astropy
import datetime
import numpy as np
from edibles.utils.edibles_spectrum import EdiblesSpectrum
def testEdiblesSpectrum(filename="tests/HD170740_w860_redl_20140915_O12.fits"):
# Spectrum information
sp = EdiblesSpectrum(filename=filename, fully_featured=True, noDATADIR=True)
assert isinstance(sp.header, astropy.io.fits.header.Header)
assert isinstance(sp.target, str)
assert isinstance(sp.date, str)
assert isinstance(sp.datetime, datetime.datetime)
assert isinstance(sp.v_bary, float)
assert isinstance(sp.wave_units, str)
assert isinstance(sp.flux_units, str)
# Raw
assert isinstance(sp.raw_wave, np.ndarray)
assert isinstance(sp.raw_bary_wave, np.ndarray)
assert isinstance(sp.raw_flux, np.ndarray)
assert len(sp.raw_wave) == len(sp.raw_bary_wave)
assert len(sp.raw_wave) == len(sp.raw_flux)
assert isinstance(sp.raw_grid, np.ndarray)
assert len(sp.raw_grid) == 200443 # print(len(sp.raw_grid))
assert isinstance(sp.raw_sky_wave, np.ndarray)
assert isinstance(sp.raw_sky_flux, np.ndarray)
assert len(sp.raw_sky_wave) == len(sp.raw_sky_flux)
assert isinstance(sp.wave, np.ndarray)
assert isinstance(sp.bary_wave, np.ndarray)
assert isinstance(sp.flux, np.ndarray)
# getSpectrum
xmin = 7660
xmax = 7680
sp.getSpectrum(xmin=xmin, xmax=xmax)
assert xmin == sp.xmin
assert xmax == sp.xmax
assert isinstance(sp.wave, np.ndarray)
assert isinstance(sp.flux, np.ndarray)
assert len(sp.wave) == len(sp.flux)
assert np.min(sp.wave) > sp.xmin
assert np.max(sp.wave) < sp.xmax
assert isinstance(sp.bary_wave, np.ndarray)
assert isinstance(sp.bary_flux, np.ndarray)
assert len(sp.bary_wave) == len(sp.bary_flux)
assert np.min(sp.bary_wave) > sp.xmin
assert np.max(sp.bary_wave) < sp.xmax
assert isinstance(sp.grid, np.ndarray)
assert isinstance(sp.interp_flux, np.ndarray)
assert isinstance(sp.interp_bary_flux, np.ndarray)
assert len(sp.grid) == len(sp.interp_flux)
assert len(sp.grid) == len(sp.interp_bary_flux)
assert np.min(sp.grid) > sp.xmin
assert np.max(sp.grid) < sp.xmax
assert isinstance(sp.sky_wave, np.ndarray)
assert isinstance(sp.sky_flux, np.ndarray)
assert len(sp.sky_wave) == len(sp.sky_flux)
assert np.min(sp.sky_wave) > sp.xmin
assert np.max(sp.sky_wave) < sp.xmax
# shift
zoom_xmin = 7661
zoom_xmax = 7679
shift = 0.05
sp.shift(shift=shift, zoom_xmin=zoom_xmin, zoom_xmax=zoom_xmax)
assert isinstance(sp.wave, np.ndarray)
assert isinstance(sp.flux, np.ndarray)
assert len(sp.wave) == len(sp.flux)
assert np.min(sp.wave) > sp.xmin
assert np.max(sp.wave) < sp.xmax
assert isinstance(sp.bary_wave, np.ndarray)
assert isinstance(sp.bary_flux, np.ndarray)
assert len(sp.bary_wave) == len(sp.bary_flux)
assert np.min(sp.bary_wave) > sp.xmin
assert np.max(sp.bary_wave) < sp.xmax
assert isinstance(sp.grid, np.ndarray)
assert isinstance(sp.interp_flux, np.ndarray)
assert isinstance(sp.interp_bary_flux, np.ndarray)
assert len(sp.grid) == len(sp.interp_flux)
assert len(sp.grid) == len(sp.interp_bary_flux)
assert np.min(sp.grid) > sp.xmin
assert np.max(sp.grid) < sp.xmax
assert isinstance(sp.sky_wave, np.ndarray)
assert isinstance(sp.sky_flux, np.ndarray)
assert len(sp.sky_wave) == len(sp.sky_flux)
assert np.min(sp.sky_wave) > sp.xmin
assert np.max(sp.sky_wave) < sp.xmax
if __name__ == "__main__":
filename = "HD170740_w860_redl_20140915_O12.fits"
testEdiblesSpectrum(filename=filename)
| 34.443396
| 80
| 0.707751
| 565
| 3,651
| 4.424779
| 0.113274
| 0.2176
| 0.2448
| 0.15
| 0.6924
| 0.6832
| 0.6492
| 0.5884
| 0.5676
| 0.55
| 0
| 0.02084
| 0.172008
| 3,651
| 105
| 81
| 34.771429
| 0.806153
| 0.018077
| 0
| 0.573171
| 0
| 0
| 0.024029
| 0.021794
| 0
| 0
| 0
| 0
| 0.804878
| 1
| 0.012195
| false
| 0
| 0.04878
| 0
| 0.060976
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
76e2fbbb9481d029109c5c955ed7a3309fc9c83a
| 117
|
py
|
Python
|
extract.py
|
rmalav15/voice-data-extract
|
e021428afe2706cae0e5339e96bba7f8b033117d
|
[
"MIT"
] | null | null | null |
extract.py
|
rmalav15/voice-data-extract
|
e021428afe2706cae0e5339e96bba7f8b033117d
|
[
"MIT"
] | null | null | null |
extract.py
|
rmalav15/voice-data-extract
|
e021428afe2706cae0e5339e96bba7f8b033117d
|
[
"MIT"
] | null | null | null |
from srtvoiceext import extract
if __name__ == '__main__':
ext = extract('video.mkv', 'subtitles.srt', 'outdir')
| 29.25
| 57
| 0.700855
| 14
| 117
| 5.285714
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145299
| 117
| 4
| 57
| 29.25
| 0.74
| 0
| 0
| 0
| 0
| 0
| 0.305085
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 3
|
76e301801e70d562cc3a1d9777a610e89dc8d94b
| 632
|
py
|
Python
|
bacon/readonly_collections.py
|
aholkner/bacon
|
edf3810dcb211942d392a8637945871399b0650d
|
[
"MIT"
] | 37
|
2015-01-29T17:42:11.000Z
|
2021-12-14T22:11:33.000Z
|
bacon/readonly_collections.py
|
aholkner/bacon
|
edf3810dcb211942d392a8637945871399b0650d
|
[
"MIT"
] | 3
|
2015-08-13T17:38:05.000Z
|
2020-09-25T17:21:31.000Z
|
bacon/readonly_collections.py
|
aholkner/bacon
|
edf3810dcb211942d392a8637945871399b0650d
|
[
"MIT"
] | 7
|
2015-02-12T17:54:35.000Z
|
2022-01-31T14:50:09.000Z
|
import collections
class ReadOnlyDict(collections.MutableMapping):
def __init__(self, store):
self.store = store
def __getitem__(self, key):
return self.store[key]
def __setitem__(self, key, value):
raise TypeError('Cannot modify ReadOnlyDict')
def __delitem__(self, key):
raise TypeError('Cannot modify ReadOnlyDict')
def __iter__(self):
return iter(self.store)
def __len__(self):
return len(self.store)
def __str__(self):
return 'ReadOnlyDict(%s)' % self.store
def __repr__(self):
return 'ReadOnlyDict(%r)' % self.store
| 25.28
| 53
| 0.642405
| 70
| 632
| 5.342857
| 0.357143
| 0.168449
| 0.096257
| 0.139037
| 0.219251
| 0.219251
| 0
| 0
| 0
| 0
| 0
| 0
| 0.253165
| 632
| 25
| 54
| 25.28
| 0.792373
| 0
| 0
| 0.111111
| 0
| 0
| 0.132701
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.444444
| false
| 0
| 0.055556
| 0.277778
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 3
|
0a0c72972354861b109e6305d555a377963ca24f
| 63
|
py
|
Python
|
python/testData/stubs/FullyQualifiedTypingNamedTuple.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/stubs/FullyQualifiedTypingNamedTuple.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/stubs/FullyQualifiedTypingNamedTuple.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
import typing
nt = typing.NamedTuple("name", [("field", str)])
| 21
| 48
| 0.666667
| 8
| 63
| 5.25
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 63
| 3
| 48
| 21
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.140625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 3
|
0a1e494933ae306f17bb20205df33acd66dcd6cb
| 3,713
|
py
|
Python
|
src/genotypes.py
|
k8lion/admmdarts
|
4953e401cb74ba9f8da3ed0b9d4c5e88da9fc776
|
[
"Apache-2.0"
] | null | null | null |
src/genotypes.py
|
k8lion/admmdarts
|
4953e401cb74ba9f8da3ed0b9d4c5e88da9fc776
|
[
"Apache-2.0"
] | null | null | null |
src/genotypes.py
|
k8lion/admmdarts
|
4953e401cb74ba9f8da3ed0b9d4c5e88da9fc776
|
[
"Apache-2.0"
] | null | null | null |
from collections import namedtuple
Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')
PRIMITIVES = [
'none',
'max_pool_3x3',
'avg_pool_3x3',
'skip_connect',
'sep_conv_3x3',
'sep_conv_5x5',
'dil_conv_3x3',
'dil_conv_5x5'
]
CRBPRIMITIVES = [
'max_pool_3x3',
'avg_pool_3x3',
'skip_connect',
'sep_conv_3x3',
'sep_conv_5x5',
'dil_conv_3x3',
'dil_conv_5x5'
]
NASNet = Genotype(
normal=[
('sep_conv_5x5', 1),
('sep_conv_3x3', 0),
('sep_conv_5x5', 0),
('sep_conv_3x3', 0),
('avg_pool_3x3', 1),
('skip_connect', 0),
('avg_pool_3x3', 0),
('avg_pool_3x3', 0),
('sep_conv_3x3', 1),
('skip_connect', 1),
],
normal_concat=[2, 3, 4, 5, 6],
reduce=[
('sep_conv_5x5', 1),
('sep_conv_7x7', 0),
('max_pool_3x3', 1),
('sep_conv_7x7', 0),
('avg_pool_3x3', 1),
('sep_conv_5x5', 0),
('skip_connect', 3),
('avg_pool_3x3', 2),
('sep_conv_3x3', 2),
('max_pool_3x3', 1),
],
reduce_concat=[4, 5, 6],
)
AmoebaNet = Genotype(
normal=[
('avg_pool_3x3', 0),
('max_pool_3x3', 1),
('sep_conv_3x3', 0),
('sep_conv_5x5', 2),
('sep_conv_3x3', 0),
('avg_pool_3x3', 3),
('sep_conv_3x3', 1),
('skip_connect', 1),
('skip_connect', 0),
('avg_pool_3x3', 1),
],
normal_concat=[4, 5, 6],
reduce=[
('avg_pool_3x3', 0),
('sep_conv_3x3', 1),
('max_pool_3x3', 0),
('sep_conv_7x7', 2),
('sep_conv_7x7', 0),
('avg_pool_3x3', 1),
('max_pool_3x3', 0),
('max_pool_3x3', 1),
('conv_7x1_1x7', 0),
('sep_conv_3x3', 5),
],
reduce_concat=[3, 4, 6]
)
DARTS_V1 = Genotype(
normal=[('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 0), ('sep_conv_3x3', 1), ('skip_connect', 0),
('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 2)], normal_concat=[2, 3, 4, 5],
reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 0), ('max_pool_3x3', 0),
('skip_connect', 2), ('skip_connect', 2), ('avg_pool_3x3', 0)], reduce_concat=[2, 3, 4, 5])
DARTS_V2 = Genotype(
normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1),
('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2, 3, 4, 5],
reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0),
('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5])
DARTS = DARTS_V2
BATH = Genotype(
normal=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('sep_conv_5x5', 2), ('dil_conv_5x5', 0),
('max_pool_3x3', 2), ('sep_conv_3x3', 2), ('sep_conv_3x3', 0)], normal_concat=range(2, 6),
reduce=[('max_pool_3x3', 1), ('max_pool_3x3', 0), ('max_pool_3x3', 1), ('sep_conv_5x5', 2), ('skip_connect', 3),
('avg_pool_3x3', 2), ('sep_conv_3x3', 4), ('dil_conv_5x5', 1)], reduce_concat=range(2, 6))
BATH2 = Genotype(
normal=[('max_pool_3x3', 1), ('skip_connect', 0), ('skip_connect', 2), ('max_pool_3x3', 1), ('skip_connect', 1),
('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0)], normal_concat=range(2, 6),
reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 0), ('dil_conv_3x3', 1), ('skip_connect', 1),
('skip_connect', 0), ('dil_conv_5x5', 0), ('sep_conv_3x3', 4)], reduce_concat=range(2, 6))
| 34.700935
| 116
| 0.546458
| 543
| 3,713
| 3.3186
| 0.071823
| 0.170921
| 0.166482
| 0.091565
| 0.831299
| 0.772475
| 0.718091
| 0.54939
| 0.45283
| 0.398446
| 0
| 0.116984
| 0.235659
| 3,713
| 106
| 117
| 35.028302
| 0.51797
| 0
| 0
| 0.55102
| 0
| 0
| 0.395637
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.010204
| 0
| 0.010204
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
0a1ed95ecf3a94b0314f7b8f523edacf4c486e8a
| 275
|
py
|
Python
|
pyccel/ast/basic.py
|
toddrme2178/pyccel
|
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
|
[
"MIT"
] | null | null | null |
pyccel/ast/basic.py
|
toddrme2178/pyccel
|
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
|
[
"MIT"
] | null | null | null |
pyccel/ast/basic.py
|
toddrme2178/pyccel
|
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
|
[
"MIT"
] | null | null | null |
from sympy.core.basic import Basic as sp_Basic
class Basic(sp_Basic):
"""Basic class for Pyccel AST."""
_fst = None
def set_fst(self, fst):
"""Sets the redbaron fst."""
self._fst = fst
@property
def fst(self):
return self._fst
| 18.333333
| 46
| 0.6
| 39
| 275
| 4.076923
| 0.538462
| 0.132075
| 0.125786
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.287273
| 275
| 14
| 47
| 19.642857
| 0.811224
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.125
| 0.125
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 3
|
0a448d09286de882fe626777f47593a108a44caa
| 628
|
py
|
Python
|
test_app/models.py
|
alissonmuller/django-group-by
|
645c36ad2c3ab1f4691de6fcc04fed8b5d7ef78d
|
[
"MIT"
] | 25
|
2016-09-29T15:25:16.000Z
|
2021-09-19T14:20:58.000Z
|
test_app/models.py
|
alissonmuller/django-group-by
|
645c36ad2c3ab1f4691de6fcc04fed8b5d7ef78d
|
[
"MIT"
] | 22
|
2016-05-29T00:14:47.000Z
|
2019-06-08T13:24:21.000Z
|
test_app/models.py
|
alissonmuller/django-group-by
|
645c36ad2c3ab1f4691de6fcc04fed8b5d7ef78d
|
[
"MIT"
] | 2
|
2018-09-24T07:28:39.000Z
|
2019-02-12T14:09:18.000Z
|
from django.db import models
from .query import BookQuerySet
class Book(models.Model):
objects = BookQuerySet.as_manager()
title = models.CharField(max_length=50)
publication_date = models.DateTimeField()
author = models.ForeignKey('Author')
genres = models.ManyToManyField('Genre')
class Author(models.Model):
name = models.CharField(max_length=50)
nationality = models.ForeignKey('Nation', null=True)
class Genre(models.Model):
name = models.CharField(max_length=50)
class Nation(models.Model):
name = models.CharField(max_length=50)
demonym = models.CharField(max_length=50)
| 23.259259
| 56
| 0.732484
| 77
| 628
| 5.883117
| 0.415584
| 0.165563
| 0.198676
| 0.264901
| 0.386313
| 0.271523
| 0.271523
| 0.271523
| 0
| 0
| 0
| 0.018832
| 0.154459
| 628
| 26
| 57
| 24.153846
| 0.834275
| 0
| 0
| 0.1875
| 0
| 0
| 0.02707
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 3
|
0a515a3d5abf09db1a4745bebd807a1a69030c04
| 219
|
py
|
Python
|
Introductions/The Rust Programming Language/embed/bindings/embed.py
|
uqtimes/Rust-SampleCodes
|
f9d7a040d8198acd30bf3423e7c6cf52bc9c7b6e
|
[
"MIT"
] | null | null | null |
Introductions/The Rust Programming Language/embed/bindings/embed.py
|
uqtimes/Rust-SampleCodes
|
f9d7a040d8198acd30bf3423e7c6cf52bc9c7b6e
|
[
"MIT"
] | null | null | null |
Introductions/The Rust Programming Language/embed/bindings/embed.py
|
uqtimes/Rust-SampleCodes
|
f9d7a040d8198acd30bf3423e7c6cf52bc9c7b6e
|
[
"MIT"
] | null | null | null |
# $ python embed.py
from ctypes import cdll
lib = cdll.LoadLibrary("../target/release/libembed.dylib") #=> for Mac
#lib = cdll.LoadLibrary("../target/release/libembed.so") #=> for Linux
lib.process()
print("done!")
| 19.909091
| 70
| 0.689498
| 29
| 219
| 5.206897
| 0.689655
| 0.092715
| 0.238411
| 0.317881
| 0.516556
| 0.516556
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118721
| 219
| 10
| 71
| 21.9
| 0.782383
| 0.43379
| 0
| 0
| 0
| 0
| 0.310924
| 0.268908
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0.25
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
0a58933890bb698e85d3cfefe359ee1effd69d83
| 1,050
|
py
|
Python
|
models/node.py
|
AlonsoReyes/t-intersection-graph
|
68bab234cd6e334edcec27bfee3e019f08997945
|
[
"MIT"
] | null | null | null |
models/node.py
|
AlonsoReyes/t-intersection-graph
|
68bab234cd6e334edcec27bfee3e019f08997945
|
[
"MIT"
] | null | null | null |
models/node.py
|
AlonsoReyes/t-intersection-graph
|
68bab234cd6e334edcec27bfee3e019f08997945
|
[
"MIT"
] | null | null | null |
class Node(object):
def __init__(self, name, follow_list, intention, lane):
self.name = name
self.follow_list = follow_list
self.intention = intention
self.lane = lane
def __eq__(self, other):
if isinstance(other, Node):
if self.name == other.get_name() and self.follow_list == other.get_follow_list() \
and self.intention == other.get_intention() and self.lane == other.get_lane():
return True
return False
def get_name(self):
return self.name
def set_name(self, name):
self.name = name
def get_follow_list(self):
return self.follow_list
def set_follow_list(self, follow_list):
self.follow_list = follow_list
def get_intention(self):
return self.intention
def set_intention(self, intention):
self.intention = intention
def get_lane(self):
return self.lane
def set_lane(self, lane):
self.lane = lane
| 26.25
| 99
| 0.591429
| 129
| 1,050
| 4.573643
| 0.162791
| 0.186441
| 0.118644
| 0.067797
| 0.122034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.320952
| 1,050
| 39
| 100
| 26.923077
| 0.827489
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.357143
| false
| 0
| 0
| 0.142857
| 0.607143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 3
|
0a5f2c5e88f319fb43560833894661a1abbe9435
| 1,934
|
py
|
Python
|
pcat2py/class/20bdcef0-5cc5-11e4-af55-00155d01fe08.py
|
phnomcobra/PCAT2PY
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
[
"MIT"
] | null | null | null |
pcat2py/class/20bdcef0-5cc5-11e4-af55-00155d01fe08.py
|
phnomcobra/PCAT2PY
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
[
"MIT"
] | null | null | null |
pcat2py/class/20bdcef0-5cc5-11e4-af55-00155d01fe08.py
|
phnomcobra/PCAT2PY
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
################################################################################
# 20bdcef0-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# [email protected]
# [email protected]
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "20bdcef0-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = True
# Get Registry MultiSZ
multi_sz = cli.get_reg_multi_sz(r'HKLM:\SYSTEM\CurrentControlSet\control\SecurePipeServers\winreg\allowedExactPaths', 'Machine')
# Output Lines
self.output = [r'HKLM:\SYSTEM\CurrentControlSet\control\SecurePipeServers\winreg\allowedExactPaths', ('Machine=')] + multi_sz
# Recommended MultiSZ
rec_multi_sz = ("System\CurrentControlSet\Control\ProductOptions,System\CurrentControlSet\Control\Server Applications,Software\Microsoft\Windows NT\CurrentVersion")
for sz in multi_sz:
if sz.lower() not in rec_multi_sz.lower():
self.is_compliant = False
return self.is_compliant
def fix(self, cli):
cli.powershell(r"New-Item -path 'HKLM:\SYSTEM\CurrentControlSet\control\SecurePipeServers'")
cli.powershell(r"New-Item -path 'HKLM:\SYSTEM\CurrentControlSet\control\SecurePipeServers\winreg'")
cli.powershell(r"New-Item -path 'HKLM:\SYSTEM\CurrentControlSet\control\SecurePipeServers\winreg\allowedExactPaths'")
cli.powershell(r"Set-ItemProperty -path 'HKLM:\SYSTEM\CurrentControlSet\control\SecurePipeServers\winreg\allowedExactPaths' -name 'Machine' -Type MultiString -value System\CurrentControlSet\Control\ProductOptions,System\CurrentControlSet\Control\Server Applications,Software\Microsoft\Windows NT\CurrentVersion")
| 46.047619
| 320
| 0.682006
| 196
| 1,934
| 6.637755
| 0.397959
| 0.176787
| 0.230592
| 0.156802
| 0.635665
| 0.586472
| 0.586472
| 0.586472
| 0.513451
| 0.387394
| 0
| 0.027496
| 0.134953
| 1,934
| 41
| 321
| 47.170732
| 0.750149
| 0.122027
| 0
| 0.105263
| 0
| 0.105263
| 0.591088
| 0.484273
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
0a7052f7029ee061d74d603abefe9574ef7b3461
| 114
|
py
|
Python
|
DLA/__main__.py
|
StanczakDominik/DLA
|
bf63592a5ac96ffef639e7a0c80d7d52ff776322
|
[
"MIT"
] | null | null | null |
DLA/__main__.py
|
StanczakDominik/DLA
|
bf63592a5ac96ffef639e7a0c80d7d52ff776322
|
[
"MIT"
] | null | null | null |
DLA/__main__.py
|
StanczakDominik/DLA
|
bf63592a5ac96ffef639e7a0c80d7d52ff776322
|
[
"MIT"
] | null | null | null |
from DLA import main_single
d = main_single(1, gotosize=[1e4, 5e4])
d.plot_particles()
d.plot_mass_distribution()
| 22.8
| 39
| 0.780702
| 19
| 114
| 4.421053
| 0.736842
| 0.238095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048544
| 0.096491
| 114
| 4
| 40
| 28.5
| 0.76699
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
6a5f51cf2ae3a67fb99172b7bd4214f43d0d42bc
| 269
|
py
|
Python
|
python/ordenacao.py
|
valdirsjr/learning.data
|
a4b72dfd27f55f2f04120644b73232bf343f71e3
|
[
"MIT"
] | null | null | null |
python/ordenacao.py
|
valdirsjr/learning.data
|
a4b72dfd27f55f2f04120644b73232bf343f71e3
|
[
"MIT"
] | null | null | null |
python/ordenacao.py
|
valdirsjr/learning.data
|
a4b72dfd27f55f2f04120644b73232bf343f71e3
|
[
"MIT"
] | null | null | null |
numero1 = int(input("Digite o primeiro número: "))
numero2 = int(input("Digite o segundo número: "))
numero3 = int(input("Digite o terceiro número: "))
if (numero1 < numero2 and numero2 < numero3):
print("crescente")
else:
print("não está em ordem crescente")
| 38.428571
| 50
| 0.69145
| 36
| 269
| 5.166667
| 0.555556
| 0.129032
| 0.225806
| 0.241935
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 0.167286
| 269
| 7
| 51
| 38.428571
| 0.799107
| 0
| 0
| 0
| 0
| 0
| 0.418519
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
6a60999063f76386f01b79b85ecc655ec0929c57
| 25,232
|
py
|
Python
|
csld/phonon/head.py
|
jsyony37/csld
|
b0e6d5845d807174f24ca7b591bc164c608c99c8
|
[
"MIT"
] | null | null | null |
csld/phonon/head.py
|
jsyony37/csld
|
b0e6d5845d807174f24ca7b591bc164c608c99c8
|
[
"MIT"
] | null | null | null |
csld/phonon/head.py
|
jsyony37/csld
|
b0e6d5845d807174f24ca7b591bc164c608c99c8
|
[
"MIT"
] | null | null | null |
# to include all module here in order to cite
from numpy import *
from numpy.linalg import *
import string
import os
import scipy
import scipy.sparse
#import rwposcar
#import anaxdat
import math
#define touch file
def touch(file):#input string
if os.path.isfile(file):
os.system(str("rm"+" "+file))
os.system(str("touch"+" "+file))
else:
os.system(str("touch"+" "+file))
def mkdir(dir):
if os.path.isdir(dir):
os.system(str("rm"+" -r "+dir))
os.system(str("mkdir"+" "+dir))
else:
os.system(str("mkdir"+" "+dir))
if False:
mkdir("xixi/")
#define rm file
def rm(file):
if os.path.isfile(file):
os.system(str("rm"+" "+file))
else:
print("No file found, dont need to rm")
#define check file(1 exist; else0)
def check(file):
if os.path.isfile(file):
return int(1)
else:
return int(0)
#define check the file status (print the status)
def checkfile(file):
if os.path.isfile(file):
print(str(file)+" exists :)")
else:
print(str(file)+" not found :(")
#define readallline function
def readinline(file):
dataout=[]
if check(file):
fin=open(file,"r")
for line in fin:
dataout.append(line.split())#map(float,line.split()))
fin.close()
else:
print(str(file)+" not found :(")
return array(dataout)
#define write1dmat
def write1dmat(datain, file):
if check(file):
rm(file)
touch(file)
else:
touch(file)
fout=open(file, "w")
fout.writelines("\n".join(map(str,datain)))
fout.close()
#define one number to file
def writenumber(datain, file):
if check(file):
rm(file)
touch(file)
else:
touch(file)
fout=open(file,"w")
fout.writelines(str(datain))
fout.close()
#define write2dmat
def write2dmat(datain, file):
if check(file):
rm(file)
touch(file)
else:
touch(file)
fout=open(file, "w")
#cout line number
fout.writelines(str(len(datain))+"\n")
for i in datain:
fout.writelines(" ".join(map(str,i))+"\n")
fout.close()
#define write2dMTX
def write2dMTX(datain, file):
if check(file):
rm(file)
touch(file)
else:
touch(file)
fout=open(file, "w")
fout.writelines("%%MatrixMarket matrix coordinate real general\n")
fout.writelines("%Created by Wolfram Mathematica 9.0 : www.wolfram.com\n")
print("Transfering to sparse matrix----")
#get rid of small numbers
#for i in range(len(datain)):
# for j in range(len(datain[i])):
# datain[i][j]=round(datain[i][j],3)
BB=scipy.sparse.coo_matrix(datain)
print("Spare matrix obtained!")
# print BB.row
# print BB.col
# print BB.data
fout.writelines(str(len(datain))+" "+str(len(datain[0]))+" "+str(len(BB.data))+"\n")
for i in range(len(BB.data)):
fout.writelines(str(BB.row[i]+1)+" "+str(BB.col[i]+1)+" "+str(BB.data[i])+"\n")
#for i in range(len(datain)):
#for j in range(len(datain[0])):
#fout.writelines(str(i+1)+" "+str(j+1)+" "+str(datain[i][j])+"\n")
fout.close()
def read2dMTX(file):
if check(file):
counter=0
for line in open(file):
counter=counter+1
if counter <=2:
continue
if counter ==3:
inlist=list(map(int,line.split()))
nrow=inlist[0]
ncol=inlist[1]
dataout=array([[0.0]*ncol]*nrow)
continue
if counter >=4:
tmp=line.split()
#print str(tmp)+", "+str(tmp[2])
dataout[int(tmp[0])-1][int(tmp[1])-1]=float(tmp[2])
#print "\n"
return dataout.tolist()
else:
print(str(file)+" not found :(")
#test
if False:
Amat=[[0,1],[2,0],[0,0],[0,16]]
print(Amat)
write2dMTX(Amat, "test.mtx")
print(read2dMTX("test.mtx"))
#define read1dmat
#read float
def read1dmat(file):
mat=[]
if check(file):
for line in open(file):
mat.append(float(line))
return mat
else:
print(str(file)+" not found :(")
if False:
haha=[1,2,3,4,5]
write1dmat(haha, "haha")
xixi=read1dmat("haha")
print(xixi)
#define read2dmat (this is a relatively fast way: iter or chunck read)
def read2dmat(file,icomplex=False):
mat=[]
if check(file):
print("Read matrix start")
for line in open(file):
if not icomplex:
mat.append(list(map(float,line.split())))
else:
mat.append(list(map(complex,line.split())))
print("Read matrix end")
#delete line counter
del mat[0]
return mat
else:
print(str(file)+" not found :(")
#test
#mat=read2dmat("C-isoo.mat")
#print len(mat)
#print len(mat[0])
def clusstr(clus):
dataout=""
for item in clus:
dataout=dataout+str(item[0])+" "+str(item[1])+" "+str(item[2])+"\n"
return dataout
def lptstr(lpt):
dataout=""
for item in lpt:
dataout=dataout+str(item[0][0])+" "+str(item[0][1])+" "+str(item[0][2])+" "+str(item[1])+"\n"
return dataout
#define writeorb(orb)
def writeorb(orbset, file):
if check(file):
rm(file)
touch(file)
else:
touch(file)
fout=open(file, "w")
fout.write(str(len(orbset))+"\n\n")
for orb in orbset:
fout.write(str(len(orb))+"\n\n")
for item in orb:
npt=len(item[0])
fout.write(str(npt)+"\n")
fout.write(clusstr(item[0]))
fout.write(str(item[1])+"\n")
fout.write(str(item[2])+"\n")
fout.write(lptstr(item[3]))
fout.write("\n")
fout.close()
def writeclus(clus, file):
if check(file):
rm(file)
touch(file)
else:
touch(file)
fout=open(file,"w")
fout.write(str(len(clus))+"\n\n")
for item in clus:
fout.write(str(len(item))+"\n")
fout.write(clusstr(item))
fout.write("\n")
fout.close()
def writeSCinfo(SCinfo, file):
if check(file):
rm(file)
touch(file)
else:
touch(file)
fout=open(file, "w")
tmp=[SCinfo['SC'], SCinfo['invSC'], SCinfo['SCref'], SCinfo['SCpos'], SCinfo['SCmat'], SCinfo['invSCmat'], SCinfo['order']]
lentmp=[len(i) for i in tmp]
fout.write(" ".join(map(str,lentmp))+"\n")
for i in tmp:
if i==SCinfo['order']:
fout.write("\n".join(map(str,i))+"\n")
else:
for j in i:
fout.write(" ".join(map(str,j))+"\n")
fout.close()
def readSCinfo(file):
SCinfo={}
if check(file):
fin=open(file, "r")
lenlist=list(map(int,(fin.readline()).split()))
# tmp=[SCinfo['SC'], SCinfo['invSC'], SCinfo['SCref'], SCinfo['SCpos'], SCinfo['SCmat'], SCinfo['invSCmat'], SCinfo['order']]
tmp=[]
for i in range(7):
tmp1=[]
for j in range(lenlist[i]):
if i in [0,1,3,4,5]:
tmp1.append(list(map(float,(fin.readline()).split())))
elif i in [2]:
tmp1.append(list(map(int,(fin.readline()).split())))
else:
tmp1.append(list(map(int,(fin.readline()).split()))[0])
tmp.append(tmp1)
SCinfo['SC']=tmp[0]
SCinfo['invSC']=tmp[1]
SCinfo['SCref']=tmp[2]
SCinfo['SCpos']=tmp[3]
SCinfo['SCmat']=tmp[4]
SCinfo['invSCmat']=tmp[5]
SCinfo['order']=tmp[6]
else:
print(str(file)+" not found :(")
return SCinfo
#test
if False:
SCinfo={'invSCmat': [[-0.25, 0.25, 0.25], [0.25, -0.25, 0.25], [0.25, 0.25, -0.25]], 'SCmat': [[0.0, 2.0, 2.0], [2.0, 0.0, 2.0], [2.0, 2.0, 0.0]], 'SCref': [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0], [1, 1, 1], [1, 1, 2], [1, 2, 1], [1, 2, 2], [2, 1, 1], [2, 1, 2], [2, 2, 1], [2, 2, 2], [2, 2, 3], [2, 3, 2], [3, 2, 2], [3, 3, 3]], 'SCpos': [[0.75, 0.25, 0.5], [0.25, 0.75, 0.5], [0.5, 0.25, 0.75], [0.5, 0.75, 0.25], [0.25, 0.5, 0.75], [0.75, 0.5, 0.25], [0.785, 0.785, 0.0], [0.215, 0.215, 0.0], [0.0, 0.215, 0.215], [0.0, 0.785, 0.785], [0.785, 0.0, 0.785], [0.215, 0.0, 0.215], [0.5239, 0.0, 0.7543], [0.7543, 0.0, 0.5239], [0.4761, 0.2304, 0.4761], [0.2457, 0.7696, 0.2457], [0.5239, 0.7543, 0.0], [0.7543, 0.5239, 0.0], [0.2457, 0.2457, 0.7696], [0.4761, 0.4761, 0.2304], [0.7696, 0.2457, 0.2457], [0.2304, 0.4761, 0.4761], [0.0, 0.5239, 0.7543], [0.0, 0.7543, 0.5239], [0.0, 0.0, 0.0], [0.4636, 0.0, 0.0], [0.0, 0.0, 0.4636], [0.5364, 0.5364, 0.5364], [0.0, 0.4636, 0.0], [0.75, 1.25, 1.5], [0.25, 1.75, 1.5], [0.5, 1.25, 1.75], [0.5, 1.75, 1.25], [0.25, 1.5, 1.75], [0.75, 1.5, 1.25], [0.785, 1.785, 1.0], [0.215, 1.215, 1.0], [0.0, 1.215, 1.215], [0.0, 1.785, 1.785], [0.785, 1.0, 1.785], [0.215, 1.0, 1.215], [0.5239, 1.0, 1.7543], [0.7543, 1.0, 1.5239], [0.4761, 1.2304, 1.4761], [0.2457, 1.7696, 1.2457], [0.5239, 1.7543, 1.0], [0.7543, 1.5239, 1.0], [0.2457, 1.2457, 1.7696], [0.4761, 1.4761, 1.2304], [0.7696, 1.2457, 1.2457], [0.2304, 1.4761, 1.4761], [0.0, 1.5239, 1.7543], [0.0, 1.7543, 1.5239], [0.0, 1.0, 1.0], [0.4636, 1.0, 1.0], [0.0, 1.0, 1.4636], [0.5364, 1.5364, 1.5364], [0.0, 1.4636, 1.0], [1.75, 0.25, 1.5], [1.25, 0.75, 1.5], [1.5, 0.25, 1.75], [1.5, 0.75, 1.25], [1.25, 0.5, 1.75], [1.75, 0.5, 1.25], [1.785, 0.785, 1.0], [1.215, 0.215, 1.0], [1.0, 0.215, 1.215], [1.0, 0.785, 1.785], [1.785, 0.0, 1.785], [1.215, 0.0, 1.215], [1.5239, 0.0, 1.7543], [1.7543, 0.0, 1.5239], [1.4761, 0.2304, 1.4761], [1.2457, 0.7696, 1.2457], [1.5239, 0.7543, 1.0], [1.7543, 0.5239, 1.0], [1.2457, 0.2457, 1.7696], [1.4761, 0.4761, 1.2304], [1.7696, 0.2457, 1.2457], [1.2304, 0.4761, 1.4761], [1.0, 0.5239, 1.7543], [1.0, 0.7543, 1.5239], [1.0, 0.0, 1.0], [1.4636, 0.0, 1.0], [1.0, 0.0, 1.4636], [1.5364, 0.5364, 1.5364], [1.0, 0.4636, 1.0], [1.75, 1.25, 0.5], [1.25, 1.75, 0.5], [1.5, 1.25, 0.75], [1.5, 1.75, 0.25], [1.25, 1.5, 0.75], [1.75, 1.5, 0.25], [1.785, 1.785, 0.0], [1.215, 1.215, 0.0], [1.0, 1.215, 0.215], [1.0, 1.785, 0.785], [1.785, 1.0, 0.785], [1.215, 1.0, 0.215], [1.5239, 1.0, 0.7543], [1.7543, 1.0, 0.5239], [1.4761, 1.2304, 0.4761], [1.2457, 1.7696, 0.2457], [1.5239, 1.7543, 0.0], [1.7543, 1.5239, 0.0], [1.2457, 1.2457, 0.7696], [1.4761, 1.4761, 0.2304], [1.7696, 1.2457, 0.2457], [1.2304, 1.4761, 0.4761], [1.0, 1.5239, 0.7543], [1.0, 1.7543, 0.5239], [1.0, 1.0, 0.0], [1.4636, 1.0, 0.0], [1.0, 1.0, 0.4636], [1.5364, 1.5364, 0.5364], [1.0, 1.4636, 0.0], [1.75, 1.25, 1.5], [1.25, 1.75, 1.5], [1.5, 1.25, 1.75], [1.5, 1.75, 1.25], [1.25, 1.5, 1.75], [1.75, 1.5, 1.25], [1.785, 1.785, 1.0], [1.215, 1.215, 1.0], [1.0, 1.215, 1.215], [1.0, 1.785, 1.785], [1.785, 1.0, 1.785], [1.215, 1.0, 1.215], [1.5239, 1.0, 1.7543], [1.7543, 1.0, 1.5239], [1.4761, 1.2304, 1.4761], [1.2457, 1.7696, 1.2457], [1.5239, 1.7543, 1.0], [1.7543, 1.5239, 1.0], [1.2457, 1.2457, 1.7696], [1.4761, 1.4761, 1.2304], [1.7696, 1.2457, 1.2457], [1.2304, 1.4761, 1.4761], [1.0, 1.5239, 1.7543], [1.0, 1.7543, 1.5239], [1.0, 1.0, 1.0], [1.4636, 1.0, 1.0], [1.0, 1.0, 1.4636], [1.5364, 1.5364, 1.5364], [1.0, 1.4636, 1.0], [1.75, 1.25, 2.5], [1.25, 1.75, 2.5], [1.5, 1.25, 2.75], [1.5, 1.75, 2.25], [1.25, 1.5, 2.75], [1.75, 1.5, 2.25], [1.785, 1.785, 2.0], [1.215, 1.215, 2.0], [1.0, 1.215, 2.215], [1.0, 1.785, 2.785], [1.785, 1.0, 2.785], [1.215, 1.0, 2.215], [1.5239, 1.0, 2.7543], [1.7543, 1.0, 2.5239], [1.4761, 1.2304, 2.4761], [1.2457, 1.7696, 2.2457], [1.5239, 1.7543, 2.0], [1.7543, 1.5239, 2.0], [1.2457, 1.2457, 2.7696], [1.4761, 1.4761, 2.2304], [1.7696, 1.2457, 2.2457], [1.2304, 1.4761, 2.4761], [1.0, 1.5239, 2.7543], [1.0, 1.7543, 2.5239], [1.0, 1.0, 2.0], [1.4636, 1.0, 2.0], [1.0, 1.0, 2.4636], [1.5364, 1.5364, 2.5364], [1.0, 1.4636, 2.0], [1.75, 2.25, 1.5], [1.25, 2.75, 1.5], [1.5, 2.25, 1.75], [1.5, 2.75, 1.25], [1.25, 2.5, 1.75], [1.75, 2.5, 1.25], [1.785, 2.785, 1.0], [1.215, 2.215, 1.0], [1.0, 2.215, 1.215], [1.0, 2.785, 1.785], [1.785, 2.0, 1.785], [1.215, 2.0, 1.215], [1.5239, 2.0, 1.7543], [1.7543, 2.0, 1.5239], [1.4761, 2.2304, 1.4761], [1.2457, 2.7696, 1.2457], [1.5239, 2.7543, 1.0], [1.7543, 2.5239, 1.0], [1.2457, 2.2457, 1.7696], [1.4761, 2.4761, 1.2304], [1.7696, 2.2457, 1.2457], [1.2304, 2.4761, 1.4761], [1.0, 2.5239, 1.7543], [1.0, 2.7543, 1.5239], [1.0, 2.0, 1.0], [1.4636, 2.0, 1.0], [1.0, 2.0, 1.4636], [1.5364, 2.5364, 1.5364], [1.0, 2.4636, 1.0], [1.75, 2.25, 2.5], [1.25, 2.75, 2.5], [1.5, 2.25, 2.75], [1.5, 2.75, 2.25], [1.25, 2.5, 2.75], [1.75, 2.5, 2.25], [1.785, 2.785, 2.0], [1.215, 2.215, 2.0], [1.0, 2.215, 2.215], [1.0, 2.785, 2.785], [1.785, 2.0, 2.785], [1.215, 2.0, 2.215], [1.5239, 2.0, 2.7543], [1.7543, 2.0, 2.5239], [1.4761, 2.2304, 2.4761], [1.2457, 2.7696, 2.2457], [1.5239, 2.7543, 2.0], [1.7543, 2.5239, 2.0], [1.2457, 2.2457, 2.7696], [1.4761, 2.4761, 2.2304], [1.7696, 2.2457, 2.2457], [1.2304, 2.4761, 2.4761], [1.0, 2.5239, 2.7543], [1.0, 2.7543, 2.5239], [1.0, 2.0, 2.0], [1.4636, 2.0, 2.0], [1.0, 2.0, 2.4636], [1.5364, 2.5364, 2.5364], [1.0, 2.4636, 2.0], [2.75, 1.25, 1.5], [2.25, 1.75, 1.5], [2.5, 1.25, 1.75], [2.5, 1.75, 1.25], [2.25, 1.5, 1.75], [2.75, 1.5, 1.25], [2.785, 1.785, 1.0], [2.215, 1.215, 1.0], [2.0, 1.215, 1.215], [2.0, 1.785, 1.785], [2.785, 1.0, 1.785], [2.215, 1.0, 1.215], [2.5239, 1.0, 1.7543], [2.7543, 1.0, 1.5239], [2.4761, 1.2304, 1.4761], [2.2457, 1.7696, 1.2457], [2.5239, 1.7543, 1.0], [2.7543, 1.5239, 1.0], [2.2457, 1.2457, 1.7696], [2.4761, 1.4761, 1.2304], [2.7696, 1.2457, 1.2457], [2.2304, 1.4761, 1.4761], [2.0, 1.5239, 1.7543], [2.0, 1.7543, 1.5239], [2.0, 1.0, 1.0], [2.4636, 1.0, 1.0], [2.0, 1.0, 1.4636], [2.5364, 1.5364, 1.5364], [2.0, 1.4636, 1.0], [2.75, 1.25, 2.5], [2.25, 1.75, 2.5], [2.5, 1.25, 2.75], [2.5, 1.75, 2.25], [2.25, 1.5, 2.75], [2.75, 1.5, 2.25], [2.785, 1.785, 2.0], [2.215, 1.215, 2.0], [2.0, 1.215, 2.215], [2.0, 1.785, 2.785], [2.785, 1.0, 2.785], [2.215, 1.0, 2.215], [2.5239, 1.0, 2.7543], [2.7543, 1.0, 2.5239], [2.4761, 1.2304, 2.4761], [2.2457, 1.7696, 2.2457], [2.5239, 1.7543, 2.0], [2.7543, 1.5239, 2.0], [2.2457, 1.2457, 2.7696], [2.4761, 1.4761, 2.2304], [2.7696, 1.2457, 2.2457], [2.2304, 1.4761, 2.4761], [2.0, 1.5239, 2.7543], [2.0, 1.7543, 2.5239], [2.0, 1.0, 2.0], [2.4636, 1.0, 2.0], [2.0, 1.0, 2.4636], [2.5364, 1.5364, 2.5364], [2.0, 1.4636, 2.0], [2.75, 2.25, 1.5], [2.25, 2.75, 1.5], [2.5, 2.25, 1.75], [2.5, 2.75, 1.25], [2.25, 2.5, 1.75], [2.75, 2.5, 1.25], [2.785, 2.785, 1.0], [2.215, 2.215, 1.0], [2.0, 2.215, 1.215], [2.0, 2.785, 1.785], [2.785, 2.0, 1.785], [2.215, 2.0, 1.215], [2.5239, 2.0, 1.7543], [2.7543, 2.0, 1.5239], [2.4761, 2.2304, 1.4761], [2.2457, 2.7696, 1.2457], [2.5239, 2.7543, 1.0], [2.7543, 2.5239, 1.0], [2.2457, 2.2457, 1.7696], [2.4761, 2.4761, 1.2304], [2.7696, 2.2457, 1.2457], [2.2304, 2.4761, 1.4761], [2.0, 2.5239, 1.7543], [2.0, 2.7543, 1.5239], [2.0, 2.0, 1.0], [2.4636, 2.0, 1.0], [2.0, 2.0, 1.4636], [2.5364, 2.5364, 1.5364], [2.0, 2.4636, 1.0], [2.75, 2.25, 2.5], [2.25, 2.75, 2.5], [2.5, 2.25, 2.75], [2.5, 2.75, 2.25], [2.25, 2.5, 2.75], [2.75, 2.5, 2.25], [2.785, 2.785, 2.0], [2.215, 2.215, 2.0], [2.0, 2.215, 2.215], [2.0, 2.785, 2.785], [2.785, 2.0, 2.785], [2.215, 2.0, 2.215], [2.5239, 2.0, 2.7543], [2.7543, 2.0, 2.5239], [2.4761, 2.2304, 2.4761], [2.2457, 2.7696, 2.2457], [2.5239, 2.7543, 2.0], [2.7543, 2.5239, 2.0], [2.2457, 2.2457, 2.7696], [2.4761, 2.4761, 2.2304], [2.7696, 2.2457, 2.2457], [2.2304, 2.4761, 2.4761], [2.0, 2.5239, 2.7543], [2.0, 2.7543, 2.5239], [2.0, 2.0, 2.0], [2.4636, 2.0, 2.0], [2.0, 2.0, 2.4636], [2.5364, 2.5364, 2.5364], [2.0, 2.4636, 2.0], [2.75, 2.25, 3.5], [2.25, 2.75, 3.5], [2.5, 2.25, 3.75], [2.5, 2.75, 3.25], [2.25, 2.5, 3.75], [2.75, 2.5, 3.25], [2.785, 2.785, 3.0], [2.215, 2.215, 3.0], [2.0, 2.215, 3.215], [2.0, 2.785, 3.785], [2.785, 2.0, 3.785], [2.215, 2.0, 3.215], [2.5239, 2.0, 3.7543], [2.7543, 2.0, 3.5239], [2.4761, 2.2304, 3.4761], [2.2457, 2.7696, 3.2457], [2.5239, 2.7543, 3.0], [2.7543, 2.5239, 3.0], [2.2457, 2.2457, 3.7696], [2.4761, 2.4761, 3.2304], [2.7696, 2.2457, 3.2457], [2.2304, 2.4761, 3.4761], [2.0, 2.5239, 3.7543], [2.0, 2.7543, 3.5239], [2.0, 2.0, 3.0], [2.4636, 2.0, 3.0], [2.0, 2.0, 3.4636], [2.5364, 2.5364, 3.5364], [2.0, 2.4636, 3.0], [2.75, 3.25, 2.5], [2.25, 3.75, 2.5], [2.5, 3.25, 2.75], [2.5, 3.75, 2.25], [2.25, 3.5, 2.75], [2.75, 3.5, 2.25], [2.785, 3.785, 2.0], [2.215, 3.215, 2.0], [2.0, 3.215, 2.215], [2.0, 3.785, 2.785], [2.785, 3.0, 2.785], [2.215, 3.0, 2.215], [2.5239, 3.0, 2.7543], [2.7543, 3.0, 2.5239], [2.4761, 3.2304, 2.4761], [2.2457, 3.7696, 2.2457], [2.5239, 3.7543, 2.0], [2.7543, 3.5239, 2.0], [2.2457, 3.2457, 2.7696], [2.4761, 3.4761, 2.2304], [2.7696, 3.2457, 2.2457], [2.2304, 3.4761, 2.4761], [2.0, 3.5239, 2.7543], [2.0, 3.7543, 2.5239], [2.0, 3.0, 2.0], [2.4636, 3.0, 2.0], [2.0, 3.0, 2.4636], [2.5364, 3.5364, 2.5364], [2.0, 3.4636, 2.0], [3.75, 2.25, 2.5], [3.25, 2.75, 2.5], [3.5, 2.25, 2.75], [3.5, 2.75, 2.25], [3.25, 2.5, 2.75], [3.75, 2.5, 2.25], [3.785, 2.785, 2.0], [3.215, 2.215, 2.0], [3.0, 2.215, 2.215], [3.0, 2.785, 2.785], [3.785, 2.0, 2.785], [3.215, 2.0, 2.215], [3.5239, 2.0, 2.7543], [3.7543, 2.0, 2.5239], [3.4761, 2.2304, 2.4761], [3.2457, 2.7696, 2.2457], [3.5239, 2.7543, 2.0], [3.7543, 2.5239, 2.0], [3.2457, 2.2457, 2.7696], [3.4761, 2.4761, 2.2304], [3.7696, 2.2457, 2.2457], [3.2304, 2.4761, 2.4761], [3.0, 2.5239, 2.7543], [3.0, 2.7543, 2.5239], [3.0, 2.0, 2.0], [3.4636, 2.0, 2.0], [3.0, 2.0, 2.4636], [3.5364, 2.5364, 2.5364], [3.0, 2.4636, 2.0], [3.75, 3.25, 3.5], [3.25, 3.75, 3.5], [3.5, 3.25, 3.75], [3.5, 3.75, 3.25], [3.25, 3.5, 3.75], [3.75, 3.5, 3.25], [3.785, 3.785, 3.0], [3.215, 3.215, 3.0], [3.0, 3.215, 3.215], [3.0, 3.785, 3.785], [3.785, 3.0, 3.785], [3.215, 3.0, 3.215], [3.5239, 3.0, 3.7543], [3.7543, 3.0, 3.5239], [3.4761, 3.2304, 3.4761], [3.2457, 3.7696, 3.2457], [3.5239, 3.7543, 3.0], [3.7543, 3.5239, 3.0], [3.2457, 3.2457, 3.7696], [3.4761, 3.4761, 3.2304], [3.7696, 3.2457, 3.2457], [3.2304, 3.4761, 3.4761], [3.0, 3.5239, 3.7543], [3.0, 3.7543, 3.5239], [3.0, 3.0, 3.0], [3.4636, 3.0, 3.0], [3.0, 3.0, 3.4636], [3.5364, 3.5364, 3.5364], [3.0, 3.4636, 3.0]], 'SC': [[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]], 'order': [81, 33, 1, 65, 49, 17, 137, 129, 97, 105, 121, 113, 274, 285, 257, 363, 219, 213, 298, 193, 333, 225, 250, 243, 385, 461, 442, 401, 451, 85, 37, 5, 69, 53, 21, 141, 133, 101, 109, 125, 117, 278, 281, 261, 367, 223, 209, 302, 197, 329, 229, 254, 247, 389, 457, 446, 405, 455, 83, 35, 3, 67, 51, 19, 139, 131, 99, 107, 123, 115, 276, 287, 259, 361, 217, 215, 300, 195, 335, 227, 252, 241, 387, 463, 444, 403, 449, 82, 34, 2, 66, 50, 18, 138, 130, 98, 106, 122, 114, 273, 286, 258, 364, 220, 214, 297, 194, 334, 226, 249, 244, 386, 462, 441, 402, 452, 43, 93, 75, 10, 29, 58, 186, 177, 145, 157, 171, 161, 371, 379, 353, 265, 314, 306, 201, 289, 233, 321, 349, 341, 393, 425, 409, 433, 417, 87, 39, 7, 71, 55, 23, 143, 135, 103, 111, 127, 119, 280, 283, 263, 365, 221, 211, 304, 199, 331, 231, 256, 245, 391, 459, 448, 407, 453, 86, 38, 6, 70, 54, 22, 142, 134, 102, 110, 126, 118, 277, 282, 262, 368, 224, 210, 301, 198, 330, 230, 253, 248, 390, 458, 445, 406, 456, 47, 89, 79, 14, 25, 62, 190, 181, 149, 153, 175, 165, 375, 383, 357, 269, 318, 310, 205, 293, 237, 325, 345, 337, 397, 429, 413, 437, 421, 84, 36, 4, 68, 52, 20, 140, 132, 100, 108, 124, 116, 275, 288, 260, 362, 218, 216, 299, 196, 336, 228, 251, 242, 388, 464, 443, 404, 450, 41, 95, 73, 12, 31, 60, 188, 179, 147, 159, 169, 163, 369, 377, 355, 267, 316, 308, 203, 291, 235, 323, 351, 343, 395, 427, 411, 435, 419, 44, 94, 76, 9, 30, 57, 185, 178, 146, 158, 172, 162, 372, 380, 354, 266, 313, 305, 202, 290, 234, 322, 350, 342, 394, 426, 410, 434, 418, 88, 40, 8, 72, 56, 24, 144, 136, 104, 112, 128, 120, 279, 284, 264, 366, 222, 212, 303, 200, 332, 232, 255, 246, 392, 460, 447, 408, 454, 45, 91, 77, 16, 27, 64, 192, 183, 151, 155, 173, 167, 373, 381, 359, 271, 320, 312, 207, 295, 239, 327, 347, 339, 399, 431, 415, 439, 423, 48, 90, 80, 13, 26, 61, 189, 182, 150, 154, 176, 166, 376, 384, 358, 270, 317, 309, 206, 294, 238, 326, 346, 338, 398, 430, 414, 438, 422, 42, 96, 74, 11, 32, 59, 187, 180, 148, 160, 170, 164, 370, 378, 356, 268, 315, 307, 204, 292, 236, 324, 352, 344, 396, 428, 412, 436, 420, 46, 92, 78, 15, 28, 63, 191, 184, 152, 156, 174, 168, 374, 382, 360, 272, 319, 311, 208, 296, 240, 328, 348, 340, 400, 432, 416, 440, 424], 'invSC': [[0.5, 0.0, 0.0], [0.0, 0.5, 0.0], [0.0, 0.0, 0.5]]}
writeSCinfo(SCinfo, "SCinfo")
haha=readSCinfo("SCinfo")
print(haha['SC'])
print(haha['invSC'])
print(haha['SCref'])
print(haha['SCpos'])
print(haha['SCmat'])
print(haha['invSCmat'])
print(haha['order'])
def readclus(file):
if check(file):
fin=open(file, "r")
nclus=int(fin.readline())
clus=[]
for i in range(nclus):
item=[]
fin.readline()
npt=int(fin.readline())
for j in range(npt):
item.append(list(map(float, fin.readline().split())))
clus.append(item)
return clus
else:
print(str(file)+" not found :(")
#writeclus(clus,"uniqueC")
#print "\n".join(map(str, readclus("uniqueC")))
def readorb(file):
if check(file):
orbset=[]
fin=open(file, "r")
Norb=int(fin.readline())
for i in range(Norb):
orb=[]
fin.readline()
nitem=int(fin.readline())
fin.readline()
for j in range(nitem):
item=[]
npt=int(fin.readline())
clus=[]
lpt=[]
for k in range(npt):
line=fin.readline()
clus.append(list(map(float,line.split())))
item.append(clus)
item.append(int(fin.readline()))
item.append(int(fin.readline()))
for k in range(npt):
line=fin.readline()
tmp=list(map(float,line.split()))
tmp=list(map(int, tmp))
lpt.append([[tmp[0],tmp[1],tmp[2]],tmp[3]])
item.append(lpt)
orb.append(item)
orbset.append(orb)
fin.close()
return orbset
else:
print(str(file)+" not found :(")
#test
if False:
orbset=[[[[[0.75, 0.25, 0.5]], 1, 1, [[[0.0, 0.0, 0.0], 1]]], [[[0.75, 0.5, 0.25]], 1, 2, [[[0.0, 0.0, 0.0], 6]]], [[[0.5, 0.25, -0.25]], 1, 3, [[[0.0, 0.0, -1.0], 3]]], [[[0.25, -0.25, -0.5]], 1, 4, [[[0.0, -1.0, -1.0], 2]]], [[[0.5, -0.25, 0.25]], 1, 5, [[[0.0, -1.0, 0.0], 4]]], [[[0.25, -0.5, -0.25]], 1, 6, [[[0.0, -1.0, -1.0], 5]]]],[[[[0.7696, 0.2457, 0.2457], [0.0, -0.215, -0.215]], 42, 1, [[[0.0, 0.0, 0.0], 21], [[0.0, -1.0, -1.0], 10]]], [[[0.5238999999999999, 0.0, -0.2457], [0.215, 0.0, 0.215]], 42, 3, [[[-0.0, 0.0, -1.0], 13], [[0.0, 0.0, 0.0], 12]]], [[[0.5238999999999999, -0.2457, 0.0], [0.215, 0.215, 0.0]], 42, 5, [[[-0.0, -1.0, 0.0], 17], [[0.0, 0.0, 0.0], 8]]], [[[-0.2457, 0.0, 0.5238999999999999], [0.215, 0.0, 0.215]], 42, 7, [[[-1.0, 0.0, -0.0], 14], [[0.0, 0.0, 0.0], 12]]], [[[0.2457, 0.2457, 0.7696], [-0.215, -0.215, 0.0]], 42, 9, [[[0.0, 0.0, 0.0], 19], [[-1.0, -1.0, 0.0], 7]]], [[[0.0, -0.2457, 0.5238999999999999], [0.0, 0.215, 0.215]], 42, 11, [[[0.0, -1.0, -0.0], 24], [[0.0, 0.0, 0.0], 9]]], [[[-0.7696, -0.5238999999999999, -0.5238999999999999], [0.0, -0.215, -0.215]], 42, 13, [[[-1.0, -1.0, -1.0], 22], [[0.0, -1.0, -1.0], 10]]], [[[-0.5238999999999999, -0.5238999999999999, -0.7696], [-0.215, -0.215, 0.0]], 42, 15, [[[-1.0, -1.0, -1.0], 20], [[-1.0, -1.0, 0.0], 7]]], [[[-0.5238999999999999, -0.7696, -0.5238999999999999], [-0.215, 0.0, -0.215]], 42, 17, [[[-1.0, -1.0, -1.0], 15], [[-1.0, 0.0, -1.0], 11]]], [[[-0.2457, 0.5238999999999999, 0.0], [0.215, 0.215, 0.0]], 42, 19, [[[-1.0, -0.0, 0.0], 18], [[0.0, 0.0, 0.0], 8]]], [[[0.2457, 0.7696, 0.2457], [-0.215, 0.0, -0.215]], 42, 21, [[[0.0, 0.0, 0.0], 16], [[-1.0, 0.0, -1.0], 11]]], [[[0.0, 0.5238999999999999, -0.2457], [0.0, 0.215, 0.215]], 42, 23, [[[0.0, -0.0, -1.0], 23], [[0.0, 0.0, 0.0], 9]]]]]
print("\n".join(map(str,orbset)))
writeorb(orbset,"test-orb")
print("\n")
print("\n".join(map(str,readorb("test-orb"))))
#def read fit.ou
def readfit(file):
if check(file):
counter=0
readflag=False
for line in open(file):
counter=counter+1
if counter==1:
nstruc=list(map(int, line.split()))[1]
fitlist=[0.0]*nstruc
if len(line.split())>=1 and (line.split())[0]=="found":
readflag=True
continue
if readflag:
index=int((line.split())[0])
resl=float((line.split())[1])
fitlist[index-1]=resl
print("Fit.our read successfully and length: "+str(len(fitlist)))
return fitlist
else:
print(str(file)+" not found :(")
#test:
if False:
print(readfit("fit.out-mu1"))
| 65.537662
| 12,884
| 0.507411
| 5,174
| 25,232
| 2.474295
| 0.128334
| 0.038432
| 0.03187
| 0.021872
| 0.645837
| 0.383065
| 0.345649
| 0.280191
| 0.259959
| 0.153648
| 0
| 0.372021
| 0.214965
| 25,232
| 384
| 12,885
| 65.708333
| 0.274283
| 0.044031
| 0
| 0.452229
| 0
| 0
| 0.029573
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070064
| false
| 0
| 0.022293
| 0
| 0.130573
| 0.095541
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
6a7d44f1e562967fd6fedbdfc2867ad65df6f217
| 2,163
|
py
|
Python
|
yekpay/migrations/0014_auto_20181120_1453.py
|
maryam-afzp/django-yekpay
|
f7b9d7914035ea4f27238eba9e0c70227cc65046
|
[
"MIT"
] | 3
|
2020-05-17T18:33:22.000Z
|
2021-12-06T08:31:42.000Z
|
yekpay/migrations/0014_auto_20181120_1453.py
|
Glyphack/django-yekpay
|
8c4a44853207be4ff0b1711c0524fb0201859b19
|
[
"MIT"
] | null | null | null |
yekpay/migrations/0014_auto_20181120_1453.py
|
Glyphack/django-yekpay
|
8c4a44853207be4ff0b1711c0524fb0201859b19
|
[
"MIT"
] | 4
|
2019-11-14T14:16:49.000Z
|
2021-12-06T08:31:44.000Z
|
# Generated by Django 2.0.9 on 2018-11-20 11:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('yekpay', '0013_auto_20181030_1911'),
]
operations = [
migrations.RenameField(
model_name='transaction',
old_name='authorityStart',
new_name='authority_start',
),
migrations.RenameField(
model_name='transaction',
old_name='authorityVerify',
new_name='authority_verify',
),
migrations.RenameField(
model_name='transaction',
old_name='failureReason',
new_name='failure_reason',
),
migrations.RenameField(
model_name='transaction',
old_name='firstName',
new_name='first_name',
),
migrations.RenameField(
model_name='transaction',
old_name='fromCurrencyCode',
new_name='from_currency_code',
),
migrations.RenameField(
model_name='transaction',
old_name='lastName',
new_name='last_name',
),
migrations.RenameField(
model_name='transaction',
old_name='orderNumber',
new_name='order_number',
),
migrations.RenameField(
model_name='transaction',
old_name='postalCode',
new_name='postal_code',
),
migrations.RenameField(
model_name='transaction',
old_name='toCurrencyCode',
new_name='to_currency_code',
),
migrations.AddField(
model_name='transaction',
name='simulation',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='transaction',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| 30.041667
| 121
| 0.57374
| 192
| 2,163
| 6.208333
| 0.369792
| 0.083054
| 0.184564
| 0.22651
| 0.446309
| 0.446309
| 0.375839
| 0.174497
| 0
| 0
| 0
| 0.02116
| 0.3227
| 2,163
| 71
| 122
| 30.464789
| 0.792491
| 0.020804
| 0
| 0.507692
| 1
| 0
| 0.186673
| 0.01087
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.046154
| 0
| 0.092308
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
6a8199a221f44d9fef4df3ccc6d623b0243a377c
| 1,058
|
py
|
Python
|
tests/dummies.py
|
arvindmuralie77/gradsflow
|
d6ec5bc517dcf714cd4ecb91a7f702dce6bded3f
|
[
"Apache-2.0"
] | 253
|
2021-08-17T17:42:25.000Z
|
2022-03-25T07:59:41.000Z
|
tests/dummies.py
|
arvindmuralie77/gradsflow
|
d6ec5bc517dcf714cd4ecb91a7f702dce6bded3f
|
[
"Apache-2.0"
] | 161
|
2021-08-17T16:28:08.000Z
|
2022-03-27T02:36:45.000Z
|
tests/dummies.py
|
arvindmuralie77/gradsflow
|
d6ec5bc517dcf714cd4ecb91a7f702dce6bded3f
|
[
"Apache-2.0"
] | 35
|
2021-08-23T16:26:15.000Z
|
2022-03-26T17:08:15.000Z
|
# Copyright (c) 2021 GradsFlow. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from gradsflow.models import Model
class DummyModel(Model):
def __init__(self):
learner = torch.nn.Linear(1, 4)
super().__init__(learner)
def backward(self, loss: torch.Tensor):
return None
def train_step(self, batch):
return {"loss": torch.as_tensor(1), "metrics": {"accuracy": 1}}
def val_step(self, batch):
return {"loss": torch.as_tensor(1), "metrics": {"accuracy": 1}}
| 32.060606
| 75
| 0.697543
| 151
| 1,058
| 4.807947
| 0.615894
| 0.082645
| 0.035813
| 0.044077
| 0.146006
| 0.146006
| 0.146006
| 0.146006
| 0.146006
| 0.146006
| 0
| 0.016568
| 0.201323
| 1,058
| 32
| 76
| 33.0625
| 0.842604
| 0.548204
| 0
| 0.166667
| 0
| 0
| 0.082073
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.25
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 3
|
6a8c272cd22c6193695ebfa5fa34ff4d88d4565d
| 579
|
py
|
Python
|
src/solutions/part2/q104_max_bi_tree_depth.py
|
hychrisli/PyAlgorithms
|
71e537180f3b371d0d2cc47b11cb68ec13a8ac68
|
[
"Apache-2.0"
] | null | null | null |
src/solutions/part2/q104_max_bi_tree_depth.py
|
hychrisli/PyAlgorithms
|
71e537180f3b371d0d2cc47b11cb68ec13a8ac68
|
[
"Apache-2.0"
] | null | null | null |
src/solutions/part2/q104_max_bi_tree_depth.py
|
hychrisli/PyAlgorithms
|
71e537180f3b371d0d2cc47b11cb68ec13a8ac68
|
[
"Apache-2.0"
] | null | null | null |
from src.base.solution import Solution
from src.tests.part2.q104_test_max_bi_tree_depth import MaxBiTreeDepthTestCases
class MaxBiTreeDepth(Solution):
def gen_test_cases(self):
return MaxBiTreeDepthTestCases()
def run_test(self, input):
return self.maxDepth(input)
def maxDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if not root: return 0
return max(self.maxDepth(root.left), self.maxDepth(root.right)) + 1
if __name__ == '__main__':
sol = MaxBiTreeDepth()
sol.run_tests()
| 23.16
| 79
| 0.661485
| 70
| 579
| 5.228571
| 0.542857
| 0.098361
| 0.087432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013605
| 0.238342
| 579
| 25
| 80
| 23.16
| 0.816327
| 0.055268
| 0
| 0
| 0
| 0
| 0.015504
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.230769
| false
| 0
| 0.153846
| 0.153846
| 0.692308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 3
|
6a921ec9df90e9d0bc4821cbf3d19c03f4f29792
| 1,882
|
py
|
Python
|
scripts/common/frozendict.py
|
bopopescu/build
|
4e95fd33456e552bfaf7d94f7d04b19273d1c534
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/common/frozendict.py
|
bopopescu/build
|
4e95fd33456e552bfaf7d94f7d04b19273d1c534
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/common/frozendict.py
|
bopopescu/build
|
4e95fd33456e552bfaf7d94f7d04b19273d1c534
|
[
"BSD-3-Clause"
] | 1
|
2020-07-23T11:05:06.000Z
|
2020-07-23T11:05:06.000Z
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implements a frozen dictionary-like object"""
import collections
import copy
import common.memo as memo
class frozendict(collections.Mapping):
"""A frozen dictionary class"""
def __init__(self, *args, **kwargs):
self._data = dict(*args, **kwargs)
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data)
def __getitem__(self, key):
return self._data[key]
@memo.memo_i()
def __hash__(self):
return hash(self.itemtuple())
def __str__(self):
return str(self._data)
def __repr__(self):
return '%s(%s)' % (type(self).__name__, str(self))
def __eq__(self, other):
return self._data == other
def __ne__(self, other):
return not self == other
def __deepcopy__(self, _memo):
return copy.deepcopy(self._data)
@memo.memo_i()
def itemtuple(self):
return tuple(sorted(self.iteritems()))
def mutableDict(self):
"""
Returns a mutable dictionary copy, replacing 'frozendict' with 'dict's.
This function uses the 'copy.deepcopy' method to create a mutable deep copy
of the dictionary.
Note that due to the one-size-fits-all behavior of 'deepcopy', the result
can be anything from heavyhanded to incorrect depending on the contents of
the dictionary. The caller should make sure they understand the operation
and its behavior on all of the dictionary's subtypes before using it.
Returns: (dict) A mutable clone of the dictionary and its members.
"""
return copy.deepcopy(self)
def extend(self, **kwargs):
"""Returns a copy of this object with the 'kwargs' fields updated."""
ndata = self.mutableDict()
ndata.update(kwargs)
return type(self)(**ndata)
| 26.507042
| 79
| 0.698193
| 266
| 1,882
| 4.736842
| 0.424812
| 0.044444
| 0.047619
| 0.019048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002653
| 0.198725
| 1,882
| 70
| 80
| 26.885714
| 0.832891
| 0.435175
| 0
| 0.058824
| 0
| 0
| 0.005988
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.382353
| false
| 0
| 0.088235
| 0.294118
| 0.852941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 3
|
6aab26683b9b2a063b1ca8928d6b0655775e0f6b
| 86,132
|
py
|
Python
|
model/entity_quotes.py
|
tkuculo/QuoteKG
|
a7b7d323679624a9cd3805e866028fad0a5a5408
|
[
"MIT"
] | null | null | null |
model/entity_quotes.py
|
tkuculo/QuoteKG
|
a7b7d323679624a9cd3805e866028fad0a5a5408
|
[
"MIT"
] | null | null | null |
model/entity_quotes.py
|
tkuculo/QuoteKG
|
a7b7d323679624a9cd3805e866028fad0a5a5408
|
[
"MIT"
] | null | null | null |
#main_section > lines > line > text
#main_section > lines > line > sub_line > text
#main_section > sub_sections
#main_section > templates > type
#main_section > templates > empty_values
#main_section > templates > values
#main_section > templates > sub_templates
#main_section > title > line > text
from transformers.models.auto import configuration_auto
from model.quote import *
import collections
languages_with_templates=["fr","da","nl","be","is","ca","bg","da","ka"]
hybrid_languages = ["uk","ru","sv","et"] + ["ko","fa","cs","fi", "hy"]
misattributed = {
'ar': ['ضعيف', 'متنازع عليه', 'بشكل غير صحيح', 'قائلا نعزى خطأ', 'يعزى خطأ إلى', 'ونقلت تم تعيينها', 'إساءة', 'نعزى بشكل غير صحيح', 'متصل بشكل غير صحيح', 'يعزى بشكل غير صحيح إلى', 'مثيرة للجدل', 'تم تعيينه بشكل غير صحيح', 'تم تعيينه بشكل غير صحيح', 'الفضل بشكل غير صحيح', 'مشكوك فيه', 'سوء المعاملة', 'سيئة', 'خاطئ', 'الفضل بشكل خاطئ', 'لم يتم التحقق منه', 'مرفقة بشكل غير صحيح', 'الفضل بشكل غير صحيح', 'غير صحيح', 'يعزى إلى الخطأ', 'مشبوه أو مشكوك فيه'],\
'az': ['zəif', 'mübahisəli', 'yanlış', 'yanlış şəkildə aid olduğunu söyləmək', 'səhv yanına aiddir', 'Təyin olunmuş sitatlar', 'yanılsaq', 'səhv aiddir', 'səhv bağlıdır', 'səhv aiddir', 'mübahisəli', 'səhv təyin olunur', 'səhv təyin olunmuşdur', 'səhv hesablanır', 'şübhəli', 'zəif', 'səhv', 'səhv hesablanır', 'təsdiqlənməmiş', 'səhv əlavə olunur', 'səhv hesablanır', 'yanlış', 'səhvən aiddir', 'şübhəli'],\
'be': ['слабы', 'спрэчны', 'няправільна', 'кажучы няправільна прыпісаны', 'памылкова звязаны з', 'Цытаты прызначаныя', 'misatributed', 'няправільна прыпісваецца', 'няправільна падлучаны', 'няправільна прыпісваецца', 'супярэчлівы', 'няправільна прызначаны', 'няправільна прызначаны', 'залічваецца няправільна', 'няпэўны', 'адварочваў', 'кепска', 'памылковы', 'памылкова залічана', 'неўверыў', 'няправільна прыкладаецца', 'няправільна залічаны', 'няправільны', 'прыпісваецца памылкова', 'падазроны'],\
'bg': ['слаб', 'оспорван', 'неправилно', 'казвайки погрешно приписване', 'погрешно се приписва', 'Misattributed.', 'неправилно приписано', 'неправилно свързани', 'неправилно', 'противоречиви', 'е неправилно назначен', 'неправилно зададен', 'кредитирани неправилно', 'съмнително', 'Млъкни', 'лошо', 'погрешно', 'неправилно кредитирани', 'Несъвършен', 'неправилно прикрепени', 'неправилно кредитирани', 'неправилен', 'се приписва на погрешно', 'подозрителен'],\
'bs': ['slab', 'sporan', 'pogrešno', 'govoreći pogrešno pripisano', 'pogrešno se pripisuje', 'Citati dodijeljene', 'misao', 'Netačno pripisan', 'Nepravilno povezani', 'pogrešno pripisan', 'kontroverzan', 'pogrešno je dodeljen', 'pogrešno dodijeljeno', 'pripisuju pogrešno', 'sumnjiv', 'maltretiran', 'slabo', 'pogrešno', 'pogrešno pripisan', 'neprovjeren', 'pogrešno priložen', 'pogrešno pripisan', 'netačan', 'pripisuje se pogrešno', 'sumnjiv'], \
'ca': ['feble', 'en disputa', 'incorrectament', 'dient incorrectament atribuït', "s'atribueix incorrectament a", 'Cotitzacions assignades', 'Misattributed', 'atribuïts incorrectament', 'connectat incorrectament', 'atribuït incorrectament a', 'controvertit', 'està assignat incorrectament', 'assignat incorrectament', 'acreditat incorrectament', 'dubtós', 'maltractat', 'pobrament', 'mal', 'acreditat incorrectament', 'no verificat', 'incorrectament adjunt', 'acreditat incorrectament', 'incorrecte', "s'atribueix a erròniament", 'sospitós'], \
'co': ['debuli', 'disputa', 'sbagliatu', 'dicendu attribuitu sbagliatu', 'sbagliatu hè attribuita à', 'Quotes assignati', 'misattribuitu', 'attribuitu sbagliatu', 'cunnessu sbagliatu', 'attribuitu sbagliatu à', 'cuntruversuale', 'hè incorrectamente assignatu', 'assignatu sbagliatu', 'creditu sbagliatu', 'dubbitu', 'MISTORATU', 'Poviru', 'sbagliatu', 'sbagliatu creditu', 'Unvererazionatu', 'sbagliatu attaccatu', 'incorrectamente creditu', 'sbagliatu', 'hè attribuita à sbaglià', 'suspicosu'],\
"cs": ['pochybný', 'nesprávně je připisován', 'je přičítán omylem', 'neosgejavané.', 'říká se nesprávně přiřazené', 'sporný', 'je nesprávně přiřazen', 'špatně', 'nesprávně připojeno', 'nesprávně', 'nezbytný', 'nesprávně přiřazeno', 'nesprávně přisuzováno', 'špatně zacházený', 'slabý', 'nesprávný', 'nesprávně připsány', 'nesprávně připsaný', 'přidělené nabídky', 'podezřelý', 'neověřené'],\
'da': ['svag', 'bestridt', 'forkert', 'siger fejlagtigt tilskrevet', 'fejlagtigt tilskrives', 'citater tildelt', 'misattributed.', 'forkert tilskrevet', 'forkert forbundet', 'forkert tilskrives', 'kontroversielt', 'er forkert tildelt', 'forkert tildelt', 'krediteret forkert', 'tvivlsom', 'mishandlet', 'Dårlig', 'forkert', 'fejlagtigt krediteret', 'unverified.', 'forkert vedhæftet', 'forkert krediteret', 'ukorrekt', 'er tilskrevet fejlagtigt', 'mistænksom'], \
"de": ['falsch verbunden', 'falsch angebracht', 'falsch zugewiesen', 'wird fehlerhaft zurückgeführt', 'schwach', 'fälschlich zugeschrieben', 'falsch zugerechnet', 'falsch wird zugeschrieben', 'falsch', 'falsch angeschlossen', 'misshandelt', 'unrecht zugeschrieben werden', 'misstrauisch', 'falsch gutgeschrieben', 'zweifelhaft', 'ist falsch zugewiesen', 'notwendig', 'zitate zugewiesen', 'nicht verifiziert'],\
'el': ['αδύναμος', 'αμφισβητούμενος', 'εσφαλμένα', 'λέγοντας εσφαλμένα αποδόσεις', 'λανθασμένα αποδίδεται σε', 'αποσπάσματα', 'απροσδόκητος', 'που αποδίδονται εσφαλμένα', 'εσφαλμένα συνδεδεμένο', 'που αποδοθεί εσφαλμένα', 'αμφιλεγόμενος', 'έχει ανατεθεί εσφαλμένα', 'εσφαλμένα αποδίδεται', 'πιστώθηκε λανθασμένα', 'αμφίβολος', 'κακομεταχειρίζομαι', 'πτωχώς', 'λανθασμένος', 'λάθος πιστώθηκε', 'ανεπιβεβαίωτος', 'Επισυνάπτεται εσφαλμένα', 'εσφαλμένα πιστώνεται', 'ανακριβής', 'αποδίδεται λανθασμένα', 'ύποπτος'],\
"en": ['weak', 'disputed', 'incorrectly', 'saying wrongly attributed', 'wrongly is attributed to', 'quotes assigned', 'misattributed', 'incorrectly attributed', 'incorrectly connected', 'incorrectly attributed to', 'controversial', 'is incorrectly assigned', 'incorrectly assigned', 'credited incorrectly', 'doubtful', 'mistreated', 'poorly', 'wrong', 'wrongly credited', 'unverified', 'incorrectly attached', 'incorrectly credited', 'incorrect', 'is attributed to mistakenly', 'suspicious'],\
"es": ['débil', 'disputado', 'incorrectamente', 'decir atribuido incorrectamente', 'atribuido incorrectamente a', 'citas asignadas', 'atribuido incorrectamente', 'atribuido incorrectamente', 'conectado incorrectamente', ' atribuido incorrectamente a ',' controvertido ',' asignado incorrectamente ',' asignado incorrectamente ',' acreditado incorrectamente ',' dudoso ',' maltratado ',' mal ',' incorrecto ',' acreditado incorrectamente ',' no verificado ', 'adjunto incorrectamente', 'acreditado incorrectamente', 'incorrecto', 'atribuido erróneamente', 'sospechoso'],\
'et': ['nõrk', 'vaidlustatud', 'valesti', 'öeldes valesti omistatud', 'valesti omistatakse', 'määratud hinnapakkumisi', 'eksima', 'valesti omistatud', 'valesti ühendatud', 'valesti omistatud', 'vastuoluline', 'on valesti määratud', 'valesti määratud', 'krediteeritud valesti', 'kahtlane', 'väärkohtlemine', 'halvasti', 'vale', 'valesti krediteeritud', 'vastamata jätmine', 'valesti kinnitatud', 'valesti krediteeritud', 'vale', 'omistatakse ekslikult', 'kahtlane'],\
'eu': ['ahul', 'jokatu', 'gaizki', 'gaizki egozten esanda', 'gaizki egozten zaio', 'esleitutako aipuak', 'Misattributatua', 'oker egotzi', 'Gaizki konektatuta', 'oker egotzita', 'Polemika', 'gaizki esleitzen da', 'gaizki esleituta', 'oker kreditua', 'zalantzazko', 'tratu txarrak', 'txarto', 'okerreko', 'gaizki kreditatu', 'irentetu gabe', 'oker erantsita', 'Gaizki kreditatu', 'ez zuzen', 'oker egozten zaio', 'goganbehartsu'],\
'fa': ['ضعیف', 'متضاد', 'نادرست', 'گفتن اشتباه است', 'اشتباه به آن نسبت داده می شود', 'نقل قول اختصاص داده شده', 'سوء تفاهم', 'نادرست نسبت داده شده است', 'نادرست متصل است', 'نادرست به', 'بحث برانگیز', 'نادرست اختصاص داده شده است', 'اشتباه اختصاص داده شده است', 'اعتبار نادرست', 'مشکوک', 'بدرفتاری', 'ضعیف', 'اشتباه', 'اشتباه اعتبار', 'غیر قابل تایید', 'اشتباه متصل شده', 'اشتباه اعتبار', 'غلط', 'به اشتباه نسبت داده شده است', 'مشکوک'],\
'fi': ['heikko', 'kiistanalainen', 'väärin', 'sanomalla väärin', 'virheellisesti johtuu', 'Lainaukset', 'huonosti', 'virheellisesti', 'Väärin kytketty', 'virheellisesti', 'kiistanalainen', 'on asetettu virheellisesti', 'Virheellisesti määritetty', 'hyvitetään väärin', 'epäilyttävä', 'kohteliaisuus', 'huonosti', 'väärä', 'Väärin hyvitetty', 'vahvistettu', 'Virheellisesti kiinnitetty', 'Virheellisesti hyvitetty', 'väärä', 'johtuu virheellisesti', 'epäilyttävä'],\
'fr': ['faible', 'contesté', 'incorrectement', 'dire attribué à tort', 'est attribué à tort à', 'citations attribuées', 'mal attribué', 'mal attribué', 'incorrectement connecté', ' attribué à tort à', 'controversé', 'est attribué de manière incorrecte', 'attribué de manière incorrecte', 'crédité de manière incorrecte', 'douteux', 'maltraité', 'mal', 'mauvais', 'crédité à tort', 'non vérifié', 'incorrectement joint', 'mal crédité', 'incorrect', 'est attribué à tort', 'suspect'],\
'he': ['חלש', 'משווקת', 'לא נכון', 'אומר מיוחסת בטעות', 'בטעות מיוחסת', 'ציטוטים שהוקצו', 'misattributed', 'המיוחס בצורה שגויה', 'קשור באופן שגוי', 'המיוחס לא נכון', 'שנוי במחלוקת', 'מוקצה באופן שגוי', 'שהוקצו באופן שגוי', 'זוכה באופן שגוי', 'מוטל בספק', 'התעללות', 'גרוע', 'שגוי', 'שזוכו בטעות', 'unverified', 'המצורפת באופן שגוי', 'זוכה לא נכון', 'לֹא נָכוֹן', 'מיוחסת לטעות בטעות', 'חָשׁוּד'], 'hi': ['कमज़ोर', 'विवादित', 'गलत तरीके से', 'गलत तरीके से कहना', 'गलत तरीके से जिम्मेदार है', 'उद्धरण सौंपा', 'गलत', 'गलत तरीके से जिम्मेदार', 'गलत तरीके से जुड़ा हुआ', 'गलत तरीके से जिम्मेदार ठहराया', 'विवादास्पद', 'गलत तरीके से सौंपा गया है', 'गलत तरीके से असाइन किया गया', 'गलत तरीके से श्रेय दिया गया', 'संदिग्ध', 'दुराचारित', 'बीमार', 'गलत', 'गलत तरीके से श्रेय दिया गया', 'असत्यापित', 'गलत तरीके से संलग्न', 'गलत तरीके से श्रेय दिया गया', 'ग़लत', 'गलती से जिम्मेदार है', 'संदेहजनक'],\
'hr': ['slab', 'osporen', 'nepravilno', 'govoreći pogrešno pripisuje se', 'pogrešno se pripisuje', 'dodijeljeni citati', 'pogrešan', 'Neispravno se pripisuje', 'pogrešno povezan', 'pogrešno pripisuje', 'kontroverzno', 'je pogrešno dodijeljen', 'pogrešno dodijeljen', 'pogrešno pripisano', 'sumnjiv', 'maltretiran', 'slabo', 'pogrešno', 'pogrešno pripisano', 'neveritičan', 'pogrešno pričvršćen', 'pogrešno pripisano', 'netočno', 'se pripisuje pogrešno', 'sumnjičav'],\
'hu': ['gyenge', 'vitatott', 'tévesen', 'rosszul mondván', 'helytelenül tulajdonítható', 'Idézetek hozzárendeltek', 'félreérthetetlen', 'helytelenül tulajdonítható', 'Helytelenül csatlakoztatva van', 'helytelenül tulajdonítható', 'vitatott', 'helytelenül hozzárendelt', 'Helytelenül hozzárendelt', 'helytelenül jóváírják', 'kétséges', 'rosszul kezelt', 'rosszul', 'rossz', 'tévesen jóváírta', 'ellenőrizetlen', 'Helytelenül csatolt', 'helytelenül jóváírta', 'helytelen', 'tévesen tulajdonítható', 'gyanús'],\
'hy': ['թույլ', 'վիճված', 'սխալ', 'սխալ ասելով, վերագրվում է', 'սխալ է վերագրվում', 'Նշված մեջբերումները', 'Մատսել է', 'Սխալ կերպով վերագրվում է', 'Սխալ միացված', 'սխալ է վերագրվել', 'վիճաբանական', 'սխալ է նշանակվել', 'Սխալ նշանակված', 'սխալվել է սխալ', 'կասկածելի', 'չարամտել', 'վատ', 'սխալ', 'սխալվել է', 'անավարտ', 'Սխալորեն կցված', 'սխալ է գնահատվել', 'սխալ', 'վերագրվում է սխալմամբ', 'կասկածելի'],\
'id': ['lemah', 'diperdebatkan', 'salah', 'mengatakan salah dikaitkan.', 'salah dikaitkan dengan', 'Kutipan ditugaskan', 'salah penyibaran', 'salah dikaitkan', 'salah terhubung', 'salah dikaitkan dengannya', 'kontroversial', 'salah ditugaskan', 'salah ditugaskan', 'dikreditkan secara salah', 'diragukan lagi', 'Dianiaya', 'buruk', 'salah', 'salah dikreditkan', 'tidak diverifikasi', 'salah melekat', 'salah dikreditkan', 'salah', 'dikaitkan dengan keliru', 'mencurigakan'],\
'is': ['veik', 'umdeildur', 'rangt', 'segja að ranglega rekja til', 'rangt stafar af', 'Tilvitnanir úthlutað', 'misertributed.', 'rangt rekja má', 'rangt tengt', 'rangt rekja til', 'umdeild', 'er rangt úthlutað', 'rangt úthlutað', 'lögð rangt', 'efast', 'mistreated.', 'illa', 'rangt', 'ranglega lögð inn', 'unverfied.', 'rangt fylgir', 'Rangt viðurkennt', 'rangt', 'er rekja til ranglega', 'grunsamlegt'],\
'it': ['debole', 'disputato', 'erroneamente', 'detto erroneamente attribuito', 'erroneamente attribuito a', 'virgolette assegnate', 'erroneamente attribuito', 'erroneamente attribuito', 'erroneamente connesso', ' erroneamente attribuito a', 'controverso', 'è assegnato in modo errato', 'assegnato in modo errato', 'accreditato in modo errato', 'dubbio', 'maltrattato', 'male', 'sbagliato', 'accreditato erroneamente', 'non verificato', 'erroneamente allegato', 'erroneamente accreditato', 'errato', 'è attribuito a erroneamente', 'sospetto'],\
'ja': ['弱い', '議論した', '誤って', '間違って帰ったことを言っています', '間違って帰属しています', '割り当てられた引用符', '誤動作しました', '間違って帰属しました', '誤って接続されています', '誤って帰属しました', '物議を醸す', '間違って割り当てられています', '間違って割り当てられています', '誤って入金されました', '疑わしい', '虐待された', '不完全に', '間違い', '間違ってクレジットされました', '未検証', '誤って添付されています', '誤ってクレジットされました', '正しくない', '誤って帰属されています', '疑わしい'],\
'ka': ['სუსტი', 'სადავო', 'არასწორად', 'არასწორად მიეკუთვნება', 'არასწორად მიეკუთვნება', 'შეთავაზებები', 'misattributed', 'არასწორად მიეკუთვნება', 'არასწორად უკავშირდება', 'არასწორად მიეკუთვნება', 'დროებითი', 'არასწორად არის მინიჭებული', 'არასწორად მინიჭებული', 'არასწორად დაკრედიტდება', 'საეჭვო', 'mistreated', 'ღარიბად', 'მცდარი', 'არასწორად დაკრედიტდება', 'გადაუსებული', 'არასწორად ერთვის', 'არასწორად დაკრედიტდება', 'არასწორი', 'შეცდომით მიეკუთვნება', 'საეჭვო'],\
'ko': ['약한', '분쟁', '틀리게', '잘못된 것으로 말하고있다', '잘못된 것은', '할당 된 따옴표', '미해시', '잘못된 것으로 잘못된 것입니다', '잘못 연결되었습니다', '잘못된 것으로 잘못된 것입니다', '논란이 많은', '잘못 지정됩니다', '잘못 지정되었습니다', '잘못 적립되었습니다', '불안한', '학대하다', '신통치 않게', '잘못된', '잘못된 적립 된 것', '확인되지 않았습니다', '잘못 첨부되었습니다', '잘못 적립되었습니다', '잘못된', '실수로 기인합니다', '의심스러운'],\
'lt': ['Silpnas', 'ginčijama', 'Neteisingai', 'sakydamas neteisingai priskirtas', 'neteisingai priskiriama', 'Citatos', 'nesuderinta', 'neteisingai priskiriama', 'neteisingai prijungta', 'neteisingai priskirta', 'prieštaringas', 'yra neteisingai priskirtas', 'neteisingai priskirtas', 'neteisingai įskaityta', 'abejotina', 'netinkamai elgiamasi', 'blogai', 'neteisingas', 'neteisingai įskaityta', 'nepatvirtinta', 'neteisingai prijungtas', 'neteisingai įskaityta', 'Neteisinga', 'priskiriama klaidingai', 'įtartinas'],\
'nl': ['zwak', 'twijfelachtig', 'onjuist', 'Samenstellen ten onrechte toegeschreven', 'ten onrechte wordt toegeschreven aan', 'Citaten toegewezen', 'verkeerd ingesteld', 'Onjuist toegeschreven', 'Onjuist aangesloten', 'onjuist toegeschreven aan', 'controverseel', 'is verkeerd toegewezen', 'Onjuist toegewezen', 'verkeerd gecrediteerd', 'twijfelachtig', 'mishandeld', 'slecht', 'mis', 'ten onrechte gecrediteerd', 'ongehroken', 'verkeerd bevestigd', 'onjuist gecrediteerd', 'niet correct', 'wordt toegeschreven aan ten onrechte', 'verdacht'],\
'no': ['svak', 'omstridt', 'feil', 'sier feilaktig tilskrives det', 'feil er tilskrevet', 'Sitater tildelt', 'misattributed.', 'feilaktig tilskrives det', 'feil tilkoblet', 'feilaktig tilskrives', 'kontroversiell', 'er feil tildelt', 'feilaktig tildelt', 'krediteres feil', 'tvilsom', 'feilbehandlet', 'dårlig', 'feil', 'feil kreditert', 'unverified.', 'feil festet', 'feil kreditert', 'stemmer ikke', 'er tilskrevet feilaktig', 'mistenkelig'],\
'ro': ['slab', 'contestată', 'incorect', 'spunând atribuit greșit', 'este atribuit în mod greșit', 'Citate atribuite', 'misattribuit', 'incorect atribuită', 'incorect conectat', 'incorect atribuită', 'controversat', 'este atribuită incorect', 'incorect atribuite', 'creditat incorect', 'îndoielnic', 'maltratat', 'slab', 'gresit', 'creditat greșit', 'neveriectificat', 'În mod incorect atașat', 'incorect creditate', 'incorect', 'este atribuită în mod eronat', 'suspicios'],\
'ru': ['слабый', 'оспариваемый', 'неправильно', 'говорить неправильно приписанным', 'неправильно объясняется', 'цитаты назначены', 'несущественно', 'неправильно приписан', 'неправильно подключен', 'неправильно приписан', 'спорный', 'неверно назначен', 'неверно назначен', 'зачислен неправильно', 'сомнительный', 'плохо обращаться', 'плохо', 'неправильный', 'неправильно приписывать', 'неверно', 'неправильно прилагается', 'неправильно зачислено', 'неверный', 'приписывается по ошибке', 'подозрительный'],\
'sk': ['slabý', 'sporný', 'nesprávne', 'hovorí nesprávne pripisované', 'nesprávne sa pripisuje', 'Pridelené citácie', 'nesprávny', 'Nesprávne pripísané', 'Nesprávne pripojené', 'nesprávne pripísané', 'kontroverzný', 'je nesprávne priradený', 'Nesprávne priradené', 'nesprávne pripísané', 'pochybný', 'nespokojný', 'úboho', 'vhodný', 'nesprávne pripísané', 'neoverený', 'Nesprávne pripojené', 'Nesprávne pripísané', 'nesprávny', 'sa pripisuje mylne', 'podozrivý'],\
"sl": ["neozdrojované"'napačno prijavljeno', 'rekel napačno pripisano', 'napačno nakazana', 'napačno povezan', 'slabo', 'sumljivega', 'nepravilno dodeljena', 'neosgejavan.', 'dodeljeni citati', 'sporno', 'nepravilno pritrjena', 'nepreverjeno', 'napačno', 'je nepravilno dodeljen', 'nepravilno', 'napačno pripisano', 'se pripisuje pomotoma', 'in pavipe.', 'napačno pripisuje', 'dvomljiv', 'šibko', 'narobe', 'nepravilno pripisana'],\
"sq": ['i diskutueshëm', 'atribuohet gabimisht', 'i keqtrajtuar', 'i atribuohet gabimisht', 'i pasaktë', 'kredituar gabimisht', 'caktohet gabimisht', 'i lidhur gabimisht', 'i dyshimtë', 'i pavepi', 'i gabuar', 'thënie të atribuara gabimisht', 'bashkangjitur gabimisht', 'dobet'],\
"pl": ['zło', 'błędny', 'misattriruted.', 'źle traktować', 'słabo', 'wątpliwy', 'nieprawidłowo przymocowany', 'nieprawidłowo przypisany do', 'niepoprawnie przypisany', 'niepoprawnie połączony', 'mówiąc błędnie przypisany', 'kwestionować', 'cytaty przypisywane', 'niesprawdzony', 'błędnie przypisany', 'nieprawidłowo przypisany'], \
'pt': ['fraca', 'contestada', 'incorretamente', 'dizendo atribuída incorretamente', 'atribuída incorretamente a', 'citações atribuídas', 'atribuída incorretamente', 'atribuída incorretamente', 'conectada incorretamente', ' atribuído incorretamente a ',' controverso ',' atribuído incorretamente ',' atribuído incorretamente ',' creditado incorretamente ',' duvidoso ',' maltratado ',' mal ',' errado ',' creditado incorretamente ',' não verificado ', 'incorretamente anexado', 'incorretamente creditado', 'incorreto', 'atribuído a incorretamente', 'suspeito'], \
'ta': ['பலவீனமான', 'விவாதத்திற்குரியது', 'தவறாக', 'தவறாக சொல்லப்பட்டது', 'தவறாக காரணம்', 'மேற்கோள் ஒதுக்கப்படும்', 'misattributed.', 'தவறாக காரணம்', 'தவறாக இணைக்கப்பட்டுள்ளது', 'தவறாக காரணம்', 'சர்ச்சைக்குரிய', 'தவறாக ஒதுக்கப்பட்டுள்ளது', 'தவறாக ஒதுக்கப்படும்', 'தவறாக வழங்கப்பட்டது', 'சந்தேகம்', 'தவறாக நடத்தப்பட்டது', 'மோசமாக', 'தவறு', 'தவறாக வரவு', 'சரிபார்க்கப்படவில்லை', 'தவறாக இணைக்கப்பட்டுள்ளது', 'தவறாக நம்பப்படுகிறது', 'தவறானது', 'தவறுதலாக காரணம்', 'சந்தேகத்திற்கிடமான'],\
'te': ['బలహీనమైన', 'వివాదాస్పదంగా', 'తప్పుగా', 'తప్పుగా ఆపాదించబడినది', 'తప్పుగా ఆపాదించబడినది', 'కేటాయించిన కోట్స్', 'myatattributed', 'తప్పుగా ఆపాదించబడినది', 'తప్పుగా కనెక్ట్ చేయబడింది', 'తప్పుగా ఆపాదించబడినది', 'వివాదాస్పద', 'తప్పుగా కేటాయించబడుతుంది', 'తప్పుగా కేటాయించబడింది', 'తప్పుగా జమ చేయబడుతుంది', 'అనుమానాస్పద', 'బాధితుడు', 'పేలవంగా', 'తప్పు', 'తప్పుగా ఘనత పొందింది', 'ధృవీకరించనిది', 'తప్పుగా జతచేయబడింది', 'తప్పుగా ఘనత పొందింది', 'తప్పు', 'తప్పుగా ఆపాదించబడింది', 'అనుమానాస్పద'],\
'uk': ['слабкий', 'спірний', 'неправильно', 'кажучи неправильно віднесено', 'неправильно пояснюється', 'Призначені цитати', 'мізерний', 'неправильно віднесено', 'неправильно підключено', 'неправильно віднесено', 'суперечливий', 'неправильно призначено', 'неправильно призначено', 'неправильно приписується', 'сумнівний', 'погано', 'погано', 'неправильний', 'неправильно зарахований', 'неперевірений', 'неправильно прикріплені', 'неправильно зараховано', 'неправильний', 'пояснюється помилково', 'підозрілий'],\
'ur': ['کمزور', 'متنازعہ', 'غلط طور پر', 'غلط طور پر منسوب کیا گیا ہے', 'غلط طور پر منسوب کیا جاتا ہے', 'حوالہ جات', 'غلط استعمال کی اطلاع دیتے ہوئے ایرر آ گیا ہے', 'غلط طور پر منسوب', 'غلط طور پر منسلک', 'غلط طور پر منسوب', 'متضاد', 'غلط طور پر تفویض کیا جاتا ہے', 'غلط طور پر تفویض', 'غلط طریقے سے کریڈٹ', 'شکست', 'غلطی', 'غریب', 'غلط', 'غلط طور پر کریڈٹ', 'غیر تصدیق شدہ', 'غلط طریقے سے منسلک', 'غلط طریقے سے کریڈٹ', 'غلط', 'غلطی سے منسوب کیا جاتا ہے', 'مشکوک'],\
'vi': ['Yếu', 'tranh chấp', 'không chính xác', 'nói sai quy kết', 'sai được quy cho', 'Báo giá được giao', 'sai lệch', 'quy cho không chính xác', 'kết nối không chính xác', 'quy cho không chính xác cho.', 'gây tranh cãi', 'được giao không chính xác', 'chỉ định không chính xác', 'ghi có không chính xác', 'nghi ngờ', 'ngược đãi', 'kém', 'Sai lầm', 'Tín dụng sai', 'chưa được xác minh', 'đính kèm không chính xác', 'Credited không chính xác', 'không đúng', 'được quy cho nhầm', 'khả nghi'],\
'zh': ['弱', '有争议', '不正确', '错误归因', '错误归因于', '引用分配', '错误归因', '错误归因', '错误连接', ' 错误地归因于', '有争议的', '被错误地分配', '错误地分配','记入错误','可疑','虐待','差','错误','错误记入','未验证', '错误附加','错误记入','错误','归因于错误','可疑']
}
#attributed? Neověřené disputed
# to be checked: Djela, Obras, Povedali o
forbidden_by_language = {
"ar" : ["قالوا عنه","قالوا عنه","أشهر مؤلفاتها","الوصلات الخارجية"],\
"az" : ["İstinadlar","Mənbə","Xarici keçidlər","Haqqında deyilənlər","istinadlar"],\
"be":["Выказванні пра", "зноскі","спасылкі"],\
"bg":["За нея","за него","Източници","Бележки","Външни препратки","литература"],\
"bs":["Drugi o njemu","Djela","Također pogledajte","Vanjski linkovi","Izdanja"],\
"ca":["citacions sobre","Referències","Bibliografia","Enllaços externs","referències"],\
"co":["daveoù"],\
"cs":["ve výrocích","Reference","Externí odkazy","Související"],\
"da":["Eksterne henvisninger","Kilder"],\
"de":["zitate mit bezug auf", ],\
"el":["εξωτερικοί σύνδεσμοι"],\
"es":["sobre", "Obras", "Véase también", "Bibliografía","referencias"],\
"et":["välislingid"],\
"en":["quotes about", "filmography", "footnote", "sources", "resources", "other projects","external links","links",\
"notes", "note", "weblinks", "bibliogprahy", "related items","works", "references","literature","see","see also",\
"footnote","other projects"],\
"eu":["Kanpo loturak","Erreferentziak"],\
"fa":["دربارهٔ او","پیوند به بیرون","جستارهای وابسته","منبعدار", "منابع","پیوند بهبیرون"],\
"fi":["sanottua","lähteet"],\
"fr":["sur "],\
"he":["על עצמה", "נאמר עליה","מקורות","קישורים חיצוניים","נאמר עליו","ראו גם"],\
"hi":["बाहरी कडियाँ"],\
"hr":["vanjske poveznice"],\
"hu":["róla mondták","külső hivatkozások","Művei"],\
"hy":["Աղբյուրներ","Ծանոթագրություններ","ծանոթագրություններ"],\
"is":["tenglar"],\
"id":["pranala luar"],\
"it":["citazioni su","Doppiaggio","film","filmografia","altri progetti","voci correlate"], \
"ja":["外部リンク"],\
"ka":["რესურსები ინტერნეტში"],\
"ko":["각주","관련 어록"],\
"lt":["nuorodos"],\
"nl":["over "], \
"no":["eksterne lenker","referanser"],\
"pl":["zobacz też","o "],\
"pt":["obras", "sobre","Ligações externas"],\
"ro":["legături externe","despre"],\
"ru":["Об","Фильмография","примечания","ссылки", "см. также"],\
"sk":["Povedali o","iné projekty","referencie"],\
"sl":["viri","sklici"],\
"sq":["Thënie për të","Referimet","Shiko edhe","lidhje të jashtme","referime"],\
"ta":["வெளி இணைப்புகள்","சான்றுகள்"],\
"te":["మూలాలు"],\
"tr":["Hakkında","kaynakça"],\
"uk":["Про","Джерела","примітки","література"],\
"ur":["حوالہ جات"],\
"vi":["Liên kết ngoài","notennoù"],\
"zh":["外部链接","参见","参考文献"]
}
forbidden_by_language["ar"] = ["قالوا عنه", "قالوا عنه", "أشهر مؤلفاتها", "الوصلات الخارجية", "انظر أيضا إلى", "فهرس", "ويعمل", "ملحوظة", "المرجعي", "آخر حول هذا الموضوع", "في الذكرى", "قالوا عن ذلك", "فيلموجرافيا.", "قوله", "روابط", "قالوا عنه", "يقال عن", "يقتبس", "رابط ل", "الإحالات", "الأكثر شهرة الكتب", "الفمود الخارجي", "وصلات خارجية", "مصادر:", "عنه", "استفسارات الاعتماد", "المرجعي", "دبلجة", "فيلموجرافيا.", "له", "في", "روابط خارجية", "يلعب", "هامش", "قيل عنه", "ويعمل", "يلعب", "على", "مشاريع أخرى", "عن", "عنها", "موارد", "رابط خارجي", "المراجع", "مصادر", "قيل عنها", "الحواشي", "المراجع الخارجية", "الأصناف ذات الصلة", "مصدر", "ملحوظات:", "روابط", "لها", "إطلاق", "الشهادات - التوصيات", "ملحوظات", "قل", "الموارد في الإنترنت", "أنظر أيضا", "daveoù.", "رابط إلى الخارج", "عنه", "أنظر أيضا", "فيلم", "تشغيل", "مراجع", "قالوا O.", "متعلق ب", "رابط خارجي", "بيانات حول", "حول", "الاستشهادات المذكورة أعلاه", "مصادر", "سفير", "يقال له", "المؤلفات", "حول نفسها", "روابط خارجية", "التطبيقات ذات الصلة", "ونقلت فيما يتعلق", "ارى", "على", "الروابط الزائدة", "ونقلت حول", "فيلموجرافيا.", "هامش", "مصادر", "مصادر", "مشاريع أخرى", "روابط خارجية", "روابط", "ملحوظات", "ملاحظة", "روابط انترنت", "فهرس", "الأصناف ذات الصلة", "ويعمل", "المراجع", "المؤلفات", "ارى", "أنظر أيضا", "هامش", "مشاريع أخرى"]
forbidden_by_language["az"] = ["İstinadlar", "Mənbə", "Xarici keçidlər", "Haqqında deyilənlər", "istinadlar", "Də baxmaq", "Biblioqrafiya", "əsər", "Əlamətdar", "istinad", "Bu barədə başqa bir şey", "Yubileydə", "Bu barədə dedilər", "Filmoqrafiya", "Deyən", "linklər", "Onun haqqında dedilər", "Haqqında deyilir", "sitat gətirən", "Birləşdirmək", "refertral", "Ən məşhur kitablar", "Xarici TADS", "Xarici əlaqələr", "Mənbələr:", "onun haqqında", "sualları asılı idi", "İstinad", "Dublying", "filmoqrafiya", "onun üçün", "O", "xarici linklər", "pyes", "izdihamlı", "Onun haqqında deyildi", "Əsər", "Pyes", "üstünə", "Digər layihələr", "Haqqında", "onun haqqında", "Resurslar", "Xarici əlaqə", "arayışlar", "Mənbələr", "Onun haqqında deyildi", "izahat", "Xarici İstinadlar", "Oxşar əşyalar", "Mənbəyi", "Qeydlər:", "Linklər", "Onun üçün", "Buraxılış", "Şöhrətli", "Qeydlər", "ərz etmək", "İnternetdəki mənbələr", "Həmçinin bax", "daveoùù", "Xarici ilə əlaqə", "Onun haqqında", "həmçinin bax", "filmə", "yan", "Arayışlar", "Dedilər.", "Bahalı", "xarici əlaqə", "Haqqında ifadələr", "haqqında", "Yuxarıdakı sitatlar", "mənbələr", "Səfir", "Ona deyilir", "ədəbiyyat", "haqqında özü haqqında", "xarici linklər", "Əlaqədar tətbiqlər", "Hörmətlə sitatlar", "Görmək", "artıq", "Həddindən artıq bağlantılar", "haqqında sitatlar", "filmoqrafiya", "izdihamlı", "mənbələr", "resurslar", "Digər layihələr", "xarici linklər", "linklər", "qeydlər", "Qeyd", "vaklar", "biblioqrafiya", "Oxşar əşyalar", "əsər", "arayışlar", "ədəbiyyat", "görmək", "həmçinin bax", "izdihamlı", "Digər layihələr"]
forbidden_by_language["be"] = ["Выказванні пра", "зноскі", "спасылкі", "Таксама глядзець на", "Бібліяграфія", "работы", "нотенно", "спасылка", "Іншы пра гэта", "У гадавіне", "Яны сказалі пра гэта", "Фільмаграфія", "Кажучы", "спасылкі", "Яны сказалі пра яго", "Кажуць пра", "каціроўкі", "Спасылка на", "рэфералы", "Самыя вядомыя кнігі", "Знешнія казалі", "Знешнія злучэння", "Крыніцы:", "пра яго", "Залежныя запыты", "Спасылка", "Нядбайнны", "фільмаграфія", "для яго", "Аб", "Знешнія спасылкі", "п'есы", "знос", "было сказана пра яго", "Работы", "П'есы", "на", "Іншыя праекты", "Пра", "пра яе", "Рэсурсы", "Знешняя спасылка", "рэкамендацыі", "Крыніцы", "Было сказана пра яе", "знос", "Знешнія спасылкі", "Звязаныя элементы", "Крыніца", "Нататкі:", "Спасылкі", "Для яе", "Верхі", "Водгукі", "Нататкі", "гаварыць", "Рэсурсы ў Інтэрнэце", "См таксама", "Спасылка на вонкавым боку", "Пра яго", "см таксама", "плёнка", "на", "Рэкамендацыі", "Яны сказалі, што О.", "Звязаны", "знешняя спасылка", "Заявы аб", "пра", "Цытаты вышэй", "крыніцы", "літаратура", "знешнія спасылкі", "Звязаныя з прыкладаннямі", "Каціроўкі ў адносінах да", "Бачыць", "больш", "Лішак спасылкі", "цытаты аб", "фільмаграфія", "крыніцы", "рэсурсы", "іншыя праекты", "знешнія спасылкі", "спасылкі", "нататкі", "нататка", "weblinks", "бібліяграфія", "Звязаныя элементы", "работы", "рэкамендацыі", "літаратура", "бачыць", "см таксама", "іншыя праекты"]
forbidden_by_language["bg"] = ["За нея", "за него", "Източници", "Бележки", "Външни препратки", "литература", "Също погледнете", "Библиография", "върши работа", "Бележит", "справка", "Друг за него", "в годишнината", "Те казаха за това", "Филмография", "Да се каже", "Връзки", "Те казаха за него", "Се казва", "Връзка към", "Реферали", "Най-известните книги", "Външни тади", "Външни връзки", "Източници:", "за него", "Запитвания", "Справка", "Дублиране", "Филмография", "за него", "О", "външни връзки", "Играе", "Бележка под линия", "Беше казано за него", "Върши работа", "Играе", "в", "Други проекти", "относно", "за нея", "Ресурси", "Външен линк", "препратки", "Източници", "Беше казано за нея", "Бележки под линия", "Външни препратки", "Подобни продукти", "Източник", "Забележки:", "Връзки", "За нея", "Освобождаване", "Отзиви", "Бележки", "казвам", "Ресурси в Интернет", "Вижте също", "Дъстина", "Връзка с външната страна", "За него", "Вижте също", "филм", "На", "Препратки", "Те казаха О.", "Свързани", "външен линк", "Изявления за", "относно", "Цитати над", "Източници", "Посланик", "Му се казва", "Литература", "за себе си", "външни връзки", "Свързани приложения", "Цитати по отношение на", "Вж", "над", "Излишните връзки", "цитати за", "Филмография", "Бележка под линия", "Източници", "Ресурси", "Други проекти", "външни връзки", "Връзки", "Бележки", "Забележка", "WeBlinks.", "Библиография", "подобни продукти", "върши работа", "препратки", "Литература", "вж", "Вижте също", "Бележка под линия", "Други проекти"]
forbidden_by_language["bs"] = ["Drugi o njemu", "Djela", "Također pogledajte", "Vanjski linkovi", "Izdanja", "Takođe pogledajte", "Bibliografija", "radovi", "Primijetan", "referenca", "Još jedan o tome", "u godišnjici", "Rekli su o tome", "Filmografija", "Govoreći", "linkove", "Rekli su o njemu", "Su rekli o", "citati", "Link na", "preporuke", "Najpoznatije knjige", "Vanjski tads", "Vanjske veze", "Izvori:", "o njemu", "Zavito upiti", "Referenca", "Presnimav", "Filmografija", "za njega", "O", "Vanjske veze", "igra", "fusnota", "Rečeno je o njemu", "Radovi", "Igra", "na", "Ostali projekti", "O", "o njoj", "Resursi", "Vanjska veza", "reference", "Izvori", "Rečeno je o njoj", "fusnote", "Vanjske reference", "Srodni predmeti", "Izvor", "Napomene:", "Linkove", "Za nju", "Izdanja", "Testimonials", "Bilješke", "izgovoriti", "Resursi na Internetu", "Vidjeti i", "Daveoù", "Veza sa spolja", "O njemu", "vidjeti i", "film", "na", "Reference", "Rekli su O.", "Povezani", "Vanjska veza", "Izjave o", "o", "Citati gore", "izvori", "Ambasador", "Kaže mu", "literatura", "o sebi", "Vanjske veze", "Srodne aplikacije", "Citati u odnosu na", "Vidjeti", "preko", "Višak veze", "citati o", "Filmografija", "fusnota", "izvori", "resursi", "Ostali projekti", "Vanjske veze", "linkove", "bilješke", "Bilješka", "Webliks", "bibliografija", "Srodni predmeti", "radovi", "reference", "literatura", "vidjeti", "vidjeti i", "fusnota", "Ostali projekti"]
forbidden_by_language["ca"] = ["citacions sobre", "Referències", "Bibliografia", "Enllaços externs", "referències", "També mireu", "Bibliografia", "treballa", "Notable", "referència", "Un altre sobre això", "En l'aniversari", "Van dir sobre això", "Filtrografia", "Dient", "enllaç", "Van dir sobre ell", "Es diu sobre", "cites", "Enllaç a", "referències", "Els llibres més famosos", "Tads exteriors", "Connexions externes", "Fonts:", "sobre ell", "Consultes dependents", "Referència", "% De comportament", "filtrografia", "per ell", "O a", "Enllaços externs", "obert", "Nota al peu", "Es va dir sobre ell", "Treballa", "Obert", "a sobre de", "Altres projectes", "Sobre", "sobre ella", "Recursos", "Enllaç extern", "referències", "Fonts", "Es va dir sobre ella", "Notes al peu de pàgina", "Referències externes", "Articles relacionats", "Font", "NOTES:", "Enllaç", "Per ella", "Llançaments", "Testimonis", "Notes", "dir", "Recursos a Internet", "Vegeu també", "daveoù", "Enllaç a l'exterior", "Sobre ell", "Vegeu també", "pel·lícula", "conectada", "Referències", "Van dir O.", "Relacionada", "Enllaç extern", "Declaracions sobre", "Sobre", "Cites anteriors", "fonts", "Ambaixador", "Se li diu", "literatura", "sobre ella mateixa", "Enllaços externs", "Aplicacions relacionades", "Cites respecte a", "Veure", "sobrar", "Enllaços d'excés", "cites sobre", "filtrografia", "Nota al peu", "fonts", "recursos", "Altres projectes", "Enllaços externs", "enllaç", "notes", "nota", "Weblinks", "bibliografia", "Articles relacionats", "treballa", "referències", "literatura", "veure", "Vegeu també", "Nota al peu", "Altres projectes"]
forbidden_by_language["co"] = ["daveoù", "Fighjà ancu", "Bibliografia", "FUNZIONI", "Notabile", "Riferimentu", "Un altru nantu à questu", "In l'anniversariu", "Anu dettu di questu", "Filmografia", "Dicendu à", "Ligami", "Anu dettu di ellu", "Sò dettu di circa", "Ligame cù", "I referenze", "I libri più famosi", "Tadri esterni", "Cunnessioni esterni", "FONTI:", "circa ellu", "Quistioni dipendenti", "Riferimentu", "Dubaghju", "Filmografia", "per ellu", "O", "Ligami esterni", "Ghjucà", "nota di nota", "si dicia di ellu", "FUNZIONI", "Ghjucà", "à", "Altri prughjetti", "Circa à", "circa ella", "Risorse", "Link esternu", "Riferimenti", "Fonti", "Si dicia di ella", "Testrootes", "Riferimenti esterni", "Oggetti Relativi", "Fonte", "NOTI:", "Ligami", "Per ella", "Release", "Testimonianza", "Note", "dì", "Risorse in Internet", "Vede ancu", "daveoù", "Ligame à l'esterno", "Circa ellu", "vede ancu", "film", "avanti", "Riferimenti", "Anu dettu O.", "Ligatu", "Link esternu", "Dichjarazioni circa", "circa à", "Citazioni sopra", "fonti", "Ambasciatore", "Si dice à ellu", "Letteratura", "circa ella stessu", "ligami esterni", "Applicazioni ligate", "Quotes cun rispettu à", "Vede", "finitu", "Ligami d'uccasioni", "citazioni circa", "Filmografia", "nota di nota", "fonti", "Risorse", "altri prughjetti", "ligami esterni", "Ligami", "Note", "Nota", "weblinks", "bibliografia", "Oggetti Relativi", "FUNZIONI", "Riferimenti", "Letteratura", "vede", "vede ancu", "nota di nota", "altri prughjetti"]
forbidden_by_language["cs"] = ["ve výrocích", "Reference", "Externí odkazy", "Související", "Také se podívejte na", "Bibliografie", "práce", "Pozoruhodný", "odkaz", "Další o tom", "v výročí", "Řekli o tom", "Filmografie", "Říkat", "Odkazy", "Řekli o něm", "Říkají se asi", "citáty", "Odkaz na", "odkazy", "Nejznámější knihy", "Vnější Tads.", "Externí připojení", "Prameny:", "o něm", "Závislé dotazy", "Odkaz", "Dabing", "filmografie", "pro něj", "Ó", "externí odkazy", "hra", "poznámka pod čarou", "Řekl to o něm", "Práce", "Hra", "na", "Další projekty", "O", "o ní", "Zdroje", "Externí odkaz", "Reference", "Prameny", "Řekl to o ní", "poznámky pod čarou", "Externí odkazy", "Související zboží", "Zdroj", "Poznámky:", "Odkazy", "Pro ni", "Releases", "Svědectví", "Poznámky", "říci", "Zdroje v Internetu", "Viz také", "daveoù.", "Odkaz na vnější stranu", "O něm", "viz také", "film", "na", "Reference", "Řekli O.", "Příbuzný", "Externí odkaz", "Výkazy", "o", "Citace výše", "prameny", "Velvyslanec", "Říká se mu", "literatura", "o sobě", "externí odkazy", "Související aplikace", "S ohledem na", "Vidět", "přes", "Přebytečné odkazy", "cituje", "filmografie", "poznámka pod čarou", "prameny", "zdroje", "Další projekty", "externí odkazy", "Odkazy", "poznámky", "Poznámka", "webové odkazy", "bibliografie", "Související zboží", "práce", "Reference", "literatura", "vidět", "viz také", "poznámka pod čarou", "Další projekty"]
forbidden_by_language["da"] = ["Eksterne henvisninger", "Kilder", "Se også på", "Bibliografi.", "arbejder", "Bemærkelsesværdig", "reference", "En anden om det", "i jubilæet.", "de sagde om det", "Filmografi.", "Siger til", "links.", "De sagde om ham", "Er sagt omkring", "citater", "Link til", "henvisninger.", "De mest berømte bøger", "Ydre tads.", "Eksterne forbindelser", "Kilder:", "om ham", "Afhængige forespørgsler", "Reference", "Dubbing.", "Filmografi.", "For ham", "O.", "eksterne links", "spiller.", "fodnote.", "Det blev sagt om ham", "Arbejder", "Spiller.", "på", "Andre projekter", "Om", "om hende", "Ressourcer.", "Eksternt link", "Referencer.", "Kilder.", "Det blev sagt om hende", "fodnoter.", "Eksterne referencer.", "Relaterede elementer.", "Kilde", "Noter:", "Links.", "For hende", "Udgivelser.", "Testimonials.", "Noter.", "sige", "Ressourcer på internettet", "Se også", "daveoù.", "Link til ydersiden", "Om ham", "se også", "film", "på", "Referencer.", "De sagde O.", "Relaterede", "Eksternt link", "Udsagn om", "om", "Citater ovenfor", "Kilder.", "Ambassadør", "Det siges til ham", "litteratur", "om sig selv.", "eksterne links", "Relaterede applikationer", "Citater med hensyn til", "Se", "over", "Overskydende links.", "citater om", "Filmografi.", "fodnote.", "Kilder.", "ressourcer.", "andre projekter", "eksterne links", "links.", "noter.", "Bemærk", "Weblinks.", "bibliografi", "relaterede elementer.", "arbejder", "Referencer.", "litteratur", "se", "se også", "fodnote.", "andre projekter"]
forbidden_by_language["de"] = ["Zitate über", "Filmografie", "Fußnote", "Quellen", "Ressourcen", "andere Projekte", "externe Links", "Links", "Notizen", "Hinweis", "Weblinks", "Literaturverzeichnis", "verwandte Artikel", "Werke", "Referenzen", "Literatur", "sehen", "siehe auch", "Fußnote", "andere Projekte", "Auch anschauen", "Bibliographie", "Werke", "Bemerkenswert", "Referenz", "Noch einer darüber", "im Jubiläum", "Sie sagten darüber", "Filmografie", "Sagen zu", "Links", "Sie sagten über ihn", "Sind sagte über", "Zitate", "Link zu", "Empfehlungen", "Die berühmtesten Bücher", "Outer tads", "Externe Verbindungen", "Quellen:", "über ihn", "Abhängige Anfragen", " Referenz", "Synchronisation", "Filmografie", "für ihn", "O", "Externe Links", "Spiele", "Fußnote", "es wurde über ihn gesagt", "Werke", "Spiele", " auf", "Andere Projekte", "Über", "Über sie", "Ressourcen", "Externer Link", "Referenzen", "Quellen", "Es wurde über sie gesagt", "Fußnoten", "Externe Verweise", "Verwandte Artikel", "Quelle", "Notizen:", "Links", "Für sie", "Veröffentlichungen", "Testimonials", "Nicht es", "sagen", "Ressourcen im Internet", "Siehe auch", "daveoù", "Link nach außen", "Über ihn", "Siehe auch", "Film", "on", "Referenzen", "Sie sagten O.", "Verwandte", "externer Link", "Aussagen über", "über", "Zitate oben", "Quellen", "Botschafter", "Es wird ihm gesagt", "Literatur", "über sich selbst", "externe Links", "Verwandte Anwendungen", "Zitate in Bezug auf", "Siehe", "über", "Überzählige Links", "Zitate über", "Filmografie", "Fußnote", " Quellen", "Ressourcen", "andere Projekte", "externe Links", "Links", "Notizen", "Hinweis", "Weblinks", "Bibliographie", "Verwandte Artikel", "Werke", "Referenzen", "Literatur", "sehen", "siehe auch", "Fußnote", "andere Projekte"]
forbidden_by_language["el"] = ["εξωτερικοί σύνδεσμοι", "Επίσης κοιτάξτε", "Βιβλιογραφία", "έργα", "Αξιοσημείωτος", "αναφορά", "Ένα άλλο για αυτό", "Στην επέτειο", "είπαν γι 'αυτό", "Φωτοτυπογραφία", "Λέγοντας", "συνδέσεις", "Είπαν γι 'αυτόν", "Λέγονται", "αποσπάσματα", "Συνδέω με", "παραπομπές", "Τα πιο διάσημα βιβλία", "Εξωτερικά μαύρα", "Εξωτερικές συνδέσεις", "Πηγές:", "για αυτόν", "εξαρτώμενα ερωτήματα", "Αναφορά", "Μεταγλώ", "φωτοτυπογραφία", "για εκείνον", "O", "εξωτερικοί σύνδεσμοι", "παίζει", "υποσημείωση", "Είχε ειπωθεί γι 'αυτόν", "Εργα", "Παίζει", "επάνω σε", "Άλλα έργα", "Σχετικά με", "σχετικά με αυτήν", "Πόροι", "Εξωτερικός σύνδεσμος", "βιβλιογραφικές αναφορές", "Πηγές", "Είχε ειπωθεί γι 'αυτήν", "υποσημειώσεις", "Εξωτερικές αναφορές", "Σχετικά Αντικείμενα", "Πηγή", "Σημειώσεις:", "Συνδέσεις", "Για εκείνη", "Απελευθερώνει", "Μαρτυρίες", "Σημειώνει", "λένε", "Πόροι στο Διαδίκτυο", "Δείτε επίσης", "daveoù", "Σύνδεσμος προς το εξωτερικό", "Για αυτόν", "δείτε επίσης", "ταινία", "επί", "βιβλιογραφικές αναφορές", "Είπαν Ο.", "Σχετίζεται με", "εξωτερικός σύνδεσμος", "Δηλώσεις σχετικά με", "σχετικά με", "Παραπάνω αναφορές", "πηγές", "Πρεσβευτής", "Του λέγεται", "λογοτεχνία", "Σχετικά με τον εαυτό της", "εξωτερικοί σύνδεσμοι", "Σχετικές εφαρμογές", "Αποσπάσματα σε σχέση με", "Βλέπω", "πάνω από", "Υπερβολικοί σύνδεσμοι", "αποσπάσματα περίπου", "φωτοτυπογραφία", "υποσημείωση", "πηγές", "πόροι", "Άλλα έργα", "εξωτερικοί σύνδεσμοι", "συνδέσεις", "σημειώνει", "Σημείωση", "διαδικτυακοί σύνδεσμοι", "βιβλιογραφία", "Σχετικά Αντικείμενα", "έργα", "βιβλιογραφικές αναφορές", "λογοτεχνία", "βλέπω", "δείτε επίσης", "υποσημείωση", "Άλλα έργα"]
forbidden_by_language["et"] = ["välislingid", "Vaata ka", "Bibliograafia", "töötama", "Märkimisväärne", "viide", "Teine sellest", "aastapäeval", "Nad ütlesid sellest", "Filmograafia", "Öeldes", "lingid", "Nad ütlesid temast", "Öeldakse", "tsitaat", "Link", "viited", "Kõige kuulsamad raamatud", "Outer Tads", "Välised ühendused", "Allikad:", "temast", "sõltus päringutest", "Viide", "Dubleerimine", "filmograafia", "tema jaoks", "O", "Välised lingid", "mängima", "joonealune märkus", "Ta ütles temast", "Töötama", "Mängima", "peale", "Muud projektid", "Umbes", "temast", "Vahendid", "Väline link", "viited", "Allikad", "Tema kohta öeldi", "joonealused märkused", "Välised viited", "Seotud üksused", "Allikas", "Märkused:", "Lingid", "Temale", "Väljaanded", "Iseloomustused", "Märgib", "ütlema", "Ressursid Internetis", "Vaata ka", "daveoù", "Link väljastpoolt", "Temast", "Vaata ka", "film", "peal", "Viited", "Nad ütlesid O.", "Seotud", "Väline link", "Avaldused", "umbes", "Valitud tsitaadid", "allikad", "Suursaadik", "See on talle öeldud", "kirjandus", "ennast", "Välised lingid", "Seotud rakendused", "Hinnapakkumisi", "Nägema", "üle", "Liigne lingid", "hinnapakkumisi", "filmograafia", "joonealune märkus", "allikad", "vahendid", "Muud projektid", "Välised lingid", "lingid", "märgib", "Märge", "weblinks", "bibliograafia", "Seotud üksused", "töötama", "viited", "kirjandus", "nägema", "Vaata ka", "joonealune märkus", "Muud projektid"]
forbidden_by_language["en"] = ["quotes about", "filmography", "footnote", "sources", "resources", "other projects", "external links", "links", "notes", "note", "weblinks", "bibliography", "related items", "works", "references", "literature", "see", "see also", "footnote", "other projects", "Also look at", "Bibliography", "works", "Notable", "reference", "Another about it", "in the anniversary", "they said about it", "Filmography", "Saying to", "links", "They said about him", "Are said about", "Link to", "referrals", "The most famous books", "Outer tads", "External connections", "Sources:", "about him", "depended queries", "Reference", "Dubbing", "filmography", "for him", "O", "External links", "plays", "footnote", "it was said about him", "Works", "Plays", "upon", "Other projects", "About", "about her", "Resources", "External link", "references", "Sources", "It was said about her", "footnotes", "External references", "Related items", "Source", "Notes:", "Links", "For her", "Releases", "Testimonials", "Notes", "say", "resources in Internet", "See also", "daveoù", "Link to the outside", "About him", "see also", "film", "on", "References", "They said O.", "Related", "external link", "Statements about", "about", "Citations above", "sources", "Ambassador", "It is said to him", "literature", "about herself", "external links", "Related Applications", "Quotes with respect to", "See", "over", "Excess links", "quotes about", "filmography", "footnote", "sources", "resources", "other projects", "external links", "links", "notes", "note", "weblinks", "bibliography", "related items", "works", "references", "literature", "see", "see also", "footnote", "other projects"]
forbidden_by_language["eu"] = ["Kanpo loturak", "Erreferentziak", "Begira ere", "Bibliografia", "zeregin", "Nabarmen", "kontsulta", "Horri buruz", "Urteurrenean", "Esan zuten", "Filmografia", "Esanda", "estekak", "Berari buruz esan zuten", "Esaten da", "aipamen", "Esteka", "ikuskapen", "Liburu ospetsuenak", "Kanpoko Tads", "Kanpoko konexioak", "Iturriak:", "Berari buruz", "Dependatutako kontsultak", "Kontsulta", "Bosbing", "Filmografia", "harentzat", "O", "Kanpoko estekak", "Plays", "oharra", "Berari buruz esan zen", "Zeregin", "Plays", "-en gainean", "Beste proiektu batzuk", "Ei buruz", "haren inguruan", "Baliabide", "Kanpoko esteka", "erreferentziak", "Iturriak", "Berari buruz esan zen", "Oharrak", "Kanpoko erreferentziak", "Lotutako elementuak", "Iturri", "Oharrak:", "Estekak", "Berarentzat", "Oheratu", "Testigantzak", "Ohar", "esan", "Baliabideak Interneten", "Ikusi ere", "Daveoù", "Kanpotik estekatu", "Berari buruz", "ikusi ere", "mintz", "-en gainean", "Erreferentziak", "Esan zuten O.", "Lotinduta", "Kanpoko esteka", "Adierazpenak", "ei buruz", "Goiko aipuak", "iturriak", "Enbaxadore", "Esan dio", "literatura", "bere buruari buruz", "Kanpoko estekak", "Lotutako aplikazioak", "Aipamenak", "Ikusi", "-en gainetik", "Gehiegizko estekak", "aipamenak buruz", "Filmografia", "oharra", "iturriak", "baliabide", "Beste proiektu batzuk", "Kanpoko estekak", "estekak", "ohar", "ohar", "Weblinkak", "Bibliografia", "Lotutako elementuak", "zeregin", "erreferentziak", "literatura", "ikusi", "ikusi ere", "oharra", "Beste proiektu batzuk"]
forbidden_by_language["fa"] = ["دربارهٔ او", "پیوند به بیرون", "جستارهای وابسته", "منبع\u200cدار", "منابع", "پیوند به\u200cبیرون", "همچنین نگاه کن", "کتابشناسی - فهرست کتب", "آثار", "قابل توجه", "مرجع", "یکی دیگر در مورد آن", "در سالگرد", "آنها درباره آن گفتند", "فیلمنامه نویسی", "گفتن به", "پیوندها", "آنها درباره او گفتند", "در مورد آنها گفته شده است", "نقل قول", "پیوند به", "ارجاع", "مشهورترین کتاب ها", "بیرونی", "اتصالات خارجی", "منابع:", "درباره ی او", "پرسش های وابسته", "ارجاع", "دوبله", "فیلمنامه نویسی", "برای او", "o", "لینک های خارجی", "نمایشنامه", "پاورقی", "در مورد او گفته شد", "آثار", "نمایشنامه", "بر", "پروژه های دیگر", "در باره", "در مورد او", "منابع", "لینک خارجی", "منابع", "منابع", "در مورد او گفته شد", "پانویسها و منابع", "منابع خارجی", "آیتم های مرتبط", "منبع", "یادداشت:", "پیوندها", "برای او", "منتشر شده", "توصیفات", "یادداشت", "گفتن", "منابع در اینترنت", "همچنین ببینید", "daveoù", "پیوند به خارج", "درباره ی او", "همچنین ببینید", "فیلم", "بر", "منابع", "آنها گفتند O.", "مربوط", "لینک خارجی", "اظهارات در مورد", "در باره", "نقل قول بالا", "منابع", "سفیر", "به او گفته شده است", "ادبیات", "درباره خودش", "لینک های خارجی", "برنامه های مرتبط", "نقل قول با توجه به", "دیدن", "بر فراز", "لینک های اضافی", "نقل قول در مورد", "فیلمنامه نویسی", "پاورقی", "منابع", "منابع", "پروژه های دیگر", "لینک های خارجی", "پیوندها", "یادداشت", "توجه داشته باشید", "weblinks", "کتابشناسی - فهرست کتب", "آیتم های مرتبط", "آثار", "منابع", "ادبیات", "دیدن", "همچنین ببینید", "پاورقی", "پروژه های دیگر"]
forbidden_by_language["es"] = ["citas sobre", "filmografía", "nota al pie", "fuentes", "recursos", "otros proyectos", "enlaces externos", "enlaces", "notas", "nota", "enlaces web", "bibliografía"," artículos relacionados"," obras"," referencias"," literatura"," ver"," ver también"," nota al pie"," otros proyectos"," Mirar también"," Bibliografía"," obras", "Notable", "referencia", "Otro sobre eso", "en el aniversario", "Ellos dijeron al respecto", "Filmografía", "Diciendo a", "Enlaces", "Ellos dijeron sobre él", "Son dijo sobre"," citas"," Enlace a"," referencias"," Los libros más famosos"," Tads externos"," Conexiones externas"," Fuentes:"," sobre él"," consultas dependientes"," Referencia"," Doblaje"," filmografía"," para él"," O"," Enlaces externos"," obras de teatro"," nota al pie"," se dijo sobre él"," Obras"," Obras de teatro"," sobre"," Otros proyectos"," Acerca de"," Acerca de ella"," Recursos"," Enlace externo"," Referencias"," Fuentes"," Se dijo sobre ella"," Notas al pie"," Referencias externas", "Artículos relacionados", "Fuente", "Notas:", "Enlaces", "Para ella", "Lanzamientos", "Testimonios", "No es"," decir"," recursos en Internet"," Ver también"," daveoù"," Enlace con el exterior"," Acerca de él"," ver también"," película"," sobre"," Referencias", "Dijeron O.", "Relacionado", "Enlace externo", "Declaraciones sobre", "Sobre", "Citas arriba", "Fuentes", "Embajador", "Se le dice a él", "Literatura", "sobre ella", "enlaces externos", "Aplicaciones relacionadas", "Citas con respecto a", "Ver", "sobre", "Enlaces en exceso", "Citas sobre", "filmografía", "nota al pie", " fuentes"," recursos"," otros proyectos"," enlaces externos"," enlaces"," notas"," nota"," enlaces web"," bibliografía"," artículos relacionados"," obras"," referencias", "literatura", "ver", "ver también", "nota al pie", "otros proyectos"]
forbidden_by_language["fi"] = ["lainaukset aiheesta","Aiheesta muualla" , "filmografia", "alaviite", "lähteet", "resurssit", "muut projektit", "ulkoiset linkit", "linkit", "muistiinpanot", "huomautus", "weblinks", "bibliografia", "liittyvät kohteet", "teokset", "viitteet", "kirjallisuus", "katso", "katso myös", "alaviite", "muut projektit", "katso myös", "Bibliografia", "teokset", "Huomattava", "viite", "Toinen siitä", "juhlapäivänä", "he sanoivat siitä", "Filmografia", "Sanominen", "linkit", "He sanoivat hänestä", "Ovatko sanoi aiheesta", "lainaukset", "Linkki", "viittaukset", "kuuluisimmat kirjat", "Ulkoiset", "Ulkoiset yhteydet", "Lähteet:", "Hänestä", "riippuvaiset kyselyt", " Viite", "Kopiointi", "filmografia", "hänelle", "O", "ulkoiset linkit", "näytelmät", "alaviite", "hänestä sanottiin", "teokset", "näytelmät", " upon", "Muut projektit", "Tietoja", "Hänestä", "Resurssit", "Ulkoinen linkki", "viitteet", "Lähteet", "Hänestä sanottiin", "alaviitteet", "Ulkoiset viitteet", "Aiheeseen liittyvät kohteet", "Lähde", "Huomautukset:", "Linkit", "Hänelle", "Julkaisut", "Lausunnot", "Ei es", "sano", "resurssit Internetissä", "Katso myös", "daveoù", "Linkki ulkopuolelta", "Tietoa hänestä", "katso myös", "elokuva", "päällä", "viitteet", "He sanoivat O.", "Aiheeseen liittyvä", "ulkoinen linkki", "Lausunnot aiheesta", "Tietoja", "Yllä olevat lainaukset", "lähteet", "suurlähettiläs", "Hänelle sanotaan", "kirjallisuus", "itsestään", "ulkoiset linkit", "Aiheeseen liittyvät sovellukset", "Lainaukset suhteessa", "Katso", "yli", "Ylimääräiset linkit", "lainauksia", "filmografia", "alaviite", " lähteet", "resurssit", "muut projektit", "ulkoiset linkit", "linkit", "muistiinpanot", "huomautus", "verkkolinkit", "bibliografia", "liittyvät kohteet", "teokset", "viitteet", "kirjallisuus", "katso", "katso myös", "alaviite", "muut hankkeet"]
forbidden_by_language["fr"] = ["citations sur", "filmographie", "note de bas de page", "sources", "ressources", "autres projets", "liens externes", "liens", "notes", "note", "liens web", "bibliogprahie", "éléments liés", "œuvres", "références", "littérature", "voir", "voir aussi", "note de bas de page", "autres projets", "Regarder aussi", "Bibliographie", "œuvres", "Remarquable", "référence", "Un autre à ce sujet", "à l'anniversaire", "ils en ont dit", "Filmographie", "En disant à", "liens", "Ils ont dit à propos de lui", "Sont dit à propos de", "citations", "Lien vers", "références", "Les livres les plus célèbres", "Tads externes", "Connexions externes", "Sources :", "à propos de lui", "requêtes dépendantes", " Référence", "Doublage", "filmographie", "pour lui", "O", "Liens externes", "pièces", "note de bas de page", "on a dit de lui", "Travaux", "Joues", " sur", "Autres projets", "À propos", "à propos d'elle", "Ressources", "Lien externe", "Références", "Sources", "On a dit d'elle", "Notes de bas de page", "Références externes", "Articles associés", "Source", "Notes :", "Liens", "Pour elle", "Releases", "Témoignages", "Non es", "dire", "ressources sur Internet", "Voir aussi", "daveoù", "Lien vers l'extérieur", "A propos de lui", "voir aussi", "film", "sur", "Références", "Ils ont dit O.", "Connexe", "lien externe", "Déclarations sur", "à propos", "Citations ci-dessus", "sources", "Ambassadeur", "On lui dit", "littérature", "à propos d'elle-même", "liens externes", "Applications associées", "Citations concernant", "Voir", "over", "Liens excédentaires", "Citations sur", "filmographie", "note de bas de page", " sources", "ressources", "autres projets", "liens externes", "liens", "notes", "note", "liens web", "bibliographie", "éléments associés", "ouvrages", "références", "littérature", "voir", "voir aussi", "note de bas de page", "autres projets"]
forbidden_by_language["he"] = ["ציטוטים על", "פילמוגרפיה", "הערת שוליים", "מקורות", "משאבים", "פרויקטים אחרים", "קישורים חיצוניים", "קישורים", "הערות", "הערה", "קישורי אינטרנט", "ביבליוגפרה'", "פריטים קשורים", "עבודות", "הפניות", "ספרות", "ראה", "ראה גם", "הערת שוליים", "פרויקטים אחרים", "הסתכל גם על", "ביבליוגרפיה", "עבודות", "ראוי לציון", "התייחסות", "עוד על זה", "ביום השנה", "אמרו על זה", "פילמוגרפיה", "אומרים ל", "קישורים", "אמרו עליו", "האם אמר על", "ציטוטים", "קישור אל", "הפניות", "'הספרים המפורסמים ביותר", "תקשורים חיצוניים", "חיבורים חיצוניים", "מקורות:", "עליו", "שאילתות תלויות", " הפניה", "דיבוב", "פילמוגרפיה", "בשבילו", "O", "קישורים חיצוניים", "הצגות", "הערת שוליים", "אמרו עליו", "עבודות", "מחזות", " על", "פרויקטים אחרים", "אודות", "עליה", "משאבים", "קישור חיצוני", "הפניות", "מקורות", "נאמר עליה", "הערות שוליים", "הפניות חיצוניות", "פריטים קשורים", "מקור", "הערות:", "קישורים", "בשבילה", "פרסומים", "המלצות", "לא es", "אמר", "משאבים באינטרנט", "ראה גם", "daveoù", "קישור אל החוץ", "אודותיו", "ראה גם", "סרט", "על", "הפניות", "הם אמרו O", "קשורים", "קישור חיצוני", "הצהרות על", "על", "ציטוטים למעלה", "מקורות", "שגריר", "נאמר לו", "ספרות", "על עצמה", "קישורים חיצוניים", "יישומים קשורים", "ציטוטים ביחס ל", "ראה", "מעל", "קישורים עודפים", "ציטוטים על", "פילמוגרפיה", "הערת שוליים", " מקורות", "משאבים", "פרויקטים אחרים", "קישורים חיצוניים", "קישורים", "הערות", "הערה", "קישורי אינטרנט", "ביבליוגרפיה", "פריטים קשורים", "עבודות", "הפניות", "ספרות", "ראה", "ראה גם", "הערת שוליים", "פרויקטים אחרים"]
forbidden_by_language["hi"] = ["के बारे में उद्धरण", "फिल्मोग्राफी", "फुटनोट", "स्रोत", "संसाधन", "अन्य परियोजनाएं", "बाहरी लिंक", "लिंक", "नोट्स", "नोट", "वेबलिंक", "ग्रंथ सूची", "संबंधित आइटम", "कार्य", "संदर्भ", "साहित्य", "देखें", "यह भी देखें", "फुटनोट", "अन्य परियोजनाएं", "भी देखें", "ग्रंथ सूची", "काम करता है", "उल्लेखनीय", "संदर्भ", "इसके बारे में एक और", "वर्षगांठ में", "उन्होंने इसके बारे में कहा", "फिल्मोग्राफी", "सेइंग टू", "लिंक्स", "उन्होंने उसके बारे में कहा", "हैं के बारे में कहा", "उद्धरण", "लिंक टू", "रेफ़रल", "सबसे प्रसिद्ध किताबें", "बाहरी बच्चे", "बाहरी कनेक्शन", "स्रोत:", "उसके बारे में", "आश्रित प्रश्न", " संदर्भ", "डबिंग", "फिल्मोग्राफी", "उसके लिए", "ओ", "बाहरी लिंक", "नाटक", "फुटनोट", "उसके बारे में कहा गया", "काम करता है", "नाटक", "अन्य प्रोजेक्ट", "अबाउट", "उसके बारे में", "संसाधन", "बाहरी लिंक", "संदर्भ", "स्रोत", "उसके बारे में कहा गया", "फुटनोट", "बाहरी संदर्भ", "संबंधित आइटम", "स्रोत", "नोट्स:", "लिंक", "उसके लिए", "रिलीज़", "प्रशंसापत्र", "नहीं es", "कहते हैं", "इंटरनेट में संसाधन", "यह भी देखें", "डेवो", "बाहर से लिंक करें", "उसके बारे में", "यह भी देखें", "फिल्म", "पर", "संदर्भ", "उन्होंने कहा ओ", "संबंधित", "बाहरी लिंक", "बयानों के बारे में", "के बारे में", "उपरोक्त उद्धरण", "स्रोत", "राजदूत", "यह उसे कहा जाता है", "साहित्य", "अपने बारे में", "बाहरी लिंक", "संबंधित अनुप्रयोग", "के संबंध में उद्धरण", "देखें", "ओवर", "अतिरिक्त लिंक", "उद्धरण के बारे में", "फिल्मोग्राफी", "फुटनोट", " स्रोत", "संसाधन", "अन्य परियोजनाएं", "बाहरी लिंक", "लिंक", "नोट्स", "नोट", "वेबलिंक", "ग्रंथ सूची", "संबंधित आइटम", "कार्य", "संदर्भ", "साहित्य", "देखें", "यह भी देखें", "फुटनोट", "अन्य परियोजनाएं"]
forbidden_by_language["hr"] = ["navodnici o", "filmografija", "fusnota", "izvori", "izvori", "drugi projekti", "vanjske veze", "veze", "bilješke", "napomena", "weblinks", "bibliografija", "srodne stavke", "radovi", "reference", "literatura", "vidi", "vidi također", "fusnota", "drugi projekti", "također pogledajte", "Bibliografija", "radovi", "Zapaženo", "referenca", "Još jedan o tome", "u obljetnici", "rekli su o tome", "Filmografija", "Kaže se", "linkovi", "Rekli su o njemu", "Jesu li rekao o", "citati", "Veza na", "preporuke", "Najpoznatije knjige", "Vanjski tad", "Vanjske veze", "Izvori:", "o njemu", "ovisni upiti", " Referenca", "Sinhronizacija", "filmografija", "za njega", "O", "Vanjske veze", "predstave", "fusnota", "rečeno je o njemu", "Djela", "Predstave", " na", "Drugi projekti", "O njoj", "O njoj", "Resursi", "Vanjski link", "reference", "Izvori", "Rečeno je o njoj", "fusnote", "Vanjske reference", "Povezane stavke", "Izvor", "Napomene:", "Veze", "Za nju", "Izdanja", "Izjave", "Ne es", "recimo", "resursi na Internetu", "Vidi također", "daveoù", "Veza prema van", "O njemu", "vidi također", "film", "on", "Reference", "Rekli su O.", "Povezano", "vanjska veza", "Izjave o", "o", "Navodi gore", "izvori", "Ambasador", "Rečeno mu je", "književnost", "o sebi", "vanjske veze", "Povezane aplikacije", "Citati s obzirom na", "Vidi", "preko", "Višak veza", "citati o", "filmografija", "fusnota", " izvori", "resursi", "ostali projekti", "vanjske veze", "veze", "bilješke", "bilješka", "web-veze", "bibliografija", "srodne stavke", "radovi", "reference", "književnost", "vidi", "vidi također", "fusnota", "drugi projekti"]
forbidden_by_language["is"] = ["tilvitnanir um", "kvikmyndafræði", "neðanmálsgrein", "heimildir", "auðlindir", "önnur verkefni", "ytri tenglar", "tenglar", "aths", "ath", "weblinks", "heimildaskrá", "tengd atriði", "verk", "tilvísanir", "bókmenntir", "sjá", "sjá einnig", "neðanmálsgrein", "önnur verkefni", "Skoðaðu líka", "Heimildaskrá", "verk", "Athyglisvert", "tilvísun", "Annað um það", "í afmælinu", "þeir sögðu um það", "Kvikmyndataka", "Seggja við", "tenglar", "Þeir sögðu um hann", "Eru sagði um", "tilvitnanir", "Tengill á", "tilvísanir", "Frægustu bækurnar", "Ytri tads", "Ytri tengingar", "Heimildir:", "um hann", "háðar fyrirspurnir", " Tilvísun", "talsetning", "kvikmyndataka", "fyrir hann", "O", "Ytri hlekkir", "leikrit", "neðanmálsgrein", "það var sagt um hann", "verk", "leikrit", " á", "Önnur verkefni", "Um", "um hana", "Auðlindir", "Ytri tengill", "tilvísanir", "Heimildir", "Það var sagt um hana", "neðanmálsgrein", "Ytri tilvísanir", "Tengd atriði", "Heimild", "Athugasemdir:", "Tenglar", "Fyrir hana", "Útgáfur", "Vitnisburður", "Ekki es", "segja", "tilföng á internetinu", "Sjá líka", "daveoù", "Tengill að utan", "Um hann", "sjá líka", "kvikmynd", "on", "Tilvísanir", "Þeir sögðu O.", "Tengd", "ytri tengill", "Yfirlýsingar um", "um", "Tilvitnanir að ofan", "heimildir", "sendiherra", "það er sagt við hann", "bókmenntir", "um sjálfa sig", "ytri tenglar", "Tengd forrit", "Tilvitnanir með tilliti til", "Sjá", "yfir", "Umframtenglar", "tilvitnanir um", "kvikmyndafræði", "neðanmáls", " heimildir", "tilföng", "önnur verkefni", "ytri hlekkir", "tenglar", "athugasemdir", "aths", "veftenglar", "heimildaskrá", "tengd atriði", "verk", "tilvísanir", "bókmenntir", "sjá", "sjá einnig", "neðanmálsgrein", "önnur verkefni"]
forbidden_by_language["it"] = ["citazioni su", "filmografia", "nota", "fonti", "risorse", "altri progetti", "link esterni", "link", "note", "nota", "link web", "bibliografia", "articoli correlati", "opere", "riferimenti", "letteratura", "vedi", "vedi anche", "nota a piè di pagina", "altri progetti", "guarda anche", "bibliografia", "lavori", "Notevole", "riferimento", "Un altro a riguardo", "nell'anniversario", "hanno detto a riguardo", "Filmografia", "Detto a", "link", "Hanno detto di lui", "Sono ha detto su", "citazioni", "Link a", "riferimenti", "I libri più famosi", "Schede esterne", "Connessioni esterne", "Fonti:", "su di lui", "domande dipendenti", " Riferimento", "Doppiaggio", "filmografia", "per lui", "O", "Link esterni", "ascolta", "nota a piè di pagina", "si diceva di lui", "Lavori", "Riproduzioni", " su", "Altri progetti", "Su", "su di lei", "Risorse", "Link esterno", "riferimenti", "Fonti", "Si diceva di lei", "note a piè di pagina", "Riferimenti esterni", "Articoli correlati", "Fonte", "Note:", "Link", "Per lei", "Pubblicazioni", "Testimonianze", "Non es", "say", "risorse in Internet", "Vedi anche", "daveoù", "Link all'esterno", "Su di lui", "vedi anche", "film", "on", "Riferimenti", "Hanno detto O.", "Correlato", "link esterno", "Dichiarazioni su", "su", "Citazioni sopra", "fonti", "Ambasciatore", "Si dice a lui", "letteratura", "su di sé", "link esterni", "applicazioni correlate", "citazioni rispetto a", "vedere", "sopra", "collegamenti in eccesso", "citazioni su", "filmografia", "nota a piè di pagina", " fonti", "risorse", "altri progetti", "link esterni", "link", "note", "nota", "link web", "bibliografia", "articoli correlati", "lavori", "riferimenti", "letteratura", "vedi", "vedi anche", "nota a piè di pagina", "altri progetti"]
forbidden_by_language["ja"] = ["引用'、 '脚注'、 'ソース'、 'リソース'、 'その他のプロジェクト'、 '外部リンク'、 'リンク'、 'ノート'、 '注'、 'ウェブリンク'、 '参考文献' ' '、'作品 '、'参考文献 '、'文学 '、'参照 '、'関連項目 '、'脚注 '、'その他のプロジェクト '、'関連項目 '、'参考文献 '、'作品 ' 、 '注目すべき'、 '参照'、 'それについての別の'、 '記念日'、 '彼らはそれについて言った'、 '映画誌'、 '言っている'、 'リンク'、 '彼らは彼について言った'、 ' '、'引用符 '、'リンク先 '、'参照 '、'最も有名な本 '、'外部接続 '、'外部接続 '、'出典: '、'彼について '、'依存クエリ '、'参照 '、'ダビング '、'フィルムグラフィー '、'彼のために '、' O '、'外部リンク '、'演劇 '、'脚注 '、'彼について言われた '、'作品 '、'演劇 '、' '、'その他のプロジェクト '、'について '、'彼女について '、'リソース '、'外部リンク '、'参照 '、'ソース '、'彼女について言われた '、'脚注 '、'外部参照 ' 、 '関連項目'、 'ソース'、 '注:'、 'リンク'、 '彼女のために'、 'リリース'、 '証言","脚注'、 '言う'、 'インターネットのリソース'、 '関連項目' 、、 '外部へのリンク'、 '彼について'、 '関連項目'、 '映画'、 '参考文献'、 '参照はO '、'リソース'、 '映画リンク'、 'リンク'、 '注'、 '注'、 'レリンク'、 '参照参照'、 '化参照'、 '作品 '、'参照 '、'文献 '、'参照 '、'関連項目 '、'脚注 '、'その他のプロジェクト "]
forbidden_by_language["ka"] = ["ციტატები", "ფილმოგრაფია", "სქოლიო", "წყაროები", "რესურსები", "სხვა პროექტები", "გარე ბმულები", "ბმულები", "შენიშვნები", "შენიშვნა", "ვებლინკები", "ბიბლიოგრაფია'", "დაკავშირებული ერთეულები", "ნამუშევრები", "ცნობები", "ლიტერატურა", "იხილეთ", "ასევე იხილეთ", "სქოლიო", "სხვა პროექტები", "ასევე შეხედე", "ბიბლიოგრაფია", "ნამუშევრები' , „აღსანიშნავი“, „მინიშნება“, „კიდევ ერთი ამის შესახებ“, „იუბილეზე“, „ამის შესახებ თქვეს“, „ფილმოგრაფია“, „ამბობენ“, „ბმულები“, „მასზე თქვეს“, „არის. ნათქვამია შესახებ", "ციტატები", "ბმული", "რეფერატები", "ყველაზე ცნობილი წიგნები", "გარე ბავშვები", "გარე კავშირები", "წყაროები:", "მის შესახებ", "დამოკიდებული შეკითხვები", " მითითება“, „დუბლირება“, „ფილმოგრაფია“, „მისთვის“, „ო“, „გარე ბმულები“, „სპექტაკლები“, „სქოლიო“, „მასზე ითქვა“, „ნამუშევრები“, „სპექტაკლები“, „ საფუძველზე", "სხვა პროექტები", "შესახებ", "მის შესახებ", "რესურსები", "გარე ბმული", "ცნობები", "წყაროები", "ითქვა მის შესახებ", "სქოლიოები", "გარე ცნობები", "დაკავშირებული ნივთები", "წყარო", "შენიშვნები:", "ბმულები", "მისთვის", "გამოშვებები", "ჩვენებები", "N ოტესები", "ვთქვათ", "რესურსები ინტერნეტში", "ასევე იხილეთ", "დავეოუ", "გარედან ბმული", "მის შესახებ", "ასევე იხილეთ", "ფილმი", "ჩართული", "ცნობები", "მათ თქვეს ო.", "დაკავშირებული", "გარე ბმული", "განცხადებები", "შესახებ", "ციტატები ზემოთ", "წყაროები", "ელჩი", "მას უთხრეს", "ლიტერატურა", "თავის შესახებ", "გარე ბმულები", "დაკავშირებული აპლიკაციები", "ციტატები დაკავშირებით", "იხილეთ", "ზედა", "ჭარბი ბმულები", "ციტატები", "ფილმოგრაფია", "სქოლიო", " წყაროები", "რესურსები", "სხვა პროექტები", "გარე ბმულები", "ბმულები", "შენიშვნები", "შენიშვნა", "ვებლინკები", "ბიბლიოგრაფია", "დაკავშირებული ერთეულები", "ნამუშევრები", "ცნობები", "ლიტერატურა", "იხილეთ", "ასევე იხილეთ", "სქოლიო", "სხვა პროექტები"]
forbidden_by_language["pl"] = ['zobacz też', 'o ', 'Zasoby', 'Wydanie', 'o nim', 'Link do zewnątrz', 'Cytaty w odniesieniu do', 'Bibliografia', 'Najbardziej znane książki', 'powiedzieli o tym', 'Powiedziane są o', 'Powiązane przedmioty', 'na', 'spinki do mankietów', 'Powiązane zastosowania', 'referencja', 'Powiedzieli o nim', 'Również patrzeć', 'Pracuje', 'literatura', 'Link zewnętrzny', 'Referencje.', 'Bibliografia', 'zależało zapytania', 'Daveoù.', 'Powiedział o niej', 'Spinki do mankietów', 'Pracuje', 'Uwagi:', 'Dubbing.', 'przypisy', 'Widzieć', 'Mówiono o nim', 'o niej', 'Ambasador', 'cytaty', 'bawić się', 'film', 'O.', 'Filmografia', 'O nim', 'Związane z', 'Zewnętrzne odniesienia', 'Cytaty powyżej', 'link zewnętrzny', 'Bibliografia', 'Inne projekty', 'Filmografia', 'Outer Tads.', 'Źródło', 'Zewnętrzne linki', 'Zasoby w Internecie.', 'notatka', 'Zobacz też', 'Referencja', 'Powiedzieli O.', 'Notatki', 'Dla niej', 'Znaczny', 'nad', 'Mówi się mu', 'Nadmiarowe linki', 'o', 'O sobie', 'Bawić się', 'ŹRÓDŁA', 'mowić', 'Inny o tym', 'Mówiąc do', 'Połączenia zewnętrzne', 'Zobacz też', 'od', 'O', 'w rocznicy.', 'Łączyć z', 'skierowania', 'dla niego', 'Źródła:', 'Oświadczenia o', 'ŹRÓDŁA', 'Zewnętrzne linki', 'cytaty', 'Filmografia', 'notatka', 'ŹRÓDŁA', 'Surowce', 'inne projekty', 'Zewnętrzne linki', 'spinki do mankietów', 'notatki', 'Notatka', 'linki internetowe', 'bibliografia', 'powiązane przedmioty', 'Pracuje', 'Bibliografia', 'literatura', 'zobaczyć', 'Zobacz też', 'notatka', 'inne projekty']
forbidden_by_language["pt"] = ["Ligações externas","citações sobre ele", "citações sobre ela", "filmografia", "nota de rodapé", "fontes", "recursos", "outros projetos", "links externos", "links", "notas", "nota", "links da web", "bibliografia", "itens relacionados", "obras", "referências", "literatura", "ver", "ver também", "nota de rodapé", "outros projetos" , "Veja também", "Bibliografia", "obras", "Notável", "Referência", "Outra sobre isso", "no aniversário", "foi dito sobre ela", "Filmografia", "Dizendo a "," links "," Disseram sobre ele "," Dizem sobre "," Link para "," referências "," Os livros mais famosos "," Meninos de fora "," Conexões externas "," Fontes: ", "sobre ele", "consultas dependentes", "Referência", "Dublagem", "filmografia", "para ele", "O", "Ligações externas", "peças", "nota de rodapé", "foi falado sobre ele "," Funciona "," Joga "," sobre "," Outros projetos "," Sobre "," sobre ela "," Recursos "," Link externo "," Referências "," Fontes "," Foi dito sobre ela "," notas de rodapé "," Referências externas "," Itens relacionados "," Fonte "," Notas: "," Link s "," Releases "," Notes "," resources in Internet "," See also "," daveoù "," Link to the outside "," About him "," see also "," film ", "Referências", "Disseram sobre ele", "Relacionadas", "link externo", "Declarações sobre" , "Citações acima", "fontes", "Embaixador", "Diz-se sobre ele", "literatura "," Disseram sobre ela "," links externos "," Aplicativos relacionados "," Citações a respeito de "," Ver ", " sobre "," Excesso de links "," citações sobre "," filmografia "," nota de rodapé "," fontes "," recursos "," outros projetos "," links externos "," links "," notas "," nota "," links da web "," bibliografia "," itens relacionados "," trabalhos "," referências "," literatura "," ver "," ver também "," nota de rodapé "," outros projetos "]
forbidden_by_language["ro"] = ['legături externe', 'despre', 'NOTE:', 'literatură', 'sa spus despre el', 'despre el', 'Dobbing.', 'Pentru ea', 'Se spune despre', 'Articole conexe', 'Notabil', 'Notele de subsol', 'Aplicații înrudite', 'Filmografie', 'Surse:', 'depinde de interogări', 'Referințe externe', 'Au spus despre el', 'Alte proiecte', 'Vedea', 'Uitați de asemenea la', 'Filmografie', 'Despre', 'pe', 'Legate de', 'O.', 'Ambasador', 'joacă', 'referinţă', 'pentru el', 'TADS OUTER.', 'Bibliografie', 'linkuri externe', 'În aniversare', 'Link-uri', 'Releases.', 'despre ea însăși', 'Link-uri', 'lucrări', 'Referinţă', 'Declarații despre', 'Vezi si', 'Cele mai cunoscute cărți', 'Lucrări', 'Sa spus despre ea', 'Link-uri excesive', 'citate', 'Link-ul la exterior', 'Sursă', 'Altul despre el', 'Spunând', 'film', 'Citate cu privire la', 'Spune', 'Daveoù.', 'Link extern', 'Citări de mai sus', 'Vezi si', 'peste', 'Surse.', 'Îi se spune', 'Au spus O.', 'Referințe', 'despre', 'peste', 'Legătura cu', 'Joacă', 'Referințe', 'despre ea', 'Surse.', 'linkuri externe', 'Au spus despre asta', 'Link extern', 'Mărturii', 'notă de subsol', 'Referințe', 'Note', 'Resurse pe Internet', 'Despre el', 'Resurse', 'Conexiuni externe', 'Citate despre', 'Filmografie', 'notă de subsol', 'Surse.', 'resurse', 'Alte proiecte', 'linkuri externe', 'Link-uri', 'note', 'Notă', 'Link-uri web', 'bibliografie', 'Articole conexe', 'lucrări', 'Referințe', 'literatură', 'vedea', 'Vezi si', 'notă de subsol', 'Alte proiecte']
forbidden_by_language["ru"] = ['Об ', 'Фильмография', 'примечания', 'ссылки ', 'см. также', 'Примечания:', 'литература', 'Было сказано о нем', 'о нем', 'Дублировка', 'Для нее', 'Говорится о', 'Похожие материалы', 'нотенно', 'сноски', 'Похожие приложения', 'Фильмография', 'Источники:', 'Взял запросы', 'Внешние ссылки', 'Они сказали о нем', 'Другие проекты', 'Видеть', 'Также смотрите', 'фильмография', 'О', 'на', 'Связанный', 'О', 'Посол', 'пьесы', 'ссылка', 'для него', 'Внешние тады', 'Библиография', 'внешние ссылки', 'в годовщине', 'Ссылки', 'Релизы', 'о себе', 'ссылки', 'работает', 'Ссылка', 'Утверждение о', 'смотрите также', 'Самые известные книги', 'Работает', 'Было сказано о ней', 'Избыточные ссылки', 'Ссылка на улицу', 'Источник', 'Другой об этом', 'Говорить', 'пленка', 'Цитаты по отношению к', 'сказать', 'Daveoù.', 'Внешняя ссылка', 'Цитаты выше', 'Смотрите также', 'над', 'Источники', 'Это сказано ему', 'Они сказали О.', 'использованная литература', 'о', 'на', 'Ссылка на', 'Пьесы', 'рефералы', 'о ней', 'источники', 'внешние ссылки', 'Они сказали об этом', 'внешняя ссылка', 'Отзывы', 'сноска', 'использованная литература', 'Примечания', 'Ресурсы в интернете', 'О нем', 'Ресурсы', 'Внешние соединения', 'цитаты о', 'фильмография', 'сноска', 'источники', 'Ресурсы', 'другие проекты', 'внешние ссылки', 'ссылки', 'Примечания', 'Примечание', 'веб ссылки', 'Библиография', 'Похожие материалы', 'работает', 'использованная литература', 'литература', 'видеть', 'смотрите также', 'сноска', 'другие проекты']
forbidden_by_language["sk"] = ['Povedali o', 'iné projekty', 'referencie', 'Poznámky:', 'literatúra', 'Hovorilo sa o ňom', 'o ňom', 'Dabovanie', 'Pre ňu', 'Hovoria', 'Súvisiace položky', 'Pozoruhodný', 'poznámky pod čiarou', 'Súvisiace aplikácie', 'Filmograf', 'Zdroje:', 'závislé dotazy', 'Externé referencie', 'Povedali o ňom', 'Ostatné projekty', 'Pozrieť sa', 'Pozrite sa aj na', 'filmograf', 'O', 'zapnutý', 'Súvisiaci', 'O', 'Veľvyslanec', 'hrať', 'referencia', 'pre neho', 'Vonkajšie tads', 'Bibliografia', 'vonkajšie odkazy', 'v výročnom', 'Spojenie', 'Vydania', 'o sebe', 'spojenie', 'Tvorba', 'Referencia', 'Vyhlásenia', 'pozri tiež', 'Najznámejšie knihy', 'Tvorba', 'Povedala sa o ňom', 'Prebytočné odkazy', 'citácie', 'Odkaz na vonkajšiu stranu', 'Zdroj', 'O tom', 'Hovoriť', 'film', 'Citáty s ohľadom na', 'povedať', 'daveoù', 'Externý odkaz', 'Vyššie uvedené citácie', 'Pozri tiež', 'nad', 'Zdroje', 'Hovorí sa mu', 'Povedali o.', 'Referencie', 'o', 'na', 'Odkaz na', 'Hrať', 'referencie', 'o nej', 'zdroje', 'vonkajšie odkazy', 'Povedali o tom', 'externý odkaz', 'Referencie', 'poznámka pod čiarou', 'referencie', 'Poznámky', 'Zdroje na internete', 'O ňom', 'Prostriedky', 'Externé pripojenia', 'cituje', 'filmograf', 'poznámka pod čiarou', 'zdroje', 'prostriedky', 'Ostatné projekty', 'vonkajšie odkazy', 'spojenie', 'poznámky', 'Poznámka', 'weblinks', 'Bibliografia', 'Súvisiace položky', 'Tvorba', 'referencie', 'literatúra', 'pozrieť sa', 'pozri tiež', 'poznámka pod čiarou', 'Ostatné projekty']
forbidden_by_language["sl"] = ['viri', 'sklici', 'Opombe:', 'Literatura.', 'Rečeno je bilo o njem', 'o njem', 'Dubbing.', 'Za njo', 'Rečeno', 'Podobni elementi', 'Opazno', 'Opombe', 'Povezane aplikacije', 'Filmografija', 'Viri:', 'odvisne poizvedbe', 'Zunanje reference', 'Rekli so o njem', 'Drugi projekti', 'Glejte', 'Oglejte si tudi', 'filmografija', 'Približno', 'On.', 'Povezano', 'O.', 'Veleposlanik', 'igra', 'Referenca', 'zanj', 'Zunanji tads.', 'Bibliografija', 'Zunanje povezave', 'V obletnici', 'Povezave', 'Sprosti', 'o sebi', 'Povezave', 'dela', 'Referenca', 'Izjave', 'Poglej tudi', 'Najbolj znane knjige', 'Dela', 'Rečeno je bilo o njej', 'Presežne povezave', 'citate', 'Povezava na zunanjost', 'Vir.', 'Drugo o tem', 'Rekel', 'film', 'Citati v zvezi s tem', 'reči.', 'daveoù.', 'Zunanja povezava', 'Zgoraj', 'Poglej tudi', 'nad', 'Viri', 'Rečeno mu je', 'Rekli so O.', 'Reference', 'približno', 'AN.', 'Povezava do', 'Igra', 'napotitve', 'o njej', 'Viri', 'Zunanje povezave', 'Rekli so o tem', 'Zunanja povezava', 'Pričevanja', 'opomba', 'Reference', 'Opombe', 'Viri na internetu', 'O njem', 'Viri', 'Zunanje povezave', 'navaja', 'filmografija', 'opomba', 'Viri', 'Viri', 'Drugi projekti', 'Zunanje povezave', 'Povezave', 'Opombe', 'Opomba', 'weblinks.', 'Bibliografija', 'Podobni elementi', 'dela', 'Reference', 'Literatura.', 'Glejte', 'Poglej tudi', 'opomba', 'Drugi projekti']
forbidden_by_language["sq"] = ['Thënie për të', 'Referimet', 'Shiko edhe', 'lidhje të jashtme', 'referime', 'Shënime:', 'letërsi', 'U tha për të', 'për të', 'Dublim', 'Për të', 'Janë thënë', 'Artikuj të ngjashëm', 'I dukshëm', 'fusnotat', 'Aplikime të ngjashme', 'Film', 'Burimet:', 'Pyetje të varura', 'Referencat e jashtme', 'Ata thanë për të', 'Projekte të tjera', 'Shiko', 'Gjithashtu shikoni', 'film', 'Rreth', 'në', 'I lidhur', 'O', 'Ambasador', 'luaj', 'referim', 'per atë', 'Tads e jashtme', 'Bibliografi', 'Linqe te jashtme', 'Në përvjetorin', 'Lidhje', 'Liron', 'për veten', 'lidhje', 'vepron', 'Referim', 'Deklaratat rreth', 'Shiko gjithashtu', 'Librat më të famshëm', 'Vepron', 'U tha për të', 'Lidhje të tepërta', 'kuotat', 'Lidhje me pjesën e jashtme', 'Burim', 'Një tjetër për këtë', 'Duke thënë', 'film', 'Kuotat në lidhje me', 'thua', 'daveoù', 'Lidhje e jashtme', 'Citimet e mësipërme', 'Shiko gjithashtu', 'mbi', 'Burime', 'Është thënë atij', 'Ata thanë O.', 'Referencat', 'rreth', 'në', 'Lidh me', 'Luaj', 'referime', 'për të', 'burime', 'Linqe te jashtme', 'ata thanë për këtë', 'lidhje e jashtme', 'Dëshmi', 'shënim shënim', 'referencat', 'Shënim', 'Burimet në Internet', 'Për të', 'Burime', 'Lidhjet e jashtme', 'citon rreth', 'film', 'shënim shënim', 'burime', 'burime', 'Projekte të tjera', 'Linqe te jashtme', 'lidhje', 'shënim', 'shënim', 'weblinks', 'bibliografi', 'Artikuj të ngjashëm', 'vepron', 'referencat', 'letërsi', 'Shiko', 'Shiko gjithashtu', 'shënim shënim', 'Projekte të tjera']
forbidden_by_language["ta"] = ['வெளி இணைப்புகள்', 'சான்றுகள்', 'குறிப்புகள்:', 'இலக்கியம்', 'அது அவரைப் பற்றி கூறப்பட்டது', 'அவரை பற்றி', 'டுபிங்', 'அவளுக்கு', 'பற்றி கூறப்படுகிறது', 'தொடர்புடைய பொருட்கள்', 'குறிப்பிடத்தக்கது', 'அடிக்குறிப்புகள்', 'தொடர்புடைய பயன்பாடுகள்', 'திரைப்படவியல்', 'ஆதாரங்கள்:', 'சார்ந்த கேள்விகள்', 'வெளிப்புற குறிப்புகள்', 'அவர்கள் அவரைப் பற்றி சொன்னார்கள்', 'பிற திட்டங்கள்', 'பார்க்க', 'மேலும் பாருங்கள்', 'திரைப்படவியல்', 'பற்றி', 'மீது', 'தொடர்புடைய', 'ஓ', 'தூதர்', 'நாடகம்', 'குறிப்பு', 'அவருக்கு', 'வெளிப்புற tads.', 'நூலகம்', 'வெளி இணைப்புகள்', 'ஆண்டு விழாவில்', 'இணைப்புகள்', 'வெளியீடுகள்', 'தன்னை பற்றி', 'இணைப்புகள்', 'வேலை', 'குறிப்பு', 'பற்றி அறிக்கைகள்', 'மேலும் காண்க', 'மிகவும் பிரபலமான புத்தகங்கள்', 'வேலை', 'அது அவளைப் பற்றி கூறப்பட்டது', 'அதிக இணைப்புகள்', 'மேற்கோள்கள்', 'வெளியே இணைப்பு', 'மூல', 'அது பற்றி மற்றொரு', 'சொல்லுங்கள்', 'திரைப்படம்', 'மரியாதையுடன் மேற்கோள்கள்', 'சொல்', 'daveoù.', 'வெளிப்புற இணைப்பு', 'மேலே மேற்கோள்கள்', 'மேலும் காண்க', 'மேல்', 'ஆதாரங்கள்', 'அது அவரிடம் கூறப்படுகிறது', 'அவர்கள் ஓ என்று சொன்னார்கள்.', 'குறிப்புகள்', 'பற்றி', 'மீது', 'இணைப்பு', 'நாடகம்', 'பரிந்துரைகளை', 'அவளை பற்றி', 'ஆதாரங்கள்', 'வெளி இணைப்புகள்', 'அவர்கள் அதைப் பற்றி சொன்னார்கள்', 'வெளிப்புற இணைப்பு', 'சான்றுகள்', 'அடிகுறிப்பு', 'குறிப்புகள்', 'குறிப்புகள்', 'இணையத்தில் வளங்கள்', 'அவரை பற்றி', 'வளங்கள்', 'வெளிப்புற இணைப்புகள்', 'மேற்கோள்கள் பற்றி', 'திரைப்படவியல்', 'அடிகுறிப்பு', 'ஆதாரங்கள்', 'வளங்கள்', 'பிற திட்டங்கள்', 'வெளி இணைப்புகள்', 'இணைப்புகள்', 'குறிப்புகள்', 'குறிப்பு', 'weblinks.', 'நூலகம்', 'தொடர்புடைய பொருட்கள்', 'வேலை', 'குறிப்புகள்', 'இலக்கியம்', 'பார்க்க', 'மேலும் காண்க', 'அடிகுறிப்பு', 'பிற திட்டங்கள்']
forbidden_by_language["te"] = ['మూలాలు', 'గమనికలు:', 'సాహిత్యం', 'ఇది అతని గురించి చెప్పబడింది', 'అతని గురించి', 'డబ్బింగ్', 'ఆమె కోసం', 'గురించి చెప్పారు', 'సంబంధిత అంశాలు', 'గుర్తించదగినది', 'ఫుట్నోట్స్', 'సంబంధిత అనువర్తనాలు', 'ఫిల్మోగ్రఫీ', 'సోర్సెస్:', 'వివరించిన ప్రశ్నలు', 'బాహ్య సూచనలు', 'వారు అతని గురించి చెప్పారు', 'ఇతర ప్రాజెక్టులు', 'చూడండి', 'కూడా చూడండి', 'ఫిల్మోగ్రఫీ', 'గురించి', 'పై', 'సంబంధిత', 'O.', 'రాయబారి', 'ప్లేస్', 'సూచన', 'అతనికి', 'ఔటర్ tads.', 'బిబ్లియోగ్రఫీ', 'బాహ్య లింకులు', 'వార్షికోత్సవంలో', 'లింకులు', 'విడుదలలు', 'ఆమె గురించి', 'లింకులు', 'పనిచేస్తుంది', 'సూచన', 'గురించి ప్రకటనలు', 'ఇది కూడ చూడు', 'అత్యంత ప్రసిద్ధ పుస్తకాలు', 'పనిచేస్తుంది', 'ఆమె గురించి చెప్పబడింది', 'అదనపు లింకులు', 'కోట్స్', 'వెలుపల లింక్', 'మూల', 'దాని గురించి మరొకటి', 'చెప్పడం', 'సినిమా', 'సంబంధించి కోట్స్', 'చెప్పండి', 'daveoù.', 'బాహ్య లింక్', 'పైన ఉన్న అనులేఖనాలు', 'ఇది కూడ చూడు', 'పైగా', 'సోర్సెస్', 'అది అతనికి చెప్పబడింది', 'వారు ఓ అన్నారు', 'ప్రస్తావనలు', 'గురించి', 'దీని తరువాత', 'లింక్', 'ప్లేస్', 'రెఫరల్స్', 'ఆమె గురించి', 'సోర్సెస్', 'బాహ్య లింకులు', 'వారు దాని గురించి చెప్పారు', 'బాహ్య లింక్', 'టెస్టిమోనియల్స్', 'ఫుట్నోట్', 'ప్రస్తావనలు', 'గమనికలు', 'ఇంటర్నెట్లో వనరులు', 'అతని గురించి', 'వనరులు', 'బాహ్య కనెక్షన్లు', 'కోట్స్ గురించి', 'ఫిల్మోగ్రఫీ', 'ఫుట్నోట్', 'సోర్సెస్', 'వనరులు', 'ఇతర ప్రాజెక్టులు', 'బాహ్య లింకులు', 'లింకులు', 'గమనికలు', 'గమనిక', 'weblinks.', 'బిబ్లియోగ్రఫీ', 'సంబంధిత అంశాలు', 'పనిచేస్తుంది', 'ప్రస్తావనలు', 'సాహిత్యం', 'చూడండి', 'ఇది కూడ చూడు', 'ఫుట్నోట్', 'ఇతర ప్రాజెక్టులు']
forbidden_by_language["tr"] = ['Hakkında', 'kaynakça', 'Notlar:', 'Edebiyat', 'Onun hakkında söylendi', 'onun hakkında', 'Dublaj', 'Onun için', 'Hakkında söyleniyor', 'İlgili öğeler', 'Dikkate değer', 'dipnotlar', 'İlgili uygulamalar', 'Filmografi', 'Kaynaklar:', 'SORUMLULUKLAR', 'Dış referanslar', 'Onun hakkında söylediler', 'Diğer projeler', 'Görmek', 'Ayrıca bak', 'filmografi', 'Hakkında', 'üzerinde', 'İlgili', 'Ö', 'Büyükelçi', 'oynar', 'referans', 'onun için', 'Dış tads', 'Bibliyografya', 'Dış bağlantılar', 'yıldönümünde', 'Linkler', 'Salıverme', 'kendisi hakkında', 'linkler', 'İşler', 'Referans', 'Hakkında açıklamalar', 'Ayrıca bakınız', 'En ünlü kitaplar', 'İşler', 'Onun hakkında söylendi', 'Aşırı bağlantılar', 'alıntı', 'Dışa bağlantı', 'Kaynak', 'Bunun hakkında başka', 'Söyleyerek', 'film', 'İle ilgili alıntılar', 'söylemek', 'Daveoù', 'Harici bağlantı', 'Yukarıdaki alıntılar', 'Ayrıca bakınız', 'üzerinde', 'Kaynaklar', 'Ona söyleniyor', 'O dediler.', 'Referanslar', 'hakkında', 'üzerine', 'Bağlamak', 'Oynar', 'yönlendirmeler', 'Onun hakkında', 'kaynaklar', 'Dış bağlantılar', 'Bunun hakkında söylediler', 'harici bağlantı', 'Tanıklık', 'dipnot', 'Referanslar', 'Notlar', 'İnternetteki kaynaklar', 'Onun hakkında', 'Kaynaklar', 'Harici Bağlantılar', 'hakkında alıntılar', 'filmografi', 'dipnot', 'kaynaklar', 'Kaynaklar', 'diğer projeler', 'Dış bağlantılar', 'linkler', 'notalar', 'Not', 'İnternet linkleri', 'bibliyografya', 'ilgili öğeler', 'İşler', 'Referanslar', 'Edebiyat', 'görmek', 'Ayrıca bakınız', 'dipnot', 'diğer projeler']
forbidden_by_language["uk"] = ['Про ', 'Джерела', 'примітки', 'література', 'Примітки:', 'література', 'Про це сказано', 'про нього', 'Дублювання', 'Для неї', 'Сказані', "Пов'язані елементи", 'Нотен', 'виноски', "Пов'язані заявки", 'Фільмографія', 'Джерела:', 'залежати від запитів', 'Зовнішні посилання', 'Вони сказали про нього', 'Інші проекти', 'Побачити', 'Також подивіться', 'фільмографія', 'Про', 'на', 'Споріднений', 'O', 'Посла', 'грає', 'довідник', 'для нього', 'Зовнішні tads', 'Бібліографія', 'зовнішні посилання', 'у річницю', 'Посилання', 'Релізи', 'про себе', 'посилання', 'робіт', 'Довідник', 'Заяви про', 'Дивись також', 'Найвідоміші книги', 'Робіт', 'Це було сказано про неї', "Надлишкові зв'язки", 'котирування', 'Посилання назовні', 'Джерело', 'Інше про це', 'Кажуть', 'плівка', 'Цитати по відношенню до', 'казати', 'дав', 'Зовнішня посилання', 'Цитати вище', 'Дивись також', 'надмірно', 'Джерела', 'Йому сказано', 'Вони сказали О.', 'Посилання', 'про', 'на', 'Посилання на', 'Грає', 'рефераль', 'про неї', 'джерела', 'зовнішні посилання', 'вони сказали про це', 'Зовнішня посилання', 'Відгуки', 'виноска', 'посилання', 'Ноти', 'Ресурси в Інтернеті', 'Про нього', 'Ресурси', "Зовнішні з'єднання", 'фільмографія', 'виноска', 'джерела', 'ресурси', 'Інші проекти', 'зовнішні посилання', 'посилання', 'ноти', 'Примітка', 'weblinks', 'бібліографія', "Пов'язані елементи", 'робіт', 'посилання', 'література', 'побачити', 'Дивись також', 'виноска', 'Інші проекти']
forbidden_by_language["ur"] = ['حوالہ جات', 'نوٹ:', 'ادب', 'اس کے بارے میں یہ کہا گیا تھا', 'اس کے بارے میں', 'ڈوبنگ', 'اس لڑکی کے لئے', 'کے بارے میں کہا جاتا ہے', 'متعلقہ اشیاء', 'قابل ذکر', 'فوٹیاں', 'متعلقہ ایپلی کیشنز', 'فلمگرافی', 'ذرائع:', 'منحصر سوالات', 'بیرونی حوالہ جات', 'انہوں نے اس کے بارے میں کہا', 'دیگر منصوبوں', 'دیکھو', 'بھی دیکھو', 'فلمگرافی', 'کے بارے میں', 'پر', 'متعلقہ', 'اے', 'سفیر', 'ادا کرتا ہے', 'حوالہ', 'اس کے لیے', 'بیرونی ٹاد', 'بائبلگرافی', 'بیرونی روابط', 'سالگرہ میں', 'روابط', 'ریلیز', 'خود کے بارے میں', 'روابط', 'کام', 'حوالہ', 'کے بارے میں بیانات', 'بھی دیکھو', 'سب سے مشہور کتابیں', 'کام', 'اس کے بارے میں یہ کہا گیا تھا', 'اضافی لنکس', 'حوالہ جات', 'باہر سے رابطہ کریں', 'ذریعہ', 'اس کے بارے میں ایک اور', 'کہہ رہا ہے', 'فلم', 'احترام کے ساتھ حوالہ جات', 'کہہ دو', 'ڈیویو', 'بیرونی لنک', 'حوالہ اوپر', 'بھی دیکھو', 'زیادہ', 'ذرائع', 'اس سے کہا جاتا ہے', 'انہوں نے کہا اے', 'حوالہ جات', 'کے بارے میں', 'پر', 'سے رابطہ کریں', 'ادا کرتا ہے', 'حوالہ جات', 'اس کے بارے میں', 'ذرائع', 'بیرونی روابط', 'انہوں نے اس کے بارے میں کہا', 'بیرونی لنک', 'تعریف', 'فوٹیوٹ', 'حوالہ جات', 'نوٹس', 'انٹرنیٹ میں وسائل', 'اس کے بارے میں', 'حوالہ جات', 'بیرونی کنکشن', 'کے بارے میں حوالہ جات', 'فلمگرافی', 'فوٹیوٹ', 'ذرائع', 'حوالہ جات', 'دیگر منصوبوں', 'بیرونی روابط', 'روابط', 'نوٹس', 'نوٹ', 'ویب لنکس', 'بائبلگرافی', 'متعلقہ اشیاء', 'کام', 'حوالہ جات', 'ادب', 'دیکھو', 'بھی دیکھو', 'فوٹیوٹ', 'دیگر منصوبوں']
forbidden_by_language["zh"] = ["引用","片目","脚注","来源","资源","其他项目","外部链接","链接","注释","注释","网络链接","参考书目","相关项目","作品","参考文献","文献","参见","另见","脚注","其他项目","另看","参考书目","作品", "著名","参考","他们说的","电影","关于他","相关查询","参考","配音","电影","为他","外部链接","戏剧","脚注","有人说他","作品","戏剧","其他项目"]
forbidden = [f.lower() for l in list(forbidden_by_language.values()) for f in l]
class EntityWithQuotes:
def __init__(self, entity, id, language):
def getQuotesFromUnstructuredText(section, id, wikiquote_id):
def getQ(section, id):
nonlocal quotes
nonlocal n
nonlocal level
nonlocal section_titles
section_titles = section_titles[:level]
section_titles.append(section.title.text)
for line in section.lines:
n+=1
quote = untemplatedQuote(section_titles, line, id, n, language, wikiquote_id)
quotes.update({quote.id:quote})
for sec in section.sub_sections:
if sec.title.text.lower() in forbidden+[i+" "+wikiquote_id.lower() for i in forbidden]:
continue
level=level+1
getQ(sec, id)
level=level-1
# filtering for empty Quotes using __bool__
temp_quotes = dict(quotes)
for quote_id in temp_quotes:
if not quotes[quote_id]:
del quotes[quote_id]
quotes = {}
n = 1
level = 0
section_titles = []
getQ(section, id)
return quotes
def getQuotesFromTemplates(section, id, wikiquote_id):
def getTempQ(section, id):
nonlocal quotes
nonlocal n
nonlocal level
nonlocal section_titles
section_titles = section_titles[:level]
section_titles.append(section.title.text)
for template in section.templates:
n+=1
templ = template.values
quote = templatedQuote(id, n, language, section_titles, wikiquote_id, **templ)
quotes.update({quote.id:quote})
for sec in section.sub_sections:
if sec.title.text.lower() in forbidden+[i+" "+wikiquote_id.lower() for i in forbidden]:
continue
level=level+1
getTempQ(sec, id)
level=level-1
# filtering for empty Quotes using __bool__
temp_quotes = dict(quotes)
for quote_id in temp_quotes:
if not quotes[quote_id]:
del quotes[quote_id]
quotes = {}
n = 1
level = 0
section_titles = []
getTempQ(section, id)
return quotes
self.lang=language
self.entity = entity
self.wikiquote_id = entity.wikiquote_id
self.wikiquote_page_id = entity.wikiquote_page_id
self.wikidata_id = entity.wikidata_id
self.wikipedia_id = entity.wikipedia_id
self.types = []
self.id = id
self.quotes = dict()
if self.lang in languages_with_templates:
self.quotes = getQuotesFromTemplates(entity.main_section, id, self.wikiquote_id)
elif self.lang in hybrid_languages:
self.quotes = getQuotesFromTemplates(entity.main_section, id, self.wikiquote_id)
self.quotes.update(getQuotesFromUnstructuredText(entity.main_section, self.id, self.wikiquote_id))
else:
self.quotes = getQuotesFromUnstructuredText(entity.main_section, self.id, self.wikiquote_id)
self.quotes = collections.OrderedDict(sorted(self.quotes.items()))
class CompleteEntity():
def __init__(self, id, entities):
self.entities = entities
self.wikiquoteIds = dict()
self.wikiquotePageIds= dict()
self.wikipediaIds= dict()
for language in self.entities:
self.wikiquoteIds.update({language:self.entities[language][0].entity.wikiquote_id})
self.wikiquotePageIds.update({language:self.entities[language][0].entity.wikiquote_page_id})
self.wikipediaIds.update({language:self.entities[language][0].entity.wikipedia_id})
self.wikidata_id = id
| 337.772549
| 1,878
| 0.652603
| 12,537
| 86,132
| 4.613065
| 0.282683
| 0.002594
| 0.013141
| 0.002282
| 0.184649
| 0.164471
| 0.154459
| 0.144984
| 0.13518
| 0.125999
| 0
| 0.000253
| 0.127757
| 86,132
| 254
| 1,879
| 339.102362
| 0.745701
| 0.005074
| 0
| 0.202643
| 0
| 0.030837
| 0.688585
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026432
| false
| 0
| 0.013216
| 0
| 0.057269
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
6ab43a62d7fe9c3851fb93216f017948e9264012
| 4,665
|
py
|
Python
|
vagrant/optraj.istic.univ-rennes1.fr/src/system/Assignment.py
|
gpierre42/optraj
|
53beb81c669093b866a786f2c1df9c663bbd7224
|
[
"Apache-2.0"
] | null | null | null |
vagrant/optraj.istic.univ-rennes1.fr/src/system/Assignment.py
|
gpierre42/optraj
|
53beb81c669093b866a786f2c1df9c663bbd7224
|
[
"Apache-2.0"
] | null | null | null |
vagrant/optraj.istic.univ-rennes1.fr/src/system/Assignment.py
|
gpierre42/optraj
|
53beb81c669093b866a786f2c1df9c663bbd7224
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf8
'''
Created on 29 Oct 2013
@author: Nicolas Poirey
'''
from Worker import Worker
from Phase import Phase
class Assignment(object):
'''
Classe Assignment définissant une affectation
attributs publics :
- num : l'id de l'affectation (int)
- worker : l'ouvrier de l'affectation (Worker.Worker)
- phase : la phase de l'affectation (Phase.Phase)
'''
def __init__(self, num=-1, worker=Worker(), phase=Phase()):
'''
Constructeur d'une affectation d'un ouvrier à un chantier
Args:
num : le numero unique identifiant l'affectation. (int)
worker : l'ouvrier concerné. (Worker.Worker)
phase : la phase de l'affectation (Phase.Phase)
'''
self._num = num
self._worker = worker
self._phase = phase
def __str__(self):
'''
Retourne l'affecation sous une forme lisible pour l'humain
Returns:
l'affectation sous forme de string.
Examples:
>>> p.__str__()
>>> "L'affectation, d'id 3, de l'ouvrier (id 5) Doe John, sur la phase d'id 4"
'''
return "L'affectation, d'id {}, de l'ouvrier (id {}) {} {}, sur la phase d'id {}".format(self.num, self.worker.num, self.worker.firstName, self.worker.name, \
self.phase.num) + " " + str(self._phase)
'''
/// @cond
========================= Setters/accesseurs ==============================
'''
#ifndef DOXYGEN_SHOULD_SKIP_THIS
@property
def num(self):
"""
Getter du num
"""
return self._num
@num.setter
def num(self, value):
"""
Setter du num
"""
self._num = value
@property
def worker(self):
"""
Getter du worker
"""
return self._worker
@worker.setter
def worker(self, value):
"""
Setter du worker
"""
self._worker = value
@property
def phase(self):
"""
Getter de la phase
"""
return self._phase
@phase.setter
def phase(self, value):
"""
Setter de la phase
"""
self._phase = value
#endif /* DOXYGEN_SHOULD_SKIP_THIS */
'''
/// @endcond
================ Méthodes publiques ================
'''
def serial(self):
'''
Sérialise une affectation
Returns:
un dict contenant l'affectation serialisé
Example:
>>> {'phase': {'needs': {33: {'num': 33, '__class__': 'Need', 'need': 10, 'craft': {'num': 2, '__class__': 'Craft', 'name': u'Macon'}, 'qualification': {'num': 4, '__class__': 'Qualification', 'name': u'N3P2'}, 'phase': 19}, 34: {'num': 34, '__class__': 'Need', 'need': 20, 'craft': {'num': 2, '__class__': 'Craft', 'name': u'Macon'}, 'qualification': {'num': 5, '__class__': 'Qualification', 'name': u'N3P1'}, 'phase': 19}, 92: {'num': 92, '__class__': 'Need', 'need': 2, 'craft': {'num': 7, '__class__': 'Craft', 'name': u"Agent d'entretien"}, 'qualification': {'num': 6, '__class__': 'Qualification', 'name': u'N2'}, 'phase': 19}, 79: {'num': 79, '__class__': 'Need', 'need': 2, 'craft': {'num': 10, '__class__': 'Craft', 'name': u"Chef d'\xe9quipe"}, 'qualification': {'num': 2, '__class__': 'Qualification', 'name': u'N4P2'}, 'phase': 19}}, 'num': 19, 'numYear': 2014, 'numWeek': 15, 'totalWorkers': 0, 'nbWorkers': 0, '__class__': 'Phase', 'numSite': 4}, 'num': 391, 'worker': {'num': 101, 'licence': u' ', 'name': u'JOUSSEAUME', 'firstName': u'MICKAEL', 'birthdateY': '1972', '__class__': 'Worker', 'birthdateM': '11', 'craft': {'num': 2, '__class__': 'Craft', 'name': u'Macon'}, 'qualification': {'num': 4, '__class__': 'Qualification', 'name': u'N3P2'}, 'position': {'latitude': 47.9292, '__class__': 'Position', 'longitude': -1.94175, 'address': u'6 RUE DE RENNES 35330 LA CHAPELLE BOUEXIC'}, 'birthdateD': '26'}, '__class__': 'Assignment'}
'''
return {"__class__": "Assignment",
"num": self.num,
"worker": self.worker.serial(),
"phase": self.phase.serial()
}
def phaseNumber(self):
'''
Retourne le numéro de la phase associée
Returns:
numPhase (int).
'''
return self._phase.num
def workerNumber(self):
'''
retourne le numéro de l'ouvrier associé
Returns:
numWorker (int).
'''
return self._worker.num
| 34.555556
| 1,463
| 0.520043
| 499
| 4,665
| 4.649299
| 0.304609
| 0.023707
| 0.030172
| 0.032328
| 0.197845
| 0.167672
| 0.123707
| 0.123707
| 0.123707
| 0.123707
| 0
| 0.032012
| 0.296892
| 4,665
| 134
| 1,464
| 34.813433
| 0.675305
| 0.552412
| 0
| 0.078947
| 0
| 0.026316
| 0.073407
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.289474
| false
| 0
| 0.052632
| 0
| 0.552632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 3
|
6abef106632056c480a54511ff7725bfc1193a55
| 4,116
|
py
|
Python
|
scripts/automation/trex_control_plane/interactive/trex/examples/stl/ndr_plugin.py
|
timgates42/trex-core
|
efe94752fcb2d0734c83d4877afe92a3dbf8eccd
|
[
"Apache-2.0"
] | 956
|
2015-06-24T15:04:55.000Z
|
2022-03-30T06:25:04.000Z
|
scripts/automation/trex_control_plane/interactive/trex/examples/stl/ndr_plugin.py
|
angelyouyou/trex-core
|
fddf78584cae285d9298ef23f9f5c8725e16911e
|
[
"Apache-2.0"
] | 782
|
2015-09-20T15:19:00.000Z
|
2022-03-31T23:52:05.000Z
|
scripts/automation/trex_control_plane/interactive/trex/examples/stl/ndr_plugin.py
|
angelyouyou/trex-core
|
fddf78584cae285d9298ef23f9f5c8725e16911e
|
[
"Apache-2.0"
] | 429
|
2015-06-27T19:34:21.000Z
|
2022-03-23T11:02:51.000Z
|
import stl_path
class MyNDRPlugin():
def __init__(self):
pass
def pre_iteration(self, finding_max_rate, run_results=None, **kwargs):
""" Function ran before each iteration.
:parameters:
finding_max_rate: boolean
Indicates whether we are running for the first time, trying to find the max rate. In this is the case, the run_results will be None.
run_results: dict
A dictionary that contains the following keys:
queue_full_percentage: Percentage of packets that are queued.
drop_rate_percentage: Percentage of packets that were dropped.
rate_tx_bps: TX rate in bps.
rate_rx_bps: RX rate in bps.
tx_util: TX utilization percentage.
latency: Latency groups.
cpu_util: CPU utilization percentage.
tx_pps: TX in pps.
rx_pps: RX in pps.
tx_bps: TX in bps.
rx_bps: RX in bps.
bw_per_core: Bandwidth per core.
rate_p: Running rate in percentage out of max.
total_tx_L1: Total TX L1.
total_rx_L1: Total RX L1.
iteration: Description of iteration (not necessarily a number)
Pay attention: The rate is of the upcoming iteration. All the rest are of the previous iteration.
kwargs: dict
List of tunables passed as parameters.
"""
# Pre iteration function. This function will run before TRex transmits to the DUT.
# Could use this to better prepare the DUT, for example define shapers, policers, increase buffers and queues.
# You can receive tunables in the command line, through the kwargs argument.
pass
def post_iteration(self, finding_max_rate, run_results, **kwargs):
""" Function ran after each iteration.
:parameters:
finding_max_rate: boolean
Indicates whether we are running for the first time, trying to find the max rate. If this is the case, some values of run_results (like iteration for example) are not relevant.
run_results: dict
A dictionary that contains the following keys:
queue_full_percentage: Percentage of packets that are queued.
drop_rate_percentage: Percentage of packets that were dropped.
rate_tx_bps: TX rate in bps.
rate_rx_bps: RX rate in bps.
tx_util: TX utilization percentage.
latency: Latency groups.
cpu_util: CPU utilization percentage.
tx_pps: TX in pps.
rx_pps: RX in pps.
tx_bps: TX in bps.
rx_bps: RX in bps.
bw_per_core: Bandwidth per core.
rate_p: Running rate in percentage out of max.
total_tx_L1: Total TX L1.
total_rx_L1: Total RX L1.
iteration: Description of iteration (not necessarily a number)
kwargs: dict
List of tunables passed as parameters.
:returns:
bool: should stop the benchmarking or not.
"""
# Post iteration function. This function will run after TRex transmits to the DUT.
# Could use this to decide if to continue the benchmark after querying the DUT post run. The DUT might be overheated or any other thing that might make you want to stop the run.
# You can receive tunables in the command line, through the kwargs argument.
should_stop = False
return should_stop
# dynamic load of python module
def register():
return MyNDRPlugin()
| 35.482759
| 196
| 0.557337
| 486
| 4,116
| 4.58642
| 0.292181
| 0.017945
| 0.025123
| 0.052041
| 0.71153
| 0.71153
| 0.679228
| 0.64603
| 0.608345
| 0.57694
| 0
| 0.003259
| 0.403547
| 4,116
| 116
| 197
| 35.482759
| 0.904684
| 0.749028
| 0
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.363636
| false
| 0.181818
| 0.090909
| 0.090909
| 0.727273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 3
|
6ac6979dc72c67c44ef423ebf8b3a34cc0b6d4cc
| 539
|
py
|
Python
|
gva/data/validator/is_valid_enum.py
|
gva-jjoyce/gva_data
|
cda990d0abb4b175025aaf16e75192bd9cc213af
|
[
"Apache-2.0"
] | null | null | null |
gva/data/validator/is_valid_enum.py
|
gva-jjoyce/gva_data
|
cda990d0abb4b175025aaf16e75192bd9cc213af
|
[
"Apache-2.0"
] | 24
|
2020-12-24T12:21:42.000Z
|
2021-01-28T14:22:38.000Z
|
gva/data/validator/is_valid_enum.py
|
gva-jjoyce/gva_data
|
cda990d0abb4b175025aaf16e75192bd9cc213af
|
[
"Apache-2.0"
] | null | null | null |
"""
Enumerator Test
"""
from typing import Any
class is_valid_enum():
"""
Test if a variable is on a list of valid values
"""
__slots__ = ('symbols')
def __init__(self, **kwargs):
"""
-> "type": "enum", "symbols": ["up", "down"]
symbols: list of allowed values (case sensitive)
"""
self.symbols = kwargs.get('symbols', ())
def __call__(self, value: Any) -> bool:
return value and value in self.symbols
def __str__(self):
return f'enum {self.symbols}'
| 22.458333
| 56
| 0.569573
| 65
| 539
| 4.446154
| 0.584615
| 0.103806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.283859
| 539
| 24
| 57
| 22.458333
| 0.748705
| 0.293135
| 0
| 0
| 0
| 0
| 0.10061
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.111111
| 0.222222
| 0.888889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 3
|
6ac89300a5b9e4ad6f97864631998446abb69eb0
| 313
|
py
|
Python
|
proto_3/ddq/topics/logics/topic.py
|
jadnohra/connect
|
8eb21e6f122898094447bc3d5edb3053d5a2adf2
|
[
"Unlicense"
] | null | null | null |
proto_3/ddq/topics/logics/topic.py
|
jadnohra/connect
|
8eb21e6f122898094447bc3d5edb3053d5a2adf2
|
[
"Unlicense"
] | 6
|
2021-03-19T12:06:56.000Z
|
2022-03-12T00:23:09.000Z
|
proto_3/ddq/topics/logics/topic.py
|
jadnohra/connect
|
8eb21e6f122898094447bc3d5edb3053d5a2adf2
|
[
"Unlicense"
] | null | null | null |
from typing import List
from ddq.taxonomy.reference import Reference
from ddq.topics.topic import Topic
class Logic(Topic):
def references(self) -> List[Reference]:
return [
Reference("Classical and Nonclassical Logics",
[("Eric", "Schechter")])
]
| 26.083333
| 59
| 0.610224
| 32
| 313
| 5.96875
| 0.65625
| 0.073298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.29393
| 313
| 12
| 60
| 26.083333
| 0.864253
| 0
| 0
| 0
| 0
| 0
| 0.146497
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.333333
| 0.111111
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 3
|
6ad38265801ddbc75fcce3bfaba00694854f353b
| 690
|
py
|
Python
|
PyGame/pygame1/tutorial1/startercode.py
|
hoppfull/Legacy-Python
|
43f465bfdb76c91f2ac16aabb0783fdf5f459adb
|
[
"MIT"
] | null | null | null |
PyGame/pygame1/tutorial1/startercode.py
|
hoppfull/Legacy-Python
|
43f465bfdb76c91f2ac16aabb0783fdf5f459adb
|
[
"MIT"
] | null | null | null |
PyGame/pygame1/tutorial1/startercode.py
|
hoppfull/Legacy-Python
|
43f465bfdb76c91f2ac16aabb0783fdf5f459adb
|
[
"MIT"
] | null | null | null |
from pygamehelper import *
from pygame import *
from pygame.locals import *
from vec2d import *
from random import uniform
import numpy as np
class Starter(PygameHelper):
def __init__(self):
self.w, self.h = 800, 600
PygameHelper.__init__(self, size=(self.w, self.h), fill=((0,0,0)))
def update(self):
pass
def keyUp(self, key):
pass
def mouseUp(self, button, pos):
pass
def mouseMotion(self, buttons, pos, rel):
pass
def draw(self):
self.screen.fill((np.random.random()*255, np.random.random()*255, np.random.random()*255))
s = Starter()
s.mainLoop(40)
| 22.258065
| 98
| 0.588406
| 90
| 690
| 4.422222
| 0.433333
| 0.100503
| 0.105528
| 0.128141
| 0.128141
| 0.128141
| 0.128141
| 0.128141
| 0
| 0
| 0
| 0.042857
| 0.289855
| 690
| 30
| 99
| 23
| 0.769388
| 0
| 0
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| false
| 0.181818
| 0.272727
| 0
| 0.590909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 3
|
0a750b96f7d83d3d539bea6b3d201533cd437b4f
| 832
|
py
|
Python
|
redirink/insights/tests/test_models.py
|
Egor4ik325/redirink
|
17ef85f48145ee6112f2fcbab60dcd9d65ba78bf
|
[
"MIT"
] | null | null | null |
redirink/insights/tests/test_models.py
|
Egor4ik325/redirink
|
17ef85f48145ee6112f2fcbab60dcd9d65ba78bf
|
[
"MIT"
] | null | null | null |
redirink/insights/tests/test_models.py
|
Egor4ik325/redirink
|
17ef85f48145ee6112f2fcbab60dcd9d65ba78bf
|
[
"MIT"
] | 1
|
2021-12-31T00:46:31.000Z
|
2021-12-31T00:46:31.000Z
|
"""Test insight model is working the way it should."""
import pytest
from django.core.exceptions import ValidationError
from django.db import DataError
from .factories import InsightFactory
pytestmark = pytest.mark.django_db
def test_create_new_fake_visitor_instance_using_factory(visitor):
pass
def test_create_new_instance_using_model_factory(insight):
pass
def test_fake_instance_is_valid(insight):
# Should not raise ValidationError
insight.full_clean()
def test_fake_instance_have_right_fields(insight):
assert isinstance(insight.id, int)
assert insight.time is not None
def test_invalid_ip_address():
with pytest.raises(DataError):
InsightFactory(visitor__ip_address="invalid ip")
def test_valid_fake_ip_v6_address(faker):
InsightFactory(visitor__ip_address=faker.ipv6())
| 23.111111
| 65
| 0.796875
| 114
| 832
| 5.482456
| 0.464912
| 0.0672
| 0.0416
| 0.0512
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002782
| 0.135817
| 832
| 35
| 66
| 23.771429
| 0.866481
| 0.098558
| 0
| 0.105263
| 0
| 0
| 0.013441
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 1
| 0.315789
| false
| 0.105263
| 0.210526
| 0
| 0.526316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 3
|
0a78179deb3bba9140ba6fad7537f792839802d1
| 826
|
py
|
Python
|
compyle/api.py
|
nauaneed/compyle
|
218c76de8aa684e1fb198072e40cb97a5e6845b3
|
[
"BSD-3-Clause"
] | null | null | null |
compyle/api.py
|
nauaneed/compyle
|
218c76de8aa684e1fb198072e40cb97a5e6845b3
|
[
"BSD-3-Clause"
] | null | null | null |
compyle/api.py
|
nauaneed/compyle
|
218c76de8aa684e1fb198072e40cb97a5e6845b3
|
[
"BSD-3-Clause"
] | null | null | null |
from .array import Array, wrap
from .ast_utils import (get_symbols, get_assigned,
get_unknown_names_and_calls, has_return, has_node)
from .config import get_config, set_config, use_config, Config
from .cython_generator import (
CythonGenerator, get_func_definition
)
from .ext_module import ExtModule
from .extern import Extern
from .low_level import Kernel, LocalMem, Cython, cast
from .parallel import (
Elementwise, Reduction, Scan, elementwise
)
from .profile import (
get_profile_info, named_profile, profile, profile_ctx, print_profile,
profile_kernel, ProfileContext, profile2csv
)
from .translator import (
CConverter, CStructHelper, OpenCLConverter, detect_type, ocl_detect_type,
py2c
)
from .types import KnownType, annotate, declare
from .utils import ArgumentParser
| 34.416667
| 77
| 0.77724
| 103
| 826
| 5.980583
| 0.553398
| 0.043831
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002886
| 0.161017
| 826
| 23
| 78
| 35.913043
| 0.886003
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.521739
| 0
| 0.521739
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 3
|
0a8392531b265c3630ab7efd862cf9bb543e8116
| 126
|
py
|
Python
|
py_tdlib/constructors/get_chat_member.py
|
Mr-TelegramBot/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 24
|
2018-10-05T13:04:30.000Z
|
2020-05-12T08:45:34.000Z
|
py_tdlib/constructors/get_chat_member.py
|
MrMahdi313/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 3
|
2019-06-26T07:20:20.000Z
|
2021-05-24T13:06:56.000Z
|
py_tdlib/constructors/get_chat_member.py
|
MrMahdi313/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 5
|
2018-10-05T14:29:28.000Z
|
2020-08-11T15:04:10.000Z
|
from ..factory import Method
class getChatMember(Method):
chat_id = None # type: "int53"
user_id = None # type: "int32"
| 18
| 32
| 0.690476
| 17
| 126
| 5
| 0.764706
| 0.141176
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039216
| 0.190476
| 126
| 6
| 33
| 21
| 0.794118
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
0a8510d776fffba4b52eff1b8a24d1b7d723d4dd
| 1,836
|
py
|
Python
|
ooobuild/csslo/xml/__init__.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/csslo/xml/__init__.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/csslo/xml/__init__.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ...lo.xml.attribute import Attribute as Attribute
from ...lo.xml.attribute_container import AttributeContainer as AttributeContainer
from ...lo.xml.attribute_data import AttributeData as AttributeData
from ...lo.xml.export_filter import ExportFilter as ExportFilter
from ...lo.xml.fast_attribute import FastAttribute as FastAttribute
from ...lo.xml.import_filter import ImportFilter as ImportFilter
from ...lo.xml.namespace_container import NamespaceContainer as NamespaceContainer
from ...lo.xml.para_user_defined_attributes_supplier import ParaUserDefinedAttributesSupplier as ParaUserDefinedAttributesSupplier
from ...lo.xml.text_user_defined_attributes_supplier import TextUserDefinedAttributesSupplier as TextUserDefinedAttributesSupplier
from ...lo.xml.user_defined_attributes_supplier import UserDefinedAttributesSupplier as UserDefinedAttributesSupplier
from ...lo.xml.x_export_filter import XExportFilter as XExportFilter
from ...lo.xml.x_import_filter import XImportFilter as XImportFilter
from ...lo.xml.x_import_filter2 import XImportFilter2 as XImportFilter2
from ...lo.xml.xml_export_filter import XMLExportFilter as XMLExportFilter
from ...lo.xml.xml_import_filter import XMLImportFilter as XMLImportFilter
| 57.375
| 130
| 0.827887
| 242
| 1,836
| 6.169421
| 0.417355
| 0.060281
| 0.090422
| 0.036169
| 0.091762
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007317
| 0.106754
| 1,836
| 31
| 131
| 59.225806
| 0.903049
| 0.313725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 3
|
0a91aa29e60075c1d841a9fa42cfbeabf426976a
| 2,225
|
py
|
Python
|
timm/models/layers/__init__.py
|
kkahatapitiya/pytorch-image-models
|
94f9d54ac22354f3cf7ada9a7304ac97143deb14
|
[
"Apache-2.0"
] | null | null | null |
timm/models/layers/__init__.py
|
kkahatapitiya/pytorch-image-models
|
94f9d54ac22354f3cf7ada9a7304ac97143deb14
|
[
"Apache-2.0"
] | null | null | null |
timm/models/layers/__init__.py
|
kkahatapitiya/pytorch-image-models
|
94f9d54ac22354f3cf7ada9a7304ac97143deb14
|
[
"Apache-2.0"
] | null | null | null |
from .activations import *
from .adaptive_avgmax_pool import \
adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d
from .blur_pool import BlurPool2d
from .classifier import ClassifierHead, create_classifier
from .cond_conv2d import CondConv2d, get_condconv_initializer
from .config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\
set_layer_config
from .conv2d_same import Conv2dSame, conv2d_same
from .conv_bn_act import ConvBnAct
from .create_act import create_act_layer, get_act_layer, get_act_fn
from .create_attn import get_attn, create_attn
from .create_conv2d import create_conv2d
from .create_norm_act import get_norm_act_layer, create_norm_act, convert_norm_act
from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path
from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn
from .evo_norm import EvoNormBatch2d, EvoNormSample2d
from .gather_excite import GatherExcite
from .global_context import GlobalContext
from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible
from .inplace_abn import InplaceAbn
from .involution import Involution
from .linear import Linear
from .mixed_conv2d import MixedConv2d
from .mlp import Mlp, GluMlp, GatedMlp, ConvMlpGeneral, ConvMlpGeneralv2
from .non_local_attn import NonLocalAttn, BatNonLocalAttn
from .norm import GroupNorm, LayerNorm2d
from .norm_act import BatchNormAct2d, GroupNormAct
from .padding import get_padding, get_same_padding, pad_same
from .patch_embed import PatchEmbed
from .pool2d_same import AvgPool2dSame, create_pool2d
from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite
from .selective_kernel import SelectiveKernel
from .separable_conv import SeparableConv2d, SeparableConvBnAct
from .space_to_depth import SpaceToDepthModule
from .split_attn import SplitAttn
from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model
from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame
from .test_time_pool import TestTimePoolHead, apply_test_time_pool
from .weight_init import trunc_normal_, variance_scaling_, lecun_normal_
| 54.268293
| 105
| 0.865169
| 288
| 2,225
| 6.368056
| 0.447917
| 0.019084
| 0.008724
| 0.015267
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016377
| 0.094382
| 2,225
| 40
| 106
| 55.625
| 0.893797
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.95
| 0
| 0.95
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 3
|
0a94bf94b86f2d0bf5c868fffdac3fa72c685955
| 19,040
|
py
|
Python
|
cfgov/ask_cfpb/tests/test_views.py
|
atuggle/cfgov-refresh
|
5a9cfd92b460b9be7befb39f5845abf56857aeac
|
[
"CC0-1.0"
] | null | null | null |
cfgov/ask_cfpb/tests/test_views.py
|
atuggle/cfgov-refresh
|
5a9cfd92b460b9be7befb39f5845abf56857aeac
|
[
"CC0-1.0"
] | 1
|
2016-09-14T21:11:19.000Z
|
2016-09-14T21:11:19.000Z
|
cfgov/ask_cfpb/tests/test_views.py
|
atuggle/cfgov-refresh
|
5a9cfd92b460b9be7befb39f5845abf56857aeac
|
[
"CC0-1.0"
] | null | null | null |
from __future__ import unicode_literals
import json
from django.apps import apps
from django.core.urlresolvers import NoReverseMatch, reverse
from django.http import Http404, HttpRequest, QueryDict
from django.test import TestCase, override_settings
from django.utils import timezone
from wagtail.wagtailcore.models import Site
from wagtailsharing.models import SharingSite
import mock
from model_mommy import mommy
from ask_cfpb.models import ENGLISH_PARENT_SLUG, SPANISH_PARENT_SLUG
from ask_cfpb.views import annotate_links, ask_search, redirect_ask_search
from v1.util.migrations import get_or_create_page
now = timezone.now()
class AnswerPagePreviewCase(TestCase):
def setUp(self):
from v1.models import HomePage
from ask_cfpb.models import Answer
self.ROOT_PAGE = HomePage.objects.get(slug='cfgov')
self.english_parent_page = get_or_create_page(
apps,
'ask_cfpb',
'AnswerLandingPage',
'Ask CFPB',
ENGLISH_PARENT_SLUG,
self.ROOT_PAGE,
language='en',
live=True)
self.spanish_parent_page = get_or_create_page(
apps,
'ask_cfpb',
'AnswerLandingPage',
'Obtener respuestas',
SPANISH_PARENT_SLUG,
self.ROOT_PAGE,
language='es',
live=True)
self.test_answer = mommy.make(
Answer,
answer="Test answer.",
question="Test question.",
slug='test-question',
update_english_page=True,
update_spanish_page=False)
self.site = mommy.make(
Site,
root_page=self.ROOT_PAGE,
hostname='localhost',
port=8000,
is_default_site=True)
self.sharing_site = mommy.make(
SharingSite,
site=self.site,
hostname='preview.localhost',
port=8000)
@mock.patch('ask_cfpb.views.ServeView.serve_latest_revision')
def test_preview_page(self, mock_serve):
from ask_cfpb.views import view_answer
page = self.test_answer.english_page
revision = page.save_revision()
revision.publish()
test_request = HttpRequest()
test_request.META['SERVER_NAME'] = 'preview.localhost'
test_request.META['SERVER_PORT'] = 8000
view_answer(
test_request, 'test-question', 'en', self.test_answer.pk)
self.assertEqual(mock_serve.call_count, 1)
def test_answer_page_not_live(self):
from ask_cfpb.views import view_answer
page = self.test_answer.english_page
page.live = False
page.save()
test_request = HttpRequest()
with self.assertRaises(Http404):
view_answer(
test_request,
'test-question',
'en',
self.test_answer.pk)
class AnswerViewTestCase(TestCase):
def setUp(self):
from v1.models import HomePage
self.ROOT_PAGE = HomePage.objects.get(slug='cfgov')
self.english_parent_page = get_or_create_page(
apps,
'ask_cfpb',
'AnswerLandingPage',
'Ask CFPB',
ENGLISH_PARENT_SLUG,
self.ROOT_PAGE,
language='en',
live=True)
self.spanish_parent_page = get_or_create_page(
apps,
'ask_cfpb',
'AnswerLandingPage',
'Obtener respuestas',
SPANISH_PARENT_SLUG,
self.ROOT_PAGE,
language='es',
live=True)
def test_annotate_links(self):
mock_answer = (
'<p>Answer with a <a href="http://fake.com">fake link.</a></p>')
(annotated_answer, links) = annotate_links(mock_answer)
self.assertEqual(
annotated_answer,
'<html><body><p>Answer with a <a href="http://fake.com">fake '
'link.</a><sup>1</sup></p></body></html>')
self.assertEqual(links, [(1, str('http://fake.com'))])
def test_annotate_links_no_href(self):
mock_answer = (
'<p>Answer with a <a>fake link.</a></p>')
(annotated_answer, links) = annotate_links(mock_answer)
self.assertEqual(links, [])
def test_annotate_links_no_site(self):
site = Site.objects.get(is_default_site=True)
site.is_default_site = False
site.save()
with self.assertRaises(RuntimeError) as context:
annotate_links('answer')
self.assertIn('no default wagtail site', str(context.exception))
def test_bad_language_search(self):
with self.assertRaises(NoReverseMatch):
self.client.get(reverse(
'ask-search-en',
kwargs={'language': 'zz'}), {'q': 'payday'})
@mock.patch('ask_cfpb.views.SearchQuerySet.filter')
def test_en_search_results_page_not_created(self, mock_filter):
mock_queryset = mock.Mock()
mock_queryset.count.return_value = 0
mock_filter.return_value = [mock_queryset]
response = self.client.get(reverse(
'ask-search-en'), {'q': 'payday'})
self.assertEqual(mock_filter.call_count, 1)
self.assertTrue(mock_filter.called_with(language='en', q='payday'))
self.assertEqual(response.status_code, 404)
@mock.patch('ask_cfpb.views.SearchQuerySet')
def test_en_search(self, mock_sqs):
from v1.util.migrations import get_or_create_page
mock_page = get_or_create_page(
apps,
'ask_cfpb',
'AnswerResultsPage',
'Mock results page',
'ask-cfpb-search-results',
self.ROOT_PAGE,
language='en')
mock_return = mock.Mock()
mock_return.url = 'mockcfpb.gov'
mock_return.autocomplete = 'A mock question'
mock_return.text = 'Mock answer text.'
mock_queryset = mock.Mock()
mock_queryset.__iter__ = mock.Mock(return_value=iter([mock_return]))
mock_queryset.count.return_value = 1
mock_sqs_instance = mock_sqs.return_value.models.return_value
mock_sqs_instance.filter.return_value = mock_queryset
mock_sqs_instance.spelling_suggestion.return_value = 'payday'
response = self.client.get(reverse(
'ask-search-en'), {'q': 'payday'})
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.context_data['page'],
mock_page)
self.assertEqual(
response.context_data['page'].suggestion,
None)
self.assertEqual(mock_sqs_instance.filter.call_count, 1)
self.assertTrue(mock_sqs_instance.filter.called_with(
language='en', q='payday'))
@mock.patch('ask_cfpb.views.SearchQuerySet')
def test_en_search_no_term(self, mock_sqs):
from v1.util.migrations import get_or_create_page
mock_page = get_or_create_page(
apps,
'ask_cfpb',
'AnswerResultsPage',
'Mock results page',
'ask-cfpb-search-results',
self.ROOT_PAGE,
language='en')
response = self.client.get(reverse(
'ask-search-en'), {'q': ''})
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.context_data['page'],
mock_page)
self.assertEqual(
response.context_data['page'].query,
'')
self.assertEqual(
response.context_data['page'].result_query,
'')
@override_settings(FLAGS={'ASK_SEARCH_TYPOS': {'boolean': True}})
@mock.patch('ask_cfpb.views.SearchQuerySet')
def test_en_search_suggestion(self, mock_sqs):
from v1.util.migrations import get_or_create_page
mock_page = get_or_create_page(
apps,
'ask_cfpb',
'AnswerResultsPage',
'Mock results page',
'ask-cfpb-search-results',
self.english_parent_page,
language='en',
live=True)
mock_return = mock.Mock()
mock_return.url = 'mockcfpb.gov'
mock_return.autocomplete = 'A mock question'
mock_return.text = 'Mock answer text.'
mock_queryset = mock.Mock()
mock_queryset.__iter__ = mock.Mock(return_value=iter([mock_return]))
mock_queryset.count.return_value = 0
mock_sqs_instance = mock_sqs.return_value.models.return_value
mock_sqs_instance.filter.return_value = mock_queryset
mock_sqs_instance.spelling_suggestion.return_value = 'payday'
response = self.client.get(reverse(
'ask-search-en'), {'q': 'paydya'})
self.assertEqual(response.status_code, 200)
response_page = response.context_data['page']
self.assertEqual(response_page, mock_page)
self.assertEqual(response_page.suggestion, 'paydya')
self.assertEqual(response_page.result_query, 'payday')
self.assertEqual(response_page.query, 'paydya')
@mock.patch('ask_cfpb.views.redirect_ask_search')
def test_ask_search_encounters_facets(self, mock_redirect):
request = HttpRequest()
request.GET['selected_facets'] = 'category_exact:my_category'
ask_search(request)
self.assertEqual(mock_redirect.call_count, 1)
@mock.patch('ask_cfpb.views.redirect')
def test_redirect_ask_search_passes_query_string(self, mock_redirect):
request = HttpRequest()
request.GET['q'] = 'hoodoo'
redirect_ask_search(request)
self.assertEqual(mock_redirect.call_count, 1)
@mock.patch('ask_cfpb.views.redirect')
def test_spanish_redirect_ask_search_passes_query_string(
self, mock_redirect):
request = HttpRequest()
request.GET['selected_facets'] = 'category_exact:my_categoria'
redirect_ask_search(request, language='es')
self.assertEqual(mock_redirect.call_count, 1)
@mock.patch('ask_cfpb.views.SearchQuerySet.filter')
def test_es_search(self, mock_filter):
get_or_create_page(
apps,
'ask_cfpb',
'AnswerResultsPage',
'Mock Spanish results page',
'respuestas',
self.spanish_parent_page,
language='es',
live=True)
mock_return = mock.Mock()
mock_return.url = 'mockcfpb.gov'
mock_return.autocomplete = 'A mock question'
mock_return.text = 'Mock answer text.'
mock_queryset = mock.Mock()
mock_queryset.__iter__ = mock.Mock(return_value=iter([mock_return]))
mock_queryset.count.return_value = 1
mock_filter.return_value = mock_queryset
self.client.get(reverse(
'ask-search-es', kwargs={'language': 'es'}), {'q': 'payday'})
self.assertEqual(mock_filter.call_count, 1)
self.assertTrue(mock_filter.called_with(language='es', q='payday'))
@mock.patch('ask_cfpb.views.SearchQuerySet.filter')
def test_search_page_en_selection(self, mock_filter):
page = get_or_create_page(
apps,
'ask_cfpb',
'AnswerResultsPage',
'Mock results page',
'ask-cfpb-search-results',
self.english_parent_page,
language='en',
live=True)
mock_return = mock.Mock()
mock_return.url = 'url'
mock_return.autocomplete = 'question text'
mock_queryset = mock.Mock()
mock_queryset.__iter__ = mock.Mock(return_value=iter([mock_return]))
mock_queryset.count.return_value = 1
mock_filter.return_value = mock_queryset
self.client.get(reverse(
'ask-search-en'), {'q': 'tuition'})
self.assertEqual(mock_filter.call_count, 1)
self.assertEqual(page.language, 'en')
self.assertEqual(page.answers, [])
self.assertEqual(
page.get_template(HttpRequest()),
'ask-cfpb/answer-search-results.html')
@mock.patch('ask_cfpb.views.SearchQuerySet.filter')
def test_search_page_es_selection(self, mock_filter):
page = get_or_create_page(
apps,
'ask_cfpb',
'AnswerResultsPage',
'Mock Spanish results page',
'respuestas',
self.spanish_parent_page,
language='es',
live=True)
mock_return = mock.Mock()
mock_return.url = 'url'
mock_return.autocomplete = 'question text'
mock_queryset = mock.Mock()
mock_queryset.__iter__ = mock.Mock(return_value=iter([mock_return]))
mock_queryset.count.return_value = 1
mock_filter.return_value = mock_queryset
self.client.get(reverse(
'ask-search-es', kwargs={'language': 'es'}), {'q': 'hipotecas'})
self.assertEqual(mock_filter.call_count, 1)
self.assertEqual(page.language, 'es')
self.assertEqual(page.answers, [])
self.assertEqual(
page.get_template(HttpRequest()),
'ask-cfpb/answer-search-spanish-results.html')
@mock.patch('ask_cfpb.views.SearchQuerySet.filter')
def test_json_response(self, mock_filter):
get_or_create_page(
apps,
'ask_cfpb',
'AnswerResultsPage',
'Mock results page',
'ask-cfpb-search-results',
self.english_parent_page,
language='en',
live=True)
mock_return = mock.Mock()
mock_return.url = "inscisive_url.com"
mock_return.autocomplete = "inscisive question"
mock_return.text = "inscisive text"
mock_queryset = mock.Mock()
mock_queryset.__iter__ = mock.Mock(return_value=iter([mock_return]))
mock_queryset.count.return_value = 1
mock_filter.return_value = mock_queryset
response = self.client.get(reverse(
'ask-search-en-json',
kwargs={'as_json': 'json'}), {'q': 'tuition'})
self.assertEqual(response.status_code, 200)
self.assertEqual(mock_filter.call_count, 1)
self.assertEqual(json.loads(response.content)['query'], 'tuition')
def test_autocomplete_en_blank_term(self):
result = self.client.get(reverse(
'ask-autocomplete-en'), {'term': ''})
output = json.loads(result.content)
self.assertEqual(output, [])
def test_autocomplete_es_blank_term(self):
result = self.client.get(reverse(
'ask-autocomplete-es',
kwargs={'language': 'es'}), {'term': ''})
output = json.loads(result.content)
self.assertEqual(output, [])
@mock.patch('ask_cfpb.views.SearchQuerySet.autocomplete')
def test_autocomplete_en(self, mock_autocomplete):
mock_search_result = mock.Mock()
mock_search_result.autocomplete = 'question'
mock_search_result.url = 'url'
mock_autocomplete.return_value = [mock_search_result]
result = self.client.get(reverse(
'ask-autocomplete-en'), {'term': 'question'})
self.assertEqual(mock_autocomplete.call_count, 1)
output = json.loads(result.content)
self.assertEqual(
sorted(output[0].keys()),
['question', 'url'])
@mock.patch('ask_cfpb.views.SearchQuerySet.autocomplete')
def test_autocomplete_es(self, mock_autocomplete):
mock_search_result = mock.Mock()
mock_search_result.autocomplete = 'question'
mock_search_result.url = 'url'
mock_autocomplete.return_value = [mock_search_result]
result = self.client.get(reverse(
'ask-autocomplete-es',
kwargs={'language': 'es'}), {'term': 'question'})
self.assertEqual(mock_autocomplete.call_count, 1)
output = json.loads(result.content)
self.assertEqual(
sorted(output[0].keys()),
['question', 'url'])
class RedirectAskSearchTestCase(TestCase):
def test_redirect_search_no_facets(self):
request = HttpRequest()
with self.assertRaises(Http404):
redirect_ask_search(request)
def test_redirect_search_blank_facets(self):
request = HttpRequest()
request.GET['selected_facets'] = ''
with self.assertRaises(Http404):
redirect_ask_search(request)
def test_redirect_search_no_query(self):
request = HttpRequest()
request.GET['q'] = ' '
with self.assertRaises(Http404):
redirect_ask_search(request)
def test_redirect_search_with_category(self):
category_querystring = (
'selected_facets=category_exact:my_category'
'&selected_facets=category_exact:my_category2'
'&selected_facets=audience_exact:Older+Americans'
'&selected_facets=audience_exact:my_audience2'
'&selected_facets=tag_exact:mytag1'
'&selected_facets=tag_exact:mytag2')
request = HttpRequest()
request.GET = QueryDict(category_querystring)
result = redirect_ask_search(request)
self.assertEqual(result.get('location'),
'/ask-cfpb/category-my_category/')
def test_redirect_search_with_audience(self):
audience_querystring = (
'selected_facets=audience_exact:Older+Americans'
'&selected_facets=audience_exact:my_audience2')
request = HttpRequest()
request.GET = QueryDict(audience_querystring)
result = redirect_ask_search(request)
self.assertEqual(
result.get('location'),
'/ask-cfpb/audience-older-americans/')
def test_spanish_redirect_search_with_tag(self):
target_tag = 'spanishtag1'
tag_querystring = (
'selected_facets=tag_exact:{}'
'&selected_facets=tag_exact:spanishtag2'.format(target_tag))
request = HttpRequest()
request.GET = QueryDict(tag_querystring)
result = redirect_ask_search(request, language='es')
self.assertEqual(
result.get('location'),
'/es/obtener-respuestas/buscar-por-etiqueta/{}/'.format(
target_tag))
def test_english_redirect_search_with_tag(self):
target_tag = 'englishtag1'
tag_querystring = (
'selected_facets=tag_exact:{}'
'&selected_facets=tag_exact:englishtag2'.format(target_tag))
request = HttpRequest()
request.GET = QueryDict(tag_querystring)
result = redirect_ask_search(request, language='en')
self.assertEqual(
result.get('location'),
'/ask-cfpb/search-by-tag/{}/'.format(
target_tag))
def test_redirect_search_with_unrecognized_facet_raises_404(self):
querystring = \
'sort=-updated_at&selected_facets=imtkfidycqszgfdb&page=60'
request = HttpRequest()
request.GET = QueryDict(querystring)
with self.assertRaises(Http404):
redirect_ask_search(request)
| 37.628458
| 76
| 0.624475
| 2,101
| 19,040
| 5.383627
| 0.099952
| 0.05835
| 0.018036
| 0.019892
| 0.778888
| 0.736893
| 0.708956
| 0.688799
| 0.668376
| 0.636902
| 0
| 0.006301
| 0.266439
| 19,040
| 505
| 77
| 37.70297
| 0.803537
| 0
| 0
| 0.634146
| 0
| 0.004435
| 0.163603
| 0.07584
| 0
| 0
| 0
| 0
| 0.121951
| 1
| 0.068736
| false
| 0.004435
| 0.04878
| 0
| 0.124169
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
0a96db7bc8255b1b1b651c9085fc3a06e4243461
| 1,753
|
py
|
Python
|
mmtbx/regression/tls/tst_u_tls_vs_u_ens_03.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/regression/tls/tst_u_tls_vs_u_ens_03.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/regression/tls/tst_u_tls_vs_u_ens_03.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import division
from mmtbx.tls import tools
import math
import time
pdb_str_1 = """
CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P1
ATOM 1 CA THR A 6 0.000 0.000 0.000 1.00 0.00 C
ATOM 1 CA THR B 6 3.000 0.000 0.000 1.00 0.00 C
"""
pdb_str_2 = """
CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P1
ATOM 1 CA THR A 6 0.000 0.000 0.000 1.00 0.00 C
ATOM 1 CA THR B 6 0.000 3.000 0.000 1.00 0.00 C
"""
pdb_str_3 = """
CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P1
ATOM 1 CA THR A 6 0.000 0.000 0.000 1.00 0.00 C
ATOM 1 CA THR B 6 0.000 0.000 3.000 1.00 0.00 C
"""
pdb_str_4 = """
CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P1
ATOM 1 CA THR A 6 0.000 0.000 0.000 1.00 0.00 C
ATOM 1 CA THR B 6 1.000 2.000 3.000 1.00 0.00 C
"""
def exercise_03():
sqrt = math.sqrt
vs = []
vs.append( [(sqrt(2)/2, sqrt(2)/2, 0), (-sqrt(2)/2, sqrt(2)/2, 0), (0,0,1)] )
vs.append( [(1,0,0), (0, sqrt(2)/2, sqrt(2)/2), (0, -sqrt(2)/2, sqrt(2)/2)] )
vs.append( [(sqrt(3)/2, 1/2, 0), (-1/2, sqrt(3)/2, 0), (0,0,1)] )
vs.append( [(1,0,0), (0, sqrt(3)/2, 1/2), (0, -1/2, sqrt(3)/2)] )
for pdb_str in [pdb_str_1, pdb_str_2, pdb_str_3, pdb_str_4]:
for vs_ in vs:
vx,vy,vz = vs_
print vx,vy,vz
tools.u_tls_vs_u_ens(pdb_str=pdb_str,
tx=0.05,ty=0.07,tz=0.09,
vx=vx, vy=vy, vz=vz,
n_models=1000)
if (__name__ == "__main__"):
t0 = time.time()
exercise_03()
print "Time: %6.4f"%(time.time()-t0)
print "OK"
| 34.372549
| 79
| 0.498003
| 369
| 1,753
| 2.257453
| 0.157182
| 0.086435
| 0.10084
| 0.096038
| 0.618247
| 0.617047
| 0.617047
| 0.617047
| 0.57623
| 0.57623
| 0
| 0.309359
| 0.3417
| 1,753
| 50
| 80
| 35.06
| 0.412478
| 0
| 0
| 0.272727
| 0
| 0.181818
| 0.507131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.090909
| null | null | 0.068182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
0aa458014e027a9ad777515ef9c0b45d42da4384
| 93
|
py
|
Python
|
archiveis/__init__.py
|
palewire/archiveis
|
11b2f1a4be4e7fbdcd52d874733cf20bc2d4f480
|
[
"MIT"
] | 6
|
2021-11-09T11:00:56.000Z
|
2022-01-14T03:44:52.000Z
|
archiveis/__init__.py
|
palewire/archiveis
|
11b2f1a4be4e7fbdcd52d874733cf20bc2d4f480
|
[
"MIT"
] | 4
|
2022-03-28T23:39:23.000Z
|
2022-03-28T23:39:24.000Z
|
archiveis/__init__.py
|
palewire/archiveis
|
11b2f1a4be4e7fbdcd52d874733cf20bc2d4f480
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from .api import capture
__version__ = "0.0.7"
__all__ = ("capture",)
| 15.5
| 24
| 0.677419
| 14
| 93
| 3.928571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0375
| 0.139785
| 93
| 5
| 25
| 18.6
| 0.65
| 0.215054
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 3
|
0aa514fa3ff45ce4defbd248dad8a995955378b1
| 188
|
py
|
Python
|
edmundbotadder/cogs/webhook.py
|
thebeanogamer/edmund-botadder
|
91e71ce572f3206b99e1f7a68d40bc37b947daf5
|
[
"MIT"
] | null | null | null |
edmundbotadder/cogs/webhook.py
|
thebeanogamer/edmund-botadder
|
91e71ce572f3206b99e1f7a68d40bc37b947daf5
|
[
"MIT"
] | null | null | null |
edmundbotadder/cogs/webhook.py
|
thebeanogamer/edmund-botadder
|
91e71ce572f3206b99e1f7a68d40bc37b947daf5
|
[
"MIT"
] | null | null | null |
from discord.ext.commands import Bot, Cog
class Webhook(Cog):
"""
Webhook functionality
"""
def __init__(self, bot: Bot):
self.bot = bot
def setup(bot):
bot.add_cog(Webhook(bot))
| 15.666667
| 41
| 0.696809
| 28
| 188
| 4.5
| 0.535714
| 0.142857
| 0.15873
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159574
| 188
| 12
| 42
| 15.666667
| 0.797468
| 0.111702
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 3
|
0ab73f13b7fac0bb315926410cb3d00950f04053
| 114
|
py
|
Python
|
classes/settings.py
|
johnyburd/glucometer
|
075a48cff38e0570960fc2b8968bcb8b5ddd647f
|
[
"MIT"
] | 12
|
2016-11-02T09:15:32.000Z
|
2021-04-08T18:42:01.000Z
|
classes/settings.py
|
johnyburd/glucometer
|
075a48cff38e0570960fc2b8968bcb8b5ddd647f
|
[
"MIT"
] | null | null | null |
classes/settings.py
|
johnyburd/glucometer
|
075a48cff38e0570960fc2b8968bcb8b5ddd647f
|
[
"MIT"
] | 3
|
2018-10-18T15:59:57.000Z
|
2021-01-20T21:03:48.000Z
|
def init():
global brightness
global calibration_mode
brightness = 500
calibration_mode = False
| 14.25
| 28
| 0.692982
| 12
| 114
| 6.416667
| 0.666667
| 0.38961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035714
| 0.263158
| 114
| 7
| 29
| 16.285714
| 0.880952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.2
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
0abdfc5e117d17fbbf96aa6e5e9c1b706bacee2c
| 95
|
py
|
Python
|
interface/app/__init__.py
|
caglorithm/accel
|
7fe5c13ea9559565c599633bdb3318c8fbc57088
|
[
"MIT"
] | 31
|
2019-12-07T01:27:19.000Z
|
2021-12-19T08:12:18.000Z
|
interface/app/__init__.py
|
caglorithm/accel
|
7fe5c13ea9559565c599633bdb3318c8fbc57088
|
[
"MIT"
] | null | null | null |
interface/app/__init__.py
|
caglorithm/accel
|
7fe5c13ea9559565c599633bdb3318c8fbc57088
|
[
"MIT"
] | null | null | null |
from flask import Flask
app = Flask(__name__, static_folder='static')
from app import routes
| 15.833333
| 45
| 0.778947
| 14
| 95
| 4.928571
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147368
| 95
| 5
| 46
| 19
| 0.851852
| 0
| 0
| 0
| 0
| 0
| 0.063158
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 3
|
0ac44ba5690cb44ecf9e208ad61f69b8762610fd
| 634
|
py
|
Python
|
tools/leetcode.112.Path Sum/leetcode.112.Path Sum.submission10.py
|
tedye/leetcode
|
975d7e3b8cb9b6be9e80e07febf4bcf6414acd46
|
[
"MIT"
] | 4
|
2015-10-10T00:30:55.000Z
|
2020-07-27T19:45:54.000Z
|
tools/leetcode.112.Path Sum/leetcode.112.Path Sum.submission10.py
|
tedye/leetcode
|
975d7e3b8cb9b6be9e80e07febf4bcf6414acd46
|
[
"MIT"
] | null | null | null |
tools/leetcode.112.Path Sum/leetcode.112.Path Sum.submission10.py
|
tedye/leetcode
|
975d7e3b8cb9b6be9e80e07febf4bcf6414acd46
|
[
"MIT"
] | null | null | null |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param {TreeNode} root
# @param {integer} sum
# @return {boolean}
def hasPathSum(self, root, sum):
if not root: return False
if not root.right and not root.left:
return sum == root.val
r = False
l = False
if root.right:
r = self.hasPathSum(root.right,sum-root.val)
if root.left:
l = self.hasPathSum(root.left,sum-root.val)
return r or l
| 634
| 634
| 0.545741
| 83
| 634
| 4.120482
| 0.373494
| 0.061404
| 0.087719
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.351735
| 634
| 1
| 634
| 634
| 0.832117
| 0.970032
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
0acd83639363e1e8109b480a9d0f9a0898831b8f
| 54,720
|
py
|
Python
|
tests/python/relay/test_op_level2.py
|
ravikumarvc/incubator-tvm
|
9826947ffce0ed40e9d47a0db2abb033e394279e
|
[
"Apache-2.0"
] | 3
|
2021-02-23T22:06:01.000Z
|
2021-09-30T09:59:17.000Z
|
tests/python/relay/test_op_level2.py
|
ravikumarvc/incubator-tvm
|
9826947ffce0ed40e9d47a0db2abb033e394279e
|
[
"Apache-2.0"
] | 4
|
2021-03-30T11:59:59.000Z
|
2022-03-12T00:40:23.000Z
|
tests/python/relay/test_op_level2.py
|
ravikumarvc/incubator-tvm
|
9826947ffce0ed40e9d47a0db2abb033e394279e
|
[
"Apache-2.0"
] | 3
|
2021-07-20T07:40:15.000Z
|
2021-08-03T08:39:17.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level2 operator test cases.
"""
import numpy as np
import tvm
from tvm import autotvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import ctx_list, run_infer_type
from tvm.contrib import util
import topi.testing
def test_conv1d_infer_type():
# symbolic in batch dimension
n, c, w = tvm.var("n"), 10, 224
x = relay.var("x", relay.ty.TensorType((n, c, w), "float32"))
w = relay.var("w")
y = relay.nn.conv1d(x, w,
kernel_size=3,
padding=(1, 1),
channels=2)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 224), "float32")
assert yy.args[1].checked_type == relay.TensorType(
(2, 10, 3), "float32")
# infer by shape of w, mixed precision
n, c, w = tvm.var("n"), 10, 224
x = relay.var("x", relay.TensorType((n, c, w), "int8"))
w = relay.var("w", relay.TensorType((2, 10, 3), "int8"))
y = relay.nn.conv1d(x, w, out_dtype="int32")
assert "out_dtype=\"int32\"" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 222), "int32")
# infer shape in case of different dtypes for input and weight.
n, c, w = tvm.var("n"), 10, 224
x = relay.var("x", relay.TensorType((n, c, w), "uint8"))
w = relay.var("w", relay.TensorType((2, 10, 3), "int8"))
y = relay.nn.conv1d(x, w, out_dtype="int32")
assert "out_dtype=\"int32\"" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 222), "int32")
# Infer with NWC
n, c, w = 4, 32, 224
x = relay.var("x", relay.TensorType((n, w, c), "int8"))
wt = relay.var("w")
y = relay.nn.conv1d(x, wt,
kernel_size=3,
padding=(1, 1),
channels=16,
data_layout="NWC",
out_dtype="int32")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, w, 16), "int32")
def test_conv1d_run():
def run_test_conv1d(dtype, out_dtype, scale, dshape, kshape,
padding=(1, 1),
fref=None,
dilation=1,
except_targets=None,
**attrs):
if except_targets is None:
except_targets = []
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", dtype=dtype)
y = relay.nn.conv1d(x, w,
padding=padding,
dilation=dilation,
**attrs)
func = relay.Function([x, w], y)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
ref_res = topi.testing.conv1d_ncw_python(
data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, dilation)
for target, ctx in ctx_list():
if target in except_targets:
continue
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data, kernel)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
# normal conv1d
dshape = (1, 3, 224)
kshape = (10, 3, 3)
run_test_conv1d("float32", "float32", 1, dshape, kshape,
padding=(1, 1), channels=10, kernel_size=3)
# mixed precision
run_test_conv1d("int8", "int32", 1, dshape, kshape,
padding=(1, 1), channels=10, kernel_size=3)
# dilated conv2d
dshape = (1, 3, 18)
kshape = (10, 3, 3)
run_test_conv1d("float32", "float32", 1, dshape, kshape,
padding=(1, 1), channels=10, kernel_size=3, dilation=3)
def test_conv2d_infer_type():
# symbolic in batch dimension
n, c, h, w = tvm.size_var("n"), 10, 224, 224
x = relay.var("x", relay.ty.TensorType((n, c, h, w), "float32"))
w = relay.var("w")
y = relay.nn.conv2d(x, w,
kernel_size=(3, 3),
padding=(1, 1),
channels=2)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 224, 224), "float32")
assert yy.args[1].checked_type == relay.TensorType(
(2, 10, 3, 3), "float32")
# infer by shape of w, mixed precision
n, c, h, w = tvm.size_var("n"), 10, 224, 224
x = relay.var("x", relay.TensorType((n, c, h, w), "int8"))
w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8"))
y = relay.nn.conv2d(x, w, out_dtype="int32")
assert "out_dtype=\"int32\"" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 222, 222), "int32")
# infer shape in case of different dtypes for input and weight.
n, c, h, w = tvm.size_var("n"), 10, 224, 224
x = relay.var("x", relay.TensorType((n, c, h, w), "uint8"))
w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8"))
y = relay.nn.conv2d(x, w, out_dtype="int32")
assert "out_dtype=\"int32\"" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 222, 222), "int32")
# Infer with a different layout
n, c, h, w = 4, 32, 224, 224
x = relay.var("x", relay.TensorType((n//4, c//4, h, w, 4, 4), "int8"))
wt = relay.var("w")
y = relay.nn.conv2d(x, wt,
kernel_size=(3, 3),
padding=(1, 1),
channels=16,
data_layout="NCHW4n4c",
kernel_layout="OIHW4o4i",
out_dtype="int32")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(1, 4, 224, 224, 4, 4), "int32")
assert yy.args[1].checked_type == relay.TensorType(
(4, 8, 3, 3, 4, 4), "int8")
# Infer with NHWC
n, c, h, w = 4, 32, 224, 224
x = relay.var("x", relay.TensorType((n, h, w, c), "int8"))
wt = relay.var("w")
y = relay.nn.conv2d(x, wt,
kernel_size=(3, 3),
padding=(1, 1),
channels=16,
data_layout="NHWC",
out_dtype="int32")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, h, w, 16), "int32")
def test_conv2d_run():
def run_test_conv2d(dtype, out_dtype, scale, dshape, kshape,
padding=(1, 1),
fref=None,
groups=1,
dilation=(1, 1),
except_targets=None,
**attrs):
if except_targets is None:
except_targets = []
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", dtype=dtype)
y = relay.nn.conv2d(x, w,
padding=padding,
dilation=dilation,
groups=groups,
**attrs)
func = relay.Function([x, w], y)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)
if fref is None:
ref_res = topi.testing.conv2d_nchw_python(
data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding,
groups=groups)
else:
ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype))
for target, ctx in ctx_list():
if target in except_targets:
continue
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data, kernel)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
def compile_test_conv2d_arm_cpu(dtype, out_dtype, scale, dshape, kshape,
padding=(1, 1),
groups=1,
dilation=(1, 1),
**attrs):
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", dtype=dtype)
y = relay.nn.conv2d(x, w,
padding=padding,
dilation=dilation,
groups=groups,
**attrs)
func = relay.Function([x, w], y)
mod = tvm.relay.Module()
mod["main"] = func
test_schedule='{"i": ["llvm -device=arm_cpu", "topi_nn_depthwise_conv2d_nchw", \
[["TENSOR", [1, 512, 32, 32], "float32"], \
["TENSOR", [512, 1, 3, 3], "float32"], \
[1, 1], [1, 1], [1, 1], "float32"], {}, \
["depthwise_conv2d_nchw", [1, 512, 32, 32, "float32"], \
[512, 1, 3, 3, "float32"], [1, 1], [1, 1], [1, 1], "float32"], \
{"i": 743640, "t": "contrib_spatial_pack", "c": null, \
"e": [["tile_co", "sp", [32, 16]], ["tile_oh", "sp", [8, 1]], \
["tile_ow", "sp", [1, 8]], \
["reorder_0", "re", [0, 1, 2, 3, 4, 5, 8, 6, 7]], \
["reorder_1", "re", [0, 1, 2, 3, 6, 4, 5]], \
["ann_reduce", "an", ["unroll", "none"]], \
["ann_spatial", "an", ["unroll", "unroll", "vec"]], \
["data_pad_inline", "ot", 4], ["data_vec_inline", "ot", 1], \
["conv_inline", "ot", 0]]}], "r": [[0.0002933163], \
0, 3.1976189613342285, 1570811630.6058347], "v": 0.1}'
temp = util.tempdir()
with open(temp.relpath("temp.log"), "w") as log_file:
log_file.write(test_schedule)
with autotvm.apply_history_best(temp.relpath("temp.log")):
with relay.build_config(opt_level=3):
print('Compiling...')
graph_json, mod, params = tvm.relay.build(mod, target="llvm -device=arm_cpu")
# depthwise conv2d
dshape = (1, 32, 18, 18)
kshape = (32, 1, 3, 3)
run_test_conv2d("float32", "float32", 1, dshape, kshape,
padding=(1, 1), channels=32, groups=32, kernel_size=(3 ,3),
fref=lambda x, w: topi.testing.depthwise_conv2d_python_nchw(
x, w, (1, 1), "SAME"))
# depthwise conv2d for arm_cpu
dshape = (1, 512, 32, 32)
kshape = (512, 1, 3, 3)
compile_test_conv2d_arm_cpu("float32", "float32", 1, dshape, kshape,
padding=(1, 1), channels=512,
groups=512, kernel_size=(3 ,3))
# CUDA is disabled for 'direct' schedule:
# https://github.com/apache/incubator-tvm/pull/3070#issuecomment-486597553
# group conv2d
dshape = (1, 32, 18, 18)
kshape = (32, 4, 3, 3)
run_test_conv2d("float32", "float32", 1, dshape, kshape,
padding=(1, 1), channels=32, groups=8, kernel_size=(3 ,3),
except_targets=['cuda'])
# also group conv2d
dshape = (1, 32, 18, 18)
kshape = (64, 1, 3, 3)
run_test_conv2d("float32", "float32", 1, dshape, kshape,
padding=(1, 1), channels=64, groups=32, kernel_size=(3 ,3),
except_targets=['cuda'])
# normal conv2d
dshape = (1, 3, 224, 224)
kshape = (10, 3, 3, 3)
run_test_conv2d("float32", "float32", 1, dshape, kshape,
padding=(1, 1), channels=10, kernel_size=(3 ,3))
# mixed precision
run_test_conv2d("int8", "int32", 1, dshape, kshape,
padding=(1, 1), channels=10, kernel_size=(3 ,3))
kshape = (10, 3, 1, 3)
# mixed precision.
run_test_conv2d("int8", "int32", 1, dshape, kshape,
padding=(0, 1), channels=10, kernel_size=(1 ,3))
# dilated conv2d
dshape = (1, 3, 18, 18)
kshape = (10, 3, 3, 3)
run_test_conv2d("float32", "float32", 1, dshape, kshape,
padding=(1, 1), channels=10, kernel_size=(3 ,3), dilation=(3, 3))
def test_conv2d_winograd():
class WinogradFallback(autotvm.FallbackContext):
def _query_inside(self, target, workload):
key = (target, workload)
if key in self.memory:
return self.memory[key]
cfg = autotvm.task.space.FallbackConfigEntity()
cfg.template_key = 'winograd'
cfg.is_fallback = False
cfg['tile_b'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])
cfg['tile_y'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])
cfg['tile_x'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])
cfg['tile_rc'] = autotvm.task.space.SplitEntity([-1, 1])
cfg['auto_unroll_max_setp'] = autotvm.task.space.OtherOptionEntity(1500)
cfg['unroll_explicit'] = autotvm.task.space.OtherOptionEntity(1)
self.memory[key] = cfg
return cfg
def run_test_conv2d_cuda(dtype, out_dtype, scale, dshape, kshape,
padding=(1, 1),
groups=1,
dilation=(1, 1),
**attrs):
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", shape=kshape, dtype=dtype)
y = relay.nn.conv2d(x, w,
padding=padding,
dilation=dilation,
groups=groups,
**attrs)
func = relay.Function([x, w], y)
mod = relay.Module()
mod['main'] = func
mod = relay.transform.InferType()(mod)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
ref_res = topi.testing.conv2d_nchw_python(
data.astype(out_dtype), kernel.astype(out_dtype), 1, padding,
groups=groups)
with WinogradFallback(), relay.build_config(opt_level=3):
for target, ctx in ctx_list():
if target != 'cuda':
continue
params = {'w': tvm.nd.array(kernel)}
graph, lib, params = relay.build_module.build(mod, target=target, params=params)
module = tvm.contrib.graph_runtime.create(graph, lib, ctx)
module.set_input('x', tvm.nd.array(data))
module.set_input(**params)
module.run()
op_res1 = module.get_output(0)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-3, atol=1e-3)
# normal winograd: stride 1, padding 1, kernel 3x3
dshape = (1, 80, 73, 73)
kshape = (192, 80, 3, 3)
run_test_conv2d_cuda("float32", "float32", 1, dshape, kshape,
padding=(1, 1), channels=192, kernel_size=(3, 3))
# extended winograd: stride 1, padding N, kernel 3x3
run_test_conv2d_cuda("float32", "float32", 1, dshape, kshape,
padding=(0, 0), channels=192, kernel_size=(3, 3))
run_test_conv2d_cuda("float32", "float32", 1, dshape, kshape,
padding=(2, 2), channels=192, kernel_size=(3, 3))
# extended winograd: stride 1, padding N, kernel NxN
kshape = (192, 80, 7, 7)
run_test_conv2d_cuda("float32", "float32", 1, dshape, kshape,
padding=(2, 2), channels=192, kernel_size=(7, 7))
def test_conv3d_infer_type():
# symbolic in batch dimension
n, c, d, h, w = tvm.size_var("n"), 10, 224, 224, 224
x = relay.var("x", relay.ty.TensorType((n, c, d, h, w), "float32"))
w = relay.var("w")
y = relay.nn.conv3d(x, w,
kernel_size=(3, 3, 3),
padding=(1, 1, 1),
channels=2)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 224, 224, 224), "float32")
assert yy.args[1].checked_type == relay.TensorType(
(2, 10, 3, 3, 3), "float32")
# infer by shape of w, mixed precision
n, c, d, h, w = tvm.size_var("n"), 10, 224, 224, 224
x = relay.var("x", relay.TensorType((n, c, d, h, w), "int8"))
w = relay.var("w", relay.TensorType((2, 10, 3, 3, 3), "int8"))
y = relay.nn.conv3d(x, w, out_dtype="int32")
assert "out_dtype=\"int32\"" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 222, 222, 222), "int32")
# infer shape in case of different dtypes for input and weight.
n, c, d, h, w = tvm.size_var("n"), 10, 224, 224, 224
x = relay.var("x", relay.TensorType((n, c, d, h, w), "uint8"))
w = relay.var("w", relay.TensorType((2, 10, 3, 3, 3), "int8"))
y = relay.nn.conv3d(x, w, out_dtype="int32")
assert "out_dtype=\"int32\"" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 2, 222, 222, 222), "int32")
# Infer with NDHWC
n, c, d, h, w = 4, 32, 224, 224, 224
x = relay.var("x", relay.TensorType((n, d, h, w, c), "int8"))
wt = relay.var("w")
y = relay.nn.conv3d(x, wt,
kernel_size=(3, 3, 3),
padding=(1, 1, 1),
channels=16,
data_layout="NDHWC",
out_dtype="int32")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, d, h, w, 16), "int32")
def test_conv3d_run():
def run_test_conv3d(dtype, out_dtype, scale, dshape, kshape,
padding=(1, 1, 1),
fref=None,
groups=1,
dilation=(1, 1, 1),
except_targets=None,
**attrs):
if except_targets is None:
except_targets = []
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", dtype=dtype)
y = relay.nn.conv3d(x, w,
padding=padding,
dilation=dilation,
groups=groups,
**attrs)
func = relay.Function([x, w], y)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)
if fref is None:
ref_res = topi.testing.conv3d_ncdhw_python(
data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding,
groups=groups)
else:
ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype))
for target, ctx in ctx_list():
if target in except_targets:
continue
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data, kernel)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
# normal conv3d
dshape = (1, 3, 5, 224, 224)
kshape = (10, 3, 3, 3, 3)
run_test_conv3d("float32", "float32", 1, dshape, kshape,
padding=(1, 1, 1), channels=10, kernel_size=(3, 3 ,3))
def test_conv3d_ndhwc_run():
def run_test_conv3d(dtype, out_dtype, scale, dshape, kshape,
padding=(1, 1, 1),
fref=None,
groups=1,
dilation=(1, 1, 1),
except_targets=None,
**attrs):
if except_targets is None:
except_targets = []
x = relay.var("x", shape=dshape, dtype=dtype)
w = relay.var("w", dtype=dtype)
y = relay.nn.conv3d(x, w,
padding=padding,
dilation=dilation,
groups=groups,
data_layout="NDHWC", kernel_layout="DHWIO",
**attrs)
func = relay.Function([x, w], y)
data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation)
if fref is None:
ref_res = topi.testing.conv3d_ndhwc_python(
data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding)
else:
ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype))
for target, ctx in ctx_list():
if target in except_targets:
continue
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data, kernel)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
# normal conv3d
dshape = (1, 5, 224, 224, 6)
kshape = (3, 3, 3, 6, 10)
run_test_conv3d("float32", "float32", 1, dshape, kshape,
padding=(1, 1, 1), channels=10, kernel_size=(3, 3 ,3), except_targets=["cuda"])
def test_conv2d_transpose_infer_type():
# symbolic in batch dimension
n, c, h, w = tvm.size_var("n"), 10, 10, 12
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
w = relay.var("w", relay.IncompleteType())
y = relay.nn.conv2d_transpose(x, w,
kernel_size=(3, 3),
padding=(1, 1),
channels=15)
assert "channels=15" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 15, 10, 12), "float32")
assert yy.args[1].checked_type == relay.TensorType(
(10, 15, 3, 3), "float32")
# infer by shape of w, mixed precision
n, h, w, c = tvm.size_var("n"), 10, 10, 12
x = relay.var("x", relay.TensorType((n, h, w, c), "float32"))
w = relay.var("w", relay.TensorType((12, 11, 5, 5), "float32"))
y = relay.nn.conv2d_transpose(x, w,
output_padding=(1, 1),
channels=11,
data_layout="NHWC")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 15, 15, 11), "float32")
def test_conv2d_transpose_nchw_run():
dshape = (1, 3, 18, 18)
kshape = (3, 10, 3, 3)
oshape = (1, 10, 37, 37)
x = relay.var("x", shape=dshape)
w = relay.var("w")
y = relay.nn.conv2d_transpose(x, w,
channels=10, kernel_size=(3,3), strides=(2,2),
padding=(1,1), output_padding=(2, 2))
func = relay.Function([x, w], y)
dtype = "float32"
data = np.random.uniform(size=dshape).astype(dtype)
kernel = np.random.uniform(size=kshape).astype(dtype)
c_np = topi.testing.conv2d_transpose_nchw_python(
data, kernel, 2, 1)
d_np = np.zeros(shape=oshape)
d_np[:,:,0:c_np.shape[2],0:c_np.shape[3]] = c_np
ref_res = d_np
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data, kernel)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
def test_conv2d_transpose_nhwc_run():
dshape_nhwc = (1, 18, 18, 3)
kshape_hwoi = (3, 3, 10, 3)
oshape_nhwc = (1, 37, 37, 10)
x = relay.var("x", shape=dshape_nhwc)
w = relay.var("w")
# kshape and kernel_layout should have swapped IO.
# kshape is HWOI and kernel_layout is HWIO
y = relay.nn.conv2d_transpose(x, w,
channels=10, kernel_size=(3, 3), strides=(2, 2),
padding=(1, 1), output_padding=(2, 2),
data_layout="NHWC", kernel_layout="HWIO")
func = relay.Function([x, w], y)
dtype = "float32"
data = np.random.uniform(size=dshape_nhwc).astype(dtype)
kernel = np.random.uniform(size=kshape_hwoi).astype(dtype)
# use true kshape layout here - HWOI
c_np = topi.testing.conv2d_transpose_nhwc_python(data, kernel, 'HWOI', 2, 1)
d_np = np.zeros(shape=oshape_nhwc)
d_np[:,0:c_np.shape[1],0:c_np.shape[2],:] = c_np
def test_conv1d_transpose_ncw_run():
dshape = (1, 3, 18)
kshape = (3, 10, 3)
oshape = (1, 10, 37)
x = relay.var("x", shape=dshape)
w = relay.var("w")
y = relay.nn.conv1d_transpose(x, w,
channels=10, kernel_size=(3,), strides=(2,),
padding=(1,), output_padding=(2,))
func = relay.Function([x, w], y)
dtype = "float32"
data = np.random.uniform(size=dshape).astype(dtype)
kernel = np.random.uniform(size=kshape).astype(dtype)
c_np = topi.testing.conv1d_transpose_ncw_python(
data, kernel, 2, 1)
d_np = np.zeros(shape=oshape)
d_np[:,:,0:c_np.shape[2]] = c_np
ref_res = d_np
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data, kernel)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
def test_upsampling_infer_type():
n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w")
scale = tvm.const(2.0, "float64")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout="NCHW", method="bilinear")
"method=\"BINLINEAR\"" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, tvm.expr.Cast("int32", tvm.round(h*scale)),
tvm.expr.Cast("int32", tvm.round(w*scale))),
"float32")
n, c = tvm.size_var("n"), tvm.size_var("c")
x = relay.var("x", relay.TensorType((n, c, 100, 200), "float32"))
y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout="NCHW", method="bilinear")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, 200, 400), "float32")
def test_upsampling3d_infer_type():
n, c, d, h, w = tvm.size_var("n"), tvm.size_var("c"),\
tvm.size_var("d"), tvm.size_var("h"), tvm.size_var("w")
scale = tvm.const(2.0, "float64")
x = relay.var("x", relay.TensorType((n, c, d, h, w), "float32"))
y = relay.nn.upsampling3d(x, scale_d=2, scale_h=2, scale_w=2, layout="NCDHW", method="trilinear")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, tvm.expr.Cast("int32", tvm.round(d*scale)),
tvm.expr.Cast("int32", tvm.round(h*scale)),
tvm.expr.Cast("int32", tvm.round(w*scale))),
"float32")
n, c = tvm.size_var("n"), tvm.size_var("c")
x = relay.var("x", relay.TensorType((n, c, 100, 100, 200), "float32"))
y = relay.nn.upsampling3d(x, scale_d=2, scale_h=2, scale_w=2, layout="NCDHW", method="trilinear")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, 200, 200, 400), "float32")
def _test_pool2d(opfunc, reffunc):
n, c, h, w = tvm.size_var("n"), 10, 224, 224
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = opfunc(x, pool_size=(1, 1))
assert "pool_size=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 10, 224, 224), "float32")
# test execution
dtype = "float32"
dshape = (1, 3, 28, 28)
x = relay.var("x", shape=dshape)
y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = reffunc(data.reshape(1, 3, 14, 2, 14, 2), axis=(3, 5))
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
def _test_pool2d_int(opfunc, reffunc, dtype):
n, c, h, w = tvm.size_var("n"), 10, 224, 224
x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
y = opfunc(x, pool_size=(1, 1))
assert "pool_size=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 10, 224, 224), dtype)
# test execution
dtype = "int32"
dshape = (1, 3, 28, 28)
x = relay.var("x", shape=dshape, dtype=dtype)
y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
func = relay.Function([x], y)
data = np.random.random_integers(low=-128, high=128, size=dshape)
ref_res = reffunc(data.reshape(1,3,14,2,14,2), axis=(3,5)).astype(dtype)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
def _test_global_pool2d(opfunc, reffunc):
n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), 224, 224
x = relay.var("x", relay.TensorType((n, h, w, c), "float32"))
y = opfunc(x, layout="NHWC")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 1, 1, c), "float32")
n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = opfunc(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, 1, 1), "float32")
# test execution
dtype = "float32"
dshape = (1, 1024, 7, 7)
x = relay.var("x", shape=dshape)
y = opfunc(x)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = reffunc(data, axis=(2,3), keepdims=True)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
def test_pool2d():
_test_pool2d(relay.nn.max_pool2d, np.max)
_test_pool2d(relay.nn.avg_pool2d, np.mean)
_test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'int32')
_test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'uint16')
_test_global_pool2d(relay.nn.global_max_pool2d, np.max)
_test_global_pool2d(relay.nn.global_avg_pool2d, np.mean)
def test_pool1d():
def _test_pool1d(opfunc):
n, c, w = tvm.var("n"), 10, 224
x = relay.var("x", relay.TensorType((n, c, w), "float32"))
y = opfunc(x, pool_size=(1,))
assert "pool_size=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 10, 224), "float32")
# test execution
dtype = "float32"
dshape = (1, 3, 32)
x = relay.var("x", shape=dshape)
pool_type = 'max' if 'max' in str(opfunc) else 'avg'
y = opfunc(x, pool_size=(2,), strides=(2,), padding=(0, 0))
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = topi.testing.pool1d_ncw_python(data, (2,), (2,),
(0, 0), (1, 3, 16), pool_type, False)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
_test_pool1d(relay.nn.max_pool1d)
_test_pool1d(relay.nn.avg_pool1d)
def test_pool3d():
def _test_pool3d(opfunc, padding=(0, 0, 0, 0, 0, 0), out_shape=(1, 3, 16, 16, 16)):
n, c, d, h, w = tvm.size_var("n"), 10, 5, 224, 224
x = relay.var("x", relay.TensorType((n, c, d, h, w), "float32"))
y = opfunc(x, pool_size=(1, 1, 1))
assert "pool_size=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, 10, 5, 224, 224), "float32")
# test execution
dtype = "float32"
dshape = (1, 3, 32, 32, 32)
x = relay.var("x", shape=dshape)
pool_type = 'max' if 'max' in str(opfunc) else 'avg'
y = opfunc(x, pool_size=(2, 2, 2), strides=(2, 2, 2), padding=padding)
func = relay.Function([x], y)
# check output shape
f_out_shape = tuple(map(lambda x: int(x), run_infer_type(func).ret_type.shape))
assert out_shape == f_out_shape, \
"Output shape mismatch. expected {}, actual {}".format(out_shape, f_out_shape)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = topi.testing.pool3d_ncdhw_python(data, (2, 2, 2), (2, 2, 2),
padding, out_shape, pool_type, False)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
_test_pool3d(relay.nn.max_pool3d)
_test_pool3d(relay.nn.max_pool3d, padding=(2, 0, 0, 2, 0, 0), out_shape=(1, 3, 18, 16, 16))
_test_pool3d(relay.nn.max_pool3d, padding=(0, 3, 0, 0, 3, 0), out_shape=(1, 3, 16, 19, 16))
_test_pool3d(relay.nn.max_pool3d, padding=(0, 0, 4, 0, 0, 4), out_shape=(1, 3, 16, 16, 20))
_test_pool3d(relay.nn.avg_pool3d)
_test_pool3d(relay.nn.avg_pool3d, padding=(2, 0, 0, 2, 0, 0), out_shape=(1, 3, 18, 16, 16))
_test_pool3d(relay.nn.avg_pool3d, padding=(0, 3, 0, 0, 3, 0), out_shape=(1, 3, 16, 19, 16))
_test_pool3d(relay.nn.avg_pool3d, padding=(0, 0, 4, 0, 0, 4), out_shape=(1, 3, 16, 16, 20))
def test_avg_pool2d_no_count_pad():
kh, kw = (4, 4)
sh, sw = (2, 2)
ph, pw = (2, 2)
n = 1
(ic, ih, iw) = (3, 28, 28)
(oc, oh, ow) = (3, 15, 15)
dshape = (n, ic, ih, iw)
x = relay.var("x", shape=dshape)
y = relay.nn.avg_pool2d(x,
pool_size=(kh, kw),
strides=(sw, sw),
padding=(ph, pw),
count_include_pad=False)
func = relay.Function([x], y)
dtype = "float32"
a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype)
pad_np = np.zeros(shape=(n, ic, ih+2*ph, iw+2*pw)).astype(dtype)
no_zero = (range(n), range(ic), (range(ph, ih+ph)), (range(pw, iw+pw)))
pad_np[np.ix_(*no_zero)] = a_np
b_np = np.zeros(shape=(n, oc, oh, ow)).astype(dtype)
for i in range(oh):
for j in range(ow):
pad_count = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw] > 0, axis=(2,3))
b_np[:,:,i,j] = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw],
axis=(2,3)) / np.maximum(pad_count, 1)
ref_res = np.maximum(b_np, 0.0)
data = a_np
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
def test_flatten_infer_type():
d1, d2, d3, d4 = tvm.size_var("d1"), tvm.size_var("d2"), tvm.size_var("d3"), tvm.size_var("d4")
x = relay.var("x", relay.TensorType((d1, d2, d3, d4), "float32"))
y = relay.nn.batch_flatten(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((d1, ((d2*d3)*d4)), "float32")
x = relay.var("x", relay.TensorType((3, 2, 4, 3), "float32"))
y = relay.nn.batch_flatten(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((3, 24), "float32")
x = relay.var("x", relay.TensorType((d1, 2, d3, 3), "float32"))
y = relay.nn.batch_flatten(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((d1, ((2*d3)*3)), "float32")
shape = (1, 5, 10, 10)
o_shape = (1, 500)
dtype = "float32"
x = relay.var("x", relay.TensorType(shape, dtype))
z = relay.nn.batch_flatten(x)
yy = run_infer_type(z)
assert yy.checked_type == relay.TensorType(o_shape, dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
ref_res = x_data.flatten().reshape(o_shape)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
op_res2 = intrp2.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
def test_pad_infer_type():
# entirely concrete case
n, c, h, w = 1, 2, 3, 4
t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4)))
"pad_width=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((3, 6, 9, 12), "float32")
# some symbolic values
n, c, h, w = tvm.size_var("n"), 2, 3, tvm.size_var("w")
t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4)))
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n + 2, 6, 9, w + 8), "float32")
def test_pad_run():
def _test_run(dtype):
dshape = (4, 10, 7, 7)
x = relay.var("x", shape=dshape)
y = relay.nn.pad(x, ((1, 1), (2, 2), (3, 3), (4, 4)))
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = np.pad(data, ((1, 1), (2, 2), (3, 3), (4, 4)), 'constant')
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5)
_test_run('float32')
_test_run('int32')
def test_lrn():
n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w")
x = relay.var("x", shape=(n, c , h, w))
y = relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=.00001, beta=0.75)
"alpha=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c , h, w))
shape = (1, 5, 10, 10)
dtype = "float32"
x = relay.var("x", relay.TensorType(shape, dtype))
size=5
axis=1
bias=0.5
alpha=.00001
beta=0.75
z = relay.nn.lrn(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta)
yy = run_infer_type(z)
assert yy.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
ref_res = topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
op_res2 = intrp2.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
def test_l2_normalize():
n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w")
x = relay.var("x", shape=(n, c , h, w))
y = relay.nn.l2_normalize(x, eps=0.001, axis=[1])
"axis=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c , h, w))
shape = (1, 5, 10, 10)
dtype = "float32"
x = relay.var("x", relay.TensorType(shape, dtype))
eps=0.001
axis=1
z = relay.nn.l2_normalize(x, eps=0.001, axis=[axis])
yy = run_infer_type(z)
assert yy.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
ref_res = topi.testing.l2_normalize_python(x_data, eps, axis)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
op_res2 = intrp2.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
def batch_flatten(data):
shape = data.shape
target_dim = 1
for i in range(len(shape) - 1):
target_dim = target_dim * shape[i + 1]
return np.reshape(data, (shape[0], target_dim))
def test_batch_flatten():
t1 = relay.TensorType((5, 10, 5))
x = relay.Var("x", t1)
func = relay.Function([x], relay.nn.batch_flatten(x))
data = np.random.rand(5, 10, 5).astype(t1.dtype)
ref_res = batch_flatten(data)
for target, ctx in ctx_list():
intrp = relay.create_executor("graph", ctx=ctx, target=target)
op_res = intrp.evaluate(func)(data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
def _test_upsampling(layout, method, align_corners=False):
n, c, h, w = tvm.size_var("n"), 16, 32, 32
scale_h = 2.0
scale_w = 2.0
dtype = "float32"
def get_shape():
if layout == "NCHW":
return (c, h, w), (c, int(round(h*scale_h)), int(round(w*scale_w)))
else:
return (h, w, c), (int(round(h*scale_h)), int(round(w*scale_w)), c)
ishape, oshape = get_shape()
x = relay.var("x", relay.TensorType((n,) + ishape, dtype))
y = relay.nn.upsampling(x, scale_h=scale_h, scale_w=scale_w, layout=layout,
method=method, align_corners=align_corners)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n,) + oshape, dtype)
dshape = (1,) + ishape
x = relay.var("x", shape=dshape)
y = relay.nn.upsampling(x, scale_h=scale_h, scale_w=scale_w, layout=layout,
method=method, align_corners=align_corners)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
if method == "nearest_neighbor":
ref = topi.testing.upsampling_python(data, (scale_h, scale_w), layout)
else:
ref = topi.testing.bilinear_resize_python(data, (int(round(h*scale_h)),
int(round(w*scale_w))), layout)
for target, ctx in ctx_list():
executor = relay.create_executor("graph", ctx=ctx, target=target)
out = executor.evaluate(func)(data)
tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5)
def test_upsampling():
_test_upsampling("NCHW", "nearest_neighbor")
_test_upsampling("NCHW", "bilinear", True)
_test_upsampling("NHWC", "nearest_neighbor")
_test_upsampling("NHWC", "bilinear", True)
def _test_upsampling3d(layout, method, coordinate_transformation_mode="half_pixel"):
n, c, d, h, w = tvm.size_var("n"), 8, 16, 16, 16
scale_d = 2.0
scale_h = 2.0
scale_w = 2.0
dtype = "float32"
def get_shape():
if layout == "NCDHW":
return (c, d, h, w), (c, int(round(d*scale_d)), int(round(h*scale_h)),\
int(round(w*scale_w)))
else:
return (d, h, w, c), (int(round(d*scale_d)), int(round(h*scale_h)),\
int(round(w*scale_w)), c)
ishape, oshape = get_shape()
x = relay.var("x", relay.TensorType((n,) + ishape, dtype))
y = relay.nn.upsampling3d(x, scale_d=scale_d, scale_h=scale_h, scale_w=scale_w,\
layout=layout, method=method,\
coordinate_transformation_mode=coordinate_transformation_mode)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n,) + oshape, dtype)
dshape = (1,) + ishape
x = relay.var("x", shape=dshape)
y = relay.nn.upsampling3d(x, scale_d=scale_d, scale_h=scale_h, scale_w=scale_w,\
layout=layout, method=method,\
coordinate_transformation_mode=coordinate_transformation_mode)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
if method == "nearest_neighbor":
ref = topi.testing.upsampling3d_python(data, (scale_d, scale_h, scale_w), layout)
else:
ref = topi.testing.trilinear_resize3d_python(data, (int(round(d*scale_d)),\
int(round(h*scale_h)),\
int(round(w*scale_w))), layout)
for target, ctx in ctx_list():
executor = relay.create_executor("graph", ctx=ctx, target=target)
out = executor.evaluate(func)(data)
tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5)
def test_upsampling3d():
_test_upsampling3d("NCDHW", "nearest_neighbor")
_test_upsampling3d("NCDHW", "trilinear", "align_corners")
_test_upsampling3d("NDHWC", "nearest_neighbor")
_test_upsampling3d("NDHWC", "trilinear", "align_corners")
def test_conv2d_int8_intrinsics():
def _compile(ic, oc, target, data_layout, kernel_layout, dtypes):
input_dtype, weight_dtype, output_dtype = dtypes
n, h, w, ch, cw = 1, 64, 64, 3, 3
if data_layout == 'NCHW':
data_shape = (n, ic, h, w)
x = relay.var("x", relay.TensorType(data_shape, input_dtype))
elif data_layout == 'NHWC':
data_shape = (n, h, w, ic)
x = relay.var("x", relay.TensorType(data_shape, input_dtype))
else:
raise ValueError('Not supported')
if kernel_layout == 'OIHW':
kernel_shape = (oc, ic, ch, cw)
elif kernel_layout == 'HWIO':
kernel_shape = (ch, cw, ic, oc)
else:
raise ValueError('Not supported')
weight = relay.var("weight", relay.TensorType(kernel_shape, weight_dtype))
y = relay.nn.conv2d(x, weight,
kernel_size=(ch, cw),
channels=oc,
padding=(1, 1),
dilation=(1, 1),
data_layout=data_layout,
kernel_layout=kernel_layout,
out_dtype=output_dtype)
func = relay.Function([x, weight], y)
wdata = np.random.rand(*kernel_shape) * 10
parameters = {"weight": tvm.nd.array(wdata.astype(weight_dtype))}
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(func, target, params=parameters)
assembly = lib.get_source("asm")
return assembly
def _has_fast_int8_instructions(asm, target):
if 'skylake-avx512' in target:
return "pmaddubs" in asm
elif 'cascadelake' in target:
return "vpdpbusd" in asm
else:
assert False, "Target should be Skylake or Cascadelake"
# compile conv2d for x86 (skylake, cascadelake) and test assembly contains *pmadd* instructions
targets = ["llvm -mcpu=skylake-avx512", "llvm -mcpu=cascadelake"]
llvm_version = tvm.codegen.llvm_version_major()
for target in targets:
if llvm_version >= 8:
dtypes = ('uint8', 'int8', 'int32')
# Sweep the input channels to check int8 robustness
# Input channels should be a multiple of 4 internally.
for ic in [1, 4, 6]:
asm = _compile(ic=ic, oc=16, target=target, data_layout="NCHW",
kernel_layout='OIHW',
dtypes=dtypes)
assert _has_fast_int8_instructions(asm, target)
for ic in [1, 4, 6]:
asm = _compile(ic=ic, oc=16, target=target, data_layout="NHWC",
kernel_layout='HWIO',
dtypes=dtypes)
assert _has_fast_int8_instructions(asm, target)
# Sweep the output channels to check int8 robustness
# Output channels should be a multiple of 16 internally.
for oc in [4, 16, 20]:
asm = _compile(ic=8, oc=oc, target=target, data_layout="NCHW",
kernel_layout='OIHW',
dtypes=dtypes)
assert _has_fast_int8_instructions(asm, target)
for oc in [4, 16, 20]:
asm = _compile(ic=8, oc=oc, target=target, data_layout="NHWC",
kernel_layout='HWIO',
dtypes=dtypes)
assert _has_fast_int8_instructions(asm, target)
# Check that both non-divisible oc and ic work
asm = _compile(ic=17, oc=29, target=target, data_layout="NCHW", kernel_layout='OIHW',
dtypes=dtypes)
assert _has_fast_int8_instructions(asm, target)
asm = _compile(ic=17, oc=29, target=target, data_layout="NHWC", kernel_layout='HWIO',
dtypes=dtypes)
assert _has_fast_int8_instructions(asm, target)
# Check that int8 x int8 goes through legalization so that fast instructions can be picked up.
for target in targets:
if llvm_version >= 8:
dtypes = (('int8', 'int8', 'int32'))
# Check that both non-divisible oc and ic work
asm = _compile(ic=17, oc=29, target=target, data_layout="NCHW", kernel_layout='OIHW',
dtypes=dtypes)
assert _has_fast_int8_instructions(asm, target)
asm = _compile(ic=17, oc=29, target=target, data_layout="NHWC", kernel_layout='HWIO',
dtypes=dtypes)
assert _has_fast_int8_instructions(asm, target)
# Ensure that code is generated when datatypes are not HW supported.
dtypes = ('uint8', 'uint8', 'int32')
asm = _compile(ic=16, oc=32, target=target, data_layout="NHWC", kernel_layout='HWIO',
dtypes=dtypes)
# Check that intrinisic is not present in the assembly.
assert not _has_fast_int8_instructions(asm, target)
# Check that a vectorized instruction is generated for older Intel
# generations, because we default to NCHWc layout.
target = "llvm -mcpu=core-avx2"
fast_int8_dtypes = ('uint8', 'int8', 'int32')
asm = _compile(ic=16, oc=32, target=target, data_layout="NCHW", kernel_layout='OIHW',
dtypes=fast_int8_dtypes)
# Check that vector int mult and add instructions are generated.
assert "vpmulld" in asm and "vpadd" in asm
def test_depthwise_conv2d_int8():
input_dtype = 'uint8'
weight_dtype = 'int8'
output_dtype = 'int32'
data_shape = (1, 64, 56, 56)
x = relay.var("x", relay.TensorType(data_shape, input_dtype))
kernel_shape = (64, 1, 3, 3)
weight = relay.var("weight", relay.TensorType(kernel_shape, weight_dtype))
y = relay.nn.conv2d(x, weight,
kernel_size=(3, 3),
groups=64,
padding=(1, 1),
dilation=(1, 1),
out_dtype=output_dtype)
func = relay.Function([x, weight], y)
wdata = np.random.rand(*kernel_shape) * 10
parameters = {"weight": tvm.nd.array(wdata.astype(weight_dtype))}
targets = ["llvm -mcpu=skylake-avx512", "llvm -mcpu=cascadelake"]
llvm_version = tvm.codegen.llvm_version_major()
for target in targets:
if llvm_version >= 8:
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(func, target, params=parameters)
def test_bitserial_conv2d_infer_type():
# Basic shape test with ambiguous batch.
n, c, h, w = tvm.size_var("n"), 32, 224, 224
x = relay.var("x", relay.ty.TensorType((n, c, h, w), "int16"))
w = relay.var("w", relay.ty.TensorType((32, 32, 3, 3), "int16"))
y = relay.nn.bitserial_conv2d(
x, w, kernel_size=(3, 3), padding=(0, 0), channels=32)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, 32, 222, 222), "int16")
def test_bitpack_infer_type():
# Test axis packing shape inference.
o, i, h, w = 32, 32, 128, 128
x = relay.var("x", relay.ty.TensorType((o, i, h, w), "int16"))
y = relay.nn.bitpack(x, bit_axis=4, pack_axis=1, pack_type='uint16', bits=1)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(32, 2, 128, 128, 1), "uint16")
if __name__ == "__main__":
test_pool1d()
test_pool2d()
test_pool3d()
test_avg_pool2d_no_count_pad()
test_lrn()
test_l2_normalize()
test_conv1d_infer_type()
test_conv2d_infer_type()
test_conv3d_infer_type()
test_bitpack_infer_type()
test_upsampling_infer_type()
test_upsampling3d_infer_type()
test_flatten_infer_type()
test_pad_infer_type()
test_pad_run()
test_conv2d_transpose_infer_type()
test_conv2d_transpose_nchw_run()
test_conv2d_transpose_nhwc_run()
test_conv1d_transpose_ncw_run()
test_conv1d_run()
test_conv2d_run()
test_conv2d_winograd()
test_conv3d_run()
test_conv3d_ndhwc_run()
test_bitserial_conv2d_infer_type()
test_batch_flatten()
test_upsampling()
test_upsampling3d()
test_conv2d_int8_intrinsics()
test_depthwise_conv2d_int8()
| 43.052714
| 101
| 0.564126
| 7,620
| 54,720
| 3.899344
| 0.068766
| 0.019789
| 0.017871
| 0.019857
| 0.760374
| 0.72682
| 0.703396
| 0.686972
| 0.665803
| 0.645475
| 0
| 0.058336
| 0.286056
| 54,720
| 1,270
| 102
| 43.086614
| 0.70223
| 0.053162
| 0
| 0.574004
| 0
| 0.003795
| 0.049316
| 0.000967
| 0
| 0
| 0
| 0
| 0.085389
| 1
| 0.047438
| false
| 0
| 0.00759
| 0
| 0.065465
| 0.000949
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
0ad33111935325f80d27dfada02fe97074254f24
| 2,206
|
py
|
Python
|
qf_lib/containers/futures/future_contract.py
|
webclinic017/qf-lib
|
96463876719bba8a76c8269cef76addf3a2d836d
|
[
"Apache-2.0"
] | 198
|
2019-08-16T15:09:23.000Z
|
2022-03-30T12:44:00.000Z
|
qf_lib/containers/futures/future_contract.py
|
webclinic017/qf-lib
|
96463876719bba8a76c8269cef76addf3a2d836d
|
[
"Apache-2.0"
] | 13
|
2021-01-07T10:15:19.000Z
|
2022-03-29T13:01:47.000Z
|
qf_lib/containers/futures/future_contract.py
|
webclinic017/qf-lib
|
96463876719bba8a76c8269cef76addf3a2d836d
|
[
"Apache-2.0"
] | 29
|
2019-08-16T15:21:28.000Z
|
2022-02-23T09:53:49.000Z
|
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from qf_lib.common.tickers.tickers import Ticker
from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame
class FutureContract(object):
""" Class representing a single future contract.
The FutureContract is a simple class representing one futures contract. The FutureContract objects are used by the
FuturesChain, in order to provide the contracts chaining possibilities. It requires 3 parameters: ticker, which is
the symbol of the specific future contract (e.g. BloombergFutureTicker(“CTZ9 Comdty”)), expiration date of the
contract and a PricesDataFrame, containing dates with price field values.
Parameters
----------
ticker: Ticker
symbol of the future contract
exp_date: datetime
expiration date
data: PricesDataFrame
data frame containing dates with price fields values
"""
def __init__(self, ticker: Ticker, exp_date: datetime, data: PricesDataFrame):
self.ticker = ticker
self.exp_date = exp_date
self.data = data
def __str__(self):
return 'Contract: ticker: {}, expiration date: {}'.format(
self.ticker, self.exp_date)
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, FutureContract):
return False
return (self.ticker, self.exp_date, self.data) == (other.ticker, other.exp_date, other.data)
def __hash__(self):
return hash((self.ticker, self.exp_date, self.data))
| 38.701754
| 118
| 0.704442
| 287
| 2,206
| 5.324042
| 0.466899
| 0.036649
| 0.034031
| 0.044503
| 0.051702
| 0.037958
| 0.037958
| 0
| 0
| 0
| 0
| 0.005851
| 0.225295
| 2,206
| 56
| 119
| 39.392857
| 0.887654
| 0.582956
| 0
| 0
| 0
| 0
| 0.048349
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.210526
| false
| 0
| 0.157895
| 0.105263
| 0.684211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 3
|
0ae277577c0d9cf0180a37747d11d8dcd292baa5
| 57
|
py
|
Python
|
player.py
|
Drayux/Battlematus
|
1709a15b58d9274b99ec36eff1a181014d155037
|
[
"MIT"
] | null | null | null |
player.py
|
Drayux/Battlematus
|
1709a15b58d9274b99ec36eff1a181014d155037
|
[
"MIT"
] | null | null | null |
player.py
|
Drayux/Battlematus
|
1709a15b58d9274b99ec36eff1a181014d155037
|
[
"MIT"
] | null | null | null |
# PLAYER
class player:
def __init__(self):
| 9.5
| 23
| 0.561404
| 6
| 57
| 4.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.350877
| 57
| 5
| 24
| 11.4
| 0.756757
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
0ae3d125da916faaaf9490284cbbfda3ebc0f150
| 1,735
|
py
|
Python
|
soupy/approximations/taylor/backup/__init__.py
|
cpempire/soupy
|
9f65e3329fa126619c893daa4cd80478d83f840c
|
[
"MIT"
] | 1
|
2021-12-07T15:22:23.000Z
|
2021-12-07T15:22:23.000Z
|
soupy/approximations/taylor/backup/__init__.py
|
cpempire/soupy
|
9f65e3329fa126619c893daa4cd80478d83f840c
|
[
"MIT"
] | null | null | null |
soupy/approximations/taylor/backup/__init__.py
|
cpempire/soupy
|
9f65e3329fa126619c893daa4cd80478d83f840c
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function
from .controlPDEProblem import ControlPDEProblem
from .controlPDEProblemMultiPDE import ControlPDEProblemMultiPDE
from .costFunctionalConstant import CostFunctionalConstant
from .costFunctionalConstantMultiPDE import CostFunctionalConstantMultiPDE
from .costFunctionalLinear import CostFunctionalLinear
from .costFunctionalLinearMultiPDE import CostFunctionalLinearMultiPDE
from .costFunctionalQuadratic import CostFunctionalQuadratic
from .costFunctionalQuadraticMultiPDE import CostFunctionalQuadraticMultiPDE
# from .chanceConstraintQuadratic import ChanceConstraintQuadratic
# from .chanceConstraintLinear import ChanceConstraintLinear
# from .chanceConstraintConstant import ChanceConstraintConstant
# to do list
# 0. implement zero, Hessian term
# 1. implement linear
# 2. implement quadratic
# 3. impelement SAA
# to do list
# 1. SAA does not run well in ccgo1, multiprocessor does not work,
### not clear bug, simplifing adjoint solver works
# 2. quadratic approximation does not converge well, even without variance, does not converge
### record eigenvector after m_tr[i].zero()
# 3. check gradient for quadratic + correction
# what to show tomorrow
# 1. variance reduction by mean square error
# 2. trace estimation by MC and randomized SVD
# 3. scaling with repsect to mesh (design + uncertainty), trace, variance reduction, #bfgs
# 4. show the design and state, for both disk and submarine
# 5. random sample and state at different design
# April 9, 2018, work on reporting results
# 1. random samples and states at different design
# 2. table for variance reduction
# 3. plot trace estimation
# 4. plot #bfgs iterations
# obtain all results as planned
| 42.317073
| 93
| 0.821326
| 200
| 1,735
| 7.09
| 0.545
| 0.019746
| 0.011284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014599
| 0.131412
| 1,735
| 41
| 94
| 42.317073
| 0.926344
| 0.630548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 3
|
0afd820091335019ca4a87a89952513413136cc0
| 69
|
py
|
Python
|
src/metarl/tf/plotter/__init__.py
|
icml2020submission6857/metarl
|
9b66cefa2b6bcb6a38096d629ce8853b47c7171d
|
[
"MIT"
] | 2
|
2020-03-15T14:35:15.000Z
|
2021-02-15T16:38:00.000Z
|
src/metarl/tf/plotter/__init__.py
|
neurips2020submission11699/metarl
|
ae4825d21478fa1fd0aa6b116941ea40caa152a5
|
[
"MIT"
] | null | null | null |
src/metarl/tf/plotter/__init__.py
|
neurips2020submission11699/metarl
|
ae4825d21478fa1fd0aa6b116941ea40caa152a5
|
[
"MIT"
] | 1
|
2020-02-24T03:04:23.000Z
|
2020-02-24T03:04:23.000Z
|
from metarl.tf.plotter.plotter import Plotter
__all__ = ['Plotter']
| 17.25
| 45
| 0.768116
| 9
| 69
| 5.444444
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115942
| 69
| 3
| 46
| 23
| 0.803279
| 0
| 0
| 0
| 0
| 0
| 0.101449
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 3
|
7c159cac6567c00ed5a82a064ec8c65b30f68447
| 1,595
|
py
|
Python
|
economist/migrations/0003_auto_20170406_1402.py
|
xingjianpan/news_reader_backend
|
c892e157460ef22720bfcbad5a7d2bfe9bcd4aa9
|
[
"MIT"
] | 1
|
2017-11-01T02:12:24.000Z
|
2017-11-01T02:12:24.000Z
|
economist/migrations/0003_auto_20170406_1402.py
|
xingjianpan/news_reader_backend
|
c892e157460ef22720bfcbad5a7d2bfe9bcd4aa9
|
[
"MIT"
] | null | null | null |
economist/migrations/0003_auto_20170406_1402.py
|
xingjianpan/news_reader_backend
|
c892e157460ef22720bfcbad5a7d2bfe9bcd4aa9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-06 06:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('economist', '0002_auto_20170406_1153'),
]
operations = [
migrations.AlterField(
model_name='article',
name='alternativename',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='article',
name='category',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='article',
name='fly_title',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='article',
name='headline',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='article',
name='project',
field=models.TextField(editable=False),
),
migrations.AlterField(
model_name='article',
name='source',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='article',
name='source_url',
field=models.URLField(editable=False),
),
migrations.AlterField(
model_name='article',
name='spider',
field=models.TextField(editable=False),
),
]
| 28.482143
| 58
| 0.552978
| 145
| 1,595
| 5.958621
| 0.358621
| 0.185185
| 0.231481
| 0.268519
| 0.689815
| 0.628472
| 0.582176
| 0.568287
| 0.445602
| 0.445602
| 0
| 0.030812
| 0.328527
| 1,595
| 55
| 59
| 29
| 0.77591
| 0.042633
| 0
| 0.645833
| 1
| 0
| 0.103018
| 0.015092
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.041667
| 0
| 0.104167
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
7c16097e2ba8634058cfc608cf9a3d535fa94016
| 2,051
|
py
|
Python
|
test/test_ethereum.py
|
coinplus-sa/coinplus-solo
|
e4f385a3d9eb7b72e14e397761fd9a113938917a
|
[
"MIT"
] | 1
|
2018-08-21T06:28:36.000Z
|
2018-08-21T06:28:36.000Z
|
test/test_ethereum.py
|
coinplus-sa/coinplus-solo
|
e4f385a3d9eb7b72e14e397761fd9a113938917a
|
[
"MIT"
] | 1
|
2019-05-30T06:23:41.000Z
|
2019-09-03T09:49:06.000Z
|
test/test_ethereum.py
|
coinplus-sa/coinplus-solo
|
e4f385a3d9eb7b72e14e397761fd9a113938917a
|
[
"MIT"
] | 1
|
2021-06-30T12:36:25.000Z
|
2021-06-30T12:36:25.000Z
|
import unittest
from coinplus_solo_redeem.common import wif_export_bitcoin, compute_public_key_sec256k1, address_from_publickey_ethereum
class TestEthereum(unittest.TestCase):
"""test of the bitcoin conversion from private key to wif"""
def setUp(self):
self.test_add_vector = [("03cb3e5f30245658e1e3615f1620e5b40f7d9016c0edb3611dd786327dd5e40caa", "0xfd965bB8907566c550D8C0325207a1cB744f2fc2"),
("03c2773e19b0cd4175832d781d521390e5aac7b0841904f93211bf114786f5a145", "0xDB1F8a8B668F15B9e696dDfF30Ce233703f9eC97"),
("0277c3757e791426b7fa43cf64197bfd5c2fe277ece721b12558a52729f6b68b8a", "0x6C4DCd1f900d89a7A70C9A5bA9F7a24a4Bd70878"),
("02d93dfcd93a76d7bac5b0fa394ad4bfd6cd92d10a64728b4b5f707d87db9cd2aa", "0x42F7C7ccD753055c219B85ddc5F05512b3f94528"),
("037049004c5ad576beb518dcc74506df3faf520109a489886b7d1435a63b9b0b88", "0x0af4DbEf58063AEd75e6fF57610348E55954E8FB"),
("0260bbacc03555af21f062ff04e9fbde36bcf0ae7396812d336e7f2e5292306f2b", "0xd13AA41456549AAf4F00C681e014E8CEd8c04d60"),
("0343710601de0710dd81a0b7102bf1b794809a330caf4e1b4ae6567923c00df6a5", "0x011934E5d9EE8C230BBFccF33Ab83c62E5486d91"),
("028c48ff458287f34cc1ad5c58a441500f8f315e9cabe34ff1601a5a0f791e4d0a", "0x98447B7aC721BDeb197a7e72780f6f41BECA2919"),
("0258cdabe1dad468dda6a7d62bee9e0cddadfe87d664e62df9143e769c017dd651", "0xaA5EacE5be0D09B09BAf66df62b0D85EA20b4ee4"),
("0289a6d2272382ceec291674530eebb1b05dadab88ebf1bc45569ba612a4e3973a", "0x79B4044CeB2DFAa123FbE5B4da43BF7cFF01718c")]
def test_address_testvector(self):
for publickey_hex, address_expected in self.test_add_vector:
publickey = bytearray.fromhex(publickey_hex)
address = address_from_publickey_ethereum(publickey)
self.assertEqual(address, address_expected)
| 85.458333
| 149
| 0.741589
| 91
| 2,051
| 16.461538
| 0.648352
| 0.014686
| 0.026702
| 0.037383
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.406231
| 0.201853
| 2,051
| 23
| 150
| 89.173913
| 0.508858
| 0.026329
| 0
| 0
| 0
| 0
| 0.542441
| 0.542441
| 0
| 0
| 0.210949
| 0
| 0.052632
| 1
| 0.105263
| false
| 0
| 0.105263
| 0
| 0.263158
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
7c1ee1ca0bd0d4b48cc0fd831915fd050efb4c03
| 7,323
|
py
|
Python
|
clients/kratos/python/test/test_v0alpha1_api.py
|
kolotaev/sdk
|
0dda1becd70be8d7b9d678321ebe780c1ba00485
|
[
"Apache-2.0"
] | null | null | null |
clients/kratos/python/test/test_v0alpha1_api.py
|
kolotaev/sdk
|
0dda1becd70be8d7b9d678321ebe780c1ba00485
|
[
"Apache-2.0"
] | null | null | null |
clients/kratos/python/test/test_v0alpha1_api.py
|
kolotaev/sdk
|
0dda1becd70be8d7b9d678321ebe780c1ba00485
|
[
"Apache-2.0"
] | null | null | null |
"""
Ory Kratos API
Documentation for all public and administrative Ory Kratos APIs. Public and administrative APIs are exposed on different ports. Public APIs can face the public internet without any protection while administrative APIs should never be exposed without prior authorization. To protect the administative API port you should use something like Nginx, Ory Oathkeeper, or any other technology capable of authorizing incoming requests. # noqa: E501
The version of the OpenAPI document: v0.7.0-alpha.1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import unittest
import ory_kratos_client
from ory_kratos_client.api.v0alpha1_api import V0alpha1Api # noqa: E501
class TestV0alpha1Api(unittest.TestCase):
"""V0alpha1Api unit test stubs"""
def setUp(self):
self.api = V0alpha1Api() # noqa: E501
def tearDown(self):
pass
def test_admin_create_identity(self):
"""Test case for admin_create_identity
Create an Identity # noqa: E501
"""
pass
def test_admin_create_self_service_recovery_link(self):
"""Test case for admin_create_self_service_recovery_link
Create a Recovery Link # noqa: E501
"""
pass
def test_admin_delete_identity(self):
"""Test case for admin_delete_identity
Delete an Identity # noqa: E501
"""
pass
def test_admin_get_identity(self):
"""Test case for admin_get_identity
Get an Identity # noqa: E501
"""
pass
def test_admin_list_identities(self):
"""Test case for admin_list_identities
List Identities # noqa: E501
"""
pass
def test_admin_update_identity(self):
"""Test case for admin_update_identity
Update an Identity # noqa: E501
"""
pass
def test_create_self_service_logout_flow_url_for_browsers(self):
"""Test case for create_self_service_logout_flow_url_for_browsers
Create a Logout URL for Browsers # noqa: E501
"""
pass
def test_get_json_schema(self):
"""Test case for get_json_schema
"""
pass
def test_get_self_service_error(self):
"""Test case for get_self_service_error
Get Self-Service Errors # noqa: E501
"""
pass
def test_get_self_service_login_flow(self):
"""Test case for get_self_service_login_flow
Get Login Flow # noqa: E501
"""
pass
def test_get_self_service_recovery_flow(self):
"""Test case for get_self_service_recovery_flow
Get Recovery Flow # noqa: E501
"""
pass
def test_get_self_service_registration_flow(self):
"""Test case for get_self_service_registration_flow
Get Registration Flow # noqa: E501
"""
pass
def test_get_self_service_settings_flow(self):
"""Test case for get_self_service_settings_flow
Get Settings Flow # noqa: E501
"""
pass
def test_get_self_service_verification_flow(self):
"""Test case for get_self_service_verification_flow
Get Verification Flow # noqa: E501
"""
pass
def test_initialize_self_service_login_flow_for_browsers(self):
"""Test case for initialize_self_service_login_flow_for_browsers
Initialize Login Flow for Browsers # noqa: E501
"""
pass
def test_initialize_self_service_login_flow_without_browser(self):
"""Test case for initialize_self_service_login_flow_without_browser
Initialize Login Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_initialize_self_service_recovery_flow_for_browsers(self):
"""Test case for initialize_self_service_recovery_flow_for_browsers
Initialize Recovery Flow for Browsers # noqa: E501
"""
pass
def test_initialize_self_service_recovery_flow_without_browser(self):
"""Test case for initialize_self_service_recovery_flow_without_browser
Initialize Recovery Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_initialize_self_service_registration_flow_for_browsers(self):
"""Test case for initialize_self_service_registration_flow_for_browsers
Initialize Registration Flow for Browsers # noqa: E501
"""
pass
def test_initialize_self_service_registration_flow_without_browser(self):
"""Test case for initialize_self_service_registration_flow_without_browser
Initialize Registration Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_initialize_self_service_settings_flow_for_browsers(self):
"""Test case for initialize_self_service_settings_flow_for_browsers
Initialize Settings Flow for Browsers # noqa: E501
"""
pass
def test_initialize_self_service_settings_flow_without_browser(self):
"""Test case for initialize_self_service_settings_flow_without_browser
Initialize Settings Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_initialize_self_service_verification_flow_for_browsers(self):
"""Test case for initialize_self_service_verification_flow_for_browsers
Initialize Verification Flow for Browser Clients # noqa: E501
"""
pass
def test_initialize_self_service_verification_flow_without_browser(self):
"""Test case for initialize_self_service_verification_flow_without_browser
Initialize Verification Flow for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_submit_self_service_login_flow(self):
"""Test case for submit_self_service_login_flow
Submit a Login Flow # noqa: E501
"""
pass
def test_submit_self_service_logout_flow(self):
"""Test case for submit_self_service_logout_flow
Complete Self-Service Logout # noqa: E501
"""
pass
def test_submit_self_service_logout_flow_without_browser(self):
"""Test case for submit_self_service_logout_flow_without_browser
Perform Logout for APIs, Services, Apps, ... # noqa: E501
"""
pass
def test_submit_self_service_recovery_flow(self):
"""Test case for submit_self_service_recovery_flow
Complete Recovery Flow # noqa: E501
"""
pass
def test_submit_self_service_registration_flow(self):
"""Test case for submit_self_service_registration_flow
Submit a Registration Flow # noqa: E501
"""
pass
def test_submit_self_service_settings_flow(self):
"""Test case for submit_self_service_settings_flow
Complete Settings Flow # noqa: E501
"""
pass
def test_submit_self_service_verification_flow(self):
"""Test case for submit_self_service_verification_flow
Complete Verification Flow # noqa: E501
"""
pass
def test_to_session(self):
"""Test case for to_session
Check Who the Current HTTP Session Belongs To # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 28.944664
| 446
| 0.677455
| 894
| 7,323
| 5.190157
| 0.138702
| 0.123276
| 0.075862
| 0.103448
| 0.721121
| 0.680172
| 0.618319
| 0.541164
| 0.400647
| 0.295905
| 0
| 0.021438
| 0.261095
| 7,323
| 252
| 447
| 29.059524
| 0.836075
| 0.509491
| 0
| 0.445946
| 0
| 0
| 0.002786
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.459459
| false
| 0.445946
| 0.040541
| 0
| 0.513514
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 3
|
7c29df3316dce7638b4588f6021b4bc59ffb4cfc
| 151
|
py
|
Python
|
base3_plus.py
|
Mhaiyang/iccv
|
04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb
|
[
"MIT"
] | 2
|
2019-01-10T03:44:03.000Z
|
2019-05-24T08:50:14.000Z
|
base3_plus.py
|
Mhaiyang/iccv
|
04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb
|
[
"MIT"
] | null | null | null |
base3_plus.py
|
Mhaiyang/iccv
|
04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb
|
[
"MIT"
] | null | null | null |
"""
@Time : 201/21/19 10:47
@Author : TaylorMei
@Email : [email protected]
@Project : iccv
@File : base3_plus.py
@Function:
"""
| 15.1
| 34
| 0.596026
| 19
| 151
| 4.684211
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184211
| 0.245033
| 151
| 10
| 35
| 15.1
| 0.596491
| 0.887417
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
7c2c2ee21f857be97b79a37957d75b5c80b83234
| 421
|
py
|
Python
|
docker/setup.py
|
sreynit02/RunestoneServer
|
2d72fd1c26264a8d7d88e2bccfe9bfbb4d8b9a98
|
[
"MIT"
] | null | null | null |
docker/setup.py
|
sreynit02/RunestoneServer
|
2d72fd1c26264a8d7d88e2bccfe9bfbb4d8b9a98
|
[
"MIT"
] | null | null | null |
docker/setup.py
|
sreynit02/RunestoneServer
|
2d72fd1c26264a8d7d88e2bccfe9bfbb4d8b9a98
|
[
"MIT"
] | null | null | null |
# ******************************************************************
# |docname| - Provide `docker_tools.py` as the script `docker-tools`
# ******************************************************************
from setuptools import setup
setup(
name="runestone-docker-tools",
version="0.1",
install_requires=["click"],
entry_points={
"console_scripts": ["docker-tools = docker_tools:cli"]
},
)
| 30.071429
| 68
| 0.444181
| 34
| 421
| 5.352941
| 0.735294
| 0.302198
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005495
| 0.135392
| 421
| 13
| 69
| 32.384615
| 0.494505
| 0.475059
| 0
| 0
| 0
| 0
| 0.35023
| 0.101382
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
7c30b20fb26e70f99e3a1516c799910198cc11b1
| 17,421
|
py
|
Python
|
mango/__init__.py
|
kronael/mango-explorer
|
6292c089c2a3d1ff2cf0b50b815849451a50ec39
|
[
"MIT"
] | null | null | null |
mango/__init__.py
|
kronael/mango-explorer
|
6292c089c2a3d1ff2cf0b50b815849451a50ec39
|
[
"MIT"
] | null | null | null |
mango/__init__.py
|
kronael/mango-explorer
|
6292c089c2a3d1ff2cf0b50b815849451a50ec39
|
[
"MIT"
] | null | null | null |
# In --strict mode, mypy complains about imports unless they're done this way.
#
# It complains 'Module has no attribute ABC' or 'Module "mango" does not explicitly export
# attribute "XYZ"; implicit reexport disabled'. We could dial that back by using the
# --implicit-reexport parameter, but let's keep things strict.
#
# Each import then *must* be of the form `from .file import X as X`. (Until/unless there's
# a better way.)
#
from .account import Account as Account
from .account import AccountSlot as AccountSlot
from .accountflags import AccountFlags as AccountFlags
from .accountinfo import AccountInfo as AccountInfo
from .accountinfoconverter import build_account_info_converter as build_account_info_converter
from .accountinstrumentvalues import AccountInstrumentValues as AccountInstrumentValues
from .accountinstrumentvalues import PricedAccountInstrumentValues as PricedAccountInstrumentValues
from .accountliquidator import AccountLiquidator as AccountLiquidator
from .accountliquidator import NullAccountLiquidator as NullAccountLiquidator
from .accountscout import AccountScout as AccountScout
from .accountscout import ScoutReport as ScoutReport
from .addressableaccount import AddressableAccount as AddressableAccount
from .arguments import parse_args as parse_args
from .arguments import output as output
from .balancesheet import BalanceSheet as BalanceSheet
from .cache import Cache as Cache
from .cache import MarketCache as MarketCache
from .cache import PerpMarketCache as PerpMarketCache
from .cache import PriceCache as PriceCache
from .cache import RootBankCache as RootBankCache
from .client import BetterClient as BetterClient
from .client import BlockhashNotFoundException as BlockhashNotFoundException
from .client import ClientException as ClientException
from .client import CompoundException as CompoundException
from .client import CompoundRPCCaller as CompoundRPCCaller
from .client import FailedToFetchBlockhashException as FailedToFetchBlockhashException
from .client import NodeIsBehindException as NodeIsBehindException
from .client import RateLimitException as RateLimitException
from .client import RPCCaller as RPCCaller
from .client import SlotHolder as SlotHolder
from .client import TooManyRequestsRateLimitException as TooManyRequestsRateLimitException
from .client import TooMuchBandwidthRateLimitException as TooMuchBandwidthRateLimitException
from .client import TransactionException as TransactionException
from .combinableinstructions import CombinableInstructions as CombinableInstructions
from .constants import MangoConstants as MangoConstants
from .constants import DATA_PATH as DATA_PATH
from .constants import SOL_DECIMAL_DIVISOR as SOL_DECIMAL_DIVISOR
from .constants import SOL_DECIMALS as SOL_DECIMALS
from .constants import SOL_MINT_ADDRESS as SOL_MINT_ADDRESS
from .constants import SYSTEM_PROGRAM_ADDRESS as SYSTEM_PROGRAM_ADDRESS
from .constants import WARNING_DISCLAIMER_TEXT as WARNING_DISCLAIMER_TEXT
from .constants import version as version
from .context import Context as Context
from .contextbuilder import ContextBuilder as ContextBuilder
from .createmarketoperations import create_market_instruction_builder as create_market_instruction_builder
from .createmarketoperations import create_market_operations as create_market_operations
from .encoding import decode_binary as decode_binary
from .encoding import encode_binary as encode_binary
from .encoding import encode_key as encode_key
from .encoding import encode_int as encode_int
from .ensuremarketloaded import ensure_market_loaded as ensure_market_loaded
from .ensuremarketloaded import load_market_by_symbol as load_market_by_symbol
from .group import Group as Group
from .group import GroupSlot as GroupSlot
from .group import GroupSlotPerpMarket as GroupSlotPerpMarket
from .group import GroupSlotSpotMarket as GroupSlotSpotMarket
from .healthcheck import HealthCheck as HealthCheck
from .idl import IdlParser as IdlParser
from .idl import lazy_load_cached_idl_parser as lazy_load_cached_idl_parser
from .idsjsonmarketlookup import IdsJsonMarketLookup as IdsJsonMarketLookup
from .inventory import Inventory as Inventory
from .inventory import PerpInventoryAccountWatcher as PerpInventoryAccountWatcher
from .inventory import SpotInventoryAccountWatcher as SpotInventoryAccountWatcher
from .instructions import build_cancel_perp_order_instructions as build_cancel_perp_order_instructions
from .instructions import build_cancel_spot_order_instructions as build_cancel_spot_order_instructions
from .instructions import build_close_spl_account_instructions as build_close_spl_account_instructions
from .instructions import build_create_account_instructions as build_create_account_instructions
from .instructions import build_create_associated_spl_account_instructions as build_create_associated_spl_account_instructions
from .instructions import build_create_solana_account_instructions as build_create_solana_account_instructions
from .instructions import build_create_spl_account_instructions as build_create_spl_account_instructions
from .instructions import build_create_serum_open_orders_instructions as build_create_serum_open_orders_instructions
from .instructions import build_deposit_instructions as build_deposit_instructions
from .instructions import build_faucet_airdrop_instructions as build_faucet_airdrop_instructions
from .instructions import build_mango_consume_events_instructions as build_mango_consume_events_instructions
from .instructions import build_place_perp_order_instructions as build_place_perp_order_instructions
from .instructions import build_redeem_accrued_mango_instructions as build_redeem_accrued_mango_instructions
from .instructions import build_serum_consume_events_instructions as build_serum_consume_events_instructions
from .instructions import build_serum_place_order_instructions as build_serum_place_order_instructions
from .instructions import build_serum_settle_instructions as build_serum_settle_instructions
from .instructions import build_spot_place_order_instructions as build_spot_place_order_instructions
from .instructions import build_transfer_spl_tokens_instructions as build_transfer_spl_tokens_instructions
from .instructions import build_withdraw_instructions as build_withdraw_instructions
from .instructionreporter import InstructionReporter as InstructionReporter
from .instructionreporter import SerumInstructionReporter as SerumInstructionReporter
from .instructionreporter import MangoInstructionReporter as MangoInstructionReporter
from .instructionreporter import CompoundInstructionReporter as CompoundInstructionReporter
from .instructiontype import InstructionType as InstructionType
from .instrumentlookup import InstrumentLookup as InstrumentLookup
from .instrumentlookup import NullInstrumentLookup as NullInstrumentLookup
from .instrumentlookup import CompoundInstrumentLookup as CompoundInstrumentLookup
from .instrumentlookup import IdsJsonTokenLookup as IdsJsonTokenLookup
from .instrumentlookup import NonSPLInstrumentLookup as NonSPLInstrumentLookup
from .instrumentlookup import SPLTokenLookup as SPLTokenLookup
from .instrumentvalue import InstrumentValue as InstrumentValue
from .liquidatablereport import LiquidatableState as LiquidatableState
from .liquidatablereport import LiquidatableReport as LiquidatableReport
from .liquidationevent import LiquidationEvent as LiquidationEvent
from .liquidationprocessor import LiquidationProcessor as LiquidationProcessor
from .liquidationprocessor import LiquidationProcessorState as LiquidationProcessorState
from .loadedmarket import LoadedMarket as LoadedMarket
from .logmessages import expand_log_messages as expand_log_messages
from .lotsizeconverter import LotSizeConverter as LotSizeConverter
from .mangoinstruction import MangoInstruction as MangoInstruction
from .lotsizeconverter import NullLotSizeConverter as NullLotSizeConverter
from .market import DryRunMarket as DryRunMarket
from .market import InventorySource as InventorySource
from .market import Market as Market
from .marketlookup import CompoundMarketLookup as CompoundMarketLookup
from .marketlookup import MarketLookup as MarketLookup
from .marketlookup import NullMarketLookup as NullMarketLookup
from .marketoperations import MarketInstructionBuilder as MarketInstructionBuilder
from .marketoperations import MarketOperations as MarketOperations
from .marketoperations import NullMarketInstructionBuilder as NullMarketInstructionBuilder
from .marketoperations import NullMarketOperations as NullMarketOperations
from .metadata import Metadata as Metadata
from .modelstate import ModelState as ModelState
from .notification import CompoundNotificationTarget as CompoundNotificationTarget
from .notification import ConsoleNotificationTarget as ConsoleNotificationTarget
from .notification import CsvFileNotificationTarget as CsvFileNotificationTarget
from .notification import DiscordNotificationTarget as DiscordNotificationTarget
from .notification import FilteringNotificationTarget as FilteringNotificationTarget
from .notification import MailjetNotificationTarget as MailjetNotificationTarget
from .notification import NotificationHandler as NotificationHandler
from .notification import NotificationTarget as NotificationTarget
from .notification import TelegramNotificationTarget as TelegramNotificationTarget
from .notification import parse_notification_target as parse_notification_target
from .observables import CaptureFirstItem as CaptureFirstItem
from .observables import CollectingObserverSubscriber as CollectingObserverSubscriber
from .observables import DisposePropagator as DisposePropagator
from .observables import DisposeWrapper as DisposeWrapper
from .observables import EventSource as EventSource
from .observables import FunctionObserver as FunctionObserver
from .observables import LatestItemObserverSubscriber as LatestItemObserverSubscriber
from .observables import NullObserverSubscriber as NullObserverSubscriber
from .observables import PrintingObserverSubscriber as PrintingObserverSubscriber
from .observables import TimestampedPrintingObserverSubscriber as TimestampedPrintingObserverSubscriber
from .observables import create_backpressure_skipping_observer as create_backpressure_skipping_observer
from .observables import debug_print_item as debug_print_item
from .observables import log_subscription_error as log_subscription_error
from .observables import observable_pipeline_error_reporter as observable_pipeline_error_reporter
from .openorders import OpenOrders as OpenOrders
from .oracle import Oracle as Oracle
from .oracle import OracleProvider as OracleProvider
from .oracle import OracleSource as OracleSource
from .oracle import Price as Price
from .oracle import SupportedOracleFeature as SupportedOracleFeature
from .orderbookside import OrderBookSideType as OrderBookSideType
from .orderbookside import PerpOrderBookSide as PerpOrderBookSide
from .orders import Order as Order
from .orders import OrderType as OrderType
from .orders import OrderBook as OrderBook
from .orders import Side as Side
from .ownedinstrumentvalue import OwnedInstrumentValue as OwnedInstrumentValue
from .oraclefactory import create_oracle_provider as create_oracle_provider
from .parse_account_info_to_orders import parse_account_info_to_orders as parse_account_info_to_orders
from .perpaccount import PerpAccount as PerpAccount
from .perpeventqueue import PerpEvent as PerpEvent
from .perpeventqueue import PerpEventQueue as PerpEventQueue
from .perpeventqueue import PerpFillEvent as PerpFillEvent
from .perpeventqueue import PerpOutEvent as PerpOutEvent
from .perpeventqueue import PerpUnknownEvent as PerpUnknownEvent
from .perpeventqueue import UnseenPerpEventChangesTracker as UnseenPerpEventChangesTracker
from .perpmarket import PerpMarket as PerpMarket
from .perpmarket import PerpMarketStub as PerpMarketStub
from .perpmarketdetails import PerpMarketDetails as PerpMarketDetails
from .perpmarketoperations import PerpMarketInstructionBuilder as PerpMarketInstructionBuilder
from .perpmarketoperations import PerpMarketOperations as PerpMarketOperations
from .perpopenorders import PerpOpenOrders as PerpOpenOrders
from .placedorder import PlacedOrder as PlacedOrder
from .placedorder import PlacedOrdersContainer as PlacedOrdersContainer
from .publickey import encode_public_key_for_sorting as encode_public_key_for_sorting
from .reconnectingwebsocket import ReconnectingWebsocket as ReconnectingWebsocket
from .retrier import RetryWithPauses as RetryWithPauses
from .retrier import retry_context as retry_context
from .serumeventqueue import SerumEventQueue as SerumEventQueue
from .serumeventqueue import UnseenSerumEventChangesTracker as UnseenSerumEventChangesTracker
from .serummarket import SerumMarket as SerumMarket
from .serummarket import SerumMarketStub as SerumMarketStub
from .serummarketlookup import SerumMarketLookup as SerumMarketLookup
from .serummarketoperations import SerumMarketInstructionBuilder as SerumMarketInstructionBuilder
from .serummarketoperations import SerumMarketOperations as SerumMarketOperations
from .spotmarket import SpotMarket as SpotMarket
from .spotmarket import SpotMarketStub as SpotMarketStub
from .spotmarketoperations import SpotMarketInstructionBuilder as SpotMarketInstructionBuilder
from .spotmarketoperations import SpotMarketOperations as SpotMarketOperations
from .text import indent_collection_as_str as indent_collection_as_str
from .text import indent_item_by as indent_item_by
from .token import Instrument as Instrument
from .token import SolToken as SolToken
from .token import Token as Token
from .tokenaccount import TokenAccount as TokenAccount
from .tokenbank import BankBalances as BankBalances
from .tokenbank import InterestRates as InterestRates
from .tokenbank import NodeBank as NodeBank
from .tokenbank import RootBank as RootBank
from .tokenbank import TokenBank as TokenBank
from .tradeexecutor import ImmediateTradeExecutor as ImmediateTradeExecutor
from .tradeexecutor import NullTradeExecutor as NullTradeExecutor
from .tradeexecutor import TradeExecutor as TradeExecutor
from .tradehistory import TradeHistory as TradeHistory
from .transactionscout import TransactionScout as TransactionScout
from .transactionscout import fetch_all_recent_transaction_signatures as fetch_all_recent_transaction_signatures
from .transactionscout import mango_instruction_from_response as mango_instruction_from_response
from .valuation import AccountValuation as AccountValuation
from .valuation import TokenValuation as TokenValuation
from .valuation import Valuation as Valuation
from .version import Version as Version
from .wallet import Wallet as Wallet
from .walletbalancer import FilterSmallChanges as FilterSmallChanges
from .walletbalancer import FixedTargetBalance as FixedTargetBalance
from .walletbalancer import LiveAccountBalancer as LiveAccountBalancer
from .walletbalancer import LiveWalletBalancer as LiveWalletBalancer
from .walletbalancer import NullWalletBalancer as NullWalletBalancer
from .walletbalancer import PercentageTargetBalance as PercentageTargetBalance
from .walletbalancer import TargetBalance as TargetBalance
from .walletbalancer import WalletBalancer as WalletBalancer
from .walletbalancer import calculate_required_balance_changes as calculate_required_balance_changes
from .walletbalancer import parse_fixed_target_balance as parse_fixed_target_balance
from .walletbalancer import parse_target_balance as parse_target_balance
from .walletbalancer import sort_changes_for_trades as sort_changes_for_trades
from .watcher import LamdaUpdateWatcher as LamdaUpdateWatcher
from .watcher import ManualUpdateWatcher as ManualUpdateWatcher
from .watcher import Watcher as Watcher
from .watchers import build_group_watcher as build_group_watcher
from .watchers import build_account_watcher as build_account_watcher
from .watchers import build_cache_watcher as build_cache_watcher
from .watchers import build_spot_open_orders_watcher as build_spot_open_orders_watcher
from .watchers import build_serum_open_orders_watcher as build_serum_open_orders_watcher
from .watchers import build_perp_open_orders_watcher as build_perp_open_orders_watcher
from .watchers import build_price_watcher as build_price_watcher
from .watchers import build_serum_inventory_watcher as build_serum_inventory_watcher
from .watchers import build_orderbook_watcher as build_orderbook_watcher
from .websocketsubscription import IndividualWebSocketSubscriptionManager as IndividualWebSocketSubscriptionManager
from .websocketsubscription import SharedWebSocketSubscriptionManager as SharedWebSocketSubscriptionManager
from .websocketsubscription import WebSocketAccountSubscription as WebSocketAccountSubscription
from .websocketsubscription import WebSocketLogSubscription as WebSocketLogSubscription
from .websocketsubscription import WebSocketProgramSubscription as WebSocketProgramSubscription
from .websocketsubscription import WebSocketSubscription as WebSocketSubscription
from .websocketsubscription import WebSocketSubscriptionManager as WebSocketSubscriptionManager
from .layouts import layouts
import decimal
# Increased precision from 18 to 36 because for a decimal like:
# val = Decimal("17436036573.2030800")
#
# The following rounding operations would both throw decimal.InvalidOperation:
# val.quantize(Decimal('.000000001'))
# round(val, 9)
decimal.getcontext().prec = 36
| 66.747126
| 126
| 0.89306
| 1,901
| 17,421
| 7.996844
| 0.192004
| 0.020984
| 0.027496
| 0.033746
| 0.160242
| 0.064465
| 0.045191
| 0.010854
| 0
| 0
| 0
| 0.002142
| 0.08903
| 17,421
| 260
| 127
| 67.003846
| 0.955766
| 0.03668
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.995868
| 0
| 0.995868
| 0.004132
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 3
|
7c31ff3f832fdd4d6ba0dc485287be931476d8a3
| 1,017
|
py
|
Python
|
BB/bbObjects/items/bbTurret.py
|
mwaitzman/GOF2BountyBot
|
b66026228b752b07ac4734ca74b60730dbd74995
|
[
"MIT"
] | null | null | null |
BB/bbObjects/items/bbTurret.py
|
mwaitzman/GOF2BountyBot
|
b66026228b752b07ac4734ca74b60730dbd74995
|
[
"MIT"
] | null | null | null |
BB/bbObjects/items/bbTurret.py
|
mwaitzman/GOF2BountyBot
|
b66026228b752b07ac4734ca74b60730dbd74995
|
[
"MIT"
] | null | null | null |
from .bbItem import bbItem
from ...bbConfig import bbData
class bbTurret(bbItem):
dps = 0.0
def __init__(self, name, aliases, dps=0.0, value=0, wiki="", manufacturer="", icon="", emoji=""):
super(bbTurret, self).__init__(name, aliases, value=value, wiki=wiki, manufacturer=manufacturer, icon=icon, emoji=emoji)
self.dps = dps
def statsStringShort(self):
return "*Dps: " + str(self.dps) + "*"
def getType(self):
return bbTurret
def fromDict(turretDict):
if turretDict["builtIn"]:
return bbData.builtInTurretObjs[turretDict["name"]]
else:
return bbTurret(turretDict["name"], turretDict["aliases"], dps=turretDict["dps"], value=turretDict["value"],
wiki=turretDict["wiki"] if "wiki" in turretDict else "", manufacturer=turretDict["manufacturer"] if "manufacturer" in turretDict else "",
icon=turretDict["icon"] if "icon" in turretDict else bbData.rocketIcon, emoji=turretDict["emoji"] if "emoji" in turretDict else "")
| 36.321429
| 145
| 0.66765
| 118
| 1,017
| 5.686441
| 0.271186
| 0.071535
| 0.09538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006031
| 0.184857
| 1,017
| 27
| 146
| 37.666667
| 0.803378
| 0
| 0
| 0
| 0
| 0
| 0.085546
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.111111
| 0.111111
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 3
|
7c42dc9f24c848eb5660235f34da5faf02dd1e33
| 2,192
|
py
|
Python
|
signin/tests.py
|
pptnz/swa_team2
|
253ae83d73c00245d359574d6a16f4eba9830950
|
[
"MIT"
] | null | null | null |
signin/tests.py
|
pptnz/swa_team2
|
253ae83d73c00245d359574d6a16f4eba9830950
|
[
"MIT"
] | 3
|
2018-06-07T17:18:16.000Z
|
2021-06-10T20:19:27.000Z
|
signin/tests.py
|
pptnz/swa_team2
|
253ae83d73c00245d359574d6a16f4eba9830950
|
[
"MIT"
] | 1
|
2018-06-25T23:52:57.000Z
|
2018-06-25T23:52:57.000Z
|
import json
from django.test import TestCase
from django.contrib.auth.models import User
from .models import CustomUser
from django.apps import apps
from .apps import SigninConfig
class SignInTest(TestCase):
def setUp(self):
self.django_user = User.objects.create_user(username='testusername', password='testpassword')
self.custom_user = CustomUser.objects.create(django_user=self.django_user)
def test_apps(self):
self.assertEqual(SigninConfig.name, 'signin')
self.assertEqual(apps.get_app_config('signin').name, 'signin')
def test_sign_in_redirect_page(self):
response = self.client.get('/')
self.assertRedirects(response, '/sign_in/')
def test_get(self):
response = self.client.get('/sign_in/')
self.assertEqual(response.status_code, 200)
def test_wrong_username(self):
response = self.client.post('/sign_in/', {'username': 'wrongusername', 'password': 'testpassword'})
self.assertEqual(response.status_code, 200)
def test_wrong_password(self):
response = self.client.post('/sign_in/', {'username': 'testusername', 'password': 'wrongpassword'})
self.assertEqual(response.status_code, 200)
def test_login(self):
response = self.client.post('/sign_in/', {'username': 'testusername', 'password': 'testpassword'})
self.assertRedirects(response, '/habitmaker/')
# todo: change this link
def test_login_other_page(self):
response = self.client.post('/sign_in/?next=/habitmaker/', {'username': 'testusername', 'password': 'testpassword'})
self.assertRedirects(response, '/habitmaker/')
def test_form_not_valid(self):
response = self.client.post('/sign_in/', {'username': 'testusername'})
self.assertEqual(response.status_code, 200)
def test_email_verification(self):
self.custom_user.authenticate_email()
self.assertTrue(self.custom_user.is_email_authenticated)
def test_already_signed_in(self):
self.client.login(username='testusername', password='testpassword')
response = self.client.get('/sign_in/')
self.assertRedirects(response, '/habitmaker/')
| 39.142857
| 124
| 0.696624
| 252
| 2,192
| 5.876984
| 0.253968
| 0.047265
| 0.097232
| 0.103984
| 0.466577
| 0.411884
| 0.411884
| 0.351114
| 0.180959
| 0.081026
| 0
| 0.006583
| 0.168339
| 2,192
| 55
| 125
| 39.854545
| 0.805815
| 0.010037
| 0
| 0.219512
| 0
| 0
| 0.17297
| 0.012454
| 0
| 0
| 0
| 0.018182
| 0.268293
| 1
| 0.268293
| false
| 0.170732
| 0.146341
| 0
| 0.439024
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 3
|
7c44dd7ee7dcb34a8c6b443486c1190c2f8b538a
| 707
|
py
|
Python
|
tree/list/BinaryNode.py
|
EliHar/BinaryTree-ADT
|
bf220eb8ccb04f6fee7d7a67ef7e9cd00cc6a4c1
|
[
"MIT"
] | null | null | null |
tree/list/BinaryNode.py
|
EliHar/BinaryTree-ADT
|
bf220eb8ccb04f6fee7d7a67ef7e9cd00cc6a4c1
|
[
"MIT"
] | null | null | null |
tree/list/BinaryNode.py
|
EliHar/BinaryTree-ADT
|
bf220eb8ccb04f6fee7d7a67ef7e9cd00cc6a4c1
|
[
"MIT"
] | null | null | null |
__author__ = 'Elias Haroun'
class BinaryNode(object):
def __init__(self, data, left, right):
self.data = data
self.left = left
self.right = right
def getData(self):
return self.data
def getLeft(self):
return self.left
def getRight(self):
return self.right
def setData(self, data):
self.data = data
def setLeft(self, aNode):
self.left = aNode
def setRight(self, aNode):
self.right = aNode
def hasLeft(self):
return self.getLeft() is not None
def hasRight(self):
return self.getRight() is not None
def isLeaf(self):
return not(self.hasLeft() | self.hasRight())
| 19.108108
| 52
| 0.589816
| 87
| 707
| 4.701149
| 0.298851
| 0.146699
| 0.171149
| 0.05868
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.308345
| 707
| 36
| 53
| 19.638889
| 0.836401
| 0
| 0
| 0.083333
| 0
| 0
| 0.016997
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.416667
| false
| 0
| 0
| 0.25
| 0.708333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 3
|
7c5bc0acf118170063960ef1b43392c65c34384f
| 1,421
|
py
|
Python
|
TM1py/Objects/ElementAttribute.py
|
damirishpreet/TM1py
|
8482d0787fd5a9e5eb05a0288c41b75fc1fc93ac
|
[
"MIT"
] | 19
|
2016-03-04T19:21:40.000Z
|
2021-12-10T02:39:51.000Z
|
TM1py/Objects/ElementAttribute.py
|
damirishpreet/TM1py
|
8482d0787fd5a9e5eb05a0288c41b75fc1fc93ac
|
[
"MIT"
] | 11
|
2016-08-24T19:27:11.000Z
|
2017-07-30T01:10:28.000Z
|
TM1py/Objects/ElementAttribute.py
|
damirishpreet/TM1py
|
8482d0787fd5a9e5eb05a0288c41b75fc1fc93ac
|
[
"MIT"
] | 6
|
2016-08-03T19:28:45.000Z
|
2017-01-30T12:25:05.000Z
|
# -*- coding: utf-8 -*-
import json
from TM1py.Objects.TM1Object import TM1Object
class ElementAttribute(TM1Object):
""" Abstraction of TM1 Element Attributes
"""
valid_types = ['NUMERIC', 'STRING', 'ALIAS']
def __init__(self, name, attribute_type):
self.name = name
self.attribute_type = attribute_type
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def attribute_type(self):
return self._attribute_type
@attribute_type.setter
def attribute_type(self, value):
if value.upper() in ElementAttribute.valid_types:
self._attribute_type = value
else:
raise Exception('{} not a valid Attribute Type.'.format(value))
@property
def body_as_dict(self):
return {"Name": self._name, "Type": self._attribute_type}
@property
def body(self):
return json.dumps(self.body_as_dict, ensure_ascii=False)
@classmethod
def from_json(cls, element_attribute_as_json):
return cls.from_dict(json.loads(element_attribute_as_json))
@classmethod
def from_dict(cls, element_attribute_as_dict):
return cls(name=element_attribute_as_dict['Name'],
attribute_type=element_attribute_as_dict['Type'])
def __eq__(self, other):
return self.name == other
| 25.375
| 75
| 0.655172
| 171
| 1,421
| 5.157895
| 0.304094
| 0.162132
| 0.102041
| 0.07483
| 0.068027
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005587
| 0.244194
| 1,421
| 55
| 76
| 25.836364
| 0.815642
| 0.049261
| 0
| 0.162162
| 0
| 0
| 0.047619
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.27027
| false
| 0
| 0.054054
| 0.189189
| 0.567568
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 3
|
7c7069a54d49756f83e36923521eba70ab74f6c7
| 139
|
py
|
Python
|
demo/demo/accounts/urls.py
|
caravancoop/rest-auth-toolkit
|
425bf293987f7128d9538f27a5eca7e47ba84217
|
[
"MIT"
] | 1
|
2019-12-23T21:51:06.000Z
|
2019-12-23T21:51:06.000Z
|
demo/demo/accounts/urls.py
|
caravancoop/rest-framework-auth-toolkit
|
425bf293987f7128d9538f27a5eca7e47ba84217
|
[
"MIT"
] | 127
|
2017-10-27T15:20:01.000Z
|
2022-03-07T04:09:15.000Z
|
demo/demo/accounts/urls.py
|
caravancoop/rest-auth-toolkit
|
425bf293987f7128d9538f27a5eca7e47ba84217
|
[
"MIT"
] | 2
|
2018-01-03T16:22:51.000Z
|
2019-12-23T21:51:54.000Z
|
from django.urls import path
from .views import ProfileView
urlpatterns = [
path('', ProfileView.as_view(), name='user-profile'),
]
| 15.444444
| 57
| 0.705036
| 17
| 139
| 5.705882
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.158273
| 139
| 8
| 58
| 17.375
| 0.82906
| 0
| 0
| 0
| 0
| 0
| 0.086331
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 3
|
7c872854a67dcbee173ef18681a5116e43865d52
| 53,677
|
py
|
Python
|
automl/google/cloud/automl_v1beta1/gapic/auto_ml_client.py
|
erikwebb/google-cloud-python
|
288a878e9a07239015c78a193eca1cc15e926127
|
[
"Apache-2.0"
] | 1
|
2019-04-16T08:13:06.000Z
|
2019-04-16T08:13:06.000Z
|
automl/google/cloud/automl_v1beta1/gapic/auto_ml_client.py
|
erikwebb/google-cloud-python
|
288a878e9a07239015c78a193eca1cc15e926127
|
[
"Apache-2.0"
] | null | null | null |
automl/google/cloud/automl_v1beta1/gapic/auto_ml_client.py
|
erikwebb/google-cloud-python
|
288a878e9a07239015c78a193eca1cc15e926127
|
[
"Apache-2.0"
] | 1
|
2020-11-15T11:44:36.000Z
|
2020-11-15T11:44:36.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.automl.v1beta1 AutoMl API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from google.cloud.automl_v1beta1.gapic import auto_ml_client_config
from google.cloud.automl_v1beta1.gapic import enums
from google.cloud.automl_v1beta1.gapic.transports import auto_ml_grpc_transport
from google.cloud.automl_v1beta1.proto import data_items_pb2
from google.cloud.automl_v1beta1.proto import dataset_pb2
from google.cloud.automl_v1beta1.proto import io_pb2
from google.cloud.automl_v1beta1.proto import model_evaluation_pb2
from google.cloud.automl_v1beta1.proto import model_pb2
from google.cloud.automl_v1beta1.proto import operations_pb2 as proto_operations_pb2
from google.cloud.automl_v1beta1.proto import prediction_service_pb2
from google.cloud.automl_v1beta1.proto import prediction_service_pb2_grpc
from google.cloud.automl_v1beta1.proto import service_pb2
from google.cloud.automl_v1beta1.proto import service_pb2_grpc
from google.longrunning import operations_pb2 as longrunning_operations_pb2
from google.protobuf import empty_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl").version
class AutoMlClient(object):
"""
AutoML Server API.
The resource names are assigned by the server. The server never reuses
names that it has created after the resources with those names are
deleted.
An ID of a resource is the last element of the item's resource name. For
``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``,
then the id for the item is ``{dataset_id}``.
"""
SERVICE_ADDRESS = "automl.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.automl.v1beta1.AutoMl"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AutoMlClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def location_path(cls, project, location):
"""Return a fully-qualified location string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}",
project=project,
location=location,
)
@classmethod
def dataset_path(cls, project, location, dataset):
"""Return a fully-qualified dataset string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/datasets/{dataset}",
project=project,
location=location,
dataset=dataset,
)
@classmethod
def model_path(cls, project, location, model):
"""Return a fully-qualified model string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/models/{model}",
project=project,
location=location,
model=model,
)
@classmethod
def model_evaluation_path(cls, project, location, model, model_evaluation):
"""Return a fully-qualified model_evaluation string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}",
project=project,
location=location,
model=model,
model_evaluation=model_evaluation,
)
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
):
"""Constructor.
Args:
transport (Union[~.AutoMlGrpcTransport,
Callable[[~.Credentials, type], ~.AutoMlGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = auto_ml_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=auto_ml_grpc_transport.AutoMlGrpcTransport,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = auto_ml_grpc_transport.AutoMlGrpcTransport(
address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def create_dataset(
self,
parent,
dataset,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a dataset.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # TODO: Initialize `dataset`:
>>> dataset = {}
>>>
>>> response = client.create_dataset(parent, dataset)
Args:
parent (str): The resource name of the project to create the dataset for.
dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): The dataset to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.automl_v1beta1.types.Dataset`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_dataset" not in self._inner_api_calls:
self._inner_api_calls[
"create_dataset"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_dataset,
default_retry=self._method_configs["CreateDataset"].retry,
default_timeout=self._method_configs["CreateDataset"].timeout,
client_info=self._client_info,
)
request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset)
return self._inner_api_calls["create_dataset"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def get_dataset(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets a dataset.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]')
>>>
>>> response = client.get_dataset(name)
Args:
name (str): The resource name of the dataset to retrieve.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_dataset" not in self._inner_api_calls:
self._inner_api_calls[
"get_dataset"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_dataset,
default_retry=self._method_configs["GetDataset"].retry,
default_timeout=self._method_configs["GetDataset"].timeout,
client_info=self._client_info,
)
request = service_pb2.GetDatasetRequest(name=name)
return self._inner_api_calls["get_dataset"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_datasets(
self,
parent,
filter_=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists datasets in a project.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # Iterate over all results
>>> for element in client.list_datasets(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_datasets(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): The resource name of the project from which to list datasets.
filter_ (str): An expression for filtering the results of the request.
- ``dataset_metadata`` - for existence of the case.
An example of using the filter is:
- ``translation_dataset_metadata:*`` --> The dataset has
translation\_dataset\_metadata.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.automl_v1beta1.types.Dataset` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_datasets" not in self._inner_api_calls:
self._inner_api_calls[
"list_datasets"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_datasets,
default_retry=self._method_configs["ListDatasets"].retry,
default_timeout=self._method_configs["ListDatasets"].timeout,
client_info=self._client_info,
)
request = service_pb2.ListDatasetsRequest(
parent=parent, filter=filter_, page_size=page_size
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_datasets"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="datasets",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
def delete_dataset(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes a dataset and all of its contents. Returns empty response in the
``response`` field when it completes, and ``delete_details`` in the
``metadata`` field.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]')
>>>
>>> response = client.delete_dataset(name)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
name (str): The resource name of the dataset to delete.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_dataset" not in self._inner_api_calls:
self._inner_api_calls[
"delete_dataset"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_dataset,
default_retry=self._method_configs["DeleteDataset"].retry,
default_timeout=self._method_configs["DeleteDataset"].timeout,
client_info=self._client_info,
)
request = service_pb2.DeleteDatasetRequest(name=name)
operation = self._inner_api_calls["delete_dataset"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=proto_operations_pb2.OperationMetadata,
)
def import_data(
self,
name,
input_config,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Imports data into a dataset. Returns an empty response in the
``response`` field when it completes.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]')
>>>
>>> # TODO: Initialize `input_config`:
>>> input_config = {}
>>>
>>> response = client.import_data(name, input_config)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
name (str): Required. Dataset name. Dataset must already exist. All imported
annotations and examples will be added.
input_config (Union[dict, ~google.cloud.automl_v1beta1.types.InputConfig]): Required. The desired input location.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.automl_v1beta1.types.InputConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "import_data" not in self._inner_api_calls:
self._inner_api_calls[
"import_data"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.import_data,
default_retry=self._method_configs["ImportData"].retry,
default_timeout=self._method_configs["ImportData"].timeout,
client_info=self._client_info,
)
request = service_pb2.ImportDataRequest(name=name, input_config=input_config)
operation = self._inner_api_calls["import_data"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=proto_operations_pb2.OperationMetadata,
)
def export_data(
self,
name,
output_config,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Exports dataset's data to a Google Cloud Storage bucket. Returns an
empty response in the ``response`` field when it completes.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]')
>>>
>>> # TODO: Initialize `output_config`:
>>> output_config = {}
>>>
>>> response = client.export_data(name, output_config)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
name (str): Required. The resource name of the dataset.
output_config (Union[dict, ~google.cloud.automl_v1beta1.types.OutputConfig]): Required. The desired output location.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.automl_v1beta1.types.OutputConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "export_data" not in self._inner_api_calls:
self._inner_api_calls[
"export_data"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.export_data,
default_retry=self._method_configs["ExportData"].retry,
default_timeout=self._method_configs["ExportData"].timeout,
client_info=self._client_info,
)
request = service_pb2.ExportDataRequest(name=name, output_config=output_config)
operation = self._inner_api_calls["export_data"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=proto_operations_pb2.OperationMetadata,
)
def create_model(
self,
parent,
model,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a model. Returns a Model in the ``response`` field when it
completes. When you create a model, several model evaluations are
created for it: a global evaluation, and one evaluation for each
annotation spec.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # TODO: Initialize `model`:
>>> model = {}
>>>
>>> response = client.create_model(parent, model)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Resource name of the parent project where the model is being created.
model (Union[dict, ~google.cloud.automl_v1beta1.types.Model]): The model to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.automl_v1beta1.types.Model`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_model" not in self._inner_api_calls:
self._inner_api_calls[
"create_model"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_model,
default_retry=self._method_configs["CreateModel"].retry,
default_timeout=self._method_configs["CreateModel"].timeout,
client_info=self._client_info,
)
request = service_pb2.CreateModelRequest(parent=parent, model=model)
operation = self._inner_api_calls["create_model"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
model_pb2.Model,
metadata_type=proto_operations_pb2.OperationMetadata,
)
def get_model(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets a model.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
>>>
>>> response = client.get_model(name)
Args:
name (str): Resource name of the model.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Model` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_model" not in self._inner_api_calls:
self._inner_api_calls[
"get_model"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_model,
default_retry=self._method_configs["GetModel"].retry,
default_timeout=self._method_configs["GetModel"].timeout,
client_info=self._client_info,
)
request = service_pb2.GetModelRequest(name=name)
return self._inner_api_calls["get_model"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_models(
self,
parent,
filter_=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists models.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # Iterate over all results
>>> for element in client.list_models(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_models(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Resource name of the project, from which to list the models.
filter_ (str): An expression for filtering the results of the request.
- ``model_metadata`` - for existence of the case.
- ``dataset_id`` - for = or !=.
Some examples of using the filter are:
- ``image_classification_model_metadata:*`` --> The model has
image\_classification\_model\_metadata.
- ``dataset_id=5`` --> The model was created from a sibling dataset
with ID 5.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.automl_v1beta1.types.Model` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_models" not in self._inner_api_calls:
self._inner_api_calls[
"list_models"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_models,
default_retry=self._method_configs["ListModels"].retry,
default_timeout=self._method_configs["ListModels"].timeout,
client_info=self._client_info,
)
request = service_pb2.ListModelsRequest(
parent=parent, filter=filter_, page_size=page_size
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_models"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="model",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
def delete_model(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes a model. If a model is already deployed, this only deletes the
model in AutoML BE, and does not change the status of the deployed model
in the production environment. Returns ``google.protobuf.Empty`` in the
``response`` field when it completes, and ``delete_details`` in the
``metadata`` field.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
>>>
>>> response = client.delete_model(name)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
name (str): Resource name of the model being deleted.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_model" not in self._inner_api_calls:
self._inner_api_calls[
"delete_model"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_model,
default_retry=self._method_configs["DeleteModel"].retry,
default_timeout=self._method_configs["DeleteModel"].timeout,
client_info=self._client_info,
)
request = service_pb2.DeleteModelRequest(name=name)
operation = self._inner_api_calls["delete_model"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=proto_operations_pb2.OperationMetadata,
)
def deploy_model(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deploys model. Returns a ``DeployModelResponse`` in the ``response``
field when it completes.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
>>>
>>> response = client.deploy_model(name)
Args:
name (str): Resource name of the model to deploy.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "deploy_model" not in self._inner_api_calls:
self._inner_api_calls[
"deploy_model"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.deploy_model,
default_retry=self._method_configs["DeployModel"].retry,
default_timeout=self._method_configs["DeployModel"].timeout,
client_info=self._client_info,
)
request = service_pb2.DeployModelRequest(name=name)
return self._inner_api_calls["deploy_model"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def undeploy_model(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Undeploys model. Returns an ``UndeployModelResponse`` in the
``response`` field when it completes.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
>>>
>>> response = client.undeploy_model(name)
Args:
name (str): Resource name of the model to undeploy.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "undeploy_model" not in self._inner_api_calls:
self._inner_api_calls[
"undeploy_model"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.undeploy_model,
default_retry=self._method_configs["UndeployModel"].retry,
default_timeout=self._method_configs["UndeployModel"].timeout,
client_info=self._client_info,
)
request = service_pb2.UndeployModelRequest(name=name)
return self._inner_api_calls["undeploy_model"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def get_model_evaluation(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets a model evaluation.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.model_evaluation_path('[PROJECT]', '[LOCATION]', '[MODEL]', '[MODEL_EVALUATION]')
>>>
>>> response = client.get_model_evaluation(name)
Args:
name (str): Resource name for the model evaluation.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_model_evaluation" not in self._inner_api_calls:
self._inner_api_calls[
"get_model_evaluation"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_model_evaluation,
default_retry=self._method_configs["GetModelEvaluation"].retry,
default_timeout=self._method_configs["GetModelEvaluation"].timeout,
client_info=self._client_info,
)
request = service_pb2.GetModelEvaluationRequest(name=name)
return self._inner_api_calls["get_model_evaluation"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_model_evaluations(
self,
parent,
filter_=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists model evaluations.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> parent = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
>>>
>>> # Iterate over all results
>>> for element in client.list_model_evaluations(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_model_evaluations(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Resource name of the model to list the model evaluations for.
If modelId is set as "-", this will list model evaluations from across all
models of the parent location.
filter_ (str): An expression for filtering the results of the request.
- ``annotation_spec_id`` - for =, != or existence. See example below
for the last.
Some examples of using the filter are:
- ``annotation_spec_id!=4`` --> The model evaluation was done for
annotation spec with ID different than 4.
- ``NOT annotation_spec_id:*`` --> The model evaluation was done for
aggregate of all annotation specs.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_model_evaluations" not in self._inner_api_calls:
self._inner_api_calls[
"list_model_evaluations"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_model_evaluations,
default_retry=self._method_configs["ListModelEvaluations"].retry,
default_timeout=self._method_configs["ListModelEvaluations"].timeout,
client_info=self._client_info,
)
request = service_pb2.ListModelEvaluationsRequest(
parent=parent, filter=filter_, page_size=page_size
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_model_evaluations"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="model_evaluation",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
| 42.198899
| 128
| 0.596512
| 5,778
| 53,677
| 5.388543
| 0.079439
| 0.03093
| 0.044676
| 0.02775
| 0.772571
| 0.754039
| 0.702425
| 0.681516
| 0.67005
| 0.623671
| 0
| 0.006476
| 0.321031
| 53,677
| 1,271
| 129
| 42.232101
| 0.847825
| 0.53535
| 0
| 0.488518
| 0
| 0
| 0.076699
| 0.018372
| 0
| 0
| 0
| 0.003147
| 0
| 1
| 0.041754
| false
| 0
| 0.075157
| 0
| 0.164927
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
7cde0e155e222f52e34bae521e25a21b28caf52a
| 550
|
py
|
Python
|
Code/extract_method3.py
|
AbdullahNoori/CS-2.1-Trees-Sorting
|
59ba182d60abe6171a3d7d64981f79ee192de3bb
|
[
"MIT"
] | null | null | null |
Code/extract_method3.py
|
AbdullahNoori/CS-2.1-Trees-Sorting
|
59ba182d60abe6171a3d7d64981f79ee192de3bb
|
[
"MIT"
] | null | null | null |
Code/extract_method3.py
|
AbdullahNoori/CS-2.1-Trees-Sorting
|
59ba182d60abe6171a3d7d64981f79ee192de3bb
|
[
"MIT"
] | null | null | null |
# Written by Kamran Bigdely
# Example for Compose Methods: Extract Method.
import math
def get_distance(xc1=5, xc2=7.25, yc1=22, yc2=-4.84):
# Calculate the distance between the two circle
return math.sqrt((xc1-xc2)**2 + (yc1 - yc2)**2)
print('distance', get_distance())
# *** somewhere else in your program ***
def get_length(xa=-50, ya=99, xb=.67, yb=.26):
# calcualte the length of vector AB vector which is a vector between A and B points.
return math.sqrt((xa-xb)*(xa-xb) + (ya-yb)*(ya-yb))
print('length', get_length())
| 26.190476
| 88
| 0.670909
| 92
| 550
| 3.967391
| 0.630435
| 0.032877
| 0.076712
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059341
| 0.172727
| 550
| 20
| 89
| 27.5
| 0.742857
| 0.432727
| 0
| 0
| 0
| 0
| 0.045752
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0.285714
| 0.714286
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 3
|
7ce3f2fa9ab64a2e056aae0886c6829b2b5285e6
| 7,722
|
py
|
Python
|
src/quart/local.py
|
Dunkledore/quart
|
803c8678b083895f4ece35fccb6aca56e189ee0a
|
[
"MIT"
] | 3
|
2020-03-31T10:36:31.000Z
|
2020-04-23T12:01:10.000Z
|
venv/lib/python3.9/site-packages/quart/local.py
|
ryanwwest/kademlia
|
e1e5b84db0a7710cf372663325041850802d55f1
|
[
"MIT"
] | 6
|
2020-09-05T01:40:23.000Z
|
2022-03-12T00:40:58.000Z
|
src/quart/local.py
|
ccns1/ccns11
|
d6edfac34fbee06fe974cda007d24a088d31ad30
|
[
"MIT"
] | 1
|
2020-09-05T00:19:03.000Z
|
2020-09-05T00:19:03.000Z
|
from __future__ import annotations
import asyncio
import copy
from contextvars import ContextVar # noqa # contextvars not understood as stdlib
from typing import Any # noqa # contextvars not understood as stdlib
from typing import Callable, Dict, Optional
class TaskLocal:
"""An object local to the current task."""
__slots__ = ("_storage",)
def __init__(self) -> None:
# Note as __setattr__ is overidden below, use the object __setattr__
object.__setattr__(self, "_storage", ContextVar("storage"))
def __getattr__(self, name: str) -> Any:
values = self._storage.get({})
try:
return values[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name: str, value: Any) -> None:
values = self._storage.get({})
values[name] = value
self._storage.set(values)
def __delattr__(self, name: str) -> None:
values = self._storage.get({})
try:
del values[name]
self._storage.set(values)
except KeyError:
raise AttributeError(name)
@staticmethod
def _task_identity() -> int:
loop = asyncio.get_event_loop()
if loop.is_running():
task = asyncio.current_task()
task_id = id(task)
return task_id
else:
return 0
class LocalStack:
def __init__(self) -> None:
self._task_local = TaskLocal()
def push(self, value: Any) -> None:
stack = getattr(self._task_local, "stack", None)
if stack is None:
self._task_local.stack = stack = []
stack.append(value)
def pop(self) -> Any:
stack = getattr(self._task_local, "stack", None)
if stack is None or stack == []:
return None
else:
return stack.pop()
@property
def top(self) -> Any:
try:
return self._task_local.stack[-1]
except (AttributeError, IndexError):
return None
class LocalProxy:
"""Proxy to a task local object."""
__slots__ = ("__dict__", "__local", "__wrapped__")
def __init__(self, local: Callable, name: Optional[str] = None) -> None:
# Note as __setattr__ is overidden below, use the object __setattr__
object.__setattr__(self, "__LocalProxy_local", local)
object.__setattr__(self, "__wrapped__", local)
object.__setattr__(self, "__name__", name)
def _get_current_object(self) -> Any:
return object.__getattribute__(self, "__LocalProxy_local")()
@property
def __dict__(self) -> Dict[str, Any]: # type: ignore
try:
return self._get_current_object().__dict__
except RuntimeError:
raise AttributeError("__dict__")
def __repr__(self) -> str:
try:
obj = self._get_current_object()
except RuntimeError:
return "<%s unbound>" % self.__class__.__name__
return repr(obj)
def __bool__(self) -> bool:
try:
return bool(self._get_current_object())
except RuntimeError:
return False
def __dir__(self) -> Any:
try:
return dir(self._get_current_object())
except RuntimeError:
return []
def __getattr__(self, name: Any) -> Any:
if name == "__members__":
return dir(self._get_current_object())
return getattr(self._get_current_object(), name)
def __setitem__(self, key: Any, value: Any) -> Any:
self._get_current_object()[key] = value
def __delitem__(self, key: Any) -> Any:
del self._get_current_object()[key]
async def __aiter__(self) -> Any:
async for x in self._get_current_object():
yield x
__setattr__ = lambda x, n, v: setattr( # noqa: E731, E501
x._get_current_object(), n, v # type: ignore
)
__delattr__ = lambda x, n: delattr(x._get_current_object(), n) # type: ignore # noqa: E731
__str__ = lambda x: str(x._get_current_object()) # type: ignore # noqa: E731
__lt__ = lambda x, o: x._get_current_object() < o # noqa: E731
__le__ = lambda x, o: x._get_current_object() <= o # noqa: E731
__eq__ = lambda x, o: x._get_current_object() == o # type: ignore # noqa: E731
__ne__ = lambda x, o: x._get_current_object() != o # type: ignore # noqa: E731
__gt__ = lambda x, o: x._get_current_object() > o # noqa: E731
__ge__ = lambda x, o: x._get_current_object() >= o # noqa: E731
__hash__ = lambda x: hash(x._get_current_object()) # type: ignore # noqa: E731
__call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw) # noqa: E731
__len__ = lambda x: len(x._get_current_object()) # noqa: E731
__getitem__ = lambda x, i: x._get_current_object()[i] # noqa: E731
__iter__ = lambda x: iter(x._get_current_object()) # noqa: E731
__contains__ = lambda x, i: i in x._get_current_object() # noqa: E731
__add__ = lambda x, o: x._get_current_object() + o # noqa: E731
__sub__ = lambda x, o: x._get_current_object() - o # noqa: E731
__mul__ = lambda x, o: x._get_current_object() * o # noqa: E731
__floordiv__ = lambda x, o: x._get_current_object() // o # noqa: E731
__mod__ = lambda x, o: x._get_current_object() % o # noqa: E731
__divmod__ = lambda x, o: x._get_current_object().__divmod__(o) # noqa: E731
__pow__ = lambda x, o: x._get_current_object() ** o # noqa: E731
__lshift__ = lambda x, o: x._get_current_object() << o # noqa: E731
__rshift__ = lambda x, o: x._get_current_object() >> o # noqa: E731
__and__ = lambda x, o: x._get_current_object() & o # noqa: E731
__xor__ = lambda x, o: x._get_current_object() ^ o # noqa: E731
__or__ = lambda x, o: x._get_current_object() | o # noqa: E731
__div__ = lambda x, o: x._get_current_object().__div__(o) # noqa: E731
__truediv__ = lambda x, o: x._get_current_object().__truediv__(o) # noqa: E731
__neg__ = lambda x: -(x._get_current_object()) # noqa: E731
__pos__ = lambda x: +(x._get_current_object()) # noqa: E731
__abs__ = lambda x: abs(x._get_current_object()) # noqa: E731
__invert__ = lambda x: ~(x._get_current_object()) # noqa: E731
__complex__ = lambda x: complex(x._get_current_object()) # noqa: E731
__int__ = lambda x: int(x._get_current_object()) # noqa: E731
__float__ = lambda x: float(x._get_current_object()) # noqa: E731
__oct__ = lambda x: oct(x._get_current_object()) # noqa: E731
__hex__ = lambda x: hex(x._get_current_object()) # noqa: E731
__index__ = lambda x: x._get_current_object().__index__() # noqa: E731
__coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o) # noqa: E731
__enter__ = lambda x: x._get_current_object().__enter__() # noqa: E731
__exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw) # noqa: E731
__radd__ = lambda x, o: o + x._get_current_object() # noqa: E731
__rsub__ = lambda x, o: o - x._get_current_object() # noqa: E731
__rmul__ = lambda x, o: o * x._get_current_object() # noqa: E731
__rdiv__ = lambda x, o: o / x._get_current_object() # noqa: E731
__rtruediv__ = __rdiv__
__rfloordiv__ = lambda x, o: o // x._get_current_object() # noqa: E731
__rmod__ = lambda x, o: o % x._get_current_object() # noqa: E731
__rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o) # noqa: E731
__copy__ = lambda x: copy.copy(x._get_current_object()) # noqa: E731
__deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(), memo) # noqa: E731
__await__ = lambda x: x._get_current_object().__await__() # noqa: E731
| 41.740541
| 95
| 0.630536
| 1,009
| 7,722
| 4.22002
| 0.153617
| 0.145608
| 0.232973
| 0.207609
| 0.517849
| 0.463833
| 0.379286
| 0.318929
| 0.267262
| 0.242837
| 0
| 0.027759
| 0.248899
| 7,722
| 184
| 96
| 41.967391
| 0.706379
| 0.12445
| 0
| 0.210526
| 0
| 0
| 0.021687
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.118421
| false
| 0
| 0.039474
| 0.006579
| 0.651316
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 3
|
7cfc7ea83eddae1dd85543f912c4d06746387bfa
| 492
|
py
|
Python
|
30-39/35. final_class/final_class.py
|
dcragusa/PythonMorsels
|
5f75b51a68769036e4004e9ccdada6b220124ab6
|
[
"MIT"
] | 1
|
2021-11-30T05:03:24.000Z
|
2021-11-30T05:03:24.000Z
|
30-39/35. final_class/final_class.py
|
dcragusa/PythonMorsels
|
5f75b51a68769036e4004e9ccdada6b220124ab6
|
[
"MIT"
] | null | null | null |
30-39/35. final_class/final_class.py
|
dcragusa/PythonMorsels
|
5f75b51a68769036e4004e9ccdada6b220124ab6
|
[
"MIT"
] | 2
|
2021-04-18T05:26:43.000Z
|
2021-11-28T18:46:43.000Z
|
class Unsubclassable:
def __init_subclass__(cls, **kwargs):
raise TypeError('Unacceptable base type')
def prevent_subclassing():
raise TypeError('Unacceptable base type')
def final_class(cls):
setattr(cls, '__init_subclass__', prevent_subclassing)
return cls
class UnsubclassableType(type):
def __new__(cls, name, bases, dct):
c = super().__new__(cls, name, bases, dct)
setattr(c, '__init_subclass__', prevent_subclassing)
return c
| 22.363636
| 60
| 0.693089
| 56
| 492
| 5.607143
| 0.428571
| 0.11465
| 0.165605
| 0.191083
| 0.579618
| 0.235669
| 0
| 0
| 0
| 0
| 0
| 0
| 0.203252
| 492
| 21
| 61
| 23.428571
| 0.80102
| 0
| 0
| 0.153846
| 0
| 0
| 0.159184
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| false
| 0
| 0
| 0
| 0.615385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 3
|
7cfd39821c7ad2ac471f6e189d3999d3560e833a
| 259
|
py
|
Python
|
users/views.py
|
AnvarKhan/django-python
|
bd54e44deb290f43ea5982c2ca9f37cd6c946879
|
[
"Apache-2.0"
] | 1
|
2022-02-05T15:07:25.000Z
|
2022-02-05T15:07:25.000Z
|
users/views.py
|
AnvarKhan/django-python
|
bd54e44deb290f43ea5982c2ca9f37cd6c946879
|
[
"Apache-2.0"
] | null | null | null |
users/views.py
|
AnvarKhan/django-python
|
bd54e44deb290f43ea5982c2ca9f37cd6c946879
|
[
"Apache-2.0"
] | null | null | null |
from django.views.generic import CreateView
from django.urls import reverse_lazy
from .forms import CustomUserCreationForm
class SignUpView(CreateView):
form_class = CustomUserCreationForm
success_url = reverse_lazy('login')
template_name = 'signup.html'
| 28.777778
| 43
| 0.830116
| 31
| 259
| 6.774194
| 0.677419
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104247
| 259
| 8
| 44
| 32.375
| 0.905172
| 0
| 0
| 0
| 0
| 0
| 0.061776
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.428571
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 3
|
6b0ff469900ccc9c854a18661fc7b7737ba3ac79
| 98
|
py
|
Python
|
pi_control/server_stats/apps.py
|
mhozza/pi-control
|
0dce821b4702519fedc3950270ee0091ed484ef6
|
[
"MIT"
] | null | null | null |
pi_control/server_stats/apps.py
|
mhozza/pi-control
|
0dce821b4702519fedc3950270ee0091ed484ef6
|
[
"MIT"
] | 10
|
2020-03-14T21:04:36.000Z
|
2022-03-03T21:51:07.000Z
|
pi_control/server_stats/apps.py
|
mhozza/pi-control
|
0dce821b4702519fedc3950270ee0091ed484ef6
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class ServerStatsConfig(AppConfig):
name = "server_stats"
| 16.333333
| 35
| 0.77551
| 11
| 98
| 6.818182
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153061
| 98
| 5
| 36
| 19.6
| 0.903614
| 0
| 0
| 0
| 0
| 0
| 0.122449
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 3
|
6b1e268c000917add1c1379d6ddcd9ab23f2b03b
| 245
|
py
|
Python
|
src/digibujogens/__main__.py
|
roaet/digibujogens
|
ab154edda69c091595902dd8b2e3fd273b2e7105
|
[
"MIT"
] | null | null | null |
src/digibujogens/__main__.py
|
roaet/digibujogens
|
ab154edda69c091595902dd8b2e3fd273b2e7105
|
[
"MIT"
] | null | null | null |
src/digibujogens/__main__.py
|
roaet/digibujogens
|
ab154edda69c091595902dd8b2e3fd273b2e7105
|
[
"MIT"
] | null | null | null |
""" Main application entry point.
python -m digibujogens ...
"""
def main():
""" Execute the application.
"""
raise NotImplementedError
# Make the script executable.
if __name__ == "__main__":
raise SystemExit(main())
| 14.411765
| 33
| 0.636735
| 24
| 245
| 6.166667
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.232653
| 245
| 16
| 34
| 15.3125
| 0.787234
| 0.481633
| 0
| 0
| 0
| 0
| 0.074074
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
6b3ef77f1a082e51763d4a446e010e19a72af147
| 101
|
py
|
Python
|
docs/source/tutorial/code/read_csv.py
|
HanSooLim/DIL-Project
|
069fa7e35a2e1edfff30dc2540d9b87f5db95dde
|
[
"MIT",
"BSD-3-Clause"
] | 2
|
2021-10-16T15:08:05.000Z
|
2021-10-16T15:59:57.000Z
|
docs/source/tutorial/code/read_csv.py
|
HanSooLim/DIL-Project
|
069fa7e35a2e1edfff30dc2540d9b87f5db95dde
|
[
"MIT",
"BSD-3-Clause"
] | 8
|
2021-10-21T04:48:12.000Z
|
2021-11-07T03:09:25.000Z
|
docs/source/tutorial/code/read_csv.py
|
HanSooLim/DIL-Project
|
069fa7e35a2e1edfff30dc2540d9b87f5db95dde
|
[
"MIT",
"BSD-3-Clause"
] | 3
|
2021-05-02T13:39:14.000Z
|
2021-05-31T14:05:56.000Z
|
import pandas
datas = pandas.read_csv("../../Sample/example_dataset.csv", index_col=0)
print(datas)
| 20.2
| 72
| 0.742574
| 15
| 101
| 4.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010753
| 0.079208
| 101
| 4
| 73
| 25.25
| 0.763441
| 0
| 0
| 0
| 0
| 0
| 0.316832
| 0.316832
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 3
|
866300a7100ec622abdb52b3c5fac82349d29555
| 60
|
py
|
Python
|
examples/web/handlers.py
|
nicoddemus/aioworkers
|
4ab85064844dc28141833d1348989d8c891f3d7d
|
[
"Apache-2.0"
] | 45
|
2017-04-26T23:50:30.000Z
|
2021-12-29T03:21:06.000Z
|
examples/web/handlers.py
|
nicoddemus/aioworkers
|
4ab85064844dc28141833d1348989d8c891f3d7d
|
[
"Apache-2.0"
] | 63
|
2017-08-01T10:35:45.000Z
|
2022-03-01T18:07:49.000Z
|
examples/web/handlers.py
|
nicoddemus/aioworkers
|
4ab85064844dc28141833d1348989d8c891f3d7d
|
[
"Apache-2.0"
] | 6
|
2017-10-19T08:21:23.000Z
|
2021-12-29T03:25:32.000Z
|
async def handler(context):
return await context.data
| 12
| 29
| 0.733333
| 8
| 60
| 5.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 60
| 4
| 30
| 15
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
86789c8feaa8d10751a8b27ad6e7fc323ebc39ff
| 956
|
py
|
Python
|
redmine/__init__.py
|
hugoseabra/redmine-task-generator
|
b5ce1764f1c7588a7c82b25f7dd4bf07d1c105cf
|
[
"MIT"
] | null | null | null |
redmine/__init__.py
|
hugoseabra/redmine-task-generator
|
b5ce1764f1c7588a7c82b25f7dd4bf07d1c105cf
|
[
"MIT"
] | 4
|
2021-03-30T14:04:56.000Z
|
2021-06-10T19:40:52.000Z
|
redmine/__init__.py
|
hugoseabra/redmine-task-generator
|
b5ce1764f1c7588a7c82b25f7dd4bf07d1c105cf
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from redminelib import Redmine as DefaultRedmine
from .validator import RedmineInstanceValidator
class Redmine(DefaultRedmine):
def __init__(self, url=None, key=None):
url = url or settings.REDMINE_BASE_URL
key = key or settings.REDMINE_API_KEY
super().__init__(url=url, key=key)
self.validator = RedmineInstanceValidator(client=self)
@property
def score_field(self):
return self.validator.score_field
def instance_errors(self):
errors = list()
if self.validator.track_errors:
errors += self.validator.track_errors
if self.validator.score_field_errors:
errors += self.validator.score_field_errors
return errors
def instance_valid(self) -> bool:
return self.validator.instance_valid()
def project_valid(self, project_id) -> bool:
return self.validator.project_valid(project_id)
| 28.117647
| 62
| 0.696653
| 114
| 956
| 5.605263
| 0.315789
| 0.162754
| 0.089202
| 0.107981
| 0.090767
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.225941
| 956
| 33
| 63
| 28.969697
| 0.863514
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.217391
| false
| 0
| 0.130435
| 0.130435
| 0.565217
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 3
|
867af3eaf92e62e8468d18b191fba31f6c76639c
| 2,836
|
py
|
Python
|
utils/dsp.py
|
huchenxucs/WaveRNN
|
6d5805d54b8a3db99aa190083b550236f2c15d28
|
[
"MIT"
] | null | null | null |
utils/dsp.py
|
huchenxucs/WaveRNN
|
6d5805d54b8a3db99aa190083b550236f2c15d28
|
[
"MIT"
] | null | null | null |
utils/dsp.py
|
huchenxucs/WaveRNN
|
6d5805d54b8a3db99aa190083b550236f2c15d28
|
[
"MIT"
] | null | null | null |
import math
import numpy as np
import librosa
from utils import hparams as hp
from scipy.signal import lfilter
import soundfile as sf
def label_2_float(x, bits):
return 2 * x / (2**bits - 1.) - 1.
def float_2_label(x, bits):
assert abs(x).max() <= 1.0
x = (x + 1.) * (2**bits - 1) / 2
return x.clip(0, 2**bits - 1)
def load_wav(path):
return librosa.load(path, sr=hp.sample_rate)[0]
def save_wav(x, path):
# librosa.output.write_wav(path, x.astype(np.float32), sr=hp.sample_rate)
sf.write(path, x.astype(np.float32), samplerate=hp.sample_rate)
def split_signal(x):
unsigned = x + 2**15
coarse = unsigned // 256
fine = unsigned % 256
return coarse, fine
def combine_signal(coarse, fine):
return coarse * 256 + fine - 2**15
def encode_16bits(x):
return np.clip(x * 2**15, -2**15, 2**15 - 1).astype(np.int16)
def linear_to_mel(spectrogram):
return librosa.feature.melspectrogram(
S=spectrogram, sr=hp.sample_rate, n_fft=hp.n_fft, n_mels=hp.num_mels, fmin=hp.fmin)
'''
def build_mel_basis():
return librosa.filters.mel(hp.sample_rate, hp.n_fft, n_mels=hp.num_mels, fmin=hp.fmin)
'''
def normalize(S):
return np.clip((S - hp.min_level_db) / -hp.min_level_db, 0, 1)
def denormalize(S):
return (np.clip(S, 0, 1) * -hp.min_level_db) + hp.min_level_db
def amp_to_db(x):
return 20 * np.log10(np.maximum(1e-5, x))
def db_to_amp(x):
return np.power(10.0, x * 0.05)
def spectrogram(y):
D = stft(y)
S = amp_to_db(np.abs(D)) - hp.ref_level_db
return normalize(S)
def melspectrogram(y):
D = stft(y)
S = amp_to_db(linear_to_mel(np.abs(D)))
return normalize(S)
def stft(y):
return librosa.stft(
y=y,
n_fft=hp.n_fft, hop_length=hp.hop_length, win_length=hp.win_length)
def pre_emphasis(x):
return lfilter([1, -hp.preemphasis], [1], x)
def de_emphasis(x):
return lfilter([1], [1, -hp.preemphasis], x)
def encode_mu_law(x, mu):
mu = mu - 1
fx = np.sign(x) * np.log(1 + mu * np.abs(x)) / np.log(1 + mu)
return np.floor((fx + 1) / 2 * mu + 0.5)
def decode_mu_law(y, mu, from_labels=True):
# TODO: get rid of log2 - makes no sense
if from_labels: y = label_2_float(y, math.log2(mu))
mu = mu - 1
x = np.sign(y) / mu * ((1 + mu) ** np.abs(y) - 1)
return x
def reconstruct_waveform(mel, n_iter=32):
"""Uses Griffin-Lim phase reconstruction to convert from a normalized
mel spectrogram back into a waveform."""
denormalized = denormalize(mel)
amp_mel = db_to_amp(denormalized)
S = librosa.feature.inverse.mel_to_stft(
amp_mel, power=1, sr=hp.sample_rate,
n_fft=hp.n_fft, fmin=hp.fmin)
wav = librosa.core.griffinlim(
S, n_iter=n_iter,
hop_length=hp.hop_length, win_length=hp.win_length)
return wav
| 23.831933
| 91
| 0.645275
| 498
| 2,836
| 3.516064
| 0.253012
| 0.015991
| 0.041119
| 0.031982
| 0.23301
| 0.151913
| 0.151913
| 0.151913
| 0.107367
| 0.083381
| 0
| 0.037828
| 0.207687
| 2,836
| 118
| 92
| 24.033898
| 0.741433
| 0.076164
| 0
| 0.084507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008475
| 0.014085
| 1
| 0.28169
| false
| 0
| 0.084507
| 0.169014
| 0.633803
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 3
|
867bbaa4b747400e8e0dce95ef2502b3a1d6e3df
| 188
|
py
|
Python
|
app/helpers/geocode.py
|
Soumya117/finnazureflaskapp
|
794f82596a329ff1a2e4dc23d49903a0ef474f95
|
[
"MIT"
] | null | null | null |
app/helpers/geocode.py
|
Soumya117/finnazureflaskapp
|
794f82596a329ff1a2e4dc23d49903a0ef474f95
|
[
"MIT"
] | 2
|
2021-03-31T20:43:02.000Z
|
2021-12-13T20:13:40.000Z
|
app/helpers/geocode.py
|
Soumya117/finnparser
|
e89ff6e1a0c08b57a1b2f971d5f7bb888c2f4a05
|
[
"MIT"
] | null | null | null |
import googlemaps
gmaps = googlemaps.Client(key='google_key')
def get_markers(address):
geocode_result = gmaps.geocode(address)
return geocode_result[0]['geometry']['location']
| 20.888889
| 52
| 0.75
| 23
| 188
| 5.956522
| 0.695652
| 0.189781
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006061
| 0.12234
| 188
| 8
| 53
| 23.5
| 0.824242
| 0
| 0
| 0
| 0
| 0
| 0.138298
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 3
|
8687c290ea5275e332a8f9c623d8f42da0525b01
| 244
|
py
|
Python
|
mayan/apps/rest_api/exceptions.py
|
sophiawa/Mayan-EDMS
|
42f20576d0c690b645a60bf53c5169cda4264231
|
[
"Apache-2.0"
] | 1
|
2021-02-24T15:03:23.000Z
|
2021-02-24T15:03:23.000Z
|
mayan/apps/rest_api/exceptions.py
|
sophiawa/Mayan-EDMS
|
42f20576d0c690b645a60bf53c5169cda4264231
|
[
"Apache-2.0"
] | 10
|
2021-03-20T00:01:17.000Z
|
2022-03-12T00:48:43.000Z
|
mayan/apps/rest_api/exceptions.py
|
sophiawa/Mayan-EDMS
|
42f20576d0c690b645a60bf53c5169cda4264231
|
[
"Apache-2.0"
] | 1
|
2020-08-09T09:06:59.000Z
|
2020-08-09T09:06:59.000Z
|
class APIError(Exception):
"""
Base exception for the API app
"""
pass
class APIResourcePatternError(APIError):
"""
Raised when an app tries to override an existing URL regular expression
pattern
"""
pass
| 16.266667
| 75
| 0.647541
| 27
| 244
| 5.851852
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.278689
| 244
| 14
| 76
| 17.428571
| 0.897727
| 0.45082
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 3
|
868c6ea160dd2c056e7da123714e1987646a86cf
| 9,215
|
py
|
Python
|
ravem/tests/util_test.py
|
bpedersen2/indico-plugins-cern
|
c4f06d11d981c316fc8de2892758484deb58e2f5
|
[
"MIT"
] | null | null | null |
ravem/tests/util_test.py
|
bpedersen2/indico-plugins-cern
|
c4f06d11d981c316fc8de2892758484deb58e2f5
|
[
"MIT"
] | null | null | null |
ravem/tests/util_test.py
|
bpedersen2/indico-plugins-cern
|
c4f06d11d981c316fc8de2892758484deb58e2f5
|
[
"MIT"
] | null | null | null |
# This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2022 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.
from unittest.mock import MagicMock
import pytest
from requests.exceptions import HTTPError, Timeout
from indico.testing.util import extract_logs
from indico_ravem.plugin import RavemPlugin
from indico_ravem.util import has_access, ravem_api_call
@pytest.mark.usefixtures('db')
@pytest.mark.parametrize('method', ('get', 'post'))
def test_correct_http_method(mocker, method):
request = mocker.patch('indico_ravem.util.requests.request')
response = MagicMock()
response.json.return_value = {'result': 'test'}
response.raise_for_status.return_value = False
request.return_value = response
ravem_api_call('test_endpoint', method=method, param1='test1', param2='test2')
assert request.call_count == 1
assert request.call_args[0][0] == method
@pytest.mark.usefixtures('db')
def test_correct_auth_method(mocker):
request = mocker.patch('indico_ravem.util.requests.request')
response = MagicMock()
response.json.return_value = {'result': 'test'}
response.raise_for_status.return_value = False
request.return_value = response
token = 'foo'
RavemPlugin.settings.set('access_token', token)
ravem_api_call('test_endpoint', param1='test1', param2='test2')
assert request.call_count == 1
assert 'Authorization' in request.call_args[1]['headers']
assert request.call_args[1]['headers']['Authorization'] == 'Bearer %s' % token
@pytest.mark.usefixtures('db')
def test_accepts_json(mocker):
request = mocker.patch('indico_ravem.util.requests.request')
response = MagicMock()
response.json.return_value = {'result': 'test'}
response.raise_for_status.return_value = False
request.return_value = response
ravem_api_call('test_endpoint', param1='test1', param2='test2')
assert request.call_count == 1
assert request.call_args[1]['headers']['Accept'] == 'application/json'
@pytest.mark.usefixtures('db')
@pytest.mark.parametrize(('root_endpoint', 'endpoint', 'expected_url'), (
('https://ravem.test/', 'final_endpoint', 'https://ravem.test/final_endpoint'),
('https://ravem.test/api/', 'final_endpoint', 'https://ravem.test/api/final_endpoint'),
('https://ravem.test/api/v2/', 'final_endpoint', 'https://ravem.test/api/v2/final_endpoint'),
('https://ravem.test', './final_endpoint', 'https://ravem.test/final_endpoint'),
('https://ravem.test/api/', './final_endpoint', 'https://ravem.test/api/final_endpoint'),
('https://ravem.test/api/v2/', './final_endpoint', 'https://ravem.test/api/v2/final_endpoint'),
('https://ravem.test', 'sub/final_endpoint', 'https://ravem.test/sub/final_endpoint'),
('https://ravem.test/api/', 'sub/final_endpoint', 'https://ravem.test/api/sub/final_endpoint'),
('https://ravem.test/api/v2/', 'sub/final_endpoint', 'https://ravem.test/api/v2/sub/final_endpoint'),
('https://ravem.test', './sub/final_endpoint', 'https://ravem.test/sub/final_endpoint'),
('https://ravem.test/api/', './sub/final_endpoint', 'https://ravem.test/api/sub/final_endpoint'),
('https://ravem.test/api/v2/', './sub/final_endpoint', 'https://ravem.test/api/v2/sub/final_endpoint'),
('https://ravem.test/', '', 'https://ravem.test/'),
('https://ravem.test/api/', '', 'https://ravem.test/api/'),
('https://ravem.test/api/v2/', '', 'https://ravem.test/api/v2/'),
))
def test_correct_api_endpoint(mocker, root_endpoint, endpoint, expected_url):
request = mocker.patch('indico_ravem.util.requests.request')
response = MagicMock()
response.json.return_value = {'result': 'test'}
response.raise_for_status.return_value = False
request.return_value = response
RavemPlugin.settings.set('api_endpoint', root_endpoint)
ravem_api_call(endpoint, param1='test1', param2='test2')
assert request.call_count == 1
assert request.call_args[0][1] == expected_url
@pytest.mark.usefixtures('db')
@pytest.mark.parametrize('params', (
{},
{'p1': '1stparam'},
{'p1': '1stparam', 'p2': '2ndparam'}
))
def test_params_generated(mocker, params):
request = mocker.patch('indico_ravem.util.requests.request')
response = MagicMock()
response.json.return_value = {'result': 'test'}
response.raise_for_status.return_value = False
request.return_value = response
ravem_api_call('test_endpoint', params=params)
assert request.call_count == 1
assert request.call_args[1]['params'] == params
@pytest.mark.usefixtures('db')
def test_raises_timeout(mocker):
request = mocker.patch('indico_ravem.util.requests.request')
request.side_effect = Timeout('Timeout test error message', request=request)
with pytest.raises(Timeout) as excinfo:
ravem_api_call('test_endpoint')
assert str(excinfo.value) == "Timeout while contacting the room."
assert request.call_count == 1
@pytest.mark.usefixtures('db')
@pytest.mark.parametrize(('method', 'params'), (
('get', {}),
('post', {}),
('get', {'p1': '1stparam'}),
('post', {'p1': '1stparam'}),
('get', {'p1': '1stparam', 'p2': '2ndparam'}),
('post', {'p1': '1stparam', 'p2': '2ndparam'})
))
def test_unexpected_exception_is_logged(mocker, caplog, method, params):
request = mocker.patch('indico_ravem.util.requests.request')
request.side_effect = IndexError('this is unexpected')
with pytest.raises(IndexError) as excinfo:
ravem_api_call('test_endpoint', method=method, **params)
assert str(excinfo.value) == 'this is unexpected'
log = extract_logs(caplog, one=True, name='indico.plugin.ravem')
assert log.message == "failed call: {} {} with {}: {}".format(method.upper(), 'test_endpoint', params,
'this is unexpected')
assert request.call_count == 1
@pytest.mark.usefixtures('db')
@pytest.mark.parametrize(('method', 'params'), (
('get', {}),
('post', {}),
('get', {'p1': '1stparam'}),
('post', {'p1': '1stparam'}),
('get', {'p1': '1stparam', 'p2': '2ndparam'}),
('post', {'p1': '1stparam', 'p2': '2ndparam'})
))
def test_http_error_is_logged(mocker, caplog, method, params):
request = mocker.patch('indico_ravem.util.requests.request')
request.method = method.upper()
request.url = RavemPlugin.settings.get('api_endpoint') + 'test_endpoint'
response = MagicMock()
response.raise_for_status.side_effect = HTTPError('Well this is embarrassing')
response.request = request
response.url = response.request.url
request.return_value = response
with pytest.raises(HTTPError) as excinfo:
ravem_api_call('test_endpoint', method=method, **params)
assert str(excinfo.value) == 'Well this is embarrassing'
log = extract_logs(caplog, one=True, name='indico.plugin.ravem')
assert log.message == '{} {} failed with {}'.format(
method.upper(), RavemPlugin.settings.get('api_endpoint') + 'test_endpoint', 'Well this is embarrassing')
assert request.call_count == 1
@pytest.mark.usefixtures('db')
def test_unlinked_event_vc_room_has_no_access():
event_vc_room = MagicMock()
event_vc_room.link_object = None
assert not has_access(event_vc_room)
@pytest.mark.usefixtures('db', 'request_context')
def test_unlinked_room_has_no_access(mocker):
session = mocker.patch('indico_ravem.util.session')
session.user = 'Guinea Pig'
event_vc_room = MagicMock()
event_vc_room.link_object.room = None
assert not has_access(event_vc_room)
@pytest.mark.usefixtures('db', 'request_context')
def test_check_if_current_user_is_room_owner(mocker):
session = mocker.patch('indico_ravem.util.session')
session.user = 'Guinea Pig'
request = mocker.patch('indico_ravem.util.request')
request.remote_addr = '111.222.123.123'
retrieve_principal = mocker.patch('indico_ravem.util._retrieve_principal')
retrieve_principal.side_effect = lambda x: session.user
event_vc_room = MagicMock()
event_vc_room.link_object.room.has_equipment = MagicMock(return_value=True)
event_vc_room.link_object.room.get_attribute_value.return_value = request.remote_addr
event_vc_room.vc_room.data.get.return_value = 'User:123'
event_vc_room.event.can_manage.return_value = False
assert has_access(event_vc_room)
@pytest.mark.usefixtures('db', 'request_context')
def test_check_if_current_user_can_modify(mocker):
request = mocker.patch('indico_ravem.util.request')
request.remote_addr = '111.222.123.123'
session = mocker.patch('indico_ravem.util.session')
session.user = 'Guinea Pig'
mocker.patch('indico_ravem.util._retrieve_principal')
event_vc_room = MagicMock()
event_vc_room.link_object.room.has_equipment = MagicMock(return_value=True)
event_vc_room.link_object.room.get_attribute_value.return_value = request.remote_addr
event_vc_room.event.can_manage.return_value = True
assert has_access(event_vc_room)
event_vc_room.event.can_manage.assert_called_once_with(session.user)
| 39.549356
| 112
| 0.703527
| 1,191
| 9,215
| 5.22838
| 0.141058
| 0.048177
| 0.067448
| 0.088646
| 0.757668
| 0.740646
| 0.703549
| 0.652963
| 0.626947
| 0.611049
| 0
| 0.013493
| 0.139447
| 9,215
| 232
| 113
| 39.719828
| 0.771753
| 0.025936
| 0
| 0.551724
| 0
| 0
| 0.287769
| 0.052514
| 0
| 0
| 0
| 0
| 0.137931
| 1
| 0.068966
| false
| 0
| 0.034483
| 0
| 0.103448
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
868e31d3b6d09c73dfd001c290d85e56d3f9bb45
| 672
|
py
|
Python
|
app/http/middleware/LoadUserMiddleware.py
|
josephmancuso/masonite-forum
|
a91c7386f3e0b02b0ac71623eb295a7543cb60fd
|
[
"MIT"
] | 11
|
2018-07-08T17:44:28.000Z
|
2020-03-02T10:45:37.000Z
|
app/http/middleware/LoadUserMiddleware.py
|
josephmancuso/masonite-forum
|
a91c7386f3e0b02b0ac71623eb295a7543cb60fd
|
[
"MIT"
] | 2
|
2018-07-21T07:49:09.000Z
|
2019-05-29T14:34:42.000Z
|
app/http/middleware/LoadUserMiddleware.py
|
josephmancuso/masonite-forum
|
a91c7386f3e0b02b0ac71623eb295a7543cb60fd
|
[
"MIT"
] | 5
|
2018-07-12T02:36:14.000Z
|
2020-04-05T21:10:30.000Z
|
''' Load User Middleware'''
from masonite.facades.Auth import Auth
class LoadUserMiddleware:
''' Middleware class which loads the current user into the request '''
def __init__(self, Request):
''' Inject Any Dependencies From The Service Container '''
self.request = Request
def before(self):
''' Run This Middleware Before The Route Executes '''
self.load_user(self.request)
return self.request
def after(self):
''' Run This Middleware After The Route Executes '''
pass
def load_user(self, request):
''' Load user into the request '''
request.set_user(Auth(request).user())
| 29.217391
| 74
| 0.644345
| 80
| 672
| 5.325
| 0.4125
| 0.129108
| 0.051643
| 0.084507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.254464
| 672
| 22
| 75
| 30.545455
| 0.850299
| 0.38244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.363636
| false
| 0.090909
| 0.090909
| 0
| 0.636364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 3
|
869c4c6a792894e8eb7116f05f76e9950b851051
| 364
|
py
|
Python
|
dbclient/__init__.py
|
dmoore247/db-migration
|
cc75d491d7dd7e9e24b5a35dd3d1080317b25520
|
[
"Apache-2.0"
] | null | null | null |
dbclient/__init__.py
|
dmoore247/db-migration
|
cc75d491d7dd7e9e24b5a35dd3d1080317b25520
|
[
"Apache-2.0"
] | null | null | null |
dbclient/__init__.py
|
dmoore247/db-migration
|
cc75d491d7dd7e9e24b5a35dd3d1080317b25520
|
[
"Apache-2.0"
] | null | null | null |
import json, requests, datetime
from cron_descriptor import get_description
from .dbclient import dbclient
from .JobsClient import JobsClient
from .ClustersClient import ClustersClient
from .WorkspaceClient import WorkspaceClient
from .ScimClient import ScimClient
from .LibraryClient import LibraryClient
from .HiveClient import HiveClient
from .parser import *
| 30.333333
| 44
| 0.857143
| 41
| 364
| 7.560976
| 0.414634
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112637
| 364
| 11
| 45
| 33.090909
| 0.959752
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 3
|
869e772414d99f560741ba4d5f3b4440b61ae41b
| 2,931
|
py
|
Python
|
Ifc/IfcBase.py
|
gsimon75/IFC_parser
|
f9fbe2afa48795bbb502530bc9ab5c4db842e10f
|
[
"BSD-2-Clause"
] | 28
|
2019-12-02T11:41:14.000Z
|
2022-03-02T22:53:24.000Z
|
Ifc/IfcBase.py
|
gsimon75/IFC_parser
|
f9fbe2afa48795bbb502530bc9ab5c4db842e10f
|
[
"BSD-2-Clause"
] | 4
|
2019-08-30T13:52:40.000Z
|
2022-02-02T02:31:31.000Z
|
Ifc/IfcBase.py
|
gsimon75/IFC_parser
|
f9fbe2afa48795bbb502530bc9ab5c4db842e10f
|
[
"BSD-2-Clause"
] | 6
|
2020-07-11T22:35:07.000Z
|
2022-03-18T15:12:46.000Z
|
from Ifc.ClassRegistry import ifc_class, ifc_abstract_class, ifc_fallback_class
@ifc_abstract_class
class IfcEntity:
"""
Generic IFC entity, only for subclassing from it
"""
def __init__(self, rtype, args):
"""
rtype: Resource type
args: Arguments in *reverse* order, so you can just args.pop() from it
"""
self.rtype = rtype
def __str__(self):
return self.rtype
def __json__(self):
return {'rtype': self.rtype}
@ifc_fallback_class
class IfcGenericEntity(IfcEntity):
"""
Generic IFC entity: type and args
"""
def __init__(self, rtype, args):
IfcEntity.__init__(self, rtype, args)
self.args = args
self.args.reverse()
def __str__(self):
return "Gen<{sup}>{a}".format(
sup=IfcEntity.__str__(self),
a=self.args)
@ifc_class
class IfcScalarValue(IfcEntity):
def __init__(self, rtype, args):
IfcEntity.__init__(self, rtype, args)
self.value = args.pop()
def __str__(self):
return str(self.value)
@ifc_class
class BOOLEAN(IfcScalarValue):
pass
@ifc_class
class REAL(IfcScalarValue):
pass
@ifc_class
class BINARY(IfcScalarValue):
pass
@ifc_class
class INTEGER(IfcScalarValue):
pass
@ifc_class
class NUMBER(IfcScalarValue):
pass
@ifc_class
class STRING(IfcScalarValue):
pass
@ifc_class
class LOGICAL(IfcScalarValue):
pass
class Omitted:
"""
Marked with '*' it states that some supertype had defined that attribute, but in the subtype it is a derived
(calculated) value, so it no longer makes sense to explicitely assign value to it.
"""
# TODO: Haven't tried if it can be handled 'just as expected'
def __init__(self):
pass
def __str__(self):
return "<omitted>"
def __json__(self):
return None
# class-level, enough to reference, no need to create multiple instances (doesn't hurt though)
omitted = Omitted()
class Reference:
"""
Refers to another entity by its index
"""
def __init__(self, index):
self.index = index
def __str__(self):
return "<#{idx}>".format(idx=self.index)
def __json__(self):
return {'ref': self.index}
class EnumValue:
"""
Item from some set of enumerated values.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return "<.{val}.>".format(val=self.value)
def __json__(self):
return self.value
@ifc_class
class STEPHeader(IfcEntity):
def __init__(self):
IfcEntity.__init__(self, "STEPHeader", [])
self.fields = {}
def add(self, e):
self.fields[e.rtype] = e
def __str__(self):
return "STEPHeader({f})".format(f=", ".join(map(lambda f: "{n}: {v}".format(n=f[0], v=str(f[1])), self.fields.iteritems())))
# vim: set sw=4 ts=4 et:
| 21.23913
| 132
| 0.624701
| 369
| 2,931
| 4.674797
| 0.336043
| 0.063768
| 0.067826
| 0.064928
| 0.202899
| 0.057971
| 0.057971
| 0.057971
| 0.057971
| 0.057971
| 0
| 0.001842
| 0.258956
| 2,931
| 137
| 133
| 21.394161
| 0.792357
| 0.212214
| 0
| 0.448718
| 0
| 0
| 0.037341
| 0
| 0
| 0
| 0
| 0.007299
| 0
| 1
| 0.24359
| false
| 0.102564
| 0.012821
| 0.141026
| 0.576923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
|
0
| 3
|
86a6e60b85eb87efd2531834b58c525dde29390d
| 21,322
|
py
|
Python
|
school/views.py
|
pa-one-patel/college_managenment
|
be6f6dcac1f7e01f71d95f445e2118e8eec3fe3a
|
[
"MIT"
] | 1
|
2021-04-11T12:05:44.000Z
|
2021-04-11T12:05:44.000Z
|
school/views.py
|
aliffauzi/schoolmanagement
|
6a4477af01df148404d1ff2941f74accb5717b09
|
[
"MIT"
] | 6
|
2021-03-19T04:10:49.000Z
|
2021-09-22T19:06:14.000Z
|
school/views.py
|
aliffauzi/schoolmanagement
|
6a4477af01df148404d1ff2941f74accb5717b09
|
[
"MIT"
] | 1
|
2021-04-11T12:07:08.000Z
|
2021-04-11T12:07:08.000Z
|
from django.shortcuts import render,redirect,reverse
from . import forms,models
from django.db.models import Sum
from django.contrib.auth.models import Group
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required,user_passes_test
def home_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request,'school/index.html')
#for showing signup/login button for teacher(by sumit)
def adminclick_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request,'school/adminclick.html')
#for showing signup/login button for teacher(by sumit)
def teacherclick_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request,'school/teacherclick.html')
#for showing signup/login button for student(by sumit)
def studentclick_view(request):
if request.user.is_authenticated:
return HttpResponseRedirect('afterlogin')
return render(request,'school/studentclick.html')
def admin_signup_view(request):
form=forms.AdminSigupForm()
if request.method=='POST':
form=forms.AdminSigupForm(request.POST)
if form.is_valid():
user=form.save()
user.set_password(user.password)
user.save()
my_admin_group = Group.objects.get_or_create(name='ADMIN')
my_admin_group[0].user_set.add(user)
return HttpResponseRedirect('adminlogin')
return render(request,'school/adminsignup.html',{'form':form})
def student_signup_view(request):
form1=forms.StudentUserForm()
form2=forms.StudentExtraForm()
mydict={'form1':form1,'form2':form2}
if request.method=='POST':
form1=forms.StudentUserForm(request.POST)
form2=forms.StudentExtraForm(request.POST)
if form1.is_valid() and form2.is_valid():
user=form1.save()
user.set_password(user.password)
user.save()
f2=form2.save(commit=False)
f2.user=user
user2=f2.save()
my_student_group = Group.objects.get_or_create(name='STUDENT')
my_student_group[0].user_set.add(user)
return HttpResponseRedirect('studentlogin')
return render(request,'school/studentsignup.html',context=mydict)
def teacher_signup_view(request):
form1=forms.TeacherUserForm()
form2=forms.TeacherExtraForm()
mydict={'form1':form1,'form2':form2}
if request.method=='POST':
form1=forms.TeacherUserForm(request.POST)
form2=forms.TeacherExtraForm(request.POST)
if form1.is_valid() and form2.is_valid():
user=form1.save()
user.set_password(user.password)
user.save()
f2=form2.save(commit=False)
f2.user=user
user2=f2.save()
my_teacher_group = Group.objects.get_or_create(name='TEACHER')
my_teacher_group[0].user_set.add(user)
return HttpResponseRedirect('teacherlogin')
return render(request,'school/teachersignup.html',context=mydict)
#for checking user is techer , student or admin(by sumit)
def is_admin(user):
return user.groups.filter(name='ADMIN').exists()
def is_teacher(user):
return user.groups.filter(name='TEACHER').exists()
def is_student(user):
return user.groups.filter(name='STUDENT').exists()
def afterlogin_view(request):
if is_admin(request.user):
return redirect('admin-dashboard')
elif is_teacher(request.user):
accountapproval=models.TeacherExtra.objects.all().filter(user_id=request.user.id,status=True)
if accountapproval:
return redirect('teacher-dashboard')
else:
return render(request,'school/teacher_wait_for_approval.html')
elif is_student(request.user):
accountapproval=models.StudentExtra.objects.all().filter(user_id=request.user.id,status=True)
if accountapproval:
return redirect('student-dashboard')
else:
return render(request,'school/student_wait_for_approval.html')
#for dashboard of adminnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn(by sumit)
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_dashboard_view(request):
teachercount=models.TeacherExtra.objects.all().filter(status=True).count()
pendingteachercount=models.TeacherExtra.objects.all().filter(status=False).count()
studentcount=models.StudentExtra.objects.all().filter(status=True).count()
pendingstudentcount=models.StudentExtra.objects.all().filter(status=False).count()
teachersalary=models.TeacherExtra.objects.filter(status=True).aggregate(Sum('salary'))
pendingteachersalary=models.TeacherExtra.objects.filter(status=False).aggregate(Sum('salary'))
studentfee=models.StudentExtra.objects.filter(status=True).aggregate(Sum('fee',default=0))
pendingstudentfee=models.StudentExtra.objects.filter(status=False).aggregate(Sum('fee'))
notice=models.Notice.objects.all()
#aggregate function return dictionary so fetch data from dictionay(by sumit)
mydict={
'teachercount':teachercount,
'pendingteachercount':pendingteachercount,
'studentcount':studentcount,
'pendingstudentcount':pendingstudentcount,
'teachersalary':teachersalary['salary__sum'],
'pendingteachersalary':pendingteachersalary['salary__sum'],
'studentfee':studentfee['fee__sum'],
'pendingstudentfee':pendingstudentfee['fee__sum'],
'notice':notice
}
return render(request,'school/admin_dashboard.html',context=mydict)
#for teacher sectionnnnnnnn by adminnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn(by sumit)
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_teacher_view(request):
return render(request,'school/admin_teacher.html')
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_add_teacher_view(request):
form1=forms.TeacherUserForm()
form2=forms.TeacherExtraForm()
mydict={'form1':form1,'form2':form2}
if request.method=='POST':
form1=forms.TeacherUserForm(request.POST)
form2=forms.TeacherExtraForm(request.POST)
if form1.is_valid() and form2.is_valid():
user=form1.save()
user.set_password(user.password)
user.save()
f2=form2.save(commit=False)
f2.user=user
f2.status=True
f2.save()
my_teacher_group = Group.objects.get_or_create(name='TEACHER')
my_teacher_group[0].user_set.add(user)
return HttpResponseRedirect('admin-teacher')
return render(request,'school/admin_add_teacher.html',context=mydict)
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_view_teacher_view(request):
teachers=models.TeacherExtra.objects.all().filter(status=True)
return render(request,'school/admin_view_teacher.html',{'teachers':teachers})
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_approve_teacher_view(request):
teachers=models.TeacherExtra.objects.all().filter(status=False)
return render(request,'school/admin_approve_teacher.html',{'teachers':teachers})
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def approve_teacher_view(request,pk):
teacher=models.TeacherExtra.objects.get(id=pk)
teacher.status=True
teacher.save()
return redirect(reverse('admin-approve-teacher'))
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def delete_teacher_view(request,pk):
teacher=models.TeacherExtra.objects.get(id=pk)
user=models.User.objects.get(id=teacher.user_id)
user.delete()
teacher.delete()
return redirect('admin-approve-teacher')
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def delete_teacher_from_school_view(request,pk):
teacher=models.TeacherExtra.objects.get(id=pk)
user=models.User.objects.get(id=teacher.user_id)
user.delete()
teacher.delete()
return redirect('admin-view-teacher')
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def update_teacher_view(request,pk):
teacher=models.TeacherExtra.objects.get(id=pk)
user=models.User.objects.get(id=teacher.user_id)
form1=forms.TeacherUserForm(instance=user)
form2=forms.TeacherExtraForm(instance=teacher)
mydict={'form1':form1,'form2':form2}
if request.method=='POST':
form1=forms.TeacherUserForm(request.POST,instance=user)
form2=forms.TeacherExtraForm(request.POST,instance=teacher)
print(form1)
if form1.is_valid() and form2.is_valid():
user=form1.save()
user.set_password(user.password)
user.save()
f2=form2.save(commit=False)
f2.status=True
f2.save()
return redirect('admin-view-teacher')
return render(request,'school/admin_update_teacher.html',context=mydict)
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_view_teacher_salary_view(request):
teachers=models.TeacherExtra.objects.all()
return render(request,'school/admin_view_teacher_salary.html',{'teachers':teachers})
#for student by adminnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn(by sumit)
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_student_view(request):
return render(request,'school/admin_student.html')
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_add_student_view(request):
form1=forms.StudentUserForm()
form2=forms.StudentExtraForm()
mydict={'form1':form1,'form2':form2}
if request.method=='POST':
form1=forms.StudentUserForm(request.POST)
form2=forms.StudentExtraForm(request.POST)
if form1.is_valid() and form2.is_valid():
print("form is valid")
user=form1.save()
user.set_password(user.password)
user.save()
f2=form2.save(commit=False)
f2.user=user
f2.status=True
f2.save()
my_student_group = Group.objects.get_or_create(name='STUDENT')
my_student_group[0].user_set.add(user)
else:
print("form is invalid")
return HttpResponseRedirect('admin-student')
return render(request,'school/admin_add_student.html',context=mydict)
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_view_student_view(request):
students=models.StudentExtra.objects.all().filter(status=True)
return render(request,'school/admin_view_student.html',{'students':students})
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def delete_student_from_school_view(request,pk):
student=models.StudentExtra.objects.get(id=pk)
user=models.User.objects.get(id=student.user_id)
user.delete()
student.delete()
return redirect('admin-view-student')
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def delete_student_view(request,pk):
student=models.StudentExtra.objects.get(id=pk)
user=models.User.objects.get(id=student.user_id)
user.delete()
student.delete()
return redirect('admin-approve-student')
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def update_student_view(request,pk):
student=models.StudentExtra.objects.get(id=pk)
user=models.User.objects.get(id=student.user_id)
form1=forms.StudentUserForm(instance=user)
form2=forms.StudentExtraForm(instance=student)
mydict={'form1':form1,'form2':form2}
if request.method=='POST':
form1=forms.StudentUserForm(request.POST,instance=user)
form2=forms.StudentExtraForm(request.POST,instance=student)
print(form1)
if form1.is_valid() and form2.is_valid():
user=form1.save()
user.set_password(user.password)
user.save()
f2=form2.save(commit=False)
f2.status=True
f2.save()
return redirect('admin-view-student')
return render(request,'school/admin_update_student.html',context=mydict)
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_approve_student_view(request):
students=models.StudentExtra.objects.all().filter(status=False)
return render(request,'school/admin_approve_student.html',{'students':students})
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def approve_student_view(request,pk):
students=models.StudentExtra.objects.get(id=pk)
students.status=True
students.save()
return redirect(reverse('admin-approve-student'))
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_view_student_fee_view(request):
students=models.StudentExtra.objects.all()
return render(request,'school/admin_view_student_fee.html',{'students':students})
#attendance related viewwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww(by sumit)
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_attendance_view(request):
return render(request,'school/admin_attendance.html')
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_take_attendance_view(request,cl):
students=models.StudentExtra.objects.all().filter(cl=cl)
print(students)
aform=forms.AttendanceForm()
if request.method=='POST':
form=forms.AttendanceForm(request.POST)
if form.is_valid():
Attendances=request.POST.getlist('present_status')
date=form.cleaned_data['date']
for i in range(len(Attendances)):
AttendanceModel=models.Attendance()
AttendanceModel.cl=cl
AttendanceModel.date=date
AttendanceModel.present_status=Attendances[i]
AttendanceModel.roll=students[i].roll
AttendanceModel.save()
return redirect('admin-attendance')
else:
print('form invalid')
return render(request,'school/admin_take_attendance.html',{'students':students,'aform':aform})
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_view_attendance_view(request,cl):
form=forms.AskDateForm()
if request.method=='POST':
form=forms.AskDateForm(request.POST)
if form.is_valid():
date=form.cleaned_data['date']
attendancedata=models.Attendance.objects.all().filter(date=date,cl=cl)
studentdata=models.StudentExtra.objects.all().filter(cl=cl)
mylist=zip(attendancedata,studentdata)
return render(request,'school/admin_view_attendance_page.html',{'cl':cl,'mylist':mylist,'date':date})
else:
print('form invalid')
return render(request,'school/admin_view_attendance_ask_date.html',{'cl':cl,'form':form})
#fee related view by adminnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn(by sumit)
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_fee_view(request):
return render(request,'school/admin_fee.html')
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_view_fee_view(request,cl):
feedetails=models.StudentExtra.objects.all().filter(cl=cl)
return render(request,'school/admin_view_fee.html',{'feedetails':feedetails,'cl':cl})
#notice related viewsssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss(by sumit)
@login_required(login_url='adminlogin')
@user_passes_test(is_admin)
def admin_notice_view(request):
form=forms.NoticeForm()
if request.method=='POST':
form=forms.NoticeForm(request.POST)
if form.is_valid():
form=form.save(commit=False)
form.by=request.user.first_name
form.save()
return redirect('admin-dashboard')
return render(request,'school/admin_notice.html',{'form':form})
#for TEACHER LOGIN SECTIONNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN(by sumit)
@login_required(login_url='teacherlogin')
@user_passes_test(is_teacher)
def teacher_dashboard_view(request):
teacherdata=models.TeacherExtra.objects.all().filter(status=True,user_id=request.user.id)
notice=models.Notice.objects.all()
mydict={
'salary':teacherdata[0].salary,
'mobile':teacherdata[0].mobile,
'date':teacherdata[0].joindate,
'notice':notice
}
return render(request,'school/teacher_dashboard.html',context=mydict)
@login_required(login_url='teacherlogin')
@user_passes_test(is_teacher)
def teacher_attendance_view(request):
return render(request,'school/teacher_attendance.html')
@login_required(login_url='teacherlogin')
@user_passes_test(is_teacher)
def teacher_take_attendance_view(request,cl):
students=models.StudentExtra.objects.all().filter(cl=cl)
aform=forms.AttendanceForm()
if request.method=='POST':
form=forms.AttendanceForm(request.POST)
if form.is_valid():
Attendances=request.POST.getlist('present_status')
date=form.cleaned_data['date']
for i in range(len(Attendances)):
AttendanceModel=models.Attendance()
AttendanceModel.cl=cl
AttendanceModel.date=date
AttendanceModel.present_status=Attendances[i]
AttendanceModel.roll=students[i].roll
AttendanceModel.save()
return redirect('teacher-attendance')
else:
print('form invalid')
return render(request,'school/teacher_take_attendance.html',{'students':students,'aform':aform})
@login_required(login_url='teacherlogin')
@user_passes_test(is_teacher)
def teacher_view_attendance_view(request,cl):
form=forms.AskDateForm()
if request.method=='POST':
form=forms.AskDateForm(request.POST)
if form.is_valid():
date=form.cleaned_data['date']
attendancedata=models.Attendance.objects.all().filter(date=date,cl=cl)
studentdata=models.StudentExtra.objects.all().filter(cl=cl)
mylist=zip(attendancedata,studentdata)
return render(request,'school/teacher_view_attendance_page.html',{'cl':cl,'mylist':mylist,'date':date})
else:
print('form invalid')
return render(request,'school/teacher_view_attendance_ask_date.html',{'cl':cl,'form':form})
@login_required(login_url='teacherlogin')
@user_passes_test(is_teacher)
def teacher_notice_view(request):
form=forms.NoticeForm()
if request.method=='POST':
form=forms.NoticeForm(request.POST)
if form.is_valid():
form=form.save(commit=False)
form.by=request.user.first_name
form.save()
return redirect('teacher-dashboard')
else:
print('form invalid')
return render(request,'school/teacher_notice.html',{'form':form})
#FOR STUDENT AFTER THEIR Loginnnnnnnnnnnnnnnnnnnnn(by sumit)
@login_required(login_url='studentlogin')
@user_passes_test(is_student)
def student_dashboard_view(request):
studentdata=models.StudentExtra.objects.all().filter(status=True,user_id=request.user.id)
notice=models.Notice.objects.all()
mydict={
'roll':studentdata[0].roll,
'mobile':studentdata[0].mobile,
'fee':studentdata[0].fee,
'notice':notice
}
return render(request,'school/student_dashboard.html',context=mydict)
@login_required(login_url='studentlogin')
@user_passes_test(is_student)
def student_attendance_view(request):
form=forms.AskDateForm()
if request.method=='POST':
form=forms.AskDateForm(request.POST)
if form.is_valid():
date=form.cleaned_data['date']
studentdata=models.StudentExtra.objects.all().filter(user_id=request.user.id,status=True)
attendancedata=models.Attendance.objects.all().filter(date=date,cl=studentdata[0].cl,roll=studentdata[0].roll)
mylist=zip(attendancedata,studentdata)
return render(request,'school/student_view_attendance_page.html',{'mylist':mylist,'date':date})
else:
print('form invalid')
return render(request,'school/student_view_attendance_ask_date.html',{'form':form})
# for aboutus and contact ussssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss (by sumit)
def aboutus_view(request):
return render(request,'school/aboutus.html')
def contactus_view(request):
sub = forms.ContactusForm()
if request.method == 'POST':
sub = forms.ContactusForm(request.POST)
if sub.is_valid():
email = sub.cleaned_data['Email']
name=sub.cleaned_data['Name']
message = sub.cleaned_data['Message']
send_mail(str(name)+' || '+str(email),message, EMAIL_HOST_USER, ['[email protected]'], fail_silently = False)
return render(request, 'school/contactussuccess.html')
return render(request, 'school/contactus.html', {'form':sub})
| 33.211838
| 123
| 0.710909
| 2,513
| 21,322
| 5.859133
| 0.074015
| 0.031377
| 0.052907
| 0.069614
| 0.791972
| 0.760459
| 0.698452
| 0.652268
| 0.631214
| 0.609753
| 0
| 0.006424
| 0.167761
| 21,322
| 641
| 124
| 33.263651
| 0.823331
| 0.053231
| 0
| 0.612335
| 0
| 0
| 0.132256
| 0.063203
| 0
| 0
| 0
| 0
| 0
| 1
| 0.099119
| false
| 0.088106
| 0.013216
| 0.019824
| 0.262115
| 0.024229
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 3
|
86bcd2890d4f11513d628469a8efe8d1af2d7195
| 65
|
py
|
Python
|
src/cicd_sim/artifact/__init__.py
|
Software-Natives-OSS/cicd_sim
|
19452a5b06a6c6d99322c9b6777c501025e954f1
|
[
"MIT"
] | null | null | null |
src/cicd_sim/artifact/__init__.py
|
Software-Natives-OSS/cicd_sim
|
19452a5b06a6c6d99322c9b6777c501025e954f1
|
[
"MIT"
] | 8
|
2020-03-12T05:51:56.000Z
|
2020-03-15T17:31:12.000Z
|
src/cicd_sim/artifact/__init__.py
|
Software-Natives-OSS/cicd_sim
|
19452a5b06a6c6d99322c9b6777c501025e954f1
|
[
"MIT"
] | null | null | null |
from . artifactory import Artifactory
__all__ = ['Artifactory']
| 16.25
| 37
| 0.769231
| 6
| 65
| 7.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138462
| 65
| 3
| 38
| 21.666667
| 0.821429
| 0
| 0
| 0
| 0
| 0
| 0.169231
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 3
|
86c445a03cb1fedcfaa9af4175640a3d81afd9b9
| 8,505
|
py
|
Python
|
reco_utils/recommender/deeprec/io/iterator.py
|
yutian-zhao/recommenders
|
17b9c1280a79019dd91f50b3a7e66f25cb5004b1
|
[
"MIT"
] | null | null | null |
reco_utils/recommender/deeprec/io/iterator.py
|
yutian-zhao/recommenders
|
17b9c1280a79019dd91f50b3a7e66f25cb5004b1
|
[
"MIT"
] | null | null | null |
reco_utils/recommender/deeprec/io/iterator.py
|
yutian-zhao/recommenders
|
17b9c1280a79019dd91f50b3a7e66f25cb5004b1
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
# import tensorflow as tf
import abc
class BaseIterator(object):
@abc.abstractmethod
def parser_one_line(self, line):
pass
@abc.abstractmethod
def load_data_from_file(self, infile):
pass
@abc.abstractmethod
def _convert_data(self, labels, features):
pass
@abc.abstractmethod
def gen_feed_dict(self, data_dict):
pass
# class FFMTextIterator(BaseIterator):
# """Data loader for FFM format based models, such as xDeepFM.
# Iterator will not load the whole data into memory. Instead, it loads data into memory
# per mini-batch, so that large files can be used as input data.
# """
# def __init__(self, hparams, graph, col_spliter=" ", ID_spliter="%"):
# """Initialize an iterator. Create necessary placeholders for the model.
# Args:
# hparams (obj): Global hyper-parameters. Some key settings such as #_feature and #_field are there.
# graph (obj): the running graph. All created placeholder will be added to this graph.
# col_spliter (str): column splitter in one line.
# ID_spliter (str): ID splitter in one line.
# """
# self.feature_cnt = hparams.FEATURE_COUNT
# self.field_cnt = hparams.FIELD_COUNT
# self.col_spliter = col_spliter
# self.ID_spliter = ID_spliter
# self.batch_size = hparams.batch_size
# self.graph = graph
# with self.graph.as_default():
# self.labels = tf.placeholder(tf.float32, [None, 1], name="label")
# self.fm_feat_indices = tf.placeholder(
# tf.int64, [None, 2], name="fm_feat_indices"
# )
# self.fm_feat_values = tf.placeholder(
# tf.float32, [None], name="fm_feat_values"
# )
# self.fm_feat_shape = tf.placeholder(tf.int64, [None], name="fm_feat_shape")
# self.dnn_feat_indices = tf.placeholder(
# tf.int64, [None, 2], name="dnn_feat_indices"
# )
# self.dnn_feat_values = tf.placeholder(
# tf.int64, [None], name="dnn_feat_values"
# )
# self.dnn_feat_weights = tf.placeholder(
# tf.float32, [None], name="dnn_feat_weights"
# )
# self.dnn_feat_shape = tf.placeholder(
# tf.int64, [None], name="dnn_feat_shape"
# )
# def parser_one_line(self, line):
# """Parse one string line into feature values.
# Args:
# line (str): a string indicating one instance
# Returns:
# list: Parsed results,including label, features and impression_id
# """
# impression_id = 0
# words = line.strip().split(self.ID_spliter)
# if len(words) == 2:
# impression_id = words[1].strip()
# cols = words[0].strip().split(self.col_spliter)
# label = float(cols[0])
# features = []
# for word in cols[1:]:
# if not word.strip():
# continue
# tokens = word.split(":")
# features.append([int(tokens[0]) - 1, int(tokens[1]) - 1, float(tokens[2])])
# return label, features, impression_id
# def load_data_from_file(self, infile):
# """Read and parse data from a file.
# Args:
# infile (str): text input file. Each line in this file is an instance.
# Returns:
# obj: An iterator that will yields parsed results, in the format of graph feed_dict.
# """
# label_list = []
# features_list = []
# impression_id_list = []
# cnt = 0
# with tf.gfile.GFile(infile, "r") as rd:
# for line in rd:
# label, features, impression_id = self.parser_one_line(line)
# features_list.append(features)
# label_list.append(label)
# impression_id_list.append(impression_id)
# cnt += 1
# if cnt == self.batch_size:
# res = self._convert_data(label_list, features_list)
# yield self.gen_feed_dict(res), impression_id_list, self.batch_size
# label_list = []
# features_list = []
# impression_id_list = []
# cnt = 0
# if cnt > 0:
# res = self._convert_data(label_list, features_list)
# yield self.gen_feed_dict(res), impression_id_list, cnt
# def _convert_data(self, labels, features):
# """Convert data into numpy arrays that are good for further operation.
# Args:
# labels (list): a list of ground-truth labels.
# features (list): a 3-dimensional list, carrying a list (batch_size) of feature array,
# where each feature array is a list of [field_idx, feature_idx, feature_value] tuple.
# Returns:
# dict: A dictionary, contains multiple numpy arrays that are convenient for further operation.
# """
# dim = self.feature_cnt
# FIELD_COUNT = self.field_cnt
# instance_cnt = len(labels)
# fm_feat_indices = []
# fm_feat_values = []
# fm_feat_shape = [instance_cnt, dim]
# dnn_feat_indices = []
# dnn_feat_values = []
# dnn_feat_weights = []
# dnn_feat_shape = [instance_cnt * FIELD_COUNT, -1]
# for i in range(instance_cnt):
# m = len(features[i])
# dnn_feat_dic = {}
# for j in range(m):
# fm_feat_indices.append([i, features[i][j][1]])
# fm_feat_values.append(features[i][j][2])
# if features[i][j][0] not in dnn_feat_dic:
# dnn_feat_dic[features[i][j][0]] = 0
# else:
# dnn_feat_dic[features[i][j][0]] += 1
# dnn_feat_indices.append(
# [
# i * FIELD_COUNT + features[i][j][0],
# dnn_feat_dic[features[i][j][0]],
# ]
# )
# dnn_feat_values.append(features[i][j][1])
# dnn_feat_weights.append(features[i][j][2])
# if dnn_feat_shape[1] < dnn_feat_dic[features[i][j][0]]:
# dnn_feat_shape[1] = dnn_feat_dic[features[i][j][0]]
# dnn_feat_shape[1] += 1
# sorted_index = sorted(
# range(len(dnn_feat_indices)),
# key=lambda k: (dnn_feat_indices[k][0], dnn_feat_indices[k][1]),
# )
# res = {}
# res["fm_feat_indices"] = np.asarray(fm_feat_indices, dtype=np.int64)
# res["fm_feat_values"] = np.asarray(fm_feat_values, dtype=np.float32)
# res["fm_feat_shape"] = np.asarray(fm_feat_shape, dtype=np.int64)
# res["labels"] = np.asarray([[label] for label in labels], dtype=np.float32)
# res["dnn_feat_indices"] = np.asarray(dnn_feat_indices, dtype=np.int64)[
# sorted_index
# ]
# res["dnn_feat_values"] = np.asarray(dnn_feat_values, dtype=np.int64)[
# sorted_index
# ]
# res["dnn_feat_weights"] = np.asarray(dnn_feat_weights, dtype=np.float32)[
# sorted_index
# ]
# res["dnn_feat_shape"] = np.asarray(dnn_feat_shape, dtype=np.int64)
# return res
# def gen_feed_dict(self, data_dict):
# """Construct a dictionary that maps graph elements to values.
# Args:
# data_dict (dict): a dictionary that maps string name to numpy arrays.
# Returns:
# dict: a dictionary that maps graph elements to numpy arrays.
# """
# feed_dict = {
# self.labels: data_dict["labels"],
# self.fm_feat_indices: data_dict["fm_feat_indices"],
# self.fm_feat_values: data_dict["fm_feat_values"],
# self.fm_feat_shape: data_dict["fm_feat_shape"],
# self.dnn_feat_indices: data_dict["dnn_feat_indices"],
# self.dnn_feat_values: data_dict["dnn_feat_values"],
# self.dnn_feat_weights: data_dict["dnn_feat_weights"],
# self.dnn_feat_shape: data_dict["dnn_feat_shape"],
# }
# return feed_dict
| 38.310811
| 112
| 0.55485
| 1,020
| 8,505
| 4.387255
| 0.189216
| 0.068827
| 0.034413
| 0.017207
| 0.346369
| 0.312849
| 0.256313
| 0.136089
| 0.095419
| 0.052961
| 0
| 0.012467
| 0.330394
| 8,505
| 221
| 113
| 38.484163
| 0.77331
| 0.907937
| 0
| 0.533333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.266667
| false
| 0.266667
| 0.133333
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 3
|
86f2a90daa96d1b00a32e75d50fd040ca01ed705
| 172
|
py
|
Python
|
pyconde/context_processors.py
|
EuroPython/djep
|
afcccbdda483e5f6962ac97f0dc4c4c5ea67fd21
|
[
"BSD-3-Clause"
] | 5
|
2015-01-02T14:33:14.000Z
|
2021-08-03T10:19:07.000Z
|
pyconde/context_processors.py
|
EuroPython/djep
|
afcccbdda483e5f6962ac97f0dc4c4c5ea67fd21
|
[
"BSD-3-Clause"
] | null | null | null |
pyconde/context_processors.py
|
EuroPython/djep
|
afcccbdda483e5f6962ac97f0dc4c4c5ea67fd21
|
[
"BSD-3-Clause"
] | 3
|
2015-08-30T09:45:03.000Z
|
2017-04-08T12:15:22.000Z
|
from django.conf import settings
def less_settings(request):
return {
'use_dynamic_less_in_debug': getattr(settings, 'LESS_USE_DYNAMIC_IN_DEBUG', True)
}
| 21.5
| 89
| 0.732558
| 23
| 172
| 5.086957
| 0.652174
| 0.17094
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.180233
| 172
| 7
| 90
| 24.571429
| 0.829787
| 0
| 0
| 0
| 0
| 0
| 0.290698
| 0.290698
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0.2
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 3
|
8109517cd2448992084aac4cf51be9ed93b5e56f
| 467
|
py
|
Python
|
greendoge/types/condition_with_args.py
|
grayfallstown/greendoge-blockchain
|
31e325913374d694dc0859140d006a642e7f95ac
|
[
"Apache-2.0"
] | 44
|
2021-07-06T10:09:06.000Z
|
2022-02-09T04:30:14.000Z
|
greendoge/types/condition_with_args.py
|
grayfallstown/greendoge-blockchain
|
31e325913374d694dc0859140d006a642e7f95ac
|
[
"Apache-2.0"
] | 67
|
2021-07-06T11:57:18.000Z
|
2022-02-02T16:14:15.000Z
|
greendoge/types/condition_with_args.py
|
grayfallstown/greendoge-blockchain
|
31e325913374d694dc0859140d006a642e7f95ac
|
[
"Apache-2.0"
] | 16
|
2021-07-06T10:36:37.000Z
|
2022-03-15T08:35:16.000Z
|
from dataclasses import dataclass
from typing import List
from greendoge.types.condition_opcodes import ConditionOpcode
from greendoge.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class ConditionWithArgs(Streamable):
"""
This structure is used to store parsed CLVM conditions
Conditions in CLVM have either format of (opcode, var1) or (opcode, var1, var2)
"""
opcode: ConditionOpcode
vars: List[bytes]
| 25.944444
| 83
| 0.770878
| 56
| 467
| 6.410714
| 0.660714
| 0.072423
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007692
| 0.164882
| 467
| 17
| 84
| 27.470588
| 0.912821
| 0.286938
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.444444
| 0
| 0.777778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 3
|
810dcd1a1c119a6c004be66c020243fbafedf1ee
| 5,229
|
py
|
Python
|
boto3_type_annotations/boto3_type_annotations/guardduty/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 119
|
2018-12-01T18:20:57.000Z
|
2022-02-02T10:31:29.000Z
|
boto3_type_annotations/boto3_type_annotations/guardduty/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 15
|
2018-11-16T00:16:44.000Z
|
2021-11-13T03:44:18.000Z
|
boto3_type_annotations/boto3_type_annotations/guardduty/client.py
|
cowboygneox/boto3_type_annotations
|
450dce1de4e066b939de7eac2ec560ed1a7ddaa2
|
[
"MIT"
] | 11
|
2019-05-06T05:26:51.000Z
|
2021-09-28T15:27:59.000Z
|
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import List
class Client(BaseClient):
def accept_invitation(self, DetectorId: str, InvitationId: str, MasterId: str) -> Dict:
pass
def archive_findings(self, DetectorId: str, FindingIds: List) -> Dict:
pass
def can_paginate(self, operation_name: str = None):
pass
def create_detector(self, Enable: bool, ClientToken: str = None, FindingPublishingFrequency: str = None) -> Dict:
pass
def create_filter(self, DetectorId: str, FindingCriteria: Dict, Name: str, Action: str = None, ClientToken: str = None, Description: str = None, Rank: int = None) -> Dict:
pass
def create_ip_set(self, Activate: bool, DetectorId: str, Format: str, Location: str, Name: str, ClientToken: str = None) -> Dict:
pass
def create_members(self, AccountDetails: List, DetectorId: str) -> Dict:
pass
def create_sample_findings(self, DetectorId: str, FindingTypes: List = None) -> Dict:
pass
def create_threat_intel_set(self, Activate: bool, DetectorId: str, Format: str, Location: str, Name: str, ClientToken: str = None) -> Dict:
pass
def decline_invitations(self, AccountIds: List) -> Dict:
pass
def delete_detector(self, DetectorId: str) -> Dict:
pass
def delete_filter(self, DetectorId: str, FilterName: str) -> Dict:
pass
def delete_invitations(self, AccountIds: List) -> Dict:
pass
def delete_ip_set(self, DetectorId: str, IpSetId: str) -> Dict:
pass
def delete_members(self, AccountIds: List, DetectorId: str) -> Dict:
pass
def delete_threat_intel_set(self, DetectorId: str, ThreatIntelSetId: str) -> Dict:
pass
def disassociate_from_master_account(self, DetectorId: str) -> Dict:
pass
def disassociate_members(self, AccountIds: List, DetectorId: str) -> Dict:
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
pass
def get_detector(self, DetectorId: str) -> Dict:
pass
def get_filter(self, DetectorId: str, FilterName: str) -> Dict:
pass
def get_findings(self, DetectorId: str, FindingIds: List, SortCriteria: Dict = None) -> Dict:
pass
def get_findings_statistics(self, DetectorId: str, FindingStatisticTypes: List, FindingCriteria: Dict = None) -> Dict:
pass
def get_invitations_count(self) -> Dict:
pass
def get_ip_set(self, DetectorId: str, IpSetId: str) -> Dict:
pass
def get_master_account(self, DetectorId: str) -> Dict:
pass
def get_members(self, AccountIds: List, DetectorId: str) -> Dict:
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
pass
def get_threat_intel_set(self, DetectorId: str, ThreatIntelSetId: str) -> Dict:
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
pass
def invite_members(self, AccountIds: List, DetectorId: str, DisableEmailNotification: bool = None, Message: str = None) -> Dict:
pass
def list_detectors(self, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def list_filters(self, DetectorId: str, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def list_findings(self, DetectorId: str, FindingCriteria: Dict = None, MaxResults: int = None, NextToken: str = None, SortCriteria: Dict = None) -> Dict:
pass
def list_invitations(self, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def list_ip_sets(self, DetectorId: str, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def list_members(self, DetectorId: str, MaxResults: int = None, NextToken: str = None, OnlyAssociated: str = None) -> Dict:
pass
def list_threat_intel_sets(self, DetectorId: str, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def start_monitoring_members(self, AccountIds: List, DetectorId: str) -> Dict:
pass
def stop_monitoring_members(self, AccountIds: List, DetectorId: str) -> Dict:
pass
def unarchive_findings(self, DetectorId: str, FindingIds: List) -> Dict:
pass
def update_detector(self, DetectorId: str, Enable: bool = None, FindingPublishingFrequency: str = None) -> Dict:
pass
def update_filter(self, DetectorId: str, FilterName: str, Action: str = None, Description: str = None, FindingCriteria: Dict = None, Rank: int = None) -> Dict:
pass
def update_findings_feedback(self, DetectorId: str, Feedback: str, FindingIds: List, Comments: str = None) -> Dict:
pass
def update_ip_set(self, DetectorId: str, IpSetId: str, Activate: bool = None, Location: str = None, Name: str = None) -> Dict:
pass
def update_threat_intel_set(self, DetectorId: str, ThreatIntelSetId: str, Activate: bool = None, Location: str = None, Name: str = None) -> Dict:
pass
| 35.331081
| 175
| 0.669344
| 632
| 5,229
| 5.420886
| 0.143987
| 0.091944
| 0.13164
| 0.083187
| 0.701985
| 0.627262
| 0.518973
| 0.444542
| 0.380035
| 0.265908
| 0
| 0
| 0.229681
| 5,229
| 147
| 176
| 35.571429
| 0.850546
| 0
| 0
| 0.46
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.46
| false
| 0.46
| 0.07
| 0
| 0.54
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 3
|
810f24ca6e713fb7958aa28861ebd60291bab8c3
| 2,089
|
bzl
|
Python
|
google/cloud/google_cloud_cpp_common_unit_tests.bzl
|
joezqren/google-cloud-cpp
|
325d312b0a21569f3c57515aec7d91f3540d3b48
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/google_cloud_cpp_common_unit_tests.bzl
|
joezqren/google-cloud-cpp
|
325d312b0a21569f3c57515aec7d91f3540d3b48
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/google_cloud_cpp_common_unit_tests.bzl
|
joezqren/google-cloud-cpp
|
325d312b0a21569f3c57515aec7d91f3540d3b48
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DO NOT EDIT -- GENERATED BY CMake -- Change the CMakeLists.txt file if needed
"""Automatically generated unit tests list - DO NOT EDIT."""
google_cloud_cpp_common_unit_tests = [
"common_options_test.cc",
"future_generic_test.cc",
"future_generic_then_test.cc",
"future_void_test.cc",
"future_void_then_test.cc",
"iam_bindings_test.cc",
"internal/algorithm_test.cc",
"internal/api_client_header_test.cc",
"internal/backoff_policy_test.cc",
"internal/base64_transforms_test.cc",
"internal/big_endian_test.cc",
"internal/compiler_info_test.cc",
"internal/credentials_impl_test.cc",
"internal/env_test.cc",
"internal/filesystem_test.cc",
"internal/format_time_point_test.cc",
"internal/future_impl_test.cc",
"internal/invoke_result_test.cc",
"internal/log_impl_test.cc",
"internal/pagination_range_test.cc",
"internal/parse_rfc3339_test.cc",
"internal/random_test.cc",
"internal/retry_policy_test.cc",
"internal/status_payload_keys_test.cc",
"internal/strerror_test.cc",
"internal/throw_delegate_test.cc",
"internal/tuple_test.cc",
"internal/type_list_test.cc",
"internal/user_agent_prefix_test.cc",
"internal/utility_test.cc",
"kms_key_name_test.cc",
"log_test.cc",
"options_test.cc",
"polling_policy_test.cc",
"project_test.cc",
"status_or_test.cc",
"status_test.cc",
"stream_range_test.cc",
"terminate_handler_test.cc",
"tracing_options_test.cc",
]
| 34.245902
| 79
| 0.727621
| 297
| 2,089
| 4.845118
| 0.464646
| 0.166782
| 0.233495
| 0.037526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007959
| 0.15797
| 2,089
| 60
| 80
| 34.816667
| 0.810119
| 0.326472
| 0
| 0
| 0
| 0
| 0.726748
| 0.603461
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 3
|
811134f08b2c67534a9093ee9d1a20f045af6b48
| 865
|
py
|
Python
|
socialdistribution/app/templatetags/filters.py
|
CMPUT404-Project-Group/CMPUT404-Group-Project
|
e541cc609f260d7221fe0be8975c5b2444d74af0
|
[
"W3C-20150513"
] | null | null | null |
socialdistribution/app/templatetags/filters.py
|
CMPUT404-Project-Group/CMPUT404-Group-Project
|
e541cc609f260d7221fe0be8975c5b2444d74af0
|
[
"W3C-20150513"
] | 44
|
2021-10-14T15:44:46.000Z
|
2021-12-05T00:57:23.000Z
|
socialdistribution/app/templatetags/filters.py
|
CMPUT404-Project-Group/Social-Distribution-CMPUT404-Group-Project
|
e541cc609f260d7221fe0be8975c5b2444d74af0
|
[
"W3C-20150513"
] | 1
|
2021-12-07T01:14:14.000Z
|
2021-12-07T01:14:14.000Z
|
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import SafeString
import markdown
import urllib
register = template.Library()
@register.filter
def strip_space(value):
return value.replace(' ', '')
@register.filter
@stringfilter
def commonmark(value):
return markdown.Markdown().convert(value)
@register.filter(name="getID")
def get_ID(value):
if not type(value) is str:
return value
return value.split('/')[-1]
@register.filter(name="getNav")
def get_nav(value):
return value.split('/')[-2]
@register.filter(name="encode_url")
def encode_url(value):
return urllib.parse.quote(value)
@register.filter
def get_post_id(url):
"""
gets the post id from the comment page url
"""
return urllib.parse.urlparse(url.get_full_path()).path.rsplit('/', 1)[0]
| 23.378378
| 76
| 0.721387
| 117
| 865
| 5.25641
| 0.418803
| 0.136585
| 0.078049
| 0.068293
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005405
| 0.144509
| 865
| 37
| 76
| 23.378378
| 0.825676
| 0.048555
| 0
| 0.111111
| 0
| 0
| 0.030941
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.185185
| 0.148148
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 3
|
811e73ee0c3fc584081650f0224040703f26ea00
| 386
|
py
|
Python
|
tabular/__init__.py
|
yamins81/tabular
|
1caf091c8c395960a9ad7078f95158b533cc52dd
|
[
"MIT"
] | 6
|
2015-05-24T20:59:31.000Z
|
2021-05-31T14:34:18.000Z
|
tabular/__init__.py
|
yamins81/tabular
|
1caf091c8c395960a9ad7078f95158b533cc52dd
|
[
"MIT"
] | 3
|
2016-06-17T20:02:27.000Z
|
2020-02-13T19:20:40.000Z
|
tabular/__init__.py
|
yamins81/tabular
|
1caf091c8c395960a9ad7078f95158b533cc52dd
|
[
"MIT"
] | 8
|
2015-08-22T17:09:40.000Z
|
2022-02-10T14:47:40.000Z
|
import io
import fast
import spreadsheet
import tab
import utils
import web
from io import *
from fast import *
from spreadsheet import *
from tab import *
from utils import *
from web import *
__all__ = []
__all__.extend(io.__all__)
__all__.extend(fast.__all__)
__all__.extend(spreadsheet.__all__)
__all__.extend(tab.__all__)
__all__.extend(utils.__all__)
__all__.extend(web.__all__)
| 18.380952
| 35
| 0.795337
| 55
| 386
| 4.636364
| 0.181818
| 0.141176
| 0.282353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119171
| 386
| 21
| 36
| 18.380952
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.631579
| 0
| 0.631579
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 3
|
812594dced1920626bd6e5484a03e5c3aa5dda9e
| 1,943
|
py
|
Python
|
ServerSide/models.py
|
Coullence/DRF_Percels-Couriers_API_V.0.0.2
|
906786115861b316f8ecf023c8af82f2dacff68e
|
[
"MIT"
] | null | null | null |
ServerSide/models.py
|
Coullence/DRF_Percels-Couriers_API_V.0.0.2
|
906786115861b316f8ecf023c8af82f2dacff68e
|
[
"MIT"
] | null | null | null |
ServerSide/models.py
|
Coullence/DRF_Percels-Couriers_API_V.0.0.2
|
906786115861b316f8ecf023c8af82f2dacff68e
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
# Station
class Stations(models.Model):
stationName = models.CharField(max_length=100)
stationLocation = models.CharField(max_length=100)
stationStaffId = models.CharField(max_length=100)
date = models.DateTimeField(auto_now_add=True)
def __str_(self):
return self.stationName
# Customers
class Customers(models.Model):
customerName = models.CharField(max_length=100)
customerPhone = models.CharField(max_length=100)
customerId = models.CharField(max_length=100)
customerStartLoc = models.CharField(max_length=100)
customerDestinationLoc = models.CharField(max_length=100)
stationStaffId = models.CharField(max_length=100)
date = models.DateTimeField(auto_now_add=True)
def __str_(self):
return self.customerName
# Items
class Items(models.Model):
itemName = models.CharField(max_length=100)
itemType = models.CharField(max_length=100)
Quantity = models.CharField(max_length=100)
originStation = models.CharField(max_length=100)
originCounty = models.CharField(max_length=100)
receiverName = models.CharField(max_length=100)
receiverPhone = models.CharField(max_length=100)
destinationAddress = models.CharField(max_length=100)
destinationCounty = models.CharField(max_length=100)
dateSend= models.CharField(max_length=100)
dateExpected = models.CharField(max_length=100)
def __str__(self):
return self.itemName
# Payments
class Payments(models.Model):
customerPhone = models.CharField(max_length=100)
paymentAmount = models.CharField(max_length=100)
paymentMeans = models.EmailField(max_length=100)
code = models.CharField(max_length=100)
date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.customerPhone
| 32.932203
| 66
| 0.716933
| 218
| 1,943
| 6.188073
| 0.238532
| 0.160119
| 0.213491
| 0.409192
| 0.641957
| 0.306894
| 0.247591
| 0.247591
| 0.247591
| 0.247591
| 0
| 0.045948
| 0.193515
| 1,943
| 58
| 67
| 33.5
| 0.814933
| 0.029336
| 0
| 0.275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.025
| 0.1
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 3
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.