def save_intraday(data: pd.DataFrame, ticker: str, dt, typ='TRADE'):
    cur_dt = pd.Timestamp(dt).strftime('%Y-%m-%d')
    logger = logs.get_logger(save_intraday, level='debug')
    info = f'{ticker} / {cur_dt} / {typ}'
    data_file = hist_file(ticker=ticker, dt=dt, typ=typ)
    if not data_file:
        return
    if data.empty:
        logger.warning(f'data is empty for {info} ...')
        return
    exch = const.exch_info(ticker=ticker)
    if exch.empty:
        return

    end_time = pd.Timestamp(
        const.market_timing(ticker=ticker, dt=dt, timing='FINISHED')
    ).tz_localize(exch.tz)
    now = pd.Timestamp('now', tz=exch.tz) - pd.Timedelta('1H')

    if end_time > now:
        logger.debug(f'skip saving because market close ({end_time}) is later than now - 1H ({now}) ...')
        return

    logger.info(f'saving data to {data_file} ...')
    files.create_folder(data_file, is_file=True)
    data.to_parquet(data_file)
Check whether data is done for the day and save

Args:
    data: data
    ticker: ticker
    dt: date
    typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]

Examples:
    >>> os.environ['BBG_ROOT'] = 'xbbg/tests/data'
    >>> sample = pd.read_parquet('xbbg/tests/data/aapl.parq')
    >>> save_intraday(sample, 'AAPL US Equity', '2018-11-02')
    >>> # Invalid exchange
    >>> save_intraday(sample, 'AAPL XX Equity', '2018-11-02')
    >>> # Invalid empty data
    >>> save_intraday(pd.DataFrame(), 'AAPL US Equity', '2018-11-02')
    >>> # Invalid date - too close
    >>> cur_dt = utils.cur_time()
    >>> save_intraday(sample, 'AAPL US Equity', cur_dt)
def fetch_and_filter_tags(self):
    self.all_tags = self.fetcher.get_all_tags()
    self.filtered_tags = self.get_filtered_tags(self.all_tags)
    self.fetch_tags_dates()
Fetch and filter tags, fetch dates and sort them in time order.
def __load_jams_schema():
    schema_file = os.path.join(SCHEMA_DIR, 'jams_schema.json')
    jams_schema = None
    with open(resource_filename(__name__, schema_file), mode='r') as fdesc:
        jams_schema = json.load(fdesc)
    if jams_schema is None:
        raise JamsError('Unable to load JAMS schema')
    return jams_schema
Load the schema file from the package.
def MoveToCenter(self) -> bool:
    if self.IsTopLevel():
        rect = self.BoundingRectangle
        screenWidth, screenHeight = GetScreenSize()
        x, y = (screenWidth - rect.width()) // 2, (screenHeight - rect.height()) // 2
        if x < 0:
            x = 0
        if y < 0:
            y = 0
        return SetWindowPos(self.NativeWindowHandle, SWP.HWND_Top, x, y, 0, 0, SWP.SWP_NoSize)
    return False
Move window to screen center.
def delete_tenant_quota(context, tenant_id):
    tenant_quotas = context.session.query(Quota)
    tenant_quotas = tenant_quotas.filter_by(tenant_id=tenant_id)
    tenant_quotas.delete()
Delete the quota entries for a given tenant_id. After deletion, this tenant will use default quota values in conf.
def transform(self, mode=None):
    if mode:
        self._canvas.mode = mode
    return self._canvas.mode
Set the current transform mode. :param mode: CENTER or CORNER
def make_clean_figure(figsize, remove_tooltips=False, remove_keybindings=False):
    tooltip = mpl.rcParams['toolbar']
    if remove_tooltips:
        mpl.rcParams['toolbar'] = 'None'
    fig = pl.figure(figsize=figsize)
    mpl.rcParams['toolbar'] = tooltip
    if remove_keybindings:
        fig.canvas.mpl_disconnect(fig.canvas.manager.key_press_handler_id)
    return fig
Makes a `matplotlib.pyplot.Figure` without tooltips or keybindings

Parameters
----------
figsize : tuple
    Figsize as passed to `matplotlib.pyplot.figure`
remove_tooltips, remove_keybindings : bool
    Set to True to remove the tooltips bar or any key bindings,
    respectively. Default is False

Returns
-------
fig : `matplotlib.pyplot.Figure`
def _get_valid_formats():
    if NO_SOX:
        return []

    so = subprocess.check_output(['sox', '-h'])
    if type(so) is not str:
        so = str(so, encoding='UTF-8')
    so = so.split('\n')
    idx = [i for i in range(len(so)) if 'AUDIO FILE FORMATS:' in so[i]][0]
    formats = so[idx].split(' ')[3:]
    return formats
Calls SoX help for a list of audio formats available with the current install of SoX.

Returns
-------
formats : list
    List of audio file extensions that SoX can process.
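A brief usage sketch, assuming SoX is installed and on the PATH (so `NO_SOX` is falsy):

# Hypothetical usage: check which formats the local SoX build supports.
formats = _get_valid_formats()
if 'wav' in formats:
    print('This SoX build can read and write WAV files')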
def contains(self, clr):
    if not isinstance(clr, Color):
        return False
    if not isinstance(clr, _list):
        clr = [clr]
    for clr in clr:
        if clr.is_grey and not self.grayscale:
            return (self.black.contains(clr) or
                    self.white.contains(clr))
        for r, v in [(self.h, clr.h), (self.s, clr.s), (self.b, clr.brightness), (self.a, clr.a)]:
            if isinstance(r, _list):
                pass
            elif isinstance(r, tuple):
                r = [r]
            else:
                r = [(r, r)]
            for min, max in r:
                if not (min <= v <= max):
                    return False
    return True
Returns True if the given color is part of this color range. Check whether each h, s, b, a component of the color falls within the defined range for that component. If the given color is grayscale, checks against the definitions for black and white.
def append(self, item):
    if self.meta_type == 'dict':
        raise AssertionError('Cannot append to object of `dict` base type!')
    if self.meta_type == 'list':
        self._list.append(item)
    return
Append to object, if object is list.
def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
    line = clean_lines.elided[linenum]
    match = Match(r'^(.*[^ ({>]){', line)
    if match:
        leading_text = match.group(1)
        (endline, endlinenum, endpos) = CloseExpression(
            clean_lines, linenum, len(match.group(1)))
        trailing_text = ''
        if endpos > -1:
            trailing_text = endline[endpos:]
        for offset in xrange(endlinenum + 1,
                             min(endlinenum + 3, clean_lines.NumLines() - 1)):
            trailing_text += clean_lines.elided[offset]
        if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text)
                and not _IsType(clean_lines, nesting_state, leading_text)):
            error(filename, linenum, 'whitespace/braces', 5,
                  'Missing space before {')

    if Search(r'}else', line):
        error(filename, linenum, 'whitespace/braces', 5,
              'Missing space before else')

    if Search(r':\s*;\s*$', line):
        error(filename, linenum, 'whitespace/semicolon', 5,
              'Semicolon defining empty statement. Use {} instead.')
    elif Search(r'^\s*;\s*$', line):
        error(filename, linenum, 'whitespace/semicolon', 5,
              'Line contains only semicolon. If this should be an empty statement, '
              'use {} instead.')
    elif (Search(r'\s+;\s*$', line) and not Search(r'\bfor\b', line)):
        error(filename, linenum, 'whitespace/semicolon', 5,
              'Extra space before last semicolon. If this should be an empty '
              'statement, use {} instead.')
Checks for horizontal spacing near braces and semicolons.

Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
        the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
def as_unicode(self):
    result = self.domain
    if self.local:
        result = self.local + u'@' + result
    if self.resource:
        result = result + u'/' + self.resource
    if not JID.cache.has_key(result):
        JID.cache[result] = self
    return result
Unicode string JID representation. :return: JID as Unicode string.
def _insert_metadata(cursor, model, publisher, message): params = model.metadata.copy() params['publisher'] = publisher params['publication_message'] = message params['_portal_type'] = _model_to_portaltype(model) params['summary'] = str(cnxepub.DocumentSummaryFormatter(model)) for person_field in ATTRIBUTED_ROLE_KEYS: params[person_field] = [parse_user_uri(x['id']) for x in params.get(person_field, [])] params['parent_ident_hash'] = parse_parent_ident_hash(model) if model.ident_hash is not None: uuid, version = split_ident_hash(model.ident_hash, split_version=True) params['_uuid'] = uuid params['_major_version'], params['_minor_version'] = version cursor.execute("SELECT moduleid FROM latest_modules WHERE uuid = %s", (uuid,)) try: moduleid = cursor.fetchone()[0] except TypeError: moduleid = None params['_moduleid'] = moduleid cursor.execute("SELECT * from document_controls where uuid = %s", (uuid,)) try: cursor.fetchone()[0] except TypeError: cursor.execute("INSERT INTO document_controls (uuid) VALUES (%s)", (uuid,)) created = model.metadata.get('created', None) stmt = MODULE_INSERTION_TEMPLATE.format(**{ '__uuid__': "%(_uuid)s::uuid", '__major_version__': "%(_major_version)s", '__minor_version__': "%(_minor_version)s", '__moduleid__': moduleid is None and "DEFAULT" or "%(_moduleid)s", '__created__': created is None and "DEFAULT" or "%(created)s", }) else: created = model.metadata.get('created', None) stmt = MODULE_INSERTION_TEMPLATE.format(**{ '__uuid__': "DEFAULT", '__major_version__': "DEFAULT", '__minor_version__': "DEFAULT", '__moduleid__': "DEFAULT", '__created__': created is None and "DEFAULT" or "%(created)s", }) cursor.execute(stmt, params) module_ident, ident_hash = cursor.fetchone() _insert_optional_roles(cursor, model, module_ident) return module_ident, ident_hash
Insert a module with the given ``metadata``.
def _validate_search_query(self, returning_query):
    start_index = returning_query.from_index or 0
    size = returning_query.size or 0

    if start_index < 0:
        raise CitrinationClientError(
            "start_index cannot be negative. Please enter a value greater than or equal to zero")

    if size < 0:
        raise CitrinationClientError(
            "Size cannot be negative. Please enter a value greater than or equal to zero")

    if start_index + size > MAX_QUERY_DEPTH:
        raise CitrinationClientError(
            "Citrination does not support pagination past the {0}th result. Please reduce either "
            "the from_index and/or size such that their sum is below {0}".format(MAX_QUERY_DEPTH))
Checks to see that the query will not exceed the max query depth :param returning_query: The PIF system or Dataset query to execute. :type returning_query: :class:`PifSystemReturningQuery` or :class: `DatasetReturningQuery`
def ControlFromPoint2(x: int, y: int) -> Control:
    return Control.CreateControlFromElement(
        _AutomationClient.instance().IUIAutomation.ElementFromHandle(WindowFromPoint(x, y)))
Get a native handle from point x,y and call IUIAutomation.ElementFromHandle. Return `Control` subclass.
def fmt(lbaf=3):
    if env():
        cij.err("cij.nvme.exists: Invalid NVMe ENV.")
        return 1

    nvme = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)
    cmd = ["nvme", "format", nvme["DEV_PATH"], "-l", str(lbaf)]
    rcode, _, _ = cij.ssh.command(cmd, shell=True)
    return rcode
Do format for NVMe device
def toBigInt(byteArray):
    array = byteArray[::-1]
    out = 0
    for key, value in enumerate(array):
        decoded = struct.unpack("B", bytes([value]))[0]
        out = out | decoded << key * 8
    return out
Convert the byte array to a BigInteger
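A brief usage sketch: the input is treated as big-endian, so the last byte is least significant.

import struct  # used by toBigInt above

assert toBigInt(b"\x01\x00") == 256          # 0x0100
assert toBigInt(b"\x12\x34\x56") == 0x123456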
def serialize_groups(self, groups):
    rules = []
    for group in groups:
        rules.extend(self.serialize_rules(group.rules))
    return rules
Creates a payload for the redis server

The rule schema is the following:

REDIS KEY - port_device_id.port_mac_address/sg
REDIS VALUE - A JSON dump of the following:

port_mac_address must be lower-cased and stripped of non-alphanumeric characters

{"id": "<arbitrary uuid>",
 "rules": [
   {"ethertype": <hexadecimal integer>,
    "protocol": <integer>,
    "port start": <integer>,  # optional
    "port end": <integer>,    # optional
    "icmp type": <integer>,   # optional
    "icmp code": <integer>,   # optional
    "source network": <string>,
    "destination network": <string>,
    "action": <string>,
    "direction": <string>},
 ],
 "security groups ack": <boolean>
}

Example:
{"id": "004c6369-9f3d-4d33-b8f5-9416bf3567dd",
 "rules": [
   {"ethertype": 0x800,
    "protocol": "tcp",
    "port start": 1000,
    "port end": 1999,
    "source network": "10.10.10.0/24",
    "destination network": "",
    "action": "allow",
    "direction": "ingress"},
 ],
 "security groups ack": "true"
}

port start/end and icmp type/code are mutually exclusive pairs.
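A hedged sketch of how a payload with this schema might be written to Redis, assuming the redis-py client; the device id and MAC address below are illustrative:

import json
import redis

client = redis.Redis()
device_id = "004c6369-9f3d-4d33-b8f5-9416bf3567dd"   # hypothetical port device id
mac = "aabbccddeeff"                                 # lower-cased, non-alphanumerics stripped
payload = {
    "id": device_id,
    "rules": [],                 # e.g. the output of serialize_groups()
    "security groups ack": True,
}
client.set("{0}.{1}/sg".format(device_id, mac), json.dumps(payload))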
def custom_showtraceback(
        self,
        exc_tuple=None,
        filename=None,
        tb_offset=None,
        exception_only=False,
        running_compiled_code=False,
):
    self.default_showtraceback(
        exc_tuple,
        filename,
        tb_offset,
        exception_only=True,
        running_compiled_code=running_compiled_code,
    )
Custom showtraceback for monkey-patching IPython's InteractiveShell https://stackoverflow.com/questions/1261668/cannot-override-sys-excepthook
def GetTopLevelControl(self) -> 'Control': handle = self.NativeWindowHandle if handle: topHandle = GetAncestor(handle, GAFlag.Root) if topHandle: if topHandle == handle: return self else: return ControlFromHandle(topHandle) else: pass else: control = self while True: control = control.GetParentControl() handle = control.NativeWindowHandle if handle: topHandle = GetAncestor(handle, GAFlag.Root) return ControlFromHandle(topHandle)
Get the top level control which current control lays. If current control is top level, return self. If current control is root control, return None. Return `PaneControl` or `WindowControl` or None.
def handle_package_has_file_helper(self, pkg_file):
    nodes = list(self.graph.triples((None, self.spdx_namespace.fileName, Literal(pkg_file.name))))
    if len(nodes) == 1:
        return nodes[0][0]
    else:
        raise InvalidDocumentError('handle_package_has_file_helper could not' +
                                   ' find file node for file: {0}'.format(pkg_file.name))
Return node representing pkg_file pkg_file should be instance of spdx.file.
def get_wildcard(self):
    return _convert(self._ip, notation=NM_WILDCARD,
                    inotation=IP_DOT, _check=False, _isnm=self._isnm)
Return the wildcard bits notation of the netmask.
def upload_backend(index='dev', user=None):
    get_vars()
    use_devpi(index=index)
    with fab.lcd('../application'):
        fab.local('make upload')
Build the backend and upload it to the remote server at the given index
def refresh(self, token, timeout):
    assert token in self._dict, "Lock must exist"
    assert timeout == -1 or timeout > 0
    if timeout < 0 or timeout > LockStorageDict.LOCK_TIME_OUT_MAX:
        timeout = LockStorageDict.LOCK_TIME_OUT_MAX

    self._lock.acquire_write()
    try:
        lock = self._dict[token]
        lock["timeout"] = timeout
        lock["expire"] = time.time() + timeout
        self._dict[token] = lock
        self._flush()
    finally:
        self._lock.release()

    return lock
Modify an existing lock's timeout. token: Valid lock token. timeout: Suggested lifetime in seconds (-1 for infinite). The real expiration time may be shorter than requested! Returns: Lock dictionary. Raises ValueError, if token is invalid.
def analyze(fname=False, save=True, show=None):
    if fname and os.path.exists(fname.replace(".abf", ".rst")):
        print("SKIPPING DUE TO RST FILE")
        return
    swhlab.plotting.core.IMAGE_SAVE = save
    if show is None:
        if cm.isIpython():
            swhlab.plotting.core.IMAGE_SHOW = True
        else:
            swhlab.plotting.core.IMAGE_SHOW = False
    abf = ABF(fname)
    print(">>>>> PROTOCOL >>>>>", abf.protocomment)
    runFunction = "proto_unknown"
    if "proto_" + abf.protocomment in globals():
        runFunction = "proto_" + abf.protocomment
    abf.log.debug("running %s()" % (runFunction))
    plt.close('all')  # get ready for new figures
    try:
        globals()[runFunction](abf)  # run the protocol-specific function
    except:
        abf.log.error("EXCEPTION DURING PROTOCOL FUNCTION")
        abf.log.error(sys.exc_info()[0])
        return "ERROR"
    plt.close('all')  # clean up
    return "SUCCESS"
given a filename or ABF object, try to analyze it.
def from_entity(entity, self_user_id):
    user_id = UserID(chat_id=entity.id.chat_id,
                     gaia_id=entity.id.gaia_id)
    return User(user_id, entity.properties.display_name,
                entity.properties.first_name,
                entity.properties.photo_url,
                entity.properties.email,
                (self_user_id == user_id) or (self_user_id is None))
Construct user from ``Entity`` message. Args: entity: ``Entity`` message. self_user_id (~hangups.user.UserID or None): The ID of the current user. If ``None``, assume ``entity`` is the current user. Returns: :class:`~hangups.user.User` object.
def write_sbml_model(cobra_model, filename, f_replace=F_REPLACE, **kwargs):
    doc = _model_to_sbml(cobra_model, f_replace=f_replace, **kwargs)
    if isinstance(filename, string_types):
        libsbml.writeSBMLToFile(doc, filename)
    elif hasattr(filename, "write"):
        sbml_str = libsbml.writeSBMLToString(doc)
        filename.write(sbml_str)
Writes cobra model to filename.

The created model is SBML level 3 version 1 (L3V1) with the fbc package v2 (fbc-v2).

If the given filename ends with the suffix ".gz" (for example, "myfile.xml.gz"),
libSBML assumes the caller wants the file to be written compressed in gzip format.
Similarly, if the given filename ends with ".zip" or ".bz2", libSBML assumes the
caller wants the file to be compressed in zip or bzip2 format (respectively).
Files whose names lack these suffixes will be written uncompressed.

Special considerations for the zip format: If the given filename ends with ".zip",
the file placed in the zip archive will have the suffix ".xml" or ".sbml". For
example, the file in the zip archive will be named "test.xml" if the given filename
is "test.xml.zip" or "test.zip". Similarly, the filename in the archive will be
"test.sbml" if the given filename is "test.sbml.zip".

Parameters
----------
cobra_model : cobra.core.Model
    Model instance which is written to SBML
filename : string
    path to which the model is written
use_fbc_package : boolean {True, False}
    should the fbc package be used
f_replace : dict
    dict of replacement functions for id replacement
def get_filtered_normalized_events(self): user_image = google_v2_operations.get_action_image(self._op, _ACTION_USER_COMMAND) need_ok = google_v2_operations.is_success(self._op) events = {} for event in google_v2_operations.get_events(self._op): if self._filter(event): continue mapped, match = self._map(event) name = mapped['name'] if name == 'ok': if not need_ok or 'ok' in events: continue if name == 'pulling-image': if match.group(1) != user_image: continue events[name] = mapped return sorted(events.values(), key=operator.itemgetter('start-time'))
Filter the granular v2 events down to events of interest. Filter through the large number of granular events returned by the pipelines API, and extract only those that are interesting to a user. This is implemented by filtering out events which are known to be uninteresting (i.e. the default actions run for every job) and by explicitly matching specific events which are interesting and mapping those to v1 style naming. Events which are not whitelisted or blacklisted will still be output, meaning any events which are added in the future won't be masked. We don't want to suppress display of events that we don't recognize. They may be important. Returns: A list of maps containing the normalized, filtered events.
def platform_config_dir():
    if LINUX:
        dpath_ = os.environ.get('XDG_CONFIG_HOME', '~/.config')
    elif DARWIN:
        dpath_ = '~/Library/Application Support'
    elif WIN32:
        dpath_ = os.environ.get('APPDATA', '~/AppData/Roaming')
    else:
        raise NotImplementedError('Unknown Platform %r' % (sys.platform,))
    dpath = normpath(expanduser(dpath_))
    return dpath
Returns a directory which should be writable for any application

This should be used for persistent configuration files.

Returns:
    PathLike : path to the config dir used by the current operating system
def _GetNativeEolStyle(platform=sys.platform):
    _NATIVE_EOL_STYLE_MAP = {
        'win32': EOL_STYLE_WINDOWS,
        'linux2': EOL_STYLE_UNIX,
        'linux': EOL_STYLE_UNIX,
        'darwin': EOL_STYLE_MAC,
    }
    result = _NATIVE_EOL_STYLE_MAP.get(platform)

    if result is None:
        from ._exceptions import UnknownPlatformError
        raise UnknownPlatformError(platform)

    return result
Internal function that determines EOL_STYLE_NATIVE constant with the proper value for the current platform.
def get_socket(self, sessid=''):
    socket = self.sockets.get(sessid)

    if sessid and not socket:
        return None

    if socket is None:
        socket = Socket(self, self.config)
        self.sockets[socket.sessid] = socket
    else:
        socket.incr_hits()

    return socket
Return an existing or new client Socket.
def item(proto_dataset_uri, input_file, relpath_in_dataset):
    proto_dataset = dtoolcore.ProtoDataSet.from_uri(
        proto_dataset_uri, config_path=CONFIG_PATH)
    if relpath_in_dataset == "":
        relpath_in_dataset = os.path.basename(input_file)
    proto_dataset.put_item(input_file, relpath_in_dataset)
Add a file to the proto dataset.
def _in_gce_environment():
    if SETTINGS.env_name is not None:
        return SETTINGS.env_name == 'GCE_PRODUCTION'

    if NO_GCE_CHECK != 'True' and _detect_gce_environment():
        SETTINGS.env_name = 'GCE_PRODUCTION'
        return True
    return False
Detect if the code is running in the Compute Engine environment. Returns: True if running in the GCE environment, False otherwise.
def index(self, i, length=None):
    if self.begin <= i <= self.end:
        index = i - self.BEGIN - self.offset
        if length is None:
            length = self.full_range()
        else:
            length = min(length, self.full_range())
        if 0 <= index < length:
            return index
Return an integer index or None
def clippedObj(obj, maxElementSize=64):
    if hasattr(obj, '_asdict'):
        obj = obj._asdict()

    if isinstance(obj, dict):
        objOut = dict()
        for key, val in obj.iteritems():
            objOut[key] = clippedObj(val)
    elif hasattr(obj, '__iter__'):
        objOut = []
        for val in obj:
            objOut.append(clippedObj(val))
    else:
        objOut = str(obj)
        if len(objOut) > maxElementSize:
            objOut = objOut[0:maxElementSize] + '...'

    return objOut
Return a clipped version of obj suitable for printing. This is useful when generating log messages by printing data structures, but you don't want the message to be too long. If passed in a dict, list, or namedtuple, each element of the structure's string representation will be limited to 'maxElementSize' characters. This will return a new object where the string representation of each element has been truncated to fit within maxElementSize.
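A brief usage sketch (Python 2, to match the `iteritems` call above); dict ordering in the printed result may vary:

value = {'name': 'x' * 100, 'count': 3}
print(clippedObj(value, maxElementSize=16))
# {'count': '3', 'name': 'xxxxxxxxxxxxxxxx...'}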
def sorted(list, cmp=None, reversed=False):
    list = [x for x in list]
    list.sort(cmp)
    if reversed:
        list.reverse()
    return list
Returns a sorted copy of the list.
def reposition(self, frame_no):
    for label, j in self.channels.items():
        body = self.bodies[label]
        body.position = self.positions[frame_no, j]
        body.linear_velocity = self.velocities[frame_no, j]
Reposition markers to a specific frame of data. Parameters ---------- frame_no : int The frame of data where we should reposition marker bodies. Markers will be positioned in the appropriate places in world coordinates. In addition, linear velocities of the markers will be set according to the data as long as there are no dropouts in neighboring frames.
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1,
                  copy=True, raise_if_out_of_image=False, thickness=None):
    image = np.copy(image) if copy else image

    for bb in self.bounding_boxes:
        image = bb.draw_on_image(
            image,
            color=color,
            alpha=alpha,
            size=size,
            copy=False,
            raise_if_out_of_image=raise_if_out_of_image,
            thickness=thickness
        )

    return image
Draw all bounding boxes onto a given image. Parameters ---------- image : (H,W,3) ndarray The image onto which to draw the bounding boxes. This image should usually have the same shape as set in BoundingBoxesOnImage.shape. color : int or list of int or tuple of int or (3,) ndarray, optional The RGB color of all bounding boxes. If a single int ``C``, then that is equivalent to ``(C,C,C)``. alpha : float, optional Alpha/transparency of the bounding box. size : int, optional Thickness in pixels. copy : bool, optional Whether to copy the image before drawing the bounding boxes. raise_if_out_of_image : bool, optional Whether to raise an exception if any bounding box is outside of the image. thickness : None or int, optional Deprecated. Returns ------- image : (H,W,3) ndarray Image with drawn bounding boxes.
def list(self, key_name=None, max_suggestions=100, cutoff=0.5, locked_only=False, key_type=None): self._assert_valid_stash() key_list = [k for k in self._storage.list() if k['name'] != 'stored_passphrase' and (k.get('lock') if locked_only else True)] if key_type: types = ('secret', None) if key_type == 'secret' else [key_type] key_list = [k for k in key_list if k.get('type') in types] key_list = [k['name'] for k in key_list] if key_name: if key_name.startswith('~'): key_list = difflib.get_close_matches( key_name.lstrip('~'), key_list, max_suggestions, cutoff) else: key_list = [k for k in key_list if key_name in k] audit( storage=self._storage.db_path, action='LIST' + ('[LOCKED]' if locked_only else ''), message=json.dumps(dict())) return key_list
Return a list of all keys.
def refresh_maps(self):
    for robot in self.robots:
        resp2 = (
            requests.get(urljoin(self.ENDPOINT, 'users/me/robots/{}/maps'.format(robot.serial)),
                         headers=self._headers))
        resp2.raise_for_status()
        self._maps.update({robot.serial: resp2.json()})
Get information about maps of the robots. :return:
def lint_file(in_file, out_file=None):
    for line in in_file:
        print(line.strip(), file=out_file)
Helps remove extraneous whitespace from the lines of a file :param file in_file: A readable file or file-like :param file out_file: A writable file or file-like
def codestr2rst(codestr, lang='python'):
    code_directive = "\n.. code-block:: {0}\n\n".format(lang)
    indented_block = indent(codestr, ' ' * 4)
    return code_directive + indented_block
Return reStructuredText code block from code string
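A brief usage sketch, assuming the `indent` helper prefixes every line with the given string:

print(codestr2rst("print('hello')"))
# (blank line)
# .. code-block:: python
# (blank line)
#     print('hello')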
def send(self, command, timeout=5):
    logger.info(u'Sending %s' % command)
    _, writable, __ = select.select([], [self.sock], [], timeout)
    if not writable:
        raise SendTimeoutError()
    writable[0].sendall(command + '\n')
Send a command to the server :param string command: command to send
async def get_entity_by_id(self, get_entity_by_id_request):
    response = hangouts_pb2.GetEntityByIdResponse()
    await self._pb_request('contacts/getentitybyid',
                           get_entity_by_id_request, response)
    return response
Return one or more user entities. Searching by phone number only finds entities when their phone number is in your contacts (and not always even then), and can't be used to find Google Voice contacts.
def next(self, data):
    self.__length += 1
    result = self.__kdf.calculate(self.__key, data, 64)
    self.__key = result[:32]
    return result[32:]
Derive a new set of internal and output data from given input data and the data stored internally. Use the key derivation function to derive new data. The kdf gets supplied with the current key and the data passed to this method. :param data: A bytes-like object encoding the data to pass to the key derivation function. :returns: A bytes-like object encoding the output material.
def auth_finish(self, _unused):
    self.lock.acquire()
    try:
        self.__logger.debug("Authenticated")
        self.authenticated = True
        self.state_change("authorized", self.my_jid)
        self._post_auth()
    finally:
        self.lock.release()
Handle success of the legacy authentication.
def convert_gemm(params, w_name, scope_name, inputs, layers, weights, names): print('Converting Linear ...') if names == 'short': tf_name = 'FC' + random_string(6) elif names == 'keep': tf_name = w_name else: tf_name = w_name + str(random.random()) bias_name = '{0}.bias'.format(w_name) weights_name = '{0}.weight'.format(w_name) W = weights[weights_name].numpy().transpose() input_channels, output_channels = W.shape keras_weights = [W] has_bias = False if bias_name in weights: bias = weights[bias_name].numpy() keras_weights = [W, bias] has_bias = True dense = keras.layers.Dense( output_channels, weights=keras_weights, use_bias=has_bias, name=tf_name, bias_initializer='zeros', kernel_initializer='zeros', ) layers[scope_name] = dense(layers[inputs[0]])
Convert Linear. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
def find_duplicates(items, k=2, key=None):
    duplicates = defaultdict(list)
    if key is None:
        for count, item in enumerate(items):
            duplicates[item].append(count)
    else:
        for count, item in enumerate(items):
            duplicates[key(item)].append(count)
    for key in list(duplicates.keys()):
        if len(duplicates[key]) < k:
            del duplicates[key]
    duplicates = dict(duplicates)
    return duplicates
Find all duplicate items in a list. Search for all items that appear more than `k` times and return a mapping from each (k)-duplicate item to the positions it appeared in. Args: items (Iterable): hashable items possibly containing duplicates k (int): only return items that appear at least `k` times (default=2) key (Callable, optional): Returns indices where `key(items[i])` maps to a particular value at least k times. Returns: dict: maps each duplicate item to the indices at which it appears CommandLine: python -m ubelt.util_dict find_duplicates Example: >>> import ubelt as ub >>> items = [0, 0, 1, 2, 3, 3, 0, 12, 2, 9] >>> duplicates = ub.find_duplicates(items) >>> print('items = %r' % (items,)) >>> print('duplicates = %r' % (duplicates,)) >>> assert duplicates == {0: [0, 1, 6], 2: [3, 8], 3: [4, 5]} >>> assert ub.find_duplicates(items, 3) == {0: [0, 1, 6]} Example: >>> import ubelt as ub >>> items = [0, 0, 1, 2, 3, 3, 0, 12, 2, 9] >>> # note: k can be 0 >>> duplicates = ub.find_duplicates(items, k=0) >>> print(ub.repr2(duplicates, nl=0)) {0: [0, 1, 6], 1: [2], 2: [3, 8], 3: [4, 5], 9: [9], 12: [7]} Example: >>> import ubelt as ub >>> items = [10, 11, 12, 13, 14, 15, 16] >>> duplicates = ub.find_duplicates(items, key=lambda x: x // 2) >>> print(ub.repr2(duplicates, nl=0)) {5: [0, 1], 6: [2, 3], 7: [4, 5]}
def parse_line(line, document=None): result = re.match(line_pattern, line) if result: _, lineno, offset, severity, msg = result.groups() lineno = int(lineno or 1) offset = int(offset or 0) errno = 2 if severity == 'error': errno = 1 diag = { 'source': 'mypy', 'range': { 'start': {'line': lineno - 1, 'character': offset}, 'end': {'line': lineno - 1, 'character': offset + 1} }, 'message': msg, 'severity': errno } if document: word = document.word_at_position(diag['range']['start']) if word: diag['range']['end']['character'] = ( diag['range']['start']['character'] + len(word)) return diag
Return a language-server diagnostic from a line of the Mypy error report; optionally, use the whole document to provide more context on it.
def detail(self, *args, **kwargs): prefix = kwargs.pop("prefix", default_prefix) kwargs["votes"] = list(set(kwargs["votes"])) return OrderedDict( [ ("memo_key", PublicKey(kwargs["memo_key"], prefix=prefix)), ("voting_account", ObjectId(kwargs["voting_account"], "account")), ("num_witness", Uint16(kwargs["num_witness"])), ("num_committee", Uint16(kwargs["num_committee"])), ("votes", Array([VoteId(o) for o in kwargs["votes"]])), ("extensions", Set([])), ] )
This is an example how to sort votes prior to using them in the Object
def update_collaboration(self):
    for field in record_get_field_instances(self.record, '710'):
        subs = field_get_subfield_instances(field)
        for idx, (key, value) in enumerate(subs[:]):
            if key == '5':
                subs.pop(idx)
            elif value.startswith('CERN. Geneva'):
                subs.pop(idx)
        if len(subs) == 0:
            record_delete_field(self.record, tag='710', field_position_global=field[4])
710 Collaboration.
def _get_result_msg_and_payload(result, stream):
    groups = _GDB_MI_RESULT_RE.match(result).groups()
    token = int(groups[0]) if groups[0] != "" else None
    message = groups[1]

    if groups[2] is None:
        payload = None
    else:
        stream.advance_past_chars([","])
        payload = _parse_dict(stream)

    return token, message, payload
Get result message and payload dict
def login(email=None, password=None, api_key=None, application='Default', url=None, verify_ssl_certificate=True): try: input_ = raw_input except NameError: input_ = input if url is None: url = input_('Server URL: ') url = url.rstrip('/') if session.communicator is None: session.communicator = Communicator(url) else: session.communicator.url = url session.communicator.verify_ssl_certificate = verify_ssl_certificate if email is None: email = input_('Email: ') session.email = email if api_key is None: if password is None: password = getpass.getpass() session.api_key = session.communicator.get_default_api_key( session.email, password) session.application = 'Default' else: session.api_key = api_key session.application = application return renew_token()
Do the legwork of logging into the Midas Server instance, storing the API key and token. :param email: (optional) Email address to login with. If not set, the console will be prompted. :type email: None | string :param password: (optional) User password to login with. If not set and no 'api_key' is set, the console will be prompted. :type password: None | string :param api_key: (optional) API key to login with. If not set, password login with be used. :type api_key: None | string :param application: (optional) Application name to be used with 'api_key'. :type application: string :param url: (optional) URL address of the Midas Server instance to login to. If not set, the console will be prompted. :type url: None | string :param verify_ssl_certificate: (optional) If True, the SSL certificate will be verified :type verify_ssl_certificate: bool :returns: API token. :rtype: string
def delete(self, wait=True):
    resp = self.parent.delete(self.id)
    if wait:
        self.wait()
    return resp
Delete this droplet Parameters ---------- wait: bool, default True Whether to block until the pending action is completed
def create(self, list_id, data):
    return self._mc_client._post(url=self._build_path(list_id, 'segments'), data=data)
adds a new segment to the list.
def upgrade(safe=True):
    manager = MANAGER
    if safe:
        cmd = 'upgrade'
    else:
        cmd = 'dist-upgrade'
    run_as_root("%(manager)s --assume-yes %(cmd)s" % locals(), pty=False)
Upgrade all packages.
def http_exception_error_handler(exception):
    assert issubclass(type(exception), HTTPException), type(exception)
    assert hasattr(exception, "code")
    assert hasattr(exception, "description")
    return response(exception.code, exception.description)
Handle HTTP exception :param werkzeug.exceptions.HTTPException exception: Raised exception A response is returned, as formatted by the :py:func:`response` function.
def id_to_word(self, word_id):
    if word_id >= len(self.reverse_vocab):
        return self.reverse_vocab[self.unk_id]
    else:
        return self.reverse_vocab[word_id]
Returns the word string of an integer word id.
def new_knitting_pattern_set_loader(specification=DefaultSpecification()):
    parser = specification.new_parser(specification)
    loader = specification.new_loader(parser.knitting_pattern_set)
    return loader
Create a loader for a knitting pattern set. :param specification: a :class:`specification <knittingpattern.ParsingSpecification.ParsingSpecification>` for the knitting pattern set, default :class:`DefaultSpecification`
def currency(self):
    try:
        current_subscription = self.info["viewer"]["home"]["currentSubscription"]
        return current_subscription["priceInfo"]["current"]["currency"]
    except (KeyError, TypeError, IndexError):
        _LOGGER.error("Could not find currency.")
    return ""
Return the currency.
def run(self, cmd, *args):
    if self.manager is None:
        raise Exception("Fatal internal error: Missing repository manager")
    if cmd not in dir(self.manager):
        raise Exception("Fatal internal error: Invalid command {} being run".format(cmd))
    func = getattr(self.manager, cmd)
    repo = self
    return func(repo, *args)
Run a specific command using the manager
def copy_file(src, dest):
    dir_path = os.path.dirname(dest)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    shutil.copy2(src, dest)
Copy file helper method

:type src: str|unicode
:type dest: str|unicode
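A brief usage sketch; the paths are illustrative:

# Copies config/app.yaml to /tmp/backup/app.yaml, creating /tmp/backup first if needed.
copy_file('config/app.yaml', '/tmp/backup/app.yaml')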
def _call_api(self, verb, url, **request_kwargs):
    api = 'https://api.github.com{}'.format(url)
    auth_headers = {'Authorization': 'token {}'.format(self.api_token)}
    headers = {**auth_headers, **request_kwargs.pop('headers', {})}
    return getattr(requests, verb)(api, headers=headers, **request_kwargs)
Perform a github API call Args: verb (str): Can be "post", "put", or "get" url (str): The base URL with a leading slash for Github API (v3) auth (str or HTTPBasicAuth): A Github API token or a HTTPBasicAuth object
def error_message(self, message, fh=None, prefix="[error]:", suffix="..."):
    msg = prefix + message + suffix
    fh = fh or sys.stderr
    if fh is sys.stderr:
        termcolor.cprint(msg, color="red")
    else:
        fh.write(msg)
Print an error-type message. If the file handle is `sys.stderr`, print a colored message.

:param str message: message to print
:param file fh: file handle, default is `sys.stderr`
:param str prefix: message prefix, default is `[error]:`
:param str suffix: message suffix, default is '...'
:return: None
def gather(obj):
    if hasattr(obj, '__distob_gather__'):
        return obj.__distob_gather__()
    elif (isinstance(obj, collections.Sequence) and
          not isinstance(obj, string_types)):
        return [gather(subobj) for subobj in obj]
    else:
        return obj
Retrieve objects that have been distributed, making them local again
def get_chat_id(self, message):
    if message.chat.type == 'private':
        return message.user.id
    return message.chat.id
Telegram chat type can be either "private", "group", "supergroup" or "channel". Return user ID if it is of type "private", chat ID otherwise.
def publish(self, registry=None): if (registry is None) or (registry == registry_access.Registry_Base_URL): if 'private' in self.description and self.description['private']: return "this %s is private and cannot be published" % (self.description_filename.split('.')[0]) upload_archive = os.path.join(self.path, 'upload.tar.gz') fsutils.rmF(upload_archive) fd = os.open(upload_archive, os.O_CREAT | os.O_EXCL | os.O_RDWR | getattr(os, "O_BINARY", 0)) with os.fdopen(fd, 'rb+') as tar_file: tar_file.truncate() self.generateTarball(tar_file) logger.debug('generated tar file of length %s', tar_file.tell()) tar_file.seek(0) shasum = hashlib.sha256() while True: chunk = tar_file.read(1000) if not chunk: break shasum.update(chunk) logger.debug('generated tar file has hash %s', shasum.hexdigest()) tar_file.seek(0) with self.findAndOpenReadme() as readme_file_wrapper: if not readme_file_wrapper: logger.warning("no readme.md file detected") with open(self.getDescriptionFile(), 'r') as description_file: return registry_access.publish( self.getRegistryNamespace(), self.getName(), self.getVersion(), description_file, tar_file, readme_file_wrapper.file, readme_file_wrapper.extension().lower(), registry=registry )
Publish to the appropriate registry, return a description of any errors that occured, or None if successful. No VCS tagging is performed.
def get_resources(self, names=None, stores=None, workspaces=None): stores = self.get_stores( names = stores, workspaces = workspaces ) resources = [] for s in stores: try: resources.extend(s.get_resources()) except FailedRequestError: continue if names is None: names = [] elif isinstance(names, basestring): names = [s.strip() for s in names.split(',') if s.strip()] if resources and names: return ([resource for resource in resources if resource.name in names]) return resources
Resources include feature stores, coverage stores and WMS stores, however does not include layer groups. names, stores and workspaces can be provided as a comma delimited strings or as arrays, and are used for filtering. Will always return an array.
def plot_3(data, ss, *args): if len(data) <= 1: warnings.warn("Only one datapoint. Could not compute t-SNE embedding.") return None scores = np.array([d['mean_test_score'] for d in data]) warped = np.array([ss.point_to_unit(d['parameters']) for d in data]) X = TSNE(n_components=2).fit_transform(warped) e_scores = np.exp(scores) mine, maxe = np.min(e_scores), np.max(e_scores) color = (e_scores - mine) / (maxe - mine) mapped_colors = list(map(rgb2hex, cm.get_cmap('RdBu_r')(color))) p = bk.figure(title='t-SNE (unsupervised)', tools=TOOLS) df_params = nonconstant_parameters(data) df_params['score'] = scores df_params['x'] = X[:, 0] df_params['y'] = X[:, 1] df_params['color'] = mapped_colors df_params['radius'] = 1 p.circle( x='x', y='y', color='color', radius='radius', source=ColumnDataSource(data=df_params), fill_alpha=0.6, line_color=None) cp = p hover = cp.select(dict(type=HoverTool)) format_tt = [(s, '@%s' % s) for s in df_params.columns] hover.tooltips = OrderedDict([("index", "$index")] + format_tt) xax, yax = p.axis xax.axis_label = 't-SNE coord 1' yax.axis_label = 't-SNE coord 2' return p
t-SNE embedding of the parameters, colored by score
def set_concluded_license(self, doc, lic):
    if self.has_package(doc) and self.has_file(doc):
        if not self.file_conc_lics_set:
            self.file_conc_lics_set = True
            if validations.validate_lics_conc(lic):
                self.file(doc).conc_lics = lic
                return True
            else:
                raise SPDXValueError('File::ConcludedLicense')
        else:
            raise CardinalityError('File::ConcludedLicense')
    else:
        raise OrderError('File::ConcludedLicense')
Raises OrderError if no package or file defined. Raises CardinalityError if already set. Raises SPDXValueError if malformed.
def get_argument_starttime(self):
    try:
        starttime = self.get_argument(constants.PARAM_STARTTIME)
        return starttime
    except tornado.web.MissingArgumentError as e:
        raise Exception(e.log_message)
Helper function to get starttime argument. Raises exception if argument is missing. Returns the starttime argument.
def render_to_message(self, extra_context=None, *args, **kwargs):
    message = super(TemplatedHTMLEmailMessageView, self).render_to_message(
        extra_context, *args, **kwargs)

    if extra_context is None:
        extra_context = {}

    context = self.get_context_data(**extra_context)
    content = self.render_html_body(context)
    message.attach_alternative(content, mimetype='text/html')
    return message
Renders and returns an unsent message with the given context. Any extra keyword arguments passed will be passed through as keyword arguments to the message constructor. :param extra_context: Any additional context to use when rendering templated content. :type extra_context: :class:`dict` :returns: A message instance. :rtype: :attr:`.message_class`
def _load_credentials_file(credentials_file): try: credentials_file.seek(0) data = json.load(credentials_file) except Exception: logger.warning( 'Credentials file could not be loaded, will ignore and ' 'overwrite.') return {} if data.get('file_version') != 2: logger.warning( 'Credentials file is not version 2, will ignore and ' 'overwrite.') return {} credentials = {} for key, encoded_credential in iteritems(data.get('credentials', {})): try: credential_json = base64.b64decode(encoded_credential) credential = client.Credentials.new_from_json(credential_json) credentials[key] = credential except: logger.warning( 'Invalid credential {0} in file, ignoring.'.format(key)) return credentials
Load credentials from the given file handle. The file is expected to be in this format: { "file_version": 2, "credentials": { "key": "base64 encoded json representation of credentials." } } This function will warn and return empty credentials instead of raising exceptions. Args: credentials_file: An open file handle. Returns: A dictionary mapping user-defined keys to an instance of :class:`oauth2client.client.Credentials`.
def padnames(names):
    longname_len = max(len(i) for i in names)
    padding = 5
    pnames = [name + " " * (longname_len - len(name) + padding)
              for name in names]
    snppad = "//" + " " * (longname_len - 2 + padding)
    return np.array(pnames), snppad
pads names for loci output
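A brief usage sketch: with the longest name at 7 characters and padding of 5, every padded name (and the `//` spacer) is 12 characters wide:

import numpy as np  # padnames returns a numpy array

pnames, snppad = padnames(["sampleA", "s2"])
# pnames -> array(['sampleA     ', 's2          '], dtype='<U12')
# snppad -> '//' followed by ten spaces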
def stop(self):
    with self.lock:
        for dummy in self.threads:
            self.queue.put(None)
Stop the resolver threads.
def sys_rename(self, oldnamep, newnamep):
    oldname = self.current.read_string(oldnamep)
    newname = self.current.read_string(newnamep)

    ret = 0
    try:
        os.rename(oldname, newname)
    except OSError as e:
        ret = -e.errno

    return ret
Rename filename `oldnamep` to `newnamep`. :param int oldnamep: pointer to oldname :param int newnamep: pointer to newname
def remove_members(self, to_remove):
    if isinstance(to_remove, string_types) or hasattr(to_remove, "id"):
        warn("need to pass in a list")
        to_remove = [to_remove]
    self._members.difference_update(to_remove)
Remove objects from the group. Parameters ---------- to_remove : list A list of cobra objects to remove from the group
def create_input_peptides_files( peptides, max_peptides_per_file=None, group_by_length=False): if group_by_length: peptide_lengths = {len(p) for p in peptides} peptide_groups = {l: [] for l in peptide_lengths} for p in peptides: peptide_groups[len(p)].append(p) else: peptide_groups = {"": peptides} file_names = [] for key, group in peptide_groups.items(): n_peptides = len(group) if not max_peptides_per_file: max_peptides_per_file = n_peptides input_file = None for i, p in enumerate(group): if i % max_peptides_per_file == 0: if input_file is not None: file_names.append(input_file.name) input_file.close() input_file = make_writable_tempfile( prefix_number=i // max_peptides_per_file, prefix_name=key, suffix=".txt") input_file.write("%s\n" % p) if input_file is not None: file_names.append(input_file.name) input_file.close() return file_names
Creates one or more files containing one peptide per line, returns names of files.
def unlink_f(path):
    try:
        os.unlink(path)
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
Unlink path but do not complain if file does not exist.
def draw_lines_heatmap_array(self, image_shape, alpha=1.0, size=1,
                             antialiased=True, raise_if_out_of_image=False):
    assert len(image_shape) == 2 or (
        len(image_shape) == 3 and image_shape[-1] == 1), (
        "Expected (H,W) or (H,W,1) as image_shape, got %s." % (image_shape,))

    arr = self.draw_lines_on_image(
        np.zeros(image_shape, dtype=np.uint8),
        color=255,
        alpha=alpha,
        size=size,
        antialiased=antialiased,
        raise_if_out_of_image=raise_if_out_of_image
    )
    return arr.astype(np.float32) / 255.0
Draw the line segments of the line string as a heatmap array. Parameters ---------- image_shape : tuple of int The shape of the image onto which to draw the line mask. alpha : float, optional Opacity of the line string. Higher values denote a more visible line string. size : int, optional Thickness of the line segments. antialiased : bool, optional Whether to draw the line with anti-aliasing activated. raise_if_out_of_image : bool, optional Whether to raise an error if the line string is fully outside of the image. If set to False, no error will be raised and only the parts inside the image will be drawn. Returns ------- ndarray Float array of shape `image_shape` (no channel axis) with drawn line string. All values are in the interval ``[0.0, 1.0]``.
def import_pyfile(filepath, mod_name=None):
    import sys
    if sys.version_info.major == 3:
        import importlib.machinery
        loader = importlib.machinery.SourceFileLoader('', filepath)
        mod = loader.load_module(mod_name)
    else:
        import imp
        mod = imp.load_source(mod_name, filepath)

    return mod
Imports the contents of filepath as a Python module. :param filepath: string :param mod_name: string Name of the module when imported :return: module Imported module
def labels(self):
    return sorted(self.channels, key=lambda c: self.channels[c])
Return the names of our marker labels in canonical order.
def get_devpi_url(ctx): cmd = 'devpi use --urls' lines = ctx.run(cmd, hide='out', echo=False).stdout.splitlines() for line in lines: try: line, base_url = line.split(':', 1) except ValueError: notify.warning('Ignoring "{}"!'.format(line)) else: if line.split()[-1].strip() == 'simpleindex': return base_url.split('\x1b')[0].strip().rstrip('/') raise LookupError("Cannot find simpleindex URL in '{}' output:\n {}".format( cmd, '\n '.join(lines), ))
Get currently used 'devpi' base URL.
async def get_oauth_verifier(oauth_token): url = "https://api.twitter.com/oauth/authorize?oauth_token=" + oauth_token try: browser = webbrowser.open(url) await asyncio.sleep(2) if not browser: raise RuntimeError except RuntimeError: print("could not open a browser\ngo here to enter your PIN: " + url) verifier = input("\nEnter your PIN: ") return verifier
Open authorize page in a browser, print the url if it didn't work Arguments --------- oauth_token : str The oauth token received in :func:`get_oauth_token` Returns ------- str The PIN entered by the user
def layers(self):
    layers = [self._layer_def(style) for style in self.styles]
    return layers
Renders the list of layers to add to the map. Returns: layers (list): list of layer entries suitable for use in mapbox-gl 'map.addLayer()' call
def objHasUnsavedChanges(self):
    if not self.obj:
        return False

    return self.obj.hasUnsavedChanges(cascadeObjects=True)
objHasUnsavedChanges - Check if any object has unsaved changes, cascading.
def parse_list_line_windows(self, b): line = b.decode(encoding=self.encoding).rstrip("\r\n") date_time_end = line.index("M") date_time_str = line[:date_time_end + 1].strip().split(" ") date_time_str = " ".join([x for x in date_time_str if len(x) > 0]) line = line[date_time_end + 1:].lstrip() with setlocale("C"): strptime = datetime.datetime.strptime date_time = strptime(date_time_str, "%m/%d/%Y %I:%M %p") info = {} info["modify"] = self.format_date_time(date_time) next_space = line.index(" ") if line.startswith("<DIR>"): info["type"] = "dir" else: info["type"] = "file" info["size"] = line[:next_space].replace(",", "") if not info["size"].isdigit(): raise ValueError filename = line[next_space:].lstrip() if filename == "." or filename == "..": raise ValueError return pathlib.PurePosixPath(filename), info
Parsing Microsoft Windows `dir` output :param b: response line :type b: :py:class:`bytes` or :py:class:`str` :return: (path, info) :rtype: (:py:class:`pathlib.PurePosixPath`, :py:class:`dict`)
def get_incorrect_names_by_namespace(graph: BELGraph, namespace: str) -> Set[str]:
    return {
        exc.name
        for _, exc, _ in graph.warnings
        if isinstance(exc, (MissingNamespaceNameWarning, MissingNamespaceRegexWarning))
        and exc.namespace == namespace
    }
Return the set of all incorrect names from the given namespace in the graph. :return: The set of all incorrect names from the given namespace in the graph
def _sample_with_priority(self, p): parent = 0 while True: left = 2 * parent + 1 if left >= len(self._memory): return parent left_p = self._memory[left] if left < self._capacity - 1 \ else (self._memory[left].priority or 0) if p <= left_p: parent = left else: if left + 1 >= len(self._memory): raise RuntimeError('Right child is expected to exist.') p -= left_p parent = left + 1
Sample random element with priority greater than p.
def get_fsapi_endpoint(self):
    endpoint = yield from self.__session.get(self.fsapi_device_url, timeout=self.timeout)
    text = yield from endpoint.text(encoding='utf-8')
    doc = objectify.fromstring(text)
    return doc.webfsapi.text
Parse the fsapi endpoint from the device url.
def fit_transform(self, X, y=None, **params):
    X = as_features(X, stack=True)
    X_new = self.transformer.fit_transform(X.stacked_features, y, **params)
    return self._gather_outputs(X, X_new)
Fit and transform the stacked points. Parameters ---------- X : :class:`Features` or list of bag feature arrays Data to train on and transform. any other keyword argument : Passed on as keyword arguments to the transformer's ``transform()``. Returns ------- X_new : :class:`Features` Transformed features.
def to_representation(self, instance): request = self.context['request'] enterprise_customer = instance.enterprise_customer representation = super(EnterpriseCustomerCatalogDetailSerializer, self).to_representation(instance) paginated_content = instance.get_paginated_content(request.GET) count = paginated_content['count'] search_results = paginated_content['results'] for item in search_results: content_type = item['content_type'] marketing_url = item.get('marketing_url') if marketing_url: item['marketing_url'] = utils.update_query_parameters( marketing_url, utils.get_enterprise_utm_context(enterprise_customer) ) if content_type == 'course': item['enrollment_url'] = instance.get_course_enrollment_url(item['key']) if content_type == 'courserun': item['enrollment_url'] = instance.get_course_run_enrollment_url(item['key']) if content_type == 'program': item['enrollment_url'] = instance.get_program_enrollment_url(item['uuid']) previous_url = None next_url = None page = int(request.GET.get('page', '1')) request_uri = request.build_absolute_uri() if paginated_content['previous']: previous_url = utils.update_query_parameters(request_uri, {'page': page - 1}) if paginated_content['next']: next_url = utils.update_query_parameters(request_uri, {'page': page + 1}) representation['count'] = count representation['previous'] = previous_url representation['next'] = next_url representation['results'] = search_results return representation
Serialize the EnterpriseCustomerCatalog object. Arguments: instance (EnterpriseCustomerCatalog): The EnterpriseCustomerCatalog to serialize. Returns: dict: The EnterpriseCustomerCatalog converted to a dict.
def encode(self, word, max_length=8): word = ''.join( char for char in word.lower() if char in self._initial_phones ) if not word: word = '÷' values = [self._initial_phones[word[0]]] values += [self._trailing_phones[char] for char in word[1:]] shifted_values = [_ >> 1 for _ in values] condensed_values = [values[0]] for n in range(1, len(shifted_values)): if shifted_values[n] != shifted_values[n - 1]: condensed_values.append(values[n]) values = ( [condensed_values[0]] + [0] * max(0, max_length - len(condensed_values)) + condensed_values[1:max_length] ) hash_value = 0 for val in values: hash_value = (hash_value << 8) | val return hash_value
Return the eudex phonetic hash of a word. Parameters ---------- word : str The word to transform max_length : int The length in bits of the code returned (default 8) Returns ------- int The eudex hash Examples -------- >>> pe = Eudex() >>> pe.encode('Colin') 432345564238053650 >>> pe.encode('Christopher') 433648490138894409 >>> pe.encode('Niall') 648518346341351840 >>> pe.encode('Smith') 720575940412906756 >>> pe.encode('Schmidt') 720589151732307997
def create_archive(directory, filename, config={}, ignore_predicate=None, ignored_files=['.git', '.svn']): with zipfile.ZipFile(filename, 'w', compression=zipfile.ZIP_DEFLATED) as zip_file: root_len = len(os.path.abspath(directory)) out("Creating archive: " + str(filename)) for root, dirs, files in os.walk(directory, followlinks=True): archive_root = os.path.abspath(root)[root_len + 1:] for f in files: fullpath = os.path.join(root, f) archive_name = os.path.join(archive_root, f) if filename in fullpath: continue if ignored_files is not None: for name in ignored_files: if fullpath.endswith(name): out("Skipping: " + str(name)) continue if ignore_predicate is not None: if not ignore_predicate(archive_name): out("Skipping: " + str(archive_name)) continue out("Adding: " + str(archive_name)) zip_file.write(fullpath, archive_name, zipfile.ZIP_DEFLATED) return filename
Creates an archive from a directory and returns the file that was created.
def all_childnodes_to_nifti1img(h5group):
    child_nodes = []

    def append_parent_if_dataset(name, obj):
        if isinstance(obj, h5py.Dataset):
            if name.split('/')[-1] == 'data':
                child_nodes.append(obj.parent)

    vols = []
    h5group.visititems(append_parent_if_dataset)
    for c in child_nodes:
        vols.append(hdfgroup_to_nifti1image(c))

    return vols
Returns in a list all images found under h5group. Parameters ---------- h5group: h5py.Group HDF group Returns ------- list of nifti1Image
def t_t_isopen(self, t):
    r'"|\''
    if t.value[0] == '"':
        t.lexer.push_state('istringquotes')
    elif t.value[0] == '\'':
        t.lexer.push_state('istringapostrophe')
    return t
r'"|\
def expandvars_dict(settings):
    return dict(
        (key, os.path.expandvars(value))
        for key, value in settings.iteritems()
    )
Expands all environment variables in a settings dictionary.
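A brief usage sketch (Python 2, to match the `iteritems` call above); the variable name is illustrative:

import os

os.environ['DATA_DIR'] = '/srv/data'
settings = {'input_path': '$DATA_DIR/input.csv'}
print(expandvars_dict(settings))
# {'input_path': '/srv/data/input.csv'}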
def check_pre_requirements(pre_requirements):
    pre_requirements = set(pre_requirements or [])
    pre_requirements.add('virtualenv')

    for requirement in pre_requirements:
        if not which(requirement):
            print_error('Requirement {0!r} is not found in system'.
                        format(requirement))
            return False

    return True
Check all necessary system requirements to exist. :param pre_requirements: Sequence of pre-requirements to check by running ``where <pre_requirement>`` on Windows and ``which ...`` elsewhere.
def get_images_bytesize_match(self, images): cnt = 0 max_bytes_size = 15728640 good_images = [] for image in images: if cnt > 30: return good_images src = self.parser.getAttribute(image, attr='src') src = self.build_image_path(src) src = self.add_schema_if_none(src) local_image = self.get_local_image(src) if local_image: filesize = local_image.bytes if (filesize == 0 or filesize > self.images_min_bytes) and filesize < max_bytes_size: good_images.append(image) else: images.remove(image) cnt += 1 return good_images if len(good_images) > 0 else None
Loop through all the images and find the ones that have the best byte size to even make them a candidate.
def _parse_revision_date(self): r doc_datetime = None if not self.is_draft: date_command = LatexCommand( 'date', {'name': 'content', 'required': True, 'bracket': '{'}) try: parsed = next(date_command.parse(self._tex)) command_content = parsed['content'].strip() except StopIteration: command_content = None self._logger.warning('lsstdoc has no date command') if command_content is not None and command_content != r'\today': try: doc_datetime = datetime.datetime.strptime(command_content, '%Y-%m-%d') project_tz = timezone('US/Pacific') localized_datetime = project_tz.localize(doc_datetime) doc_datetime = localized_datetime.astimezone(pytz.utc) self._revision_datetime_source = 'tex' except ValueError: self._logger.warning('Could not parse a datetime from ' 'lsstdoc date command: %r', command_content) if doc_datetime is None: content_extensions = ('tex', 'bib', 'pdf', 'png', 'jpg') try: doc_datetime = get_content_commit_date( content_extensions, root_dir=self._root_dir) self._revision_datetime_source = 'git' except RuntimeError: self._logger.warning('Could not get a datetime from the Git ' 'repository at %r', self._root_dir) if doc_datetime is None: doc_datetime = pytz.utc.localize(datetime.datetime.now()) self._revision_datetime_source = 'now' self._datetime = doc_datetime
r"""Parse the ``\date`` command, falling back to getting the most recent Git commit date and the current datetime. Result is available from the `revision_datetime` attribute.