| repo (stringclasses, 44 values) | path (stringlengths, 6-100) | size (int64, 146-125k) | copies (int64, 0-0) | license (stringclasses, 1 value) | content (stringlengths, 146-125k) | token_count (int64, 51-32.1k) | hash (stringlengths, 64-64) | line_mean (float64, 12.5-80.7) | line_max (int64, 30-924) | alpha_frac (float64, 0.36-0.8) | ratio (float64, 1.55-5.96) | autogenerated (bool, 1 class) | config_or_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | has_few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
crestalnetwork/intentkit
|
skills/allora/__init__.py
| 2,059
| 0
|
MIT License
|
"""Allora skill module."""
import logging
from typing import NotRequired, TypedDict
from abstracts.skill import SkillStoreABC
from skills.allora.base import AlloraBaseTool
from skills.allora.price import AlloraGetPrice
from skills.base import SkillConfig, SkillState
# Cache skills at the system level, because they are stateless
_cache: dict[str, AlloraBaseTool] = {}
logger = logging.getLogger(__name__)
class SkillStates(TypedDict):
get_price_prediction: SkillState
class Config(SkillConfig):
"""Configuration for Allora skills."""
states: SkillStates
api_key: NotRequired[str]
async def get_skills(
config: "Config",
is_private: bool,
store: SkillStoreABC,
**_,
) -> list[AlloraBaseTool]:
"""Get all Allora skills.
Args:
config: The configuration for Allora skills.
is_private: Whether to include private skills.
store: The skill store for persisting data.
Returns:
A list of Allora skills.
"""
available_skills = []
# Include skills based on their state
for skill_name, state in config["states"].items():
if state == "disabled":
continue
elif state == "public" or (state == "private" and is_private):
available_skills.append(skill_name)
# Get each skill using the cached getter
result = []
for name in available_skills:
skill = get_allora_skill(name, store)
if skill:
result.append(skill)
return result
def get_allora_skill(
name: str,
store: SkillStoreABC,
) -> AlloraBaseTool | None:
"""Get an Allora skill by name.
Args:
name: The name of the skill to get
store: The skill store for persisting data
Returns:
The requested Allora skill, or None if the name is unknown
"""
if name == "get_price_prediction":
if name not in _cache:
_cache[name] = AlloraGetPrice(
skill_store=store,
)
return _cache[name]
else:
logger.warning(f"Unknown Allora skill: {name}")
return None
| 547
|
c8f767634a1da118872662344f775f97a5819481ad9691ae3885297785e36aca
| 23.807229
| 70
| 0.641088
| 3.764168
| false
| true
| false
| false
|
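A minimal sketch (not part of the record above) of how the state-based filtering in `get_skills` resolves skill names. The `states` mapping and the `is_private` values below are made-up examples; the loop mirrors the record's own logic.

```python
# Illustrative only: mirrors the filtering loop in skills/allora/__init__.py.
# The states mapping below is a made-up example of config["states"].
states = {"get_price_prediction": "private"}


def resolve_skill_names(states: dict[str, str], is_private: bool) -> list[str]:
    names = []
    for skill_name, state in states.items():
        if state == "disabled":
            continue
        if state == "public" or (state == "private" and is_private):
            names.append(skill_name)
    return names


print(resolve_skill_names(states, is_private=True))   # ['get_price_prediction']
print(resolve_skill_names(states, is_private=False))  # []
```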
pydantic/pydantic-ai
|
tests/models/test_cohere.py
| 12,440
| 0
|
MIT License
|
from __future__ import annotations as _annotations
import json
from collections.abc import Sequence
from dataclasses import dataclass
from datetime import timezone
from typing import Any, Union, cast
import pytest
from inline_snapshot import snapshot
from pydantic_ai import Agent, ModelHTTPError, ModelRetry
from pydantic_ai.messages import (
ImageUrl,
ModelRequest,
ModelResponse,
RetryPromptPart,
SystemPromptPart,
TextPart,
ToolCallPart,
ToolReturnPart,
UserPromptPart,
)
from pydantic_ai.usage import Usage
from ..conftest import IsNow, raise_if_exception, try_import
with try_import() as imports_successful:
import cohere
from cohere import (
AssistantMessageResponse,
AsyncClientV2,
ChatResponse,
TextAssistantMessageResponseContentItem,
ToolCallV2,
ToolCallV2Function,
)
from cohere.core.api_error import ApiError
from pydantic_ai.models.cohere import CohereModel
from pydantic_ai.providers.cohere import CohereProvider
# note: we use Union here for compatibility with Python 3.9
MockChatResponse = Union[ChatResponse, Exception]
pytestmark = [
pytest.mark.skipif(not imports_successful(), reason='cohere not installed'),
pytest.mark.anyio,
]
def test_init():
m = CohereModel('command-r7b-12-2024', provider=CohereProvider(api_key='foobar'))
assert m.model_name == 'command-r7b-12-2024'
assert m.system == 'cohere'
assert m.base_url == 'https://api.cohere.com'
@dataclass
class MockAsyncClientV2:
completions: MockChatResponse | Sequence[MockChatResponse] | None = None
index = 0
@classmethod
def create_mock(cls, completions: MockChatResponse | Sequence[MockChatResponse]) -> AsyncClientV2:
return cast(AsyncClientV2, cls(completions=completions))
async def chat( # pragma: no cover
self, *_args: Any, **_kwargs: Any
) -> ChatResponse:
assert self.completions is not None
if isinstance(self.completions, Sequence):
raise_if_exception(self.completions[self.index])
response = cast(ChatResponse, self.completions[self.index])
else:
raise_if_exception(self.completions)
response = cast(ChatResponse, self.completions)
self.index += 1
return response
def completion_message(message: AssistantMessageResponse, *, usage: cohere.Usage | None = None) -> ChatResponse:
return ChatResponse(
id='123',
finish_reason='COMPLETE',
message=message,
usage=usage,
)
async def test_request_simple_success(allow_model_requests: None):
c = completion_message(
AssistantMessageResponse(
content=[
TextAssistantMessageResponseContentItem(text='world'),
],
)
)
mock_client = MockAsyncClientV2.create_mock(c)
m = CohereModel('command-r7b-12-2024', provider=CohereProvider(cohere_client=mock_client))
agent = Agent(m)
result = await agent.run('hello')
assert result.data == 'world'
assert result.usage() == snapshot(Usage(requests=1))
# reset the index so we get the same response again
mock_client.index = 0 # type: ignore
result = await agent.run('hello', message_history=result.new_messages())
assert result.data == 'world'
assert result.usage() == snapshot(Usage(requests=1))
assert result.all_messages() == snapshot(
[
ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
ModelResponse(
parts=[TextPart(content='world')], model_name='command-r7b-12-2024', timestamp=IsNow(tz=timezone.utc)
),
ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
ModelResponse(
parts=[TextPart(content='world')], model_name='command-r7b-12-2024', timestamp=IsNow(tz=timezone.utc)
),
]
)
async def test_request_simple_usage(allow_model_requests: None):
c = completion_message(
AssistantMessageResponse(
content=[TextAssistantMessageResponseContentItem(text='world')],
role='assistant',
),
usage=cohere.Usage(
tokens=cohere.UsageTokens(input_tokens=1, output_tokens=1),
billed_units=cohere.UsageBilledUnits(input_tokens=1, output_tokens=1),
),
)
mock_client = MockAsyncClientV2.create_mock(c)
m = CohereModel('command-r7b-12-2024', provider=CohereProvider(cohere_client=mock_client))
agent = Agent(m)
result = await agent.run('Hello')
assert result.data == 'world'
assert result.usage() == snapshot(
Usage(
requests=1,
request_tokens=1,
response_tokens=1,
total_tokens=2,
details={
'input_tokens': 1,
'output_tokens': 1,
},
)
)
async def test_request_structured_response(allow_model_requests: None):
c = completion_message(
AssistantMessageResponse(
content=None,
role='assistant',
tool_calls=[
ToolCallV2(
id='123',
function=ToolCallV2Function(arguments='{"response": [1, 2, 123]}', name='final_result'),
type='function',
)
],
)
)
mock_client = MockAsyncClientV2.create_mock(c)
m = CohereModel('command-r7b-12-2024', provider=CohereProvider(cohere_client=mock_client))
agent = Agent(m, result_type=list[int])
result = await agent.run('Hello')
assert result.data == [1, 2, 123]
assert result.all_messages() == snapshot(
[
ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
ModelResponse(
parts=[
ToolCallPart(
tool_name='final_result',
args='{"response": [1, 2, 123]}',
tool_call_id='123',
)
],
model_name='command-r7b-12-2024',
timestamp=IsNow(tz=timezone.utc),
),
ModelRequest(
parts=[
ToolReturnPart(
tool_name='final_result',
content='Final result processed.',
tool_call_id='123',
timestamp=IsNow(tz=timezone.utc),
)
]
),
]
)
async def test_request_tool_call(allow_model_requests: None):
responses = [
completion_message(
AssistantMessageResponse(
content=None,
role='assistant',
tool_calls=[
ToolCallV2(
id='1',
function=ToolCallV2Function(arguments='{"loc_name": "San Fransisco"}', name='get_location'),
type='function',
)
],
),
usage=cohere.Usage(),
),
completion_message(
AssistantMessageResponse(
content=None,
role='assistant',
tool_calls=[
ToolCallV2(
id='2',
function=ToolCallV2Function(arguments='{"loc_name": "London"}', name='get_location'),
type='function',
)
],
),
usage=cohere.Usage(
tokens=cohere.UsageTokens(input_tokens=5, output_tokens=3),
billed_units=cohere.UsageBilledUnits(input_tokens=4, output_tokens=2),
),
),
completion_message(
AssistantMessageResponse(
content=[TextAssistantMessageResponseContentItem(text='final response')],
role='assistant',
)
),
]
mock_client = MockAsyncClientV2.create_mock(responses)
m = CohereModel('command-r7b-12-2024', provider=CohereProvider(cohere_client=mock_client))
agent = Agent(m, system_prompt='this is the system prompt')
@agent.tool_plain
async def get_location(loc_name: str) -> str:
if loc_name == 'London':
return json.dumps({'lat': 51, 'lng': 0})
else:
raise ModelRetry('Wrong location, please try again')
result = await agent.run('Hello')
assert result.data == 'final response'
assert result.all_messages() == snapshot(
[
ModelRequest(
parts=[
SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
]
),
ModelResponse(
parts=[
ToolCallPart(
tool_name='get_location',
args='{"loc_name": "San Fransisco"}',
tool_call_id='1',
)
],
model_name='command-r7b-12-2024',
timestamp=IsNow(tz=timezone.utc),
),
ModelRequest(
parts=[
RetryPromptPart(
content='Wrong location, please try again',
tool_name='get_location',
tool_call_id='1',
timestamp=IsNow(tz=timezone.utc),
)
]
),
ModelResponse(
parts=[
ToolCallPart(
tool_name='get_location',
args='{"loc_name": "London"}',
tool_call_id='2',
)
],
model_name='command-r7b-12-2024',
timestamp=IsNow(tz=timezone.utc),
),
ModelRequest(
parts=[
ToolReturnPart(
tool_name='get_location',
content='{"lat": 51, "lng": 0}',
tool_call_id='2',
timestamp=IsNow(tz=timezone.utc),
)
]
),
ModelResponse(
parts=[TextPart(content='final response')],
model_name='command-r7b-12-2024',
timestamp=IsNow(tz=timezone.utc),
),
]
)
assert result.usage() == snapshot(
Usage(
requests=3,
request_tokens=5,
response_tokens=3,
total_tokens=8,
details={'input_tokens': 4, 'output_tokens': 2},
)
)
async def test_multimodal(allow_model_requests: None):
c = completion_message(AssistantMessageResponse(content=[TextAssistantMessageResponseContentItem(text='world')]))
mock_client = MockAsyncClientV2.create_mock(c)
m = CohereModel('command-r7b-12-2024', provider=CohereProvider(cohere_client=mock_client))
agent = Agent(m)
with pytest.raises(RuntimeError, match='Cohere does not yet support multi-modal inputs.'):
await agent.run(
[
'hello',
ImageUrl(
url='https://t3.ftcdn.net/jpg/00/85/79/92/360_F_85799278_0BBGV9OAdQDTLnKwAPBCcg1J7QtiieJY.jpg'
),
]
)
def test_model_status_error(allow_model_requests: None) -> None:
mock_client = MockAsyncClientV2.create_mock(
ApiError(
status_code=500,
body={'error': 'test error'},
)
)
m = CohereModel('command-r', provider=CohereProvider(cohere_client=mock_client))
agent = Agent(m)
with pytest.raises(ModelHTTPError) as exc_info:
agent.run_sync('hello')
assert str(exc_info.value) == snapshot("status_code: 500, model_name: command-r, body: {'error': 'test error'}")
@pytest.mark.vcr()
async def test_request_simple_success_with_vcr(allow_model_requests: None, co_api_key: str):
m = CohereModel('command-r7b-12-2024', provider=CohereProvider(api_key=co_api_key))
agent = Agent(m)
result = await agent.run('hello')
assert result.data == snapshot('Hello! How can I assist you today?')
| 2,992
|
12d11bb6596db25ba3dcaeb857c4eeccd5026878e60f7a7ebd783c844c83de16
| 33.269972
| 117
| 0.552814
| 4.157754
| false
| false
| false
| false
|
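The tests above stub the Cohere client by casting a plain dataclass to `AsyncClientV2`. Below is a stripped-down sketch of that pattern with stand-in types; none of the names come from pydantic-ai or cohere.

```python
# Sketch of the cast-based mocking pattern used in MockAsyncClientV2.
# RealClient and FakeClient are stand-ins, not cohere types.
from dataclasses import dataclass
from typing import cast


class RealClient:
    async def chat(self, prompt: str) -> str: ...


@dataclass
class FakeClient:
    reply: str = "world"

    async def chat(self, prompt: str) -> str:
        # Duck typing does the real work at runtime; the cast only
        # satisfies static type checkers that expect a RealClient.
        return self.reply


def create_mock(reply: str) -> RealClient:
    return cast(RealClient, FakeClient(reply))
```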
HKUDS/LightRAG
|
examples/get_all_edges_nx.py
| 1,253
| 0
|
MIT License
|
import networkx as nx
G = nx.read_graphml("./dickensTestEmbedcall/graph_chunk_entity_relation.graphml")
def get_all_edges_and_nodes(G):
# Get all edges and their properties
edges_with_properties = []
for u, v, data in G.edges(data=True):
edges_with_properties.append(
{
"start": u,
"end": v,
"label": data.get(
"label", ""
), # Assuming 'label' is used for edge type
"properties": data,
"start_node_properties": G.nodes[u],
"end_node_properties": G.nodes[v],
}
)
return edges_with_properties
# Example usage
if __name__ == "__main__":
# Assume G is your NetworkX graph loaded from Neo4j
all_edges = get_all_edges_and_nodes(G)
# Print all edges and node properties
for edge in all_edges:
print(f"Edge Label: {edge['label']}")
print(f"Edge Properties: {edge['properties']}")
print(f"Start Node: {edge['start']}")
print(f"Start Node Properties: {edge['start_node_properties']}")
print(f"End Node: {edge['end']}")
print(f"End Node Properties: {edge['end_node_properties']}")
print("---")
| 337
|
5e1df27c5f96f08f2ceec7e1dedd734bb25c5cd1f01b37011304b4b500b7314a
| 30.325
| 81
| 0.548284
| 3.718101
| false
| false
| false
| false
|
meta-llama/llama-stack
|
llama_stack/models/llama/llama3_1/prompts.py
| 12,244
| 0
|
MIT License
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# top-level folder for each specific model found within the models/ directory at
# the top-level of this source tree.
import textwrap
from typing import List
from llama_stack.models.llama.datatypes import (
BuiltinTool,
RawMessage,
StopReason,
ToolCall,
ToolPromptFormat,
)
from llama_stack.models.llama.prompt_format import (
# llama3_1_e2e_tool_call_dialog,
TextCompletionContent,
UseCase,
llama3_1_builtin_tool_call_dialog,
llama3_1_custom_tool_call_dialog,
)
def wolfram_alpha_response():
return textwrap.dedent(
"""
{
"queryresult": {
"success": true,
"inputstring": "100th decimal of pi",
"pods": [
{
"title": "Input interpretation",
"subpods": [
{
"title": "",
"plaintext": "100th digit | \u03c0"
}
]
},
{
"title": "Nearby digits",
"subpods": [
{
"title": "",
"plaintext": "...86208998628034825342117067982148086513282306647093..."
}
]
},
{
"title": "Result",
"primary": true,
"subpods": [
{
"title": "",
"plaintext": "7"
}
]
}
]
}
}
"""
)
def usecases() -> List[UseCase | str]:
return [
textwrap.dedent(
"""
# Llama 3.1 - Prompt Formats
## Tokens
Here is a list of special tokens that are supported by Llama 3.1:
- `<|begin_of_text|>`: Specifies the start of the prompt
- `<|end_of_text|>`: Model will cease to generate more tokens. This token is generated only by the base models.
- `<|finetune_right_pad_id|>`: This token is used for padding text sequences to the same length in a batch.
- `<|start_header_id|>` and `<|end_header_id|>`: These tokens enclose the role for a particular message. The possible roles are: [system, user, assistant and tool]
- `<|eom_id|>`: End of message. A message represents a possible stopping point for execution where the model can inform the executor that a tool call needs to be made. This is used for multi-step interactions between the model and any available tools. This token is emitted by the model when the Environment: ipython instruction is used in the system prompt, or if the model calls for a built-in tool.
- `<|eot_id|>`: End of turn. Represents when the model has determined that it has finished interacting with the user message that initiated its response. This is used in two scenarios:
- at the end of a direct interaction between the model and the user
- at the end of multiple interactions between the model and any available tools
This token signals to the executor that the model has finished generating a response.
- `<|python_tag|>`: Is a special tag used in the model's response to signify a tool call.
"""
),
textwrap.dedent(
"""
There are 4 different roles that are supported by Llama 3.1
- `system`: Sets the context in which to interact with the AI model. It typically includes rules, guidelines, or necessary information that helps the model respond effectively.
- `user`: Represents the human interacting with the model. It includes the inputs, commands, and questions to the model.
- `tool`: A new role introduced in Llama 3.1. This role is used to mark messages with the output of a tool call when sent back to the model from the executor. (The actual token used by the model for this role is "ipython".)
- `assistant`: Represents the response generated by the AI model based on the context provided in the `system`, `tool` and `user` prompts.
"""
),
UseCase(
title="Llama 3.1 Base Model",
description="Text completion for Llama 3.1 base model uses this format.",
dialogs=[TextCompletionContent(content="Color of sky is blue but sometimes can also be")],
notes="Note start special tag",
),
"## Llama 3.1 Instruct Model",
UseCase(
title="User and assistant conversation",
description="Here is a regular multi-turn user assistant conversation and how its formatted.",
dialogs=[
[
RawMessage(role="system", content="You are a helpful assistant"),
RawMessage(
role="user",
content="Answer who are you in the form of jeopardy?",
),
]
],
notes="",
),
"## Tool Calling Formats",
textwrap.dedent(
"""
The three built-in tools (brave_search, wolfram_alpha, and code interpreter) can be turned on using the system prompt:
- Brave Search: Tool call to perform web searches.
- Wolfram Alpha: Tool call to perform complex mathematical calculations.
- Code Interpreter: Enables the model to output python code.
"""
),
UseCase(
title="Builtin Tool Calling",
description=textwrap.dedent(
"""
Here is an example of a conversation using brave search
"""
),
dialogs=[llama3_1_builtin_tool_call_dialog()],
notes=textwrap.dedent(
"""
- Just including Environment: ipython turns on code interpreter; therefore, you don't need to specify code interpretation on the Tools: line. The model can generate python code which is interpreted by the executor, with the result provided back to the model.
- The message body of the assistant response starts with a special tag <|python_tag|>
- As alluded to above, in such an environment, the model can generate <|eom_id|> instead of just the standard <|eot_id|> . The latter indicates the turn is finished, while the former indicates continued multi-step reasoning. That is, the model is expecting a continuation message with the output of the tool call.
- The model tool call response is of the form `tool.call(query="...")` where tool is `brave_search` or `wolfram_alpha`
"""
),
),
UseCase(
title="Builtin Code Interpreter",
description="Here is an actual example of model responding with code",
dialogs=[
[
RawMessage(role="system", content="Environment: ipython"),
RawMessage(
role="user",
content="Write code to check if number is prime, use that to see if the number 7 is prime",
),
],
],
notes=textwrap.dedent(
"""
- Model starts with <|python_tag|> and continues writing python code that needs to be executed
- No explicit mention of code_interpreter in system prompt. `Environment: ipython` implicitly enables it.
"""
),
),
UseCase(
title="Built-in tools full interaction",
description="Here is a full interaction with the built-in tools including the tool response and the final assistant response.",
dialogs=[
[
RawMessage(
role="system",
content="Environment: ipython\nTools: brave_search, wolfram_alpha\n",
),
RawMessage(role="user", content="What is the 100th decimal of pi?"),
RawMessage(
role="assistant",
content="",
stop_reason=StopReason.end_of_message,
tool_calls=[
ToolCall(
call_id="tool_call_id",
tool_name=BuiltinTool.wolfram_alpha,
arguments={"query": "100th decimal of pi"},
)
],
),
RawMessage(
role="tool",
content=wolfram_alpha_response(),
),
],
],
notes=textwrap.dedent(
"""
- Note the `<|python_tag|>` in the assistant response.
- Role is `tool` for the wolfram alpha response that is passed back to the model.
- Final message from assistant has <|eot_id|> tag.
"""
),
),
"## Zero shot tool calling",
UseCase(
title="JSON based tool calling",
description=textwrap.dedent(
"""
Llama models can now output custom tool calls from a single message to allow easier tool calling.
The following prompts provide an example of how custom tools can be called from the output of the model.
It's important to note that the model itself does not execute the calls; it provides structured output to facilitate calling by an executor.
"""
),
dialogs=[llama3_1_custom_tool_call_dialog()],
notes=textwrap.dedent(
"""
- JSON format for providing tools needs name, description and parameters
- Model responds with `<|python_tag|>` and `<|eom_id|>` as `Environment: ipython` was in the system prompt
- Instructions for tools added as a user message
- Only single tool calls are supported as of now
"""
),
),
# FIXME: This is not working yet as expected
# UseCase(
# title="E2E tool call example",
# description=textwrap.dedent(
# """
# Here is an example showing the whole multi-step turn by taking custom tool outputs and passing back to the model.
# """
# ),
# dialogs=[
# llama3_1_e2e_tool_call_dialog(
# tool_prompt_format=ToolPromptFormat.function_tag
# )
# ],
# notes="",
# ),
"## Example of a user defined tool calling",
UseCase(
title="`<function>` based tool calling",
description=textwrap.dedent(
"""
Here is an example of how you could also write custom instructions for model to do zero shot tool calling.
In this example, we define a custom tool calling format using the `<function>` tag.
"""
),
dialogs=[llama3_1_custom_tool_call_dialog(ToolPromptFormat.function_tag)],
notes=textwrap.dedent(
"""
- In this case, model does NOT respond with `<|python_tag|>` and ends with `<|eot_id|>`
- Instructions for tools added as a user message
"""
),
),
]
| 2,524
|
7e69e6e476cd1ff344a20cf4195b03f5316a1ee747b7cca8277c7eef362a3c64
| 46.457364
| 413
| 0.527932
| 4.85103
| false
| false
| false
| false
|
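A small sketch of how the special tokens and roles described in the record above combine into a single prompt string. The exact layout (double newline after each header, `<|eot_id|>` terminators, trailing open assistant header) follows the commonly published Llama 3.1 chat template, not code from this record.

```python
# Assembles a Llama 3.1-style chat prompt from the tokens described above.
# Treat this as an illustration of the format, not a reference implementation.
def render_prompt(messages: list[dict[str, str]]) -> str:
    out = "<|begin_of_text|>"
    for m in messages:
        out += f"<|start_header_id|>{m['role']}<|end_header_id|>\n\n{m['content']}<|eot_id|>"
    # Leave the assistant header open so the model generates the reply.
    out += "<|start_header_id|>assistant<|end_header_id|>\n\n"
    return out


print(render_prompt([
    {"role": "system", "content": "You are a helpful assistant"},
    {"role": "user", "content": "Answer who are you in the form of jeopardy?"},
]))
```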
crestalnetwork/intentkit
|
app/services/twitter/oauth2_callback.py
| 4,968
| 0
|
MIT License
|
"""Twitter OAuth2 callback handler."""
from datetime import datetime, timezone
from typing import Optional
from urllib.parse import parse_qs, urlencode, urlparse
import tweepy
from fastapi import APIRouter, HTTPException
from starlette.responses import JSONResponse, RedirectResponse
from app.config.config import config
from app.services.twitter.oauth2 import oauth2_user_handler
from models.agent import Agent, AgentData
router = APIRouter(prefix="/callback/auth", tags=["Callback"])
def is_valid_url(url: str) -> bool:
"""Check if a URL is valid.
Args:
url: URL to validate
Returns:
bool: True if URL is valid, False otherwise
"""
try:
result = urlparse(url)
return all([result.scheme, result.netloc])
except (ValueError, AttributeError, TypeError):
return False
@router.get("/twitter")
async def twitter_oauth_callback(
state: str,
code: Optional[str] = None,
error: Optional[str] = None,
):
"""Handle Twitter OAuth2 callback.
This endpoint is called by Twitter after the user authorizes the application.
It exchanges the authorization code for access and refresh tokens, then stores
them in the database.
**Query Parameters:**
* `state` - URL-encoded state containing agent_id and redirect_uri
* `code` - Authorization code from Twitter
* `error` - Error message from Twitter (optional)
**Returns:**
* JSONResponse or RedirectResponse depending on redirect_uri
"""
if not state:
raise HTTPException(status_code=400, detail="Missing state parameter")
try:
# Parse state parameter
state_params = parse_qs(state)
agent_id = state_params.get("agent_id", [""])[0]
redirect_uri = state_params.get("redirect_uri", [""])[0]
if error:
raise HTTPException(status_code=400, detail=error)
if not code:
raise HTTPException(status_code=400, detail="Missing code parameter")
if not agent_id:
raise HTTPException(
status_code=400, detail="Missing agent_id in state parameter"
)
agent = await Agent.get(agent_id)
if not agent:
raise HTTPException(status_code=404, detail=f"Agent {agent_id} not found")
agent_data = await AgentData.get(agent_id)
if not agent_data:
agent_data = AgentData(id=agent_id)
# Exchange code for tokens
authorization_response = (
f"{config.twitter_oauth2_redirect_uri}?state={state}&code={code}"
)
token = oauth2_user_handler.get_token(authorization_response)
# Store tokens in database
agent_data.twitter_access_token = token["access_token"]
agent_data.twitter_refresh_token = token["refresh_token"]
agent_data.twitter_access_token_expires_at = datetime.fromtimestamp(
token["expires_at"], tz=timezone.utc
)
# Get user info
client = tweepy.Client(bearer_token=token["access_token"], return_type=dict)
me = client.get_me(user_auth=False)
username = None
if me and "data" in me:
agent_data.twitter_id = me.get("data").get("id")
username = me.get("data").get("username")
agent_data.twitter_username = username
agent_data.twitter_name = me.get("data").get("name")
# Commit changes
await agent_data.save()
# Handle response based on redirect_uri
if redirect_uri and is_valid_url(redirect_uri):
params = {"twitter_auth": "success", "username": username}
redirect_url = f"{redirect_uri}{'&' if '?' in redirect_uri else '?'}{urlencode(params)}"
return RedirectResponse(url=redirect_url)
else:
return JSONResponse(
content={
"message": "Authentication successful, you can close this window",
"username": username,
},
status_code=200,
)
except HTTPException as http_exc:
# Handle error response
if redirect_uri and is_valid_url(redirect_uri):
params = {"twitter_auth": "failed", "error": str(http_exc.detail)}
redirect_url = f"{redirect_uri}{'&' if '?' in redirect_uri else '?'}{urlencode(params)}"
return RedirectResponse(url=redirect_url)
# Re-raise HTTP exceptions to preserve their status codes
raise http_exc
except Exception as e:
# Handle error response for unexpected errors
if redirect_uri and is_valid_url(redirect_uri):
params = {"twitter_auth": "failed", "error": str(e)}
redirect_url = f"{redirect_uri}{'&' if '?' in redirect_uri else '?'}{urlencode(params)}"
return RedirectResponse(url=redirect_url)
# For unexpected errors, use 500 status code
raise HTTPException(status_code=500, detail=str(e))
| 1,184
|
f3097720036c5038918e113b07983f57e6afac2e25e38a2e6457797fbae03ac6
| 35.262774
| 100
| 0.625403
| 4.195946
| false
| false
| false
| false
|
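For reference, a stdlib-only sketch of the `state` round trip the callback above depends on: the caller packs `agent_id` and `redirect_uri` with `urlencode`, and the handler unpacks them with `parse_qs`. The values here are placeholders.

```python
# Round-tripping the OAuth2 state parameter (values are made up).
from urllib.parse import parse_qs, urlencode

state = urlencode({
    "agent_id": "agent-123",
    "redirect_uri": "https://app.example.com/twitter/done",
})

params = parse_qs(state)
agent_id = params.get("agent_id", [""])[0]
redirect_uri = params.get("redirect_uri", [""])[0]
print(agent_id, redirect_uri)
```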
virattt/ai-hedge-fund
|
src/data/models.py
| 3,736
| 0
|
MIT License
|
from pydantic import BaseModel
class Price(BaseModel):
open: float
close: float
high: float
low: float
volume: int
time: str
class PriceResponse(BaseModel):
ticker: str
prices: list[Price]
class FinancialMetrics(BaseModel):
ticker: str
report_period: str
period: str
currency: str
market_cap: float | None
enterprise_value: float | None
price_to_earnings_ratio: float | None
price_to_book_ratio: float | None
price_to_sales_ratio: float | None
enterprise_value_to_ebitda_ratio: float | None
enterprise_value_to_revenue_ratio: float | None
free_cash_flow_yield: float | None
peg_ratio: float | None
gross_margin: float | None
operating_margin: float | None
net_margin: float | None
return_on_equity: float | None
return_on_assets: float | None
return_on_invested_capital: float | None
asset_turnover: float | None
inventory_turnover: float | None
receivables_turnover: float | None
days_sales_outstanding: float | None
operating_cycle: float | None
working_capital_turnover: float | None
current_ratio: float | None
quick_ratio: float | None
cash_ratio: float | None
operating_cash_flow_ratio: float | None
debt_to_equity: float | None
debt_to_assets: float | None
interest_coverage: float | None
revenue_growth: float | None
earnings_growth: float | None
book_value_growth: float | None
earnings_per_share_growth: float | None
free_cash_flow_growth: float | None
operating_income_growth: float | None
ebitda_growth: float | None
payout_ratio: float | None
earnings_per_share: float | None
book_value_per_share: float | None
free_cash_flow_per_share: float | None
class FinancialMetricsResponse(BaseModel):
financial_metrics: list[FinancialMetrics]
class LineItem(BaseModel):
ticker: str
report_period: str
period: str
currency: str
# Allow additional fields dynamically
model_config = {"extra": "allow"}
class LineItemResponse(BaseModel):
search_results: list[LineItem]
class InsiderTrade(BaseModel):
ticker: str
issuer: str | None
name: str | None
title: str | None
is_board_director: bool | None
transaction_date: str | None
transaction_shares: float | None
transaction_price_per_share: float | None
transaction_value: float | None
shares_owned_before_transaction: float | None
shares_owned_after_transaction: float | None
security_title: str | None
filing_date: str
class InsiderTradeResponse(BaseModel):
insider_trades: list[InsiderTrade]
class CompanyNews(BaseModel):
ticker: str
title: str
author: str
source: str
date: str
url: str
sentiment: str | None = None
class CompanyNewsResponse(BaseModel):
news: list[CompanyNews]
class Position(BaseModel):
cash: float = 0.0
shares: int = 0
ticker: str
class Portfolio(BaseModel):
positions: dict[str, Position] # ticker -> Position mapping
total_cash: float = 0.0
class AnalystSignal(BaseModel):
signal: str | None = None
confidence: float | None = None
reasoning: dict | str | None = None
max_position_size: float | None = None # For risk management signals
class TickerAnalysis(BaseModel):
ticker: str
analyst_signals: dict[str, AnalystSignal] # agent_name -> signal mapping
class AgentStateData(BaseModel):
tickers: list[str]
portfolio: Portfolio
start_date: str
end_date: str
ticker_analyses: dict[str, TickerAnalysis] # ticker -> analysis mapping
class AgentStateMetadata(BaseModel):
show_reasoning: bool = False
model_config = {"extra": "allow"}
| 1,023
|
d0955b70c6fd1fe552c85c4b576a1d4c6e26e16ccf67d3288ec98fcf52439cf0
| 24.073826
| 77
| 0.680675
| 3.652004
| false
| false
| false
| false
|
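One detail in the record above worth a quick illustration: `LineItem` sets `model_config = {"extra": "allow"}`, so fields not declared on the model survive validation and stay accessible. A self-contained sketch (the `revenue` field and its value are invented for the example):

```python
# Pydantic v2 keeps undeclared fields when extra="allow"; they remain
# accessible as attributes. `revenue` is a made-up field for illustration.
from pydantic import BaseModel


class LineItem(BaseModel):
    ticker: str
    report_period: str
    period: str
    currency: str
    model_config = {"extra": "allow"}


item = LineItem(
    ticker="AAPL",
    report_period="2024-09-28",
    period="annual",
    currency="USD",
    revenue=1_000_000,  # not declared on the model, still stored
)
print(item.revenue)  # 1000000
```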
meta-llama/llama-stack
|
llama_stack/apis/common/deployment_types.py
| 753
| 0
|
MIT License
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from enum import Enum
from typing import Any, Dict, Optional
from pydantic import BaseModel
from llama_stack.apis.common.content_types import URL
from llama_stack.schema_utils import json_schema_type
@json_schema_type
class RestAPIMethod(Enum):
GET = "GET"
POST = "POST"
PUT = "PUT"
DELETE = "DELETE"
@json_schema_type
class RestAPIExecutionConfig(BaseModel):
url: URL
method: RestAPIMethod
params: Optional[Dict[str, Any]] = None
headers: Optional[Dict[str, Any]] = None
body: Optional[Dict[str, Any]] = None
| 216
|
f074cd9f2c3f20aeda5fd5ceba097b16cdd8e67741a7e2a350279a6d2bb7792a
| 24.1
| 79
| 0.722444
| 3.486111
| false
| false
| false
| false
|
meta-llama/llama-stack
|
llama_stack/models/llama/llama4/ffn.py
| 2,196
| 0
|
MIT License
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# top-level folder for each specific model found within the models/ directory at
# the top-level of this source tree.
from typing import Any, Dict, List
from fairscale.nn.model_parallel.layers import ColumnParallelLinear, RowParallelLinear
from fairscale.nn.model_parallel.mappings import reduce_from_model_parallel_region
from torch import nn
from torch.nn import functional as F
class FeedForward(nn.Module):
def __init__(
self,
dim: int,
hidden_dim: int,
do_reduce: bool = True,
):
super().__init__()
self.do_reduce = do_reduce
self.w1 = ColumnParallelLinear(dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x)
self.w2 = RowParallelLinear(hidden_dim, dim, bias=False, input_is_parallel=True, init_method=lambda x: x)
self.w3 = ColumnParallelLinear(dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x)
self._register_load_state_dict_pre_hook(self.load_hook)
def load_hook(
self,
state_dict: Dict[str, Any],
prefix: str,
local_metadata: Dict[str, Any],
strict: bool,
missing_keys: List[str],
unexpected_keys: List[str],
error_msgs: List[str],
) -> None:
if prefix + "mlp.fc1_weight" in state_dict:
w1, w3 = state_dict.pop(prefix + "mlp.fc1_weight").chunk(2, dim=0)
state_dict[prefix + "w1.weight"] = w1
state_dict[prefix + "w3.weight"] = w3
state_dict[prefix + "w2.weight"] = state_dict.pop(prefix + "mlp.fc2_weight")
def forward(self, x):
x = F.silu(F.linear(x, self.w1.weight)) * F.linear(x, self.w3.weight)
out = F.linear(x, self.w2.weight)
if self.do_reduce:
return reduce_from_model_parallel_region(out)
return out
| 642
|
9149f23fe08c0db08c6b5b29f4926d13cab37ba4c8f79a2942186afe8f43e39d
| 36.862069
| 113
| 0.651639
| 3.420561
| false
| false
| false
| false
|
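The `forward` above is the standard SwiGLU computation; here is a plain-PyTorch sketch with ordinary tensors standing in for the fairscale parallel layers (shapes are arbitrary, no model parallelism).

```python
# SwiGLU as in FeedForward.forward, minus model parallelism:
# out = (silu(x @ w1.T) * (x @ w3.T)) @ w2.T
import torch
import torch.nn.functional as F

dim, hidden_dim = 8, 32
w1 = torch.randn(hidden_dim, dim)   # stand-in for self.w1.weight
w2 = torch.randn(dim, hidden_dim)   # stand-in for self.w2.weight
w3 = torch.randn(hidden_dim, dim)   # stand-in for self.w3.weight

x = torch.randn(2, dim)
out = F.linear(F.silu(F.linear(x, w1)) * F.linear(x, w3), w2)
print(out.shape)  # torch.Size([2, 8])
```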
MadcowD/ell
|
examples/future/limbo.py
| 1,455
| 0
|
MIT License
|
from typing import List
import ell
from ell.types.message import Message
ell.init(verbose=True, store='./logdir', autocommit=True)
@ell.tool()
def order_t_shirt(size : str, color : str, address : str):
# ...
pass
@ell.tool()
def get_order_arrival_date(order_id: str):
"""Gets the arrival date of a t-shirt order"""
# ...
@ell.complex(model="gpt-4o", temperature=0.1, tools=[order_t_shirt, get_order_arrival_date])
def limbo_chat_bot(message_history: List[Message]) -> List[Message]:
return [
ell.system("You are a chatbot mimicing the popstar limbo. She is an alien cat girl from outerspace that writes in all lwoer case kawaii! You interact with all her fans and can help them do various things and are always game to hangout and just chat.."),
] + message_history
if __name__ == "__main__":
message_history = []
while True:
user_message = input("You: ")
message_history.append(ell.user(user_message))
response = limbo_chat_bot(message_history)
print(response)
# print("Limbo: ", response[-1].content)
message_history.append(response)
if response.tool_calls:
tool_results = response.call_tools_and_collect_as_message()
print("Tool results: ", tool_results)
message_history.append(tool_results)
response = limbo_chat_bot(message_history)
message_history.append(response)
| 421
|
bc5531025ccf18b168f4a8ba117b0dfe3f0dad44e073b04b60137883626cf182
| 28.693878
| 262
| 0.646048
| 3.456057
| false
| false
| false
| false
|
crestalnetwork/intentkit
|
app/admin/api.py
| 29,057
| 0
|
MIT License
|
import asyncio
import importlib
import json
import logging
from typing import TypedDict
from aiogram import Bot
from aiogram.exceptions import TelegramConflictError, TelegramUnauthorizedError
from aiogram.utils.token import TokenValidationError
from cdp import Wallet
from cdp.cdp import Cdp
from fastapi import (
APIRouter,
Body,
Depends,
File,
HTTPException,
Path,
Response,
UploadFile,
)
from fastapi.responses import PlainTextResponse
from pydantic import BaseModel, Field, ValidationError
from sqlalchemy import select
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm.exc import NoResultFound
from yaml import safe_load
from app.config.config import config
from app.core.engine import clean_agent_memory
from clients.twitter import unlink_twitter
from models.agent import (
Agent,
AgentCreate,
AgentData,
AgentDataTable,
AgentResponse,
AgentTable,
AgentUpdate,
)
from models.db import get_db
from skills import __all__ as skill_categories
from utils.middleware import create_jwt_middleware
from utils.slack_alert import send_slack_message
admin_router_readonly = APIRouter()
admin_router = APIRouter()
# Create JWT middleware with admin config
verify_jwt = create_jwt_middleware(config.admin_auth_enabled, config.admin_jwt_secret)
logger = logging.getLogger(__name__)
async def _process_agent(
agent: AgentCreate, subject: str | None = None, slack_message: str | None = None
) -> tuple[Agent, AgentData]:
"""Shared function to process agent creation or update.
Args:
agent: Agent configuration to process
subject: Optional subject from JWT token
slack_message: Optional custom message for Slack notification
Returns:
tuple[Agent, AgentData]: Tuple of (processed agent, agent data)
"""
logger.info(f"Processing agent: {agent}")
if subject:
agent.owner = subject
# Get the latest agent from create_or_update
latest_agent, is_new = await agent.create_or_update()
# Process common post-creation/update steps
agent_data = await _process_agent_post_actions(latest_agent, is_new, slack_message)
return latest_agent, agent_data
async def _process_agent_post_actions(
agent: Agent, is_new: bool = True, slack_message: str | None = None
) -> AgentData:
"""Process common actions after agent creation or update.
Args:
agent: The agent that was created or updated
is_new: Whether the agent is newly created
slack_message: Optional custom message for Slack notification
Returns:
AgentData: The processed agent data
"""
has_wallet = False
agent_data = None
if not is_new:
# Get agent data
agent_data = await AgentData.get(agent.id)
if agent_data and agent_data.cdp_wallet_data:
has_wallet = True
wallet_data = json.loads(agent_data.cdp_wallet_data)
# Check if Twitter needs to be unlinked; unlinking changes agent data, so agent data must be updated
if agent.twitter_entrypoint_enabled:
pass
elif (
agent.skills
and agent.skills.get("twitter")
and agent.skills["twitter"].get("enabled")
):
pass
else:
if agent_data and agent_data.twitter_username:
agent_data = await unlink_twitter(agent.id)
# Run clean_agent_memory in background
asyncio.create_task(clean_agent_memory(agent.id, clean_agent=True))
if not has_wallet:
# create the wallet
Cdp.configure(
api_key_name=config.cdp_api_key_name,
private_key=config.cdp_api_key_private_key.replace("\\n", "\n"),
)
network_id = agent.network_id or agent.cdp_network_id
wallet = Wallet.create(network_id=network_id)
wallet_data = wallet.export_data().to_dict()
wallet_data["default_address_id"] = wallet.default_address.address_id
if not agent_data:
agent_data = AgentData(id=agent.id, cdp_wallet_data=json.dumps(wallet_data))
else:
agent_data.cdp_wallet_data = json.dumps(wallet_data)
await agent_data.save()
logger.info(
"Wallet created for agent %s: %s",
agent.id,
wallet_data["default_address_id"],
)
# Send Slack notification
slack_message = slack_message or ("Agent Created" if is_new else "Agent Updated")
try:
_send_agent_notification(agent, agent_data, wallet_data, slack_message)
except Exception as e:
logger.error("Failed to send Slack notification: %s", e)
return agent_data
async def _process_telegram_config(
agent: AgentUpdate, agent_data: AgentData
) -> AgentData:
"""Process telegram configuration for an agent.
Args:
agent: The agent with telegram configuration
agent_data: The agent data to update
Returns:
AgentData: The updated agent data
"""
changes = agent.model_dump(exclude_unset=True)
if not changes.get("telegram_entrypoint_enabled"):
return agent_data
if not changes.get("telegram_config") or not changes.get("telegram_config").get(
"token"
):
return agent_data
tg_bot_token = changes.get("telegram_config").get("token")
try:
bot = Bot(token=tg_bot_token)
bot_info = await bot.get_me()
agent_data.telegram_id = str(bot_info.id)
agent_data.telegram_username = bot_info.username
agent_data.telegram_name = bot_info.first_name
if bot_info.last_name:
agent_data.telegram_name = f"{bot_info.first_name} {bot_info.last_name}"
await agent_data.save()
try:
await bot.close()
except Exception:
pass
return agent_data
except (
TelegramUnauthorizedError,
TelegramConflictError,
TokenValidationError,
) as req_err:
raise HTTPException(
status_code=400,
detail=f"Unauthorized err getting telegram bot username with token {tg_bot_token}: {req_err}",
)
except Exception as e:
raise Exception(
f"Error getting telegram bot username with token {tg_bot_token}: {e}"
)
async def _validate_telegram_config(token: str) -> None:
"""Validate telegram configuration for an agent.
Args:
token: The telegram bot token
"""
try:
bot = Bot(token=token)
await bot.get_me()
await bot.close()
except (
TelegramUnauthorizedError,
TelegramConflictError,
TokenValidationError,
) as req_err:
raise HTTPException(
status_code=400,
detail=f"Unauthorized err getting telegram bot username with token {token}: {req_err}",
)
except Exception as e:
raise Exception(f"Error getting telegram bot username with token {token}: {e}")
def _send_agent_notification(
agent: Agent, agent_data: AgentData, wallet_data: dict, message: str
) -> None:
"""Send a notification about agent creation or update.
Args:
agent: The agent that was created or updated
agent_data: The agent data to update
wallet_data: The agent's wallet data
message: The notification message
"""
# Format autonomous configurations - show only enabled ones with their id, name, and schedule
autonomous_formatted = ""
if agent.autonomous:
enabled_autonomous = [auto for auto in agent.autonomous if auto.enabled]
if enabled_autonomous:
autonomous_items = []
for auto in enabled_autonomous:
schedule = (
f"cron: {auto.cron}" if auto.cron else f"minutes: {auto.minutes}"
)
autonomous_items.append(
f"• {auto.id}: {auto.name or 'Unnamed'} ({schedule})"
)
autonomous_formatted = "\n".join(autonomous_items)
else:
autonomous_formatted = "No enabled autonomous configurations"
else:
autonomous_formatted = "None"
# Format skills - find categories with enabled: true and list skills in public/private states
skills_formatted = ""
if agent.skills:
enabled_categories = []
for category, skill_config in agent.skills.items():
if skill_config and skill_config.get("enabled") is True:
skills_list = []
states = skill_config.get("states", {})
public_skills = [
skill for skill, state in states.items() if state == "public"
]
private_skills = [
skill for skill, state in states.items() if state == "private"
]
if public_skills:
skills_list.append(f" Public: {', '.join(public_skills)}")
if private_skills:
skills_list.append(f" Private: {', '.join(private_skills)}")
if skills_list:
enabled_categories.append(
f"• {category}:\n{chr(10).join(skills_list)}"
)
if enabled_categories:
skills_formatted = "\n".join(enabled_categories)
else:
skills_formatted = "No enabled skills"
else:
skills_formatted = "None"
send_slack_message(
message,
attachments=[
{
"color": "good",
"fields": [
{"title": "ENV", "short": True, "value": config.env},
{"title": "Number", "short": True, "value": agent.number},
{"title": "ID", "short": True, "value": agent.id},
{"title": "Name", "short": True, "value": agent.name},
{"title": "Model", "short": True, "value": agent.model},
{
"title": "GOAT Enabled",
"short": True,
"value": str(agent.goat_enabled),
},
{
"title": "Twitter Username",
"short": True,
"value": agent_data.twitter_username,
},
{
"title": "Telegram Enabled",
"short": True,
"value": str(agent.telegram_entrypoint_enabled),
},
{
"title": "Telegram Username",
"short": True,
"value": agent_data.telegram_username,
},
{
"title": "Wallet Provider",
"short": True,
"value": agent.wallet_provider,
},
{
"title": "Network",
"short": True,
"value": agent.network_id or agent.cdp_network_id or "Default",
},
{
"title": "Wallet Address",
"value": wallet_data.get("default_address_id"),
},
{
"title": "Autonomous",
"value": autonomous_formatted,
},
{
"title": "Skills",
"value": skills_formatted,
},
],
}
],
)
@admin_router.post(
"/agents",
tags=["Agent"],
status_code=201,
operation_id="post_agent_deprecated",
deprecated=True,
)
async def create_or_update_agent(
agent: AgentCreate = Body(AgentCreate, description="Agent configuration"),
subject: str = Depends(verify_jwt),
) -> Response:
"""Create or update an agent.
THIS ENDPOINT IS DEPRECATED. Please use POST /agents/v2 for creating new agents.
This endpoint:
1. Validates agent ID format
2. Creates or updates agent configuration
3. Reinitializes agent if already in cache
4. Masks sensitive data in response
**Request Body:**
* `agent` - Agent configuration
**Returns:**
* `AgentResponse` - Updated agent configuration with additional processed data
**Raises:**
* `HTTPException`:
- 400: Invalid agent ID format
- 500: Database error
"""
latest_agent, agent_data = await _process_agent(agent, subject)
agent_response = AgentResponse.from_agent(latest_agent, agent_data)
# Return Response with ETag header
return Response(
content=agent_response.model_dump_json(),
media_type="application/json",
headers={"ETag": agent_response.etag()},
)
@admin_router_readonly.post(
"/agent/validate",
tags=["Agent"],
status_code=204,
operation_id="validate_agent",
)
async def validate_agent(
input: AgentUpdate = Body(AgentUpdate, description="Agent configuration"),
) -> Response:
"""Validate agent configuration.
**Request Body:**
* `agent` - Agent configuration
**Returns:**
* `204 No Content` - Agent configuration is valid
**Raises:**
* `HTTPException`:
- 400: Invalid agent configuration
- 500: Server error
"""
input.validate_autonomous_schedule()
changes = input.model_dump(exclude_unset=True)
if (
changes.get("telegram_entrypoint_enabled")
and changes.get("telegram_config")
and changes.get("telegram_config").get("token")
):
await _validate_telegram_config(changes.get("telegram_config").get("token"))
return Response(status_code=204)
@admin_router.post(
"/agents/v2",
tags=["Agent"],
status_code=201,
operation_id="create_agent",
response_model=AgentResponse,
)
async def create_agent(
input: AgentUpdate = Body(AgentUpdate, description="Agent configuration"),
subject: str = Depends(verify_jwt),
) -> Response:
"""Create a new agent.
This endpoint:
1. Validates agent ID format
2. Creates a new agent configuration (returns 400 error if agent ID already exists)
3. Masks sensitive data in response
**Request Body:**
* `agent` - Agent configuration
**Returns:**
* `AgentResponse` - Created agent configuration with additional processed data
**Raises:**
* `HTTPException`:
- 400: Invalid agent ID format or agent ID already exists
- 500: Database error
"""
agent = AgentCreate.model_validate(input)
if subject:
agent.owner = subject
# Create new agent
await agent.check_upstream_id()
latest_agent = await agent.create()
# Process common post-creation actions
agent_data = await _process_agent_post_actions(latest_agent, True, "Agent Created")
agent_data = await _process_telegram_config(input, agent_data)
agent_response = AgentResponse.from_agent(latest_agent, agent_data)
# Return Response with ETag header
return Response(
content=agent_response.model_dump_json(),
media_type="application/json",
headers={"ETag": agent_response.etag()},
)
@admin_router.patch(
"/agents/{agent_id}", tags=["Agent"], status_code=200, operation_id="update_agent"
)
async def update_agent(
agent_id: str = Path(..., description="ID of the agent to update"),
agent: AgentUpdate = Body(AgentUpdate, description="Agent update configuration"),
subject: str = Depends(verify_jwt),
) -> Response:
"""Update an existing agent.
This endpoint:
1. Validates agent ID format
2. Updates the agent configuration if it exists
3. Reinitializes agent if already in cache
4. Masks sensitive data in response
**Path Parameters:**
* `agent_id` - ID of the agent to update
**Request Body:**
* `agent` - Agent update configuration
**Returns:**
* `AgentResponse` - Updated agent configuration with additional processed data
**Raises:**
* `HTTPException`:
- 400: Invalid agent ID format
- 404: Agent not found
- 403: Permission denied (if owner mismatch)
- 500: Database error
"""
if subject:
agent.owner = subject
# Update agent
latest_agent = await agent.update(agent_id)
# Process common post-update actions
agent_data = await _process_agent_post_actions(latest_agent, False, "Agent Updated")
agent_data = await _process_telegram_config(agent, agent_data)
agent_response = AgentResponse.from_agent(latest_agent, agent_data)
# Return Response with ETag header
return Response(
content=agent_response.model_dump_json(),
media_type="application/json",
headers={"ETag": agent_response.etag()},
)
@admin_router_readonly.get(
"/agents",
tags=["Agent"],
dependencies=[Depends(verify_jwt)],
operation_id="get_agents",
)
async def get_agents(db: AsyncSession = Depends(get_db)) -> list[AgentResponse]:
"""Get all agents with their quota information.
**Returns:**
* `list[AgentResponse]` - List of agents with their quota information and additional processed data
"""
# Query all agents first
agents = (await db.scalars(select(AgentTable))).all()
# Batch get agent data
agent_ids = [agent.id for agent in agents]
agent_data_list = await db.scalars(
select(AgentDataTable).where(AgentDataTable.id.in_(agent_ids))
)
agent_data_map = {data.id: data for data in agent_data_list}
# Convert to AgentResponse objects
return [
AgentResponse.from_agent(
Agent.model_validate(agent),
AgentData.model_validate(agent_data_map.get(agent.id))
if agent.id in agent_data_map
else None,
)
for agent in agents
]
@admin_router_readonly.get(
"/agents/{agent_id}",
tags=["Agent"],
dependencies=[Depends(verify_jwt)],
operation_id="get_agent",
)
async def get_agent(
agent_id: str = Path(..., description="ID of the agent to retrieve"),
) -> Response:
"""Get a single agent by ID.
**Path Parameters:**
* `agent_id` - ID of the agent to retrieve
**Returns:**
* `AgentResponse` - Agent configuration with additional processed data
**Raises:**
* `HTTPException`:
- 404: Agent not found
"""
agent = await Agent.get(agent_id)
if not agent:
raise HTTPException(status_code=404, detail="Agent not found")
# Get agent data
agent_data = await AgentData.get(agent_id)
agent_response = AgentResponse.from_agent(agent, agent_data)
# Return Response with ETag header
return Response(
content=agent_response.model_dump_json(),
media_type="application/json",
headers={"ETag": agent_response.etag()},
)
class MemCleanRequest(BaseModel):
"""Request model for agent memory cleanup endpoint.
Attributes:
agent_id (str): Agent ID to clean
chat_id (str): Chat ID to clean
clean_skills_memory (bool): To clean the skills data.
clean_agent_memory (bool): To clean the agent memory.
"""
agent_id: str
clean_agent_memory: bool
clean_skills_memory: bool
chat_id: str | None = Field("")
@admin_router.post(
"/agent/clean-memory",
tags=["Agent"],
status_code=204,
dependencies=[Depends(verify_jwt)],
operation_id="clean_agent_memory",
)
@admin_router.post(
"/agents/clean-memory",
tags=["Agent"],
status_code=201,
dependencies=[Depends(verify_jwt)],
operation_id="clean_agent_memory_deprecated",
deprecated=True,
)
async def clean_memory(
request: MemCleanRequest = Body(
MemCleanRequest, description="Agent memory cleanup request"
),
):
"""Clear an agent memory.
**Request Body:**
* `request` - The execution request containing agent ID, message, and thread ID
**Returns:**
* `str` - Formatted response lines from agent memory cleanup
**Raises:**
* `HTTPException`:
- 400: If input parameters are invalid (empty agent_id, thread_id, or message text)
- 404: If agent not found
- 500: For other server-side errors
"""
# Validate input parameters
if not request.agent_id or not request.agent_id.strip():
raise HTTPException(status_code=400, detail="Agent ID cannot be empty")
try:
agent = await Agent.get(request.agent_id)
if not agent:
raise HTTPException(
status_code=404,
detail=f"Agent with id {request.agent_id} not found",
)
await clean_agent_memory(
request.agent_id,
request.chat_id,
clean_agent=request.clean_agent_memory,
clean_skill=request.clean_skills_memory,
)
except NoResultFound:
raise HTTPException(
status_code=404, detail=f"Agent {request.agent_id} not found"
)
except SQLAlchemyError as e:
raise HTTPException(status_code=500, detail=f"Database error: {str(e)}")
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=f"Server error: {str(e)}")
@admin_router_readonly.get(
"/agents/{agent_id}/export",
tags=["Agent"],
operation_id="export_agent",
dependencies=[Depends(verify_jwt)],
)
async def export_agent(
agent_id: str = Path(..., description="ID of the agent to export"),
) -> str:
"""Export agent configuration as YAML.
**Path Parameters:**
* `agent_id` - ID of the agent to export
**Returns:**
* `str` - YAML configuration of the agent
**Raises:**
* `HTTPException`:
- 404: Agent not found
"""
agent = await Agent.get(agent_id)
if not agent:
raise HTTPException(status_code=404, detail="Agent not found")
# Ensure agent.skills is initialized
if agent.skills is None:
agent.skills = {}
# Process all skill categories
for category in skill_categories:
try:
# Dynamically import the skill module
skill_module = importlib.import_module(f"skills.{category}")
# Check if the module has a Config class and get_skills function
if hasattr(skill_module, "Config") and hasattr(skill_module, "get_skills"):
# Get or create the config for this category
category_config = agent.skills.get(category, {})
# Ensure 'enabled' field exists (required by SkillConfig)
if "enabled" not in category_config:
category_config["enabled"] = False
# Ensure states dict exists
if "states" not in category_config:
category_config["states"] = {}
# Get all available skill states from the module
available_skills = []
if hasattr(skill_module, "SkillStates") and hasattr(
skill_module.SkillStates, "__annotations__"
):
available_skills = list(
skill_module.SkillStates.__annotations__.keys()
)
# Add missing skills with disabled state
for skill_name in available_skills:
if skill_name not in category_config["states"]:
category_config["states"][skill_name] = "disabled"
# Get all required fields from Config class and its base classes
config_class = skill_module.Config
# Get all base classes of Config
all_bases = [config_class]
for base in config_class.__mro__[1:]:
if base is TypedDict or base is dict or base is object:
continue
all_bases.append(base)
# Collect all required fields from Config and its base classes
for base in all_bases:
if hasattr(base, "__annotations__"):
for field_name, field_type in base.__annotations__.items():
# Skip fields already set or marked as NotRequired
if field_name in category_config or "NotRequired" in str(
field_type
):
continue
# Add default value based on type
if field_name != "states": # states already handled above
if "str" in str(field_type):
category_config[field_name] = ""
elif "bool" in str(field_type):
category_config[field_name] = False
elif "int" in str(field_type):
category_config[field_name] = 0
elif "float" in str(field_type):
category_config[field_name] = 0.0
elif "list" in str(field_type) or "List" in str(
field_type
):
category_config[field_name] = []
elif "dict" in str(field_type) or "Dict" in str(
field_type
):
category_config[field_name] = {}
# Update the agent's skills config
agent.skills[category] = category_config
except (ImportError, AttributeError):
# Skip if module import fails or doesn't have required components
pass
yaml_content = agent.to_yaml()
return Response(
content=yaml_content,
media_type="application/x-yaml",
headers={"Content-Disposition": f'attachment; filename="{agent_id}.yaml"'},
)
@admin_router.put(
"/agents/{agent_id}/import",
tags=["Agent"],
operation_id="import_agent",
response_class=PlainTextResponse,
)
async def import_agent(
agent_id: str = Path(...),
file: UploadFile = File(
..., description="YAML file containing agent configuration"
),
subject: str = Depends(verify_jwt),
) -> str:
"""Import agent configuration from YAML file.
Only updates existing agents, will not create new ones.
**Path Parameters:**
* `agent_id` - ID of the agent to update
**Request Body:**
* `file` - YAML file containing agent configuration
**Returns:**
* `str` - Success message
**Raises:**
* `HTTPException`:
- 400: Invalid YAML or agent configuration
- 404: Agent not found
- 500: Server error
"""
# First check if agent exists
existing_agent = await Agent.get(agent_id)
if not existing_agent:
raise HTTPException(status_code=404, detail="Agent not found")
# Read and parse YAML
content = await file.read()
try:
yaml_data = safe_load(content)
except Exception as e:
raise HTTPException(status_code=400, detail=f"Invalid YAML format: {e}")
# Create Agent instance from YAML
try:
agent = AgentUpdate.model_validate(yaml_data)
except ValidationError as e:
raise HTTPException(status_code=400, detail=f"Invalid agent configuration: {e}")
# Get the latest agent from create_or_update
latest_agent = await agent.update(agent_id)
# Process common post-creation/update steps
agent_data = await _process_agent_post_actions(
latest_agent, False, "Agent Updated via YAML Import"
)
await _process_telegram_config(agent, agent_data)
return "Agent import successful"
@admin_router.put(
"/agents/{agent_id}/twitter/unlink",
tags=["Agent"],
operation_id="unlink_twitter",
dependencies=[Depends(verify_jwt)],
response_class=Response,
)
async def unlink_twitter_endpoint(
agent_id: str = Path(..., description="ID of the agent to unlink from Twitter"),
) -> Response:
"""Unlink Twitter from an agent.
**Path Parameters:**
* `agent_id` - ID of the agent to unlink from Twitter
**Raises:**
* `HTTPException`:
- 404: Agent not found
"""
# Check if agent exists
agent = await Agent.get(agent_id)
if not agent:
raise HTTPException(status_code=404, detail="Agent not found")
# Call the unlink_twitter function from clients.twitter
agent_data = await unlink_twitter(agent_id)
agent_response = AgentResponse.from_agent(agent, agent_data)
return Response(
content=agent_response.model_dump_json(),
media_type="application/json",
headers={"ETag": agent_response.etag()},
)
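# --- Illustrative usage sketch (not part of the router) ---
# A minimal client-side round trip for the export/import endpoints above.
# The base URL, route prefix, agent id and bearer token are placeholders, and
# the export route path is assumed to mirror the import route; adjust them to
# the actual deployment. Requires the httpx package on the client side.
if __name__ == "__main__":
    import httpx
    base_url = "http://localhost:8000"  # hypothetical deployment URL
    agent_id = "my-agent"  # hypothetical agent id
    headers = {"Authorization": "Bearer <admin-jwt>"}  # placeholder token
    # Export the agent configuration to a local YAML file
    resp = httpx.get(f"{base_url}/agents/{agent_id}/export", headers=headers)
    resp.raise_for_status()
    with open(f"{agent_id}.yaml", "wb") as f:
        f.write(resp.content)
    # Re-import the (possibly edited) YAML for the same agent
    with open(f"{agent_id}.yaml", "rb") as f:
        resp = httpx.put(
            f"{base_url}/agents/{agent_id}/import",
            headers=headers,
            files={"file": (f"{agent_id}.yaml", f, "application/x-yaml")},
        )
    resp.raise_for_status()
    print(resp.text)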
| 6,813
|
b7c0c7b4f2116e0390c1f2f1d072ec17465497bfec0a45bc2869078d91f3a458
| 31.86991
| 106
| 0.588808
| 4.264935
| false
| true
| false
| false
|
fudan-generative-vision/hallo2
|
basicsr/data/__init__.py
| 4,254
| 0
|
MIT License
|
import importlib
import numpy as np
import random
import torch
import torch.utils.data
from copy import deepcopy
from functools import partial
from os import path as osp
from basicsr.data.prefetch_dataloader import PrefetchDataLoader
from basicsr.utils import get_root_logger, scandir
from basicsr.utils.dist_util import get_dist_info
from basicsr.utils.registry import DATASET_REGISTRY
__all__ = ['build_dataset', 'build_dataloader']
# automatically scan and import dataset modules for registry
# scan all the files under the data folder with '_dataset' in file names
data_folder = osp.dirname(osp.abspath(__file__))
dataset_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(data_folder) if v.endswith('_dataset.py')]
# import all the dataset modules
_dataset_modules = [importlib.import_module(f'basicsr.data.{file_name}') for file_name in dataset_filenames]
def build_dataset(dataset_opt):
"""Build dataset from options.
Args:
        dataset_opt (dict): Configuration for dataset. It must contain:
name (str): Dataset name.
type (str): Dataset type.
"""
dataset_opt = deepcopy(dataset_opt)
dataset = DATASET_REGISTRY.get(dataset_opt['type'])(dataset_opt)
logger = get_root_logger()
logger.info(f'Dataset [{dataset.__class__.__name__}] - {dataset_opt["name"]} ' 'is built.')
return dataset
def build_dataloader(dataset, dataset_opt, num_gpu=1, dist=False, sampler=None, seed=None):
"""Build dataloader.
Args:
dataset (torch.utils.data.Dataset): Dataset.
dataset_opt (dict): Dataset options. It contains the following keys:
phase (str): 'train' or 'val'.
num_worker_per_gpu (int): Number of workers for each GPU.
batch_size_per_gpu (int): Training batch size for each GPU.
num_gpu (int): Number of GPUs. Used only in the train phase.
Default: 1.
dist (bool): Whether in distributed training. Used only in the train
phase. Default: False.
sampler (torch.utils.data.sampler): Data sampler. Default: None.
seed (int | None): Seed. Default: None
"""
phase = dataset_opt['phase']
rank, _ = get_dist_info()
if phase == 'train':
if dist: # distributed training
batch_size = dataset_opt['batch_size_per_gpu']
num_workers = dataset_opt['num_worker_per_gpu']
else: # non-distributed training
multiplier = 1 if num_gpu == 0 else num_gpu
batch_size = dataset_opt['batch_size_per_gpu'] * multiplier
num_workers = dataset_opt['num_worker_per_gpu'] * multiplier
dataloader_args = dict(
dataset=dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
sampler=sampler,
drop_last=True)
if sampler is None:
dataloader_args['shuffle'] = True
dataloader_args['worker_init_fn'] = partial(
worker_init_fn, num_workers=num_workers, rank=rank, seed=seed) if seed is not None else None
elif phase in ['val', 'test']: # validation
dataloader_args = dict(dataset=dataset, batch_size=1, shuffle=False, num_workers=0)
else:
raise ValueError(f'Wrong dataset phase: {phase}. ' "Supported ones are 'train', 'val' and 'test'.")
dataloader_args['pin_memory'] = dataset_opt.get('pin_memory', False)
prefetch_mode = dataset_opt.get('prefetch_mode')
if prefetch_mode == 'cpu': # CPUPrefetcher
num_prefetch_queue = dataset_opt.get('num_prefetch_queue', 1)
logger = get_root_logger()
logger.info(f'Use {prefetch_mode} prefetch dataloader: ' f'num_prefetch_queue = {num_prefetch_queue}')
return PrefetchDataLoader(num_prefetch_queue=num_prefetch_queue, **dataloader_args)
else:
# prefetch_mode=None: Normal dataloader
# prefetch_mode='cuda': dataloader for CUDAPrefetcher
return torch.utils.data.DataLoader(**dataloader_args)
def worker_init_fn(worker_id, num_workers, rank, seed):
# Set the worker seed to num_workers * rank + worker_id + seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
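# --- Illustrative usage sketch ---
# A minimal, hedged example of wiring the helpers above together. The dataset
# type and option keys below are placeholders; the real keys depend on which
# dataset class is registered in DATASET_REGISTRY for your project.
if __name__ == '__main__':
    example_opt = {
        'name': 'demo',
        'type': 'PairedImageDataset',  # assumed to be a registered dataset type
        'phase': 'train',
        'batch_size_per_gpu': 4,
        'num_worker_per_gpu': 2,
        # dataset-specific keys (e.g. io_backend, dataroot_gt, dataroot_lq) are also required here
    }
    train_set = build_dataset(example_opt)
    train_loader = build_dataloader(
        train_set, example_opt, num_gpu=1, dist=False, sampler=None, seed=0)
    for batch in train_loader:
        break  # fetch a single batch just to show the loop shape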
| 1,161
|
3214fde7389be3f63e073e38f206d7448efe6d9b49bab3fd55f2eb1dac16216a
| 41.54
| 113
| 0.657029
| 3.664083
| false
| false
| false
| false
|
meta-llama/llama-stack
|
llama_stack/providers/remote/inference/nvidia/openai_utils.py
| 7,685
| 0
|
MIT License
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import warnings
from typing import Any, AsyncGenerator, Dict, List, Optional
from openai import AsyncStream
from openai.types.chat.chat_completion import (
Choice as OpenAIChoice,
)
from openai.types.completion import Completion as OpenAICompletion
from openai.types.completion_choice import Logprobs as OpenAICompletionLogprobs
from llama_stack.apis.inference import (
ChatCompletionRequest,
CompletionRequest,
CompletionResponse,
CompletionResponseStreamChunk,
GreedySamplingStrategy,
JsonSchemaResponseFormat,
TokenLogProbs,
TopKSamplingStrategy,
TopPSamplingStrategy,
)
from llama_stack.providers.utils.inference.openai_compat import (
_convert_openai_finish_reason,
convert_message_to_openai_dict_new,
convert_tooldef_to_openai_tool,
)
async def convert_chat_completion_request(
request: ChatCompletionRequest,
n: int = 1,
) -> dict:
"""
Convert a ChatCompletionRequest to an OpenAI API-compatible dictionary.
"""
# model -> model
# messages -> messages
# sampling_params TODO(mattf): review strategy
# strategy=greedy -> nvext.top_k = -1, temperature = temperature
# strategy=top_p -> nvext.top_k = -1, top_p = top_p
# strategy=top_k -> nvext.top_k = top_k
# temperature -> temperature
# top_p -> top_p
# top_k -> nvext.top_k
# max_tokens -> max_tokens
# repetition_penalty -> nvext.repetition_penalty
# response_format -> GrammarResponseFormat TODO(mf)
# response_format -> JsonSchemaResponseFormat: response_format = "json_object" & nvext["guided_json"] = json_schema
# tools -> tools
# tool_choice ("auto", "required") -> tool_choice
# tool_prompt_format -> TBD
# stream -> stream
# logprobs -> logprobs
if request.response_format and not isinstance(request.response_format, JsonSchemaResponseFormat):
raise ValueError(
f"Unsupported response format: {request.response_format}. Only JsonSchemaResponseFormat is supported."
)
nvext = {}
payload: Dict[str, Any] = dict(
model=request.model,
messages=[await convert_message_to_openai_dict_new(message) for message in request.messages],
stream=request.stream,
n=n,
extra_body=dict(nvext=nvext),
extra_headers={
b"User-Agent": b"llama-stack: nvidia-inference-adapter",
},
)
if request.response_format:
# server bug - setting guided_json changes the behavior of response_format resulting in an error
# payload.update(response_format="json_object")
nvext.update(guided_json=request.response_format.json_schema)
if request.tools:
payload.update(tools=[convert_tooldef_to_openai_tool(tool) for tool in request.tools])
if request.tool_config.tool_choice:
payload.update(
tool_choice=request.tool_config.tool_choice.value
) # we cannot include tool_choice w/o tools, server will complain
if request.logprobs:
payload.update(logprobs=True)
payload.update(top_logprobs=request.logprobs.top_k)
if request.sampling_params:
nvext.update(repetition_penalty=request.sampling_params.repetition_penalty)
if request.sampling_params.max_tokens:
payload.update(max_tokens=request.sampling_params.max_tokens)
strategy = request.sampling_params.strategy
if isinstance(strategy, TopPSamplingStrategy):
nvext.update(top_k=-1)
payload.update(top_p=strategy.top_p)
payload.update(temperature=strategy.temperature)
elif isinstance(strategy, TopKSamplingStrategy):
if strategy.top_k != -1 and strategy.top_k < 1:
warnings.warn("top_k must be -1 or >= 1", stacklevel=2)
nvext.update(top_k=strategy.top_k)
elif isinstance(strategy, GreedySamplingStrategy):
nvext.update(top_k=-1)
else:
raise ValueError(f"Unsupported sampling strategy: {strategy}")
return payload
def convert_completion_request(
request: CompletionRequest,
n: int = 1,
) -> dict:
"""
    Convert a CompletionRequest to an OpenAI API-compatible dictionary.
"""
# model -> model
# prompt -> prompt
# sampling_params TODO(mattf): review strategy
# strategy=greedy -> nvext.top_k = -1, temperature = temperature
# strategy=top_p -> nvext.top_k = -1, top_p = top_p
# strategy=top_k -> nvext.top_k = top_k
# temperature -> temperature
# top_p -> top_p
# top_k -> nvext.top_k
# max_tokens -> max_tokens
# repetition_penalty -> nvext.repetition_penalty
# response_format -> nvext.guided_json
# stream -> stream
# logprobs.top_k -> logprobs
nvext = {}
payload: Dict[str, Any] = dict(
model=request.model,
prompt=request.content,
stream=request.stream,
extra_body=dict(nvext=nvext),
extra_headers={
b"User-Agent": b"llama-stack: nvidia-inference-adapter",
},
n=n,
)
if request.response_format:
# this is not openai compliant, it is a nim extension
nvext.update(guided_json=request.response_format.json_schema)
if request.logprobs:
payload.update(logprobs=request.logprobs.top_k)
if request.sampling_params:
nvext.update(repetition_penalty=request.sampling_params.repetition_penalty)
if request.sampling_params.max_tokens:
payload.update(max_tokens=request.sampling_params.max_tokens)
if request.sampling_params.strategy == "top_p":
nvext.update(top_k=-1)
payload.update(top_p=request.sampling_params.top_p)
elif request.sampling_params.strategy == "top_k":
if request.sampling_params.top_k != -1 and request.sampling_params.top_k < 1:
warnings.warn("top_k must be -1 or >= 1", stacklevel=2)
nvext.update(top_k=request.sampling_params.top_k)
elif request.sampling_params.strategy == "greedy":
nvext.update(top_k=-1)
payload.update(temperature=request.sampling_params.temperature)
return payload
def _convert_openai_completion_logprobs(
logprobs: Optional[OpenAICompletionLogprobs],
) -> Optional[List[TokenLogProbs]]:
"""
Convert an OpenAI CompletionLogprobs into a list of TokenLogProbs.
"""
if not logprobs:
return None
    return [TokenLogProbs(logprobs_by_token=token_logprobs) for token_logprobs in logprobs.top_logprobs]
def convert_openai_completion_choice(
choice: OpenAIChoice,
) -> CompletionResponse:
"""
Convert an OpenAI Completion Choice into a CompletionResponse.
"""
return CompletionResponse(
content=choice.text,
stop_reason=_convert_openai_finish_reason(choice.finish_reason),
logprobs=_convert_openai_completion_logprobs(choice.logprobs),
)
async def convert_openai_completion_stream(
stream: AsyncStream[OpenAICompletion],
) -> AsyncGenerator[CompletionResponse, None]:
"""
Convert a stream of OpenAI Completions into a stream
    of CompletionResponseStreamChunks.
"""
async for chunk in stream:
choice = chunk.choices[0]
yield CompletionResponseStreamChunk(
delta=choice.text,
stop_reason=_convert_openai_finish_reason(choice.finish_reason),
logprobs=_convert_openai_completion_logprobs(choice.logprobs),
)
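# --- Illustrative sketch of the sampling-strategy mapping ---
# This mirrors the strategy -> payload/nvext mapping documented in the comments
# above, but uses plain dicts instead of llama-stack request objects so it can
# be read (and run) on its own. It is an illustration only, not adapter code.
def _example_strategy_to_payload(
    strategy: str,
    temperature: float = 0.7,
    top_p: float = 0.9,
    top_k: int = 40,
) -> Dict[str, Any]:
    nvext: Dict[str, Any] = {}
    payload: Dict[str, Any] = {"extra_body": {"nvext": nvext}}
    if strategy == "greedy":
        nvext["top_k"] = -1
        payload["temperature"] = temperature
    elif strategy == "top_p":
        nvext["top_k"] = -1
        payload["top_p"] = top_p
        payload["temperature"] = temperature
    elif strategy == "top_k":
        nvext["top_k"] = top_k
    else:
        raise ValueError(f"Unsupported sampling strategy: {strategy}")
    return payload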
| 2,104
|
f82873fb13c906f185bfab605e378ce0141d4c304d83760fab397b7ef7f0d0ea
| 34.578704
| 119
| 0.668705
| 3.652567
| false
| false
| false
| false
|
trycua/cua
|
libs/agent/agent/providers/anthropic/types.py
| 421
| 0
|
MIT License
|
from enum import StrEnum
class LLMProvider(StrEnum):
"""Enum for supported API providers."""
ANTHROPIC = "anthropic"
BEDROCK = "bedrock"
VERTEX = "vertex"
PROVIDER_TO_DEFAULT_MODEL_NAME: dict[LLMProvider, str] = {
LLMProvider.ANTHROPIC: "claude-3-7-sonnet-20250219",
LLMProvider.BEDROCK: "anthropic.claude-3-7-sonnet-20250219-v2:0",
LLMProvider.VERTEX: "claude-3-5-sonnet-v2@20241022",
}
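# --- Illustrative usage sketch ---
# Resolving the default model name for a provider; purely demonstrative.
if __name__ == "__main__":
    provider = LLMProvider("anthropic")  # equivalent to LLMProvider.ANTHROPIC
    print(PROVIDER_TO_DEFAULT_MODEL_NAME[provider])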
| 161
|
9f0fefaa638a906a2dee096912b6fa6069b5ff916d47297342eb3b0f1cabe484
| 25.3125
| 69
| 0.700713
| 2.614907
| false
| false
| false
| false
|
meta-llama/llama-stack
|
llama_stack/providers/utils/telemetry/sqlite_trace_store.py
| 6,946
| 0
|
MIT License
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import json
from datetime import datetime
from typing import Dict, List, Optional, Protocol
import aiosqlite
from llama_stack.apis.telemetry import QueryCondition, Span, SpanWithStatus, Trace
class TraceStore(Protocol):
async def query_traces(
self,
attribute_filters: Optional[List[QueryCondition]] = None,
limit: Optional[int] = 100,
offset: Optional[int] = 0,
order_by: Optional[List[str]] = None,
) -> List[Trace]: ...
async def get_span_tree(
self,
span_id: str,
attributes_to_return: Optional[List[str]] = None,
max_depth: Optional[int] = None,
) -> Dict[str, SpanWithStatus]: ...
class SQLiteTraceStore(TraceStore):
def __init__(self, conn_string: str):
self.conn_string = conn_string
async def query_traces(
self,
attribute_filters: Optional[List[QueryCondition]] = None,
limit: Optional[int] = 100,
offset: Optional[int] = 0,
order_by: Optional[List[str]] = None,
) -> List[Trace]:
def build_where_clause() -> tuple[str, list]:
if not attribute_filters:
return "", []
ops_map = {"eq": "=", "ne": "!=", "gt": ">", "lt": "<"}
conditions = [
f"json_extract(s.attributes, '$.{condition.key}') {ops_map[condition.op.value]} ?"
for condition in attribute_filters
]
params = [condition.value for condition in attribute_filters]
where_clause = " WHERE " + " AND ".join(conditions)
return where_clause, params
def build_order_clause() -> str:
if not order_by:
return ""
order_clauses = []
for field in order_by:
desc = field.startswith("-")
clean_field = field[1:] if desc else field
order_clauses.append(f"t.{clean_field} {'DESC' if desc else 'ASC'}")
return " ORDER BY " + ", ".join(order_clauses)
# Build the main query
base_query = """
WITH matching_traces AS (
SELECT DISTINCT t.trace_id
FROM traces t
JOIN spans s ON t.trace_id = s.trace_id
{where_clause}
),
filtered_traces AS (
SELECT t.trace_id, t.root_span_id, t.start_time, t.end_time
FROM matching_traces mt
JOIN traces t ON mt.trace_id = t.trace_id
LEFT JOIN spans s ON t.trace_id = s.trace_id
{order_clause}
)
SELECT DISTINCT trace_id, root_span_id, start_time, end_time
FROM filtered_traces
LIMIT {limit} OFFSET {offset}
"""
where_clause, params = build_where_clause()
query = base_query.format(
where_clause=where_clause,
order_clause=build_order_clause(),
limit=limit,
offset=offset,
)
# Execute query and return results
async with aiosqlite.connect(self.conn_string) as conn:
conn.row_factory = aiosqlite.Row
async with conn.execute(query, params) as cursor:
rows = await cursor.fetchall()
return [
Trace(
trace_id=row["trace_id"],
root_span_id=row["root_span_id"],
start_time=datetime.fromisoformat(row["start_time"]),
end_time=datetime.fromisoformat(row["end_time"]),
)
for row in rows
]
async def get_span_tree(
self,
span_id: str,
attributes_to_return: Optional[List[str]] = None,
max_depth: Optional[int] = None,
) -> Dict[str, SpanWithStatus]:
# Build the attributes selection
attributes_select = "s.attributes"
if attributes_to_return:
json_object = ", ".join(f"'{key}', json_extract(s.attributes, '$.{key}')" for key in attributes_to_return)
attributes_select = f"json_object({json_object})"
# SQLite CTE query with filtered attributes
query = f"""
WITH RECURSIVE span_tree AS (
SELECT s.*, 1 as depth, {attributes_select} as filtered_attributes
FROM spans s
WHERE s.span_id = ?
UNION ALL
SELECT s.*, st.depth + 1, {attributes_select} as filtered_attributes
FROM spans s
JOIN span_tree st ON s.parent_span_id = st.span_id
WHERE (? IS NULL OR st.depth < ?)
)
SELECT *
FROM span_tree
ORDER BY depth, start_time
"""
spans_by_id = {}
async with aiosqlite.connect(self.conn_string) as conn:
conn.row_factory = aiosqlite.Row
async with conn.execute(query, (span_id, max_depth, max_depth)) as cursor:
rows = await cursor.fetchall()
if not rows:
raise ValueError(f"Span {span_id} not found")
for row in rows:
span = SpanWithStatus(
span_id=row["span_id"],
trace_id=row["trace_id"],
parent_span_id=row["parent_span_id"],
name=row["name"],
start_time=datetime.fromisoformat(row["start_time"]),
end_time=datetime.fromisoformat(row["end_time"]),
attributes=json.loads(row["filtered_attributes"]),
status=row["status"].lower(),
)
spans_by_id[span.span_id] = span
return spans_by_id
async def get_trace(self, trace_id: str) -> Trace:
query = "SELECT * FROM traces WHERE trace_id = ?"
async with aiosqlite.connect(self.conn_string) as conn:
conn.row_factory = aiosqlite.Row
async with conn.execute(query, (trace_id,)) as cursor:
row = await cursor.fetchone()
if row is None:
raise ValueError(f"Trace {trace_id} not found")
return Trace(**row)
async def get_span(self, trace_id: str, span_id: str) -> Span:
query = "SELECT * FROM spans WHERE trace_id = ? AND span_id = ?"
async with aiosqlite.connect(self.conn_string) as conn:
conn.row_factory = aiosqlite.Row
async with conn.execute(query, (trace_id, span_id)) as cursor:
row = await cursor.fetchone()
if row is None:
raise ValueError(f"Span {span_id} not found")
return Span(**row)
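# --- Illustrative usage sketch ---
# A minimal, hedged example of querying the store. "traces.db" is a placeholder
# path and is expected to already contain the traces/spans tables written by
# the telemetry provider.
if __name__ == "__main__":
    import asyncio
    async def _demo() -> None:
        store = SQLiteTraceStore("traces.db")
        traces = await store.query_traces(limit=5, order_by=["-start_time"])
        for trace in traces:
            print(trace.trace_id, trace.start_time)
    asyncio.run(_demo())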
| 1,672
|
3ad99d3123bf995e22334dc4d1869ca72bdd343bb4bebfd340936dfbbb38071f
| 36.344086
| 118
| 0.530089
| 4.154306
| false
| false
| false
| false
|
autoscrape-labs/pydoll
|
pydoll/browser/page.py
| 25,924
| 0
|
MIT License
|
import asyncio
import json
import logging
from contextlib import asynccontextmanager
from functools import partial
from pathlib import Path
from typing import List, Optional, Tuple, Union
import aiofiles
from pydoll.commands import (
DomCommands,
FetchCommands,
NetworkCommands,
PageCommands,
RuntimeCommands,
StorageCommands,
)
from pydoll.connection.connection import ConnectionHandler
from pydoll.constants import By
from pydoll.element import WebElement
from pydoll.events import PageEvents
from pydoll.exceptions import InvalidFileExtension
from pydoll.mixins.find_elements import FindElementsMixin
from pydoll.utils import decode_image_to_bytes
logger = logging.getLogger(__name__)
class Page(FindElementsMixin): # noqa: PLR0904
def __init__(self, connection_port: int, page_id: str):
"""
Initializes the Page instance.
Args:
connection_port (int): The port number for the connection to the
browser.
page_id (str): The ID of the page, obtained via the DevTools
Protocol.
"""
self._connection_handler = ConnectionHandler(connection_port, page_id)
self._page_events_enabled = False
self._network_events_enabled = False
self._fetch_events_enabled = False
self._dom_events_enabled = False
self._intercept_file_chooser_dialog_enabled = False
self._cloudflare_captcha_callback_id = None
@property
def page_events_enabled(self) -> bool:
"""
Returns whether page events are enabled or not.
Returns:
bool: True if page events are enabled, False otherwise.
"""
return self._page_events_enabled
@property
def network_events_enabled(self) -> bool:
"""
Returns whether network events are enabled or not.
Returns:
bool: True if network events are enabled, False otherwise.
"""
return self._network_events_enabled
@property
def fetch_events_enabled(self) -> bool:
"""
Returns whether fetch events are enabled or not.
Returns:
bool: True if fetch events are enabled, False otherwise.
"""
return self._fetch_events_enabled
@property
def dom_events_enabled(self) -> bool:
"""
Returns whether DOM events are enabled or not.
Returns:
bool: True if DOM events are enabled, False otherwise.
"""
return self._dom_events_enabled
@property
def intercept_file_chooser_dialog_enabled(self) -> bool:
"""
Returns whether file chooser dialogs are being intercepted or not.
Returns:
bool: True if file chooser dialogs are being intercepted,
False otherwise.
"""
return self._intercept_file_chooser_dialog_enabled
@property
async def current_url(self) -> str:
"""
Retrieves the current URL of the page.
Returns:
str: The current URL of the page.
"""
response = await self._execute_command(DomCommands.get_current_url())
return response['result']['result']['value']
@property
async def page_source(self) -> str:
"""
Retrieves the source code of the page.
Returns:
str: The source code of the page.
"""
response = await self._execute_command(
RuntimeCommands.evaluate_script(
'document.documentElement.outerHTML'
)
)
return response['result']['result']['value']
async def close(self):
"""
Closes the page.
This method closes the current page in the browser.
Returns:
None
"""
await self._execute_command(PageCommands.close())
async def get_cookies(self) -> list[dict]:
"""
Retrieves the cookies of the page.
Returns:
list[dict]: A list of dictionaries containing cookie data from
the current page.
"""
response = await self._execute_command(
NetworkCommands.get_all_cookies()
)
return response['result']['cookies']
async def set_cookies(self, cookies: list[dict]):
"""
Sets cookies for the page.
Args:
cookies (list[dict]): A list of dictionaries containing cookie
data to set for the current page.
"""
await self._execute_command(StorageCommands.set_cookies(cookies))
await self._execute_command(NetworkCommands.set_cookies(cookies))
async def delete_all_cookies(self):
"""
Deletes all cookies from the browser.
This clears both storage cookies and browser cookies associated with
the current page.
Returns:
None
"""
await self._execute_command(StorageCommands.clear_cookies())
await self._execute_command(NetworkCommands.clear_browser_cookies())
async def has_dialog(self) -> bool:
"""
Checks if a dialog is present on the page.
Returns:
bool: True if a dialog is present, False otherwise.
"""
if self._connection_handler.dialog:
return True
return False
async def get_dialog_message(self) -> str:
"""
Retrieves the message of the dialog on the page.
Returns:
str: The message of the dialog.
"""
if not await self.has_dialog():
raise LookupError('No dialog present on the page')
return self._connection_handler.dialog['params']['message']
async def accept_dialog(self):
"""
Accepts the dialog on the page.
Raises:
LookupError: If no dialog is present on the page.
"""
if not await self.has_dialog():
raise LookupError('No dialog present on the page')
await self._execute_command(PageCommands.handle_dialog(True))
async def go_to(self, url: str, timeout=300):
"""
Navigates to a URL in the page.
Args:
url (str): The URL to navigate to.
timeout (int): Maximum time in seconds to wait for page to load.
Defaults to 300 seconds.
Raises:
TimeoutError: If the page fails to load within the specified
timeout.
"""
if await self._refresh_if_url_not_changed(url):
return
await self._execute_command(PageCommands.go_to(url))
try:
await self._wait_page_load(timeout=timeout)
except asyncio.TimeoutError:
raise TimeoutError('Page load timed out')
async def refresh(self):
"""
Refreshes the page.
This method reloads the current page and waits for it to finish
loading.
Raises:
TimeoutError: If the page does not finish loading within the
default timeout period (300 seconds).
Returns:
None
"""
await self._execute_command(PageCommands.refresh())
try:
await self._wait_page_load()
except asyncio.TimeoutError:
raise TimeoutError('Page load timed out')
async def get_screenshot(self, path: str):
"""
Captures a screenshot of the page.
Args:
path (str): The file path to save the screenshot to.
Returns:
None
"""
fmt = path.split('.')[-1]
if fmt not in {'jpeg', 'jpg', 'png'}:
raise InvalidFileExtension(f'{fmt} extension is not supported.')
response = await self._execute_command(
PageCommands.screenshot(fmt=fmt)
)
screenshot_b64 = response['result']['data'].encode('utf-8')
screenshot_bytes = decode_image_to_bytes(screenshot_b64)
async with aiofiles.open(path, 'wb') as file:
await file.write(screenshot_bytes)
async def get_screenshot_base64(self):
"""
Retrieves the screenshot of the page as a base64 encoded string.
Returns:
str: The base64 encoded screenshot.
# TODO: remove the duplicated logic
"""
response = await self._execute_command(PageCommands.screenshot())
return response['result']['data']
async def set_download_path(self, path: str):
"""
Sets the download path for the page.
Args:
path (str): The path where the downloaded files should be saved.
"""
await self._execute_command(PageCommands.set_download_path(path))
async def get_pdf_base64(self):
"""
Retrieves the PDF data of the page.
Returns:
str: The PDF data of the page.
"""
response = await self._execute_command(PageCommands.print_to_pdf())
return response['result']['data']
async def print_to_pdf(self, path: str):
"""
Prints the page to a PDF file.
Args:
path (str): The file path to save the PDF file to.
"""
response = await self._execute_command(PageCommands.print_to_pdf())
pdf_b64 = response['result']['data'].encode('utf-8')
pdf_bytes = decode_image_to_bytes(pdf_b64)
async with aiofiles.open(path, 'wb') as file:
await file.write(pdf_bytes)
async def get_network_logs(self, matches: list[str] = []):
"""
Retrieves network logs from the page.
Args:
matches (list[str]): A list of URL patterns to match network logs
against. If empty, all logs are returned.
Returns:
list: A list of network logs that match the specified patterns.
Raises:
LookupError: If no network logs match the specified patterns.
"""
network_logs = self._connection_handler.network_logs
logs_matched = []
for log in network_logs:
if not log.get('params', {}).get('request', {}).get('url'):
continue
for match in matches:
if match in log['params']['request']['url']:
logs_matched.append(log)
break
if not logs_matched:
raise LookupError('No network logs matched the specified pattern')
return logs_matched
async def get_network_response_bodies(self, matches: list[str] = []):
"""
Retrieves the response bodies of network requests that match the
specified pattern.
Args:
matches (list): The URL patterns to match network requests against.
Returns:
list: A list of response bodies from network requests that match
the specified patterns.
"""
logs_matched = await self.get_network_logs(matches)
responses = []
for log in logs_matched:
try:
body, base64encoded = await self.get_network_response_body(
log['params']['requestId']
)
except KeyError:
continue
response = json.loads(body) if not base64encoded else body
responses.append(response)
return responses
async def get_network_response_body(self, request_id: str):
"""
Retrieves the response body of a network request.
Args:
request_id (str): The ID of the network request.
Returns:
tuple: A tuple containing:
- str: The response body content
- bool: Flag indicating if the body is base64 encoded
"""
response = await self._execute_command(
NetworkCommands.get_response_body(request_id)
)
return (
response['result']['body'],
response['result']['base64Encoded'],
)
async def enable_page_events(self):
"""
Enables page events for the page.
This allows listening for page-related events such as load, navigate,
and content change events. These events can be captured with the `on`
method.
Returns:
None
"""
await self._execute_command(PageCommands.enable_page())
self._page_events_enabled = True
async def enable_network_events(self):
"""
Enables network events for the page.
This allows listening for network-related events such as request and
response events. These events can be captured with the `on` method.
Returns:
None
"""
await self._execute_command(NetworkCommands.enable_network_events())
self._network_events_enabled = True
async def enable_fetch_events(
self, handle_auth: bool = False, resource_type: str = 'Document'
):
"""
Enables fetch events for the page.
This allows interception of network requests before they are sent.
Args:
handle_auth (bool): Whether to handle authentication requests.
Defaults to False.
resource_type (str): The type of resource to intercept.
Defaults to 'Document'.
Returns:
None
"""
await self._execute_command(
FetchCommands.enable_fetch_events(handle_auth, resource_type)
)
self._fetch_events_enabled = True
async def enable_dom_events(self):
"""
Enables DOM events for the page.
This allows listening for DOM-related events such as node creation,
attribute modification, and node removal events. These events can be
captured with the `on` method.
Returns:
None
"""
await self._execute_command(DomCommands.enable_dom_events())
self._dom_events_enabled = True
async def enable_intercept_file_chooser_dialog(self):
"""
Enable intercepting file chooser dialogs.
When file chooser interception is enabled, native file chooser dialog
is not shown. Instead, a protocol event Page.fileChooserOpened is
emitted.
Returns:
None
"""
await self._execute_command(
PageCommands.set_intercept_file_chooser_dialog(True)
)
self._intercept_file_chooser_dialog_enabled = True
async def disable_fetch_events(self):
"""
Disables fetch events for the page.
This stops the interception of network requests that was previously
enabled with enable_fetch_events().
Returns:
None
"""
await self._execute_command(FetchCommands.disable_fetch_events())
self._fetch_events_enabled = False
async def disable_page_events(self):
"""
Disables page events for the page.
This stops listening for page-related events that were previously
enabled with enable_page_events().
Returns:
None
"""
await self._execute_command(PageCommands.disable_page())
self._page_events_enabled = False
async def disable_intercept_file_chooser_dialog(self):
"""
Disable intercepting file chooser dialogs.
When file chooser interception is disabled, native file chooser
dialog is shown.
Returns:
None
"""
await self._execute_command(
PageCommands.set_intercept_file_chooser_dialog(False)
)
self._intercept_file_chooser_dialog_enabled = False
async def on(
self, event_name: str, callback: callable, temporary: bool = False
):
"""
Registers an event listener for the page.
Args:
event_name (str): The event name to listen for.
callback (callable): The callback function to execute when the
event is triggered.
temporary (bool): If True, the callback will be removed after it's
triggered once. Defaults to False.
Returns:
int: The ID of the registered callback, which can be used to
remove the listener later.
"""
async def callback_wrapper(event):
asyncio.create_task(callback(event))
if asyncio.iscoroutinefunction(callback):
function_to_register = callback_wrapper
else:
function_to_register = callback
return await self._connection_handler.register_callback(
event_name, function_to_register, temporary
)
async def execute_script(self, script: str, element: WebElement = None):
"""
Executes a JavaScript script in the page.
If an element is provided, the script will be executed in the context
of that element. To provide the element context, use the 'argument'
keyword in the script.
Examples:
```python
await page.execute_script('argument.click()', element)
await page.execute_script('argument.value = "Hello, World!"', element)
```
Args:
script (str): The JavaScript script to execute.
element (WebElement, optional): The element to execute the script
on. Use 'argument' in your script to refer to this element.
Defaults to None.
Returns:
dict: The result of the script execution from the browser.
"""
if element:
script = script.replace('argument', 'this')
script = f'function(){{ {script} }}'
object_id = element._object_id
command = RuntimeCommands.call_function_on(
object_id, script, return_by_value=True
)
else:
command = RuntimeCommands.evaluate_script(script)
return await self._execute_command(command)
async def _refresh_if_url_not_changed(self, url: str):
"""
Refreshes the page if the URL has not changed.
Args:
url (str): The URL to compare against.
"""
current_url = await self.current_url
if current_url == url:
await self.refresh()
return True
return False
async def _wait_page_load(self, timeout: int = 300):
"""
Waits for the page to finish loading.
Args:
timeout (int): Maximum time in seconds to wait for the page
to load. Defaults to 300 seconds.
Raises:
asyncio.TimeoutError: If the page does not finish loading within
the specified timeout.
"""
start_time = asyncio.get_event_loop().time()
while True:
response = await self._execute_command(
RuntimeCommands.evaluate_script('document.readyState')
)
if response['result']['result']['value'] == 'complete':
break
if asyncio.get_event_loop().time() - start_time > timeout:
raise asyncio.TimeoutError('Page load timed out')
await asyncio.sleep(0.5)
@asynccontextmanager
async def expect_file_chooser(
self, files: Union[str, Path, List[Union[str, Path]]]
):
"""
Provide a context manager that expects a file chooser dialog to be
opened and handles the file upload. When a file selection signal
is captured, the file is uploaded.
Args:
files (Union[str, Path, List[Union[str, Path]]]): The files to be
uploaded.
        Returns:
            None
"""
async def event_handler(event):
await self._execute_command(
DomCommands.upload_files(
files=files,
backend_node_id=event['params']['backendNodeId'],
)
)
if self.page_events_enabled is False:
_before_page_events_enabled = False
await self.enable_page_events()
else:
_before_page_events_enabled = True
if self.intercept_file_chooser_dialog_enabled is False:
await self.enable_intercept_file_chooser_dialog()
await self.on(
PageEvents.FILE_CHOOSER_OPENED, event_handler, temporary=True
)
yield
if self.intercept_file_chooser_dialog_enabled is True:
await self.disable_intercept_file_chooser_dialog()
if _before_page_events_enabled is False:
await self.disable_page_events()
async def _bypass_cloudflare(
self,
event: dict,
custom_selector: Optional[Tuple[By, str]] = None,
time_before_click: int = 2,
time_to_wait_captcha: int = 5,
):
"""
Attempt to bypass Cloudflare Turnstile captcha when detected.
Args:
event (dict): The event payload (unused)
custom_selector (Optional[Tuple[By, str]]): Custom selector
to locate the captcha element. Defaults to
(By.CLASS_NAME, 'cf-turnstile').
time_before_click (int): Time to wait before clicking the captcha
element in seconds. Defaults to 2 seconds.
time_to_wait_captcha (int): Timeout for the captcha element to be
found in seconds. Defaults to 5 seconds.
"""
try:
selector = custom_selector or (By.CLASS_NAME, 'cf-turnstile')
if element := await self.wait_element(
*selector, timeout=time_to_wait_captcha, raise_exc=False
):
# adjust the div size to shadow root size
await self.execute_script(
'argument.style="width: 300px"', element
)
await asyncio.sleep(time_before_click)
await element.click()
except Exception as exc:
logger.error(f'Error in cloudflare bypass: {exc}')
@asynccontextmanager
async def expect_and_bypass_cloudflare_captcha(
self,
custom_selector: Optional[Tuple[By, str]] = None,
time_before_click: int = 2,
time_to_wait_captcha: Optional[int] = 5,
):
"""
Context manager to handle Cloudflare Turnstile captcha.
This method sets up a callback that will automatically attempt to
bypass the Cloudflare captcha when the page loads. The main code
will wait until the captcha handling is complete before continuing.
It creates an event to coordinate between the callback and the main
code.
Args:
custom_selector (Optional[Tuple[By, str]]): Custom selector
to locate the captcha element. Defaults to
(By.CLASS_NAME, 'cf-turnstile').
time_before_click (int): Time to wait before clicking the captcha
element in seconds. Defaults to 2 seconds.
time_to_wait_captcha (Optional[int]): Timeout for the captcha
element to be found in seconds. Defaults to 5 seconds.
Returns:
None
"""
captcha_processed = asyncio.Event()
async def bypass_cloudflare(_: dict):
try:
await self._bypass_cloudflare(
_,
custom_selector,
time_before_click,
time_to_wait_captcha,
)
finally:
captcha_processed.set()
_before_page_events_enabled = self.page_events_enabled
if not _before_page_events_enabled:
await self.enable_page_events()
callback_id = await self.on(PageEvents.PAGE_LOADED, bypass_cloudflare)
try:
yield
await captcha_processed.wait()
finally:
await self._connection_handler.remove_callback(callback_id)
if not _before_page_events_enabled:
await self.disable_page_events()
async def enable_auto_solve_cloudflare_captcha(
self,
custom_selector: Optional[Tuple[By, str]] = None,
time_before_click: int = 2,
time_to_wait_captcha: int = 5,
):
"""
Enables automatic solving of Cloudflare Turnstile captcha.
This method sets up a callback that will automatically attempt to
bypass the Cloudflare captcha when the page loads. Unlike the
context manager version, this keeps the callback active until
explicitly disabled.
Args:
custom_selector (Optional[Tuple[By, str]]): Custom selector
to locate the captcha element. Defaults to
(By.CLASS_NAME, 'cf-turnstile').
time_before_click (int): Time to wait before clicking the captcha
element in seconds. Defaults to 2 seconds.
time_to_wait_captcha (int): Timeout for the captcha element to be
found in seconds. Defaults to 5 seconds.
Returns:
int: Callback ID that can be used to disable the auto-solving
"""
if not self.page_events_enabled:
await self.enable_page_events()
callback = partial(
self._bypass_cloudflare,
custom_selector=custom_selector,
time_before_click=time_before_click,
time_to_wait_captcha=time_to_wait_captcha,
)
self._cloudflare_captcha_callback_id = await self.on(
PageEvents.PAGE_LOADED, callback
)
async def disable_auto_solve_cloudflare_captcha(self):
"""
Disables automatic solving of Cloudflare Turnstile captcha.
Returns:
None
"""
await self._connection_handler.remove_callback(
self._cloudflare_captcha_callback_id
)
self._cloudflare_captcha_callback_id = None
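# --- Illustrative usage sketch ---
# A hedged example of driving a Page directly. The connection port and page id
# are placeholders that are normally obtained from the Browser / DevTools
# layer rather than hard-coded like this.
if __name__ == '__main__':
    async def _demo():
        page = Page(connection_port=9222, page_id='<target-page-id>')
        await page.go_to('https://example.com')
        print(await page.current_url)
        await page.get_screenshot('example.png')
        await page.close()
    asyncio.run(_demo())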
| 5,539
|
f3a968ce0028c85f9d309a9d22ea60f7d3fcd2a98c7d49687251526a25d2701c
| 31.486216
| 79
| 0.586638
| 4.680267
| false
| false
| false
| false
|
openai/openai-agents-python
|
src/agents/extensions/handoff_filters.py
| 1,977
| 0
|
MIT License
|
from __future__ import annotations
from ..handoffs import HandoffInputData
from ..items import (
HandoffCallItem,
HandoffOutputItem,
RunItem,
ToolCallItem,
ToolCallOutputItem,
TResponseInputItem,
)
"""Contains common handoff input filters, for convenience. """
def remove_all_tools(handoff_input_data: HandoffInputData) -> HandoffInputData:
"""Filters out all tool items: file search, web search and function calls+output."""
history = handoff_input_data.input_history
new_items = handoff_input_data.new_items
filtered_history = (
_remove_tool_types_from_input(history) if isinstance(history, tuple) else history
)
filtered_pre_handoff_items = _remove_tools_from_items(handoff_input_data.pre_handoff_items)
filtered_new_items = _remove_tools_from_items(new_items)
return HandoffInputData(
input_history=filtered_history,
pre_handoff_items=filtered_pre_handoff_items,
new_items=filtered_new_items,
)
def _remove_tools_from_items(items: tuple[RunItem, ...]) -> tuple[RunItem, ...]:
filtered_items = []
for item in items:
        if isinstance(
            item,
            (HandoffCallItem, HandoffOutputItem, ToolCallItem, ToolCallOutputItem),
        ):
continue
filtered_items.append(item)
return tuple(filtered_items)
def _remove_tool_types_from_input(
items: tuple[TResponseInputItem, ...],
) -> tuple[TResponseInputItem, ...]:
tool_types = [
"function_call",
"function_call_output",
"computer_call",
"computer_call_output",
"file_search_call",
"web_search_call",
]
filtered_items: list[TResponseInputItem] = []
for item in items:
itype = item.get("type")
if itype in tool_types:
continue
filtered_items.append(item)
return tuple(filtered_items)
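# --- Illustrative usage sketch ---
# A minimal example of applying the filter. The history items below are
# hand-written dicts in the Responses input format, used only to show which
# entries get dropped; real runs build this data via the SDK.
if __name__ == "__main__":
    example = HandoffInputData(
        input_history=(
            {"role": "user", "content": "What's the weather like?"},
            {"type": "function_call", "name": "get_weather", "arguments": "{}", "call_id": "c1"},
            {"type": "function_call_output", "call_id": "c1", "output": "Sunny"},
        ),
        pre_handoff_items=(),
        new_items=(),
    )
    filtered = remove_all_tools(example)
    print(filtered.input_history)  # only the plain user message survives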
| 533
|
7da5d4e08f0d80b8d44816665c6bc70024922afb10e68b599c44ce255823ce07
| 28.507463
| 95
| 0.650986
| 3.709193
| false
| false
| false
| false
|
browser-use/browser-use
|
examples/use-cases/google_sheets.py
| 7,576
| 0
|
MIT License
|
import os
import sys
from browser_use.browser.context import BrowserContext
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import asyncio
import pyperclip
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from browser_use import ActionResult, Agent, Controller
from browser_use.browser.browser import Browser, BrowserConfig
browser = Browser(
config=BrowserConfig(
browser_binary_path='/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
),
)
# Load environment variables
load_dotenv()
if not os.getenv('OPENAI_API_KEY'):
raise ValueError('OPENAI_API_KEY is not set. Please add it to your environment variables.')
controller = Controller()
def is_google_sheet(page) -> bool:
return page.url.startswith('https://docs.google.com/spreadsheets/')
@controller.registry.action('Google Sheets: Open a specific Google Sheet')
async def open_google_sheet(browser: BrowserContext, google_sheet_url: str):
page = await browser.get_current_page()
if page.url != google_sheet_url:
await page.goto(google_sheet_url)
await page.wait_for_load_state()
if not is_google_sheet(page):
return ActionResult(error='Failed to open Google Sheet, are you sure you have permissions to access this sheet?')
return ActionResult(extracted_content=f'Opened Google Sheet {google_sheet_url}', include_in_memory=False)
@controller.registry.action('Google Sheets: Get the contents of the entire sheet', page_filter=is_google_sheet)
async def get_sheet_contents(browser: BrowserContext):
page = await browser.get_current_page()
# select all cells
await page.keyboard.press('Enter')
await page.keyboard.press('Escape')
await page.keyboard.press('ControlOrMeta+A')
await page.keyboard.press('ControlOrMeta+C')
extracted_tsv = pyperclip.paste()
return ActionResult(extracted_content=extracted_tsv, include_in_memory=True)
@controller.registry.action('Google Sheets: Select a specific cell or range of cells', page_filter=is_google_sheet)
async def select_cell_or_range(browser: BrowserContext, cell_or_range: str):
page = await browser.get_current_page()
	await page.keyboard.press('Enter')  # make sure we don't delete current cell contents if we were last editing
await page.keyboard.press('Escape') # to clear current focus (otherwise select range popup is additive)
await asyncio.sleep(0.1)
await page.keyboard.press('Home') # move cursor to the top left of the sheet first
await page.keyboard.press('ArrowUp')
await asyncio.sleep(0.1)
await page.keyboard.press('Control+G') # open the goto range popup
await asyncio.sleep(0.2)
await page.keyboard.type(cell_or_range, delay=0.05)
await asyncio.sleep(0.2)
await page.keyboard.press('Enter')
await asyncio.sleep(0.2)
await page.keyboard.press('Escape') # to make sure the popup still closes in the case where the jump failed
return ActionResult(extracted_content=f'Selected cell {cell_or_range}', include_in_memory=False)
@controller.registry.action('Google Sheets: Get the contents of a specific cell or range of cells', page_filter=is_google_sheet)
async def get_range_contents(browser: BrowserContext, cell_or_range: str):
page = await browser.get_current_page()
await select_cell_or_range(browser, cell_or_range)
await page.keyboard.press('ControlOrMeta+C')
await asyncio.sleep(0.1)
extracted_tsv = pyperclip.paste()
return ActionResult(extracted_content=extracted_tsv, include_in_memory=True)
@controller.registry.action('Google Sheets: Clear the currently selected cells', page_filter=is_google_sheet)
async def clear_selected_range(browser: BrowserContext):
page = await browser.get_current_page()
await page.keyboard.press('Backspace')
return ActionResult(extracted_content='Cleared selected range', include_in_memory=False)
@controller.registry.action('Google Sheets: Input text into the currently selected cell', page_filter=is_google_sheet)
async def input_selected_cell_text(browser: BrowserContext, text: str):
page = await browser.get_current_page()
await page.keyboard.type(text, delay=0.1)
await page.keyboard.press('Enter') # make sure to commit the input so it doesn't get overwritten by the next action
await page.keyboard.press('ArrowUp')
return ActionResult(extracted_content=f'Inputted text {text}', include_in_memory=False)
@controller.registry.action('Google Sheets: Batch update a range of cells', page_filter=is_google_sheet)
async def update_range_contents(browser: BrowserContext, range: str, new_contents_tsv: str):
page = await browser.get_current_page()
await select_cell_or_range(browser, range)
# simulate paste event from clipboard with TSV content
await page.evaluate(f"""
const clipboardData = new DataTransfer();
clipboardData.setData('text/plain', `{new_contents_tsv}`);
document.activeElement.dispatchEvent(new ClipboardEvent('paste', {{clipboardData}}));
""")
return ActionResult(extracted_content=f'Updated cell {range} with {new_contents_tsv}', include_in_memory=False)
# many more snippets for keyboard-shortcut based Google Sheets automation can be found here, see:
# - https://github.com/philc/sheetkeys/blob/master/content_scripts/sheet_actions.js
# - https://github.com/philc/sheetkeys/blob/master/content_scripts/commands.js
# - https://support.google.com/docs/answer/181110?hl=en&co=GENIE.Platform%3DDesktop#zippy=%2Cmac-shortcuts
# Tip: LLM is bad at spatial reasoning, don't make it navigate with arrow keys relative to current cell
# if given arrow keys, it will try to jump from G1 to A2 by pressing Down, without realizing it needs to go Down and then Left four times
async def main():
async with await browser.new_context() as context:
model = ChatOpenAI(model='gpt-4o')
eraser = Agent(
task="""
Clear all the existing values in columns A through F in this Google Sheet:
https://docs.google.com/spreadsheets/d/1INaIcfpYXlMRWO__de61SHFCaqt1lfHlcvtXZPItlpI/edit
""",
llm=model,
browser_context=context,
controller=controller,
)
await eraser.run()
researcher = Agent(
task="""
Google to find the full name, nationality, and date of birth of the CEO of the top 10 Fortune 100 companies.
For each company, append a row to this existing Google Sheet: https://docs.google.com/spreadsheets/d/1INaIcfpYXlMRWO__de61SHFCaqt1lfHlcvtXZPItlpI/edit
Make sure column headers are present and all existing values in the sheet are formatted correctly.
Columns:
A: Company Name
B: CEO Full Name
C: CEO Country of Birth
D: CEO Date of Birth (YYYY-MM-DD)
E: Source URL where the information was found
""",
llm=model,
browser_context=context,
controller=controller,
)
await researcher.run()
improvised_continuer = Agent(
task="""
Read the Google Sheet https://docs.google.com/spreadsheets/d/1INaIcfpYXlMRWO__de61SHFCaqt1lfHlcvtXZPItlpI/edit
Add 3 more rows to the bottom continuing the existing pattern, make sure any data you add is sourced correctly.
""",
llm=model,
browser_context=context,
controller=controller,
)
await improvised_continuer.run()
final_fact_checker = Agent(
task="""
Read the Google Sheet https://docs.google.com/spreadsheets/d/1INaIcfpYXlMRWO__de61SHFCaqt1lfHlcvtXZPItlpI/edit
Fact-check every entry, add a new column F with your findings for each row.
Make sure to check the source URL for each row, and make sure the information is correct.
""",
llm=model,
browser_context=context,
controller=controller,
)
await final_fact_checker.run()
if __name__ == '__main__':
asyncio.run(main())
| 2,298
|
623f6d876c280782cedee4a319aa635d451ac4dbde667498850d02757688e115
| 38.253886
| 154
| 0.757128
| 3.29678
| false
| false
| false
| false
|
HKUDS/AutoAgent
|
autoagent/workflows/math_solver_workflow_flow.py
| 8,795
| 0
|
MIT License
|
import asyncio
import json
import argparse
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletionMessageToolCall
from autoagent.flow import default_drive, EventInput, ReturnBehavior
from autoagent.flow.dynamic import goto_events, abort_this
import re
from autoagent import MetaChain
from autoagent.types import Response
from autoagent.registry import register_workflow
def extract_answer(response: str, key: str):
pattern = f"<{key}>(.*?)</{key}>"
matches = re.findall(pattern, response)
return matches[0] if len(matches) > 0 else None
from autoagent.agents import get_math_solver_agent
from autoagent.agents import get_vote_aggregator_agent
@default_drive.make_event
async def on_start(event: EventInput, global_ctx):
print("start the workflow:" + 'math_solver_workflow')
@default_drive.listen_group([on_start])
async def solve_with_gpt4(event: EventInput, global_ctx):
inputs = [{'key': 'math_problem', 'description': 'The math problem that needs to be solved.'}]
input_dict = dict()
for inp in inputs:
input_dict[inp["key"]] = global_ctx.get(inp["key"], None)
messages = global_ctx.get('messages', [])
task = 'Solve the math problem using systematic approach and show detailed steps.'
outputs = [{'key': 'gpt4_solution', 'description': 'The solution generated by GPT-4 model.', 'condition': None, 'action': {'type': 'RESULT', 'value': None}}]
agent = get_math_solver_agent('gpt-4o-2024-08-06')
input_str = []
for key, value in input_dict.items():
input_str.append(f"The {key.replace('_', ' ')} is {value}")
input_str = "\n".join(input_str) + "\n"
query = input_str + '.\nThe task is: ' + task + '.\n'
messages.append({
"role": "user",
"content": query
})
client = MetaChain()
response: Response = await client.run_async(agent = agent, messages = messages, context_variables = global_ctx, debug = True)
result = response.messages[-1]["content"]
messages.extend(response.messages)
global_ctx["messages"] = messages
for output in outputs:
ans = extract_answer(result, output["key"])
if ans:
if output["action"]["type"] == "RESULT":
global_ctx[output["key"]] = ans
return ans
elif output["action"]["type"] == "ABORT":
return abort_this()
elif output["action"]["type"] == "GO_TO":
return goto_events([output["action"]["value"]])
elif len(outputs) == 1:
global_ctx[output["key"]] = result
return result
raise Exception("No valid answer found")
@default_drive.listen_group([on_start])
async def solve_with_claude(event: EventInput, global_ctx):
inputs = [{'key': 'math_problem', 'description': 'The math problem that needs to be solved.'}]
input_dict = dict()
for inp in inputs:
input_dict[inp["key"]] = global_ctx.get(inp["key"], None)
messages = global_ctx.get('messages', [])
task = 'Solve the math problem using systematic approach and show detailed steps.'
outputs = [{'key': 'claude_solution', 'description': 'The solution generated by Claude model.', 'condition': None, 'action': {'type': 'RESULT', 'value': None}}]
agent = get_math_solver_agent('claude-3-5-sonnet-20241022')
input_str = []
for key, value in input_dict.items():
input_str.append(f"The {key.replace('_', ' ')} is {value}")
input_str = "\n".join(input_str) + "\n"
query = input_str + '.\nThe task is: ' + task + '.\n'
messages.append({
"role": "user",
"content": query
})
client = MetaChain()
response: Response = await client.run_async(agent = agent, messages = messages, context_variables = global_ctx, debug = True)
result = response.messages[-1]["content"]
messages.extend(response.messages)
global_ctx["messages"] = messages
for output in outputs:
ans = extract_answer(result, output["key"])
if ans:
if output["action"]["type"] == "RESULT":
global_ctx[output["key"]] = ans
return ans
elif output["action"]["type"] == "ABORT":
return abort_this()
elif output["action"]["type"] == "GO_TO":
return goto_events([output["action"]["value"]])
elif len(outputs) == 1:
global_ctx[output["key"]] = result
return result
raise Exception("No valid answer found")
@default_drive.listen_group([on_start])
async def solve_with_deepseek(event: EventInput, global_ctx):
inputs = [{'key': 'math_problem', 'description': 'The math problem that needs to be solved.'}]
input_dict = dict()
for inp in inputs:
input_dict[inp["key"]] = global_ctx.get(inp["key"], None)
messages = global_ctx.get('messages', [])
task = 'Solve the math problem using systematic approach and show detailed steps.'
outputs = [{'key': 'deepseek_solution', 'description': 'The solution generated by Deepseek model.', 'condition': None, 'action': {'type': 'RESULT', 'value': None}}]
agent = get_math_solver_agent('deepseek/deepseek-chat')
input_str = []
for key, value in input_dict.items():
input_str.append(f"The {key.replace('_', ' ')} is {value}")
input_str = "\n".join(input_str) + "\n"
query = input_str + '.\nThe task is: ' + task + '.\n'
messages.append({
"role": "user",
"content": query
})
client = MetaChain()
response: Response = await client.run_async(agent = agent, messages = messages, context_variables = global_ctx, debug = True)
result = response.messages[-1]["content"]
messages.extend(response.messages)
global_ctx["messages"] = messages
for output in outputs:
ans = extract_answer(result, output["key"])
if ans:
if output["action"]["type"] == "RESULT":
global_ctx[output["key"]] = ans
return ans
elif output["action"]["type"] == "ABORT":
return abort_this()
elif output["action"]["type"] == "GO_TO":
return goto_events([output["action"]["value"]])
elif len(outputs) == 1:
global_ctx[output["key"]] = result
return result
raise Exception("No valid answer found")
@default_drive.listen_group([solve_with_gpt4, solve_with_claude, solve_with_deepseek])
async def aggregate_solutions(event: EventInput, global_ctx):
inputs = [{'key': 'gpt4_solution', 'description': 'The solution generated by GPT-4 model.'}, {'key': 'claude_solution', 'description': 'The solution generated by Claude model.'}, {'key': 'deepseek_solution', 'description': 'The solution generated by Deepseek model.'}]
input_dict = dict()
for inp in inputs:
input_dict[inp["key"]] = global_ctx.get(inp["key"], None)
messages = global_ctx.get('messages', [])
task = 'Compare all solutions and determine the final answer through majority voting.'
outputs = [{'key': 'final_solution', 'description': 'The final agreed-upon solution after majority voting.', 'condition': None, 'action': {'type': 'RESULT', 'value': None}}]
agent = get_vote_aggregator_agent('gpt-4o-2024-08-06')
input_str = []
for key, value in input_dict.items():
input_str.append(f"The {key.replace('_', ' ')} is {value}")
input_str = "\n".join(input_str) + "\n"
query = input_str + '.\nThe task is: ' + task + '.\n'
messages.append({
"role": "user",
"content": query
})
client = MetaChain()
response: Response = await client.run_async(agent = agent, messages = messages, context_variables = global_ctx, debug = True)
result = response.messages[-1]["content"]
messages.extend(response.messages)
global_ctx["messages"] = messages
for output in outputs:
ans = extract_answer(result, output["key"])
if ans:
if output["action"]["type"] == "RESULT":
global_ctx[output["key"]] = ans
return ans
elif output["action"]["type"] == "ABORT":
return abort_this()
elif output["action"]["type"] == "GO_TO":
return goto_events([output["action"]["value"]])
elif len(outputs) == 1:
global_ctx[output["key"]] = result
return result
raise Exception("No valid answer found")
@register_workflow(name = 'majority_voting')
async def majority_voting(system_input: str):
storage_results = dict(math_problem = system_input)
await default_drive.invoke_event(
on_start,
global_ctx=storage_results,
)
system_output = storage_results.get('final_solution', None)
return system_output
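# --- Illustrative usage sketch ---
# Running the workflow end to end. This assumes the model credentials used by
# the solver and aggregator agents are already configured in the environment.
if __name__ == "__main__":
    answer = asyncio.run(majority_voting("Solve for x: 2x + 6 = 14."))
    print(answer)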
| 2,320
|
c86700e0fd59c6b80869c02761a41ded8e8ea73a9979cb1a6bacfc6807888ce7
| 42.756219
| 272
| 0.612166
| 3.790948
| false
| false
| false
| false
|
MadcowD/ell
|
examples/claude.py
| 387
| 0
|
MIT License
|
import ell # type: ignore
@ell.simple(model="claude-3-5-sonnet-20241022", max_tokens=100)
def hello_from_claude():
"""You are an AI assistant. Your task is to respond to the user's message with a friendly greeting."""
return "Say hello to the world!!!"
if __name__ == "__main__":
ell.init(verbose=True, store="./logdir", autocommit=True)
print(hello_from_claude())
| 124
|
e4c04ea149b61475c86a19b1b3cec0ffaf179b02e6cef871cd068c354a327e47
| 31.25
| 106
| 0.666667
| 3.120968
| false
| false
| false
| false
|
fudan-generative-vision/hallo
|
scripts/train_stage2.py
| 37,347
| 0
|
MIT License
|
# pylint: disable=E1101,C0415,W0718,R0801
# scripts/train_stage2.py
"""
This is the main training script for stage 2 of the project.
It imports the required packages, defines the necessary classes and functions, and trains the model using the provided configuration.
The script includes the following classes and functions:
1. Net: A PyTorch model that takes noisy latents, timesteps, reference image latents, face embeddings,
and face masks as input and returns the denoised latents.
2. get_attention_mask: A function that rearranges the mask tensors to the required format.
3. get_noise_scheduler: A function that creates and returns the noise schedulers for training and validation.
4. process_audio_emb: A function that processes the audio embeddings to concatenate with other tensors.
5. log_validation: A function that logs the validation information using the given VAE, image encoder,
network, scheduler, accelerator, width, height, and configuration.
6. train_stage2_process: A function that processes the training stage 2 using the given configuration.
7. load_config: A function that loads the configuration file from the given path.
The script also includes the necessary imports and a brief description of the purpose of the file.
"""
import argparse
import copy
import logging
import math
import os
import random
import time
import warnings
from datetime import datetime
from typing import List, Tuple
import diffusers
import mlflow
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import DistributedDataParallelKwargs
from diffusers import AutoencoderKL, DDIMScheduler
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version
from diffusers.utils.import_utils import is_xformers_available
from einops import rearrange, repeat
from omegaconf import OmegaConf
from torch import nn
from tqdm.auto import tqdm
from hallo.animate.face_animate import FaceAnimatePipeline
from hallo.datasets.audio_processor import AudioProcessor
from hallo.datasets.image_processor import ImageProcessor
from hallo.datasets.talk_video import TalkingVideoDataset
from hallo.models.audio_proj import AudioProjModel
from hallo.models.face_locator import FaceLocator
from hallo.models.image_proj import ImageProjModel
from hallo.models.mutual_self_attention import ReferenceAttentionControl
from hallo.models.unet_2d_condition import UNet2DConditionModel
from hallo.models.unet_3d import UNet3DConditionModel
from hallo.utils.util import (compute_snr, delete_additional_ckpt,
import_filename, init_output_dir,
load_checkpoint, save_checkpoint,
seed_everything, tensor_to_video)
warnings.filterwarnings("ignore")
# Will raise an error if the minimal version of diffusers is not installed. Remove at your own risk.
check_min_version("0.10.0.dev0")
logger = get_logger(__name__, log_level="INFO")
class Net(nn.Module):
"""
The Net class defines a neural network model that combines a reference UNet2DConditionModel,
a denoising UNet3DConditionModel, a face locator, and other components to animate a face in a static image.
Args:
reference_unet (UNet2DConditionModel): The reference UNet2DConditionModel used for face animation.
denoising_unet (UNet3DConditionModel): The denoising UNet3DConditionModel used for face animation.
face_locator (FaceLocator): The face locator model used for face animation.
reference_control_writer: The reference control writer component.
reference_control_reader: The reference control reader component.
imageproj: The image projection model.
audioproj: The audio projection model.
Forward method:
noisy_latents (torch.Tensor): The noisy latents tensor.
timesteps (torch.Tensor): The timesteps tensor.
ref_image_latents (torch.Tensor): The reference image latents tensor.
face_emb (torch.Tensor): The face embeddings tensor.
audio_emb (torch.Tensor): The audio embeddings tensor.
mask (torch.Tensor): Hard face mask for face locator.
full_mask (torch.Tensor): Pose mask.
face_mask (torch.Tensor): Face mask.
lip_mask (torch.Tensor): Lip mask.
uncond_img_fwd (bool): A flag indicating whether to perform reference image unconditional forward pass.
uncond_audio_fwd (bool): A flag indicating whether to perform audio unconditional forward pass.
Returns:
torch.Tensor: The output tensor of the neural network model.
"""
def __init__(
self,
reference_unet: UNet2DConditionModel,
denoising_unet: UNet3DConditionModel,
face_locator: FaceLocator,
reference_control_writer,
reference_control_reader,
imageproj,
audioproj,
):
super().__init__()
self.reference_unet = reference_unet
self.denoising_unet = denoising_unet
self.face_locator = face_locator
self.reference_control_writer = reference_control_writer
self.reference_control_reader = reference_control_reader
self.imageproj = imageproj
self.audioproj = audioproj
def forward(
self,
noisy_latents: torch.Tensor,
timesteps: torch.Tensor,
ref_image_latents: torch.Tensor,
face_emb: torch.Tensor,
audio_emb: torch.Tensor,
mask: torch.Tensor,
full_mask: torch.Tensor,
face_mask: torch.Tensor,
lip_mask: torch.Tensor,
uncond_img_fwd: bool = False,
uncond_audio_fwd: bool = False,
):
"""
simple docstring to prevent pylint error
"""
face_emb = self.imageproj(face_emb)
mask = mask.to(device="cuda")
mask_feature = self.face_locator(mask)
audio_emb = audio_emb.to(
device=self.audioproj.device, dtype=self.audioproj.dtype)
audio_emb = self.audioproj(audio_emb)
# condition forward
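# When uncond_img_fwd is set, the reference UNet pass below is skipped so no
# reference features are written; when uncond_audio_fwd is set, the audio
# embedding is zeroed. Together these act as conditioning dropout for
# classifier-free-guidance-style training.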
if not uncond_img_fwd:
ref_timesteps = torch.zeros_like(timesteps)
ref_timesteps = repeat(
ref_timesteps,
"b -> (repeat b)",
repeat=ref_image_latents.size(0) // ref_timesteps.size(0),
)
self.reference_unet(
ref_image_latents,
ref_timesteps,
encoder_hidden_states=face_emb,
return_dict=False,
)
self.reference_control_reader.update(self.reference_control_writer)
if uncond_audio_fwd:
audio_emb = torch.zeros_like(audio_emb).to(
device=audio_emb.device, dtype=audio_emb.dtype
)
model_pred = self.denoising_unet(
noisy_latents,
timesteps,
mask_cond_fea=mask_feature,
encoder_hidden_states=face_emb,
audio_embedding=audio_emb,
full_mask=full_mask,
face_mask=face_mask,
lip_mask=lip_mask
).sample
return model_pred
def get_attention_mask(mask: torch.Tensor, weight_dtype: torch.dtype) -> torch.Tensor:
"""
Rearrange the mask tensors to the required format.
Args:
mask (torch.Tensor): The input mask tensor.
weight_dtype (torch.dtype): The data type for the mask tensor.
Returns:
torch.Tensor: The rearranged mask tensor.
"""
if isinstance(mask, list):
_mask = []
for m in mask:
_mask.append(
rearrange(m, "b f 1 h w -> (b f) (h w)").to(weight_dtype))
return _mask
mask = rearrange(mask, "b f 1 h w -> (b f) (h w)").to(weight_dtype)
return mask
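# Example of the rearrangement above (shapes are illustrative): a mask of
# shape (b=2, f=14, 1, h=64, w=64) becomes a (28, 4096) tensor, i.e. one
# flattened spatial mask per (batch, frame) pair.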
def get_noise_scheduler(cfg: argparse.Namespace) -> Tuple[DDIMScheduler, DDIMScheduler]:
"""
Create the noise schedulers used for training and validation.
Args:
cfg (argparse.Namespace): Configuration object.
Returns:
Tuple[DDIMScheduler, DDIMScheduler]: Train noise scheduler and validation noise scheduler.
"""
sched_kwargs = OmegaConf.to_container(cfg.noise_scheduler_kwargs)
if cfg.enable_zero_snr:
sched_kwargs.update(
rescale_betas_zero_snr=True,
timestep_spacing="trailing",
prediction_type="v_prediction",
)
val_noise_scheduler = DDIMScheduler(**sched_kwargs)
sched_kwargs.update({"beta_schedule": "scaled_linear"})
train_noise_scheduler = DDIMScheduler(**sched_kwargs)
return train_noise_scheduler, val_noise_scheduler
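# Note: the enable_zero_snr branch (zero terminal SNR, "trailing" timestep
# spacing, v-prediction) appears to follow the recipe from "Common Diffusion
# Noise Schedules and Sample Steps are Flawed" (Lin et al., 2023); the training
# scheduler additionally switches to a scaled_linear beta schedule.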
def process_audio_emb(audio_emb: torch.Tensor) -> torch.Tensor:
"""
Process the audio embedding to concatenate with other tensors.
Parameters:
audio_emb (torch.Tensor): The audio embedding tensor to process.
Returns:
audio_emb (torch.Tensor): Tensor in which each frame is stacked with its
two preceding and two following frames (shape (f, 5, ...)).
"""
concatenated_tensors = []
for i in range(audio_emb.shape[0]):
vectors_to_concat = [
audio_emb[max(min(i + j, audio_emb.shape[0] - 1), 0)] for j in range(-2, 3)]
concatenated_tensors.append(torch.stack(vectors_to_concat, dim=0))
audio_emb = torch.stack(concatenated_tensors, dim=0)
return audio_emb
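# Concretely: for an input of f frames, frame i is stacked with frames
# i-2 .. i+2 (clamped to the valid range at the boundaries), so the output has
# shape (f, 5, ...) and gives each frame a short temporal audio context.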
def log_validation(
accelerator: Accelerator,
vae: AutoencoderKL,
net: Net,
scheduler: DDIMScheduler,
width: int,
height: int,
clip_length: int = 24,
generator: torch.Generator = None,
cfg: dict = None,
save_dir: str = None,
global_step: int = 0,
times: int = None,
face_analysis_model_path: str = "",
) -> torch.Tensor:
"""
Log validation video during the training process.
Args:
accelerator (Accelerator): The accelerator for distributed training.
vae (AutoencoderKL): The autoencoder model.
net (Net): The main neural network model.
scheduler (DDIMScheduler): The scheduler for noise.
width (int): The width of the input images.
height (int): The height of the input images.
clip_length (int): The length of the video clips. Defaults to 24.
generator (torch.Generator): The random number generator. Defaults to None.
cfg (dict): The configuration dictionary. Defaults to None.
save_dir (str): The directory to save validation results. Defaults to None.
global_step (int): The current global step in training. Defaults to 0.
times (int): The number of inference times. Defaults to None.
face_analysis_model_path (str): The path to the face analysis model. Defaults to "".
Returns:
torch.Tensor: The tensor result of the validation.
"""
ori_net = accelerator.unwrap_model(net)
reference_unet = ori_net.reference_unet
denoising_unet = ori_net.denoising_unet
face_locator = ori_net.face_locator
imageproj = ori_net.imageproj
audioproj = ori_net.audioproj
generator = torch.manual_seed(42)
tmp_denoising_unet = copy.deepcopy(denoising_unet)
pipeline = FaceAnimatePipeline(
vae=vae,
reference_unet=reference_unet,
denoising_unet=tmp_denoising_unet,
face_locator=face_locator,
image_proj=imageproj,
scheduler=scheduler,
)
pipeline = pipeline.to("cuda")
image_processor = ImageProcessor((width, height), face_analysis_model_path)
audio_processor = AudioProcessor(
cfg.data.sample_rate,
cfg.data.fps,
cfg.wav2vec_config.model_path,
cfg.wav2vec_config.features == "last",
os.path.dirname(cfg.audio_separator.model_path),
os.path.basename(cfg.audio_separator.model_path),
os.path.join(save_dir, '.cache', "audio_preprocess")
)
for idx, ref_img_path in enumerate(cfg.ref_img_path):
audio_path = cfg.audio_path[idx]
source_image_pixels, \
source_image_face_region, \
source_image_face_emb, \
source_image_full_mask, \
source_image_face_mask, \
source_image_lip_mask = image_processor.preprocess(
ref_img_path, os.path.join(save_dir, '.cache'), cfg.face_expand_ratio)
audio_emb, audio_length = audio_processor.preprocess(
audio_path, clip_length)
audio_emb = process_audio_emb(audio_emb)
source_image_pixels = source_image_pixels.unsqueeze(0)
source_image_face_region = source_image_face_region.unsqueeze(0)
source_image_face_emb = source_image_face_emb.reshape(1, -1)
source_image_face_emb = torch.tensor(source_image_face_emb)
source_image_full_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_full_mask
]
source_image_face_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_face_mask
]
source_image_lip_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_lip_mask
]
times = audio_emb.shape[0] // clip_length
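# The validation clip is generated in `times` consecutive chunks of
# clip_length frames; each chunk after the first is conditioned on the last
# n_motion_frames frames of the previous chunk (see the motion_frames branch
# below).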
tensor_result = []
generator = torch.manual_seed(42)
for t in range(times):
print(f"[{t+1}/{times}]")
if len(tensor_result) == 0:
# The first iteration
motion_zeros = source_image_pixels.repeat(
cfg.data.n_motion_frames, 1, 1, 1)
motion_zeros = motion_zeros.to(
dtype=source_image_pixels.dtype, device=source_image_pixels.device)
pixel_values_ref_img = torch.cat(
[source_image_pixels, motion_zeros], dim=0) # concat the ref image and the first motion frames
else:
motion_frames = tensor_result[-1][0]
motion_frames = motion_frames.permute(1, 0, 2, 3)
motion_frames = motion_frames[0 - cfg.data.n_motion_frames:]
motion_frames = motion_frames * 2.0 - 1.0
motion_frames = motion_frames.to(
dtype=source_image_pixels.dtype, device=source_image_pixels.device)
pixel_values_ref_img = torch.cat(
[source_image_pixels, motion_frames], dim=0) # concat the ref image and the motion frames
pixel_values_ref_img = pixel_values_ref_img.unsqueeze(0)
audio_tensor = audio_emb[
t * clip_length: min((t + 1) * clip_length, audio_emb.shape[0])
]
audio_tensor = audio_tensor.unsqueeze(0)
audio_tensor = audio_tensor.to(
device=audioproj.device, dtype=audioproj.dtype)
audio_tensor = audioproj(audio_tensor)
pipeline_output = pipeline(
ref_image=pixel_values_ref_img,
audio_tensor=audio_tensor,
face_emb=source_image_face_emb,
face_mask=source_image_face_region,
pixel_values_full_mask=source_image_full_mask,
pixel_values_face_mask=source_image_face_mask,
pixel_values_lip_mask=source_image_lip_mask,
width=cfg.data.train_width,
height=cfg.data.train_height,
video_length=clip_length,
num_inference_steps=cfg.inference_steps,
guidance_scale=cfg.cfg_scale,
generator=generator,
)
tensor_result.append(pipeline_output.videos)
tensor_result = torch.cat(tensor_result, dim=2)
tensor_result = tensor_result.squeeze(0)
tensor_result = tensor_result[:, :audio_length]
audio_name = os.path.basename(audio_path).split('.')[0]
ref_name = os.path.basename(ref_img_path).split('.')[0]
output_file = os.path.join(save_dir, f"{global_step}_{ref_name}_{audio_name}.mp4")
# save the result after all iteration
tensor_to_video(tensor_result, output_file, audio_path)
# clean up
del tmp_denoising_unet
del pipeline
del image_processor
del audio_processor
torch.cuda.empty_cache()
return tensor_result
def train_stage2_process(cfg: argparse.Namespace) -> None:
"""
Trains the model using the given configuration (cfg).
Args:
cfg (dict): The configuration dictionary containing the parameters for training.
Notes:
- This function trains the model using the given configuration.
- It initializes the necessary components for training, such as the pipeline, optimizer, and scheduler.
- The training progress is logged and tracked using the accelerator.
- The trained model is saved after the training is completed.
"""
kwargs = DistributedDataParallelKwargs(find_unused_parameters=False)
accelerator = Accelerator(
gradient_accumulation_steps=cfg.solver.gradient_accumulation_steps,
mixed_precision=cfg.solver.mixed_precision,
log_with="mlflow",
project_dir="./mlruns",
kwargs_handlers=[kwargs],
)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if cfg.seed is not None:
seed_everything(cfg.seed)
# create output dir for training
exp_name = cfg.exp_name
save_dir = f"{cfg.output_dir}/{exp_name}"
checkpoint_dir = os.path.join(save_dir, "checkpoints")
module_dir = os.path.join(save_dir, "modules")
validation_dir = os.path.join(save_dir, "validation")
if accelerator.is_main_process:
init_output_dir([save_dir, checkpoint_dir, module_dir, validation_dir])
accelerator.wait_for_everyone()
if cfg.weight_dtype == "fp16":
weight_dtype = torch.float16
elif cfg.weight_dtype == "bf16":
weight_dtype = torch.bfloat16
elif cfg.weight_dtype == "fp32":
weight_dtype = torch.float32
else:
raise ValueError(
f"Do not support weight dtype: {cfg.weight_dtype} during training"
)
# Create Models
vae = AutoencoderKL.from_pretrained(cfg.vae_model_path).to(
"cuda", dtype=weight_dtype
)
reference_unet = UNet2DConditionModel.from_pretrained(
cfg.base_model_path,
subfolder="unet",
).to(device="cuda", dtype=weight_dtype)
denoising_unet = UNet3DConditionModel.from_pretrained_2d(
cfg.base_model_path,
cfg.mm_path,
subfolder="unet",
unet_additional_kwargs=OmegaConf.to_container(
cfg.unet_additional_kwargs),
use_landmark=False
).to(device="cuda", dtype=weight_dtype)
imageproj = ImageProjModel(
cross_attention_dim=denoising_unet.config.cross_attention_dim,
clip_embeddings_dim=512,
clip_extra_context_tokens=4,
).to(device="cuda", dtype=weight_dtype)
face_locator = FaceLocator(
conditioning_embedding_channels=320,
).to(device="cuda", dtype=weight_dtype)
audioproj = AudioProjModel(
seq_len=5,
blocks=12,
channels=768,
intermediate_dim=512,
output_dim=768,
context_tokens=32,
).to(device="cuda", dtype=weight_dtype)
# load module weight from stage 1
stage1_ckpt_dir = cfg.stage1_ckpt_dir
denoising_unet.load_state_dict(
torch.load(
os.path.join(stage1_ckpt_dir, "denoising_unet.pth"),
map_location="cpu",
),
strict=False,
)
reference_unet.load_state_dict(
torch.load(
os.path.join(stage1_ckpt_dir, "reference_unet.pth"),
map_location="cpu",
),
strict=False,
)
face_locator.load_state_dict(
torch.load(
os.path.join(stage1_ckpt_dir, "face_locator.pth"),
map_location="cpu",
),
strict=False,
)
imageproj.load_state_dict(
torch.load(
os.path.join(stage1_ckpt_dir, "imageproj.pth"),
map_location="cpu",
),
strict=False,
)
# Freeze
vae.requires_grad_(False)
imageproj.requires_grad_(False)
reference_unet.requires_grad_(False)
denoising_unet.requires_grad_(False)
face_locator.requires_grad_(False)
audioproj.requires_grad_(True)
# Set motion module learnable
trainable_modules = cfg.trainable_para
for name, module in denoising_unet.named_modules():
if any(trainable_mod in name for trainable_mod in trainable_modules):
for params in module.parameters():
params.requires_grad_(True)
reference_control_writer = ReferenceAttentionControl(
reference_unet,
do_classifier_free_guidance=False,
mode="write",
fusion_blocks="full",
)
reference_control_reader = ReferenceAttentionControl(
denoising_unet,
do_classifier_free_guidance=False,
mode="read",
fusion_blocks="full",
)
net = Net(
reference_unet,
denoising_unet,
face_locator,
reference_control_writer,
reference_control_reader,
imageproj,
audioproj,
).to(dtype=weight_dtype)
# get noise scheduler
train_noise_scheduler, val_noise_scheduler = get_noise_scheduler(cfg)
if cfg.solver.enable_xformers_memory_efficient_attention:
if is_xformers_available():
reference_unet.enable_xformers_memory_efficient_attention()
denoising_unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError(
"xformers is not available. Make sure it is installed correctly"
)
if cfg.solver.gradient_checkpointing:
reference_unet.enable_gradient_checkpointing()
denoising_unet.enable_gradient_checkpointing()
if cfg.solver.scale_lr:
learning_rate = (
cfg.solver.learning_rate
* cfg.solver.gradient_accumulation_steps
* cfg.data.train_bs
* accelerator.num_processes
)
else:
learning_rate = cfg.solver.learning_rate
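# When cfg.solver.scale_lr is enabled above, the base learning rate is scaled
# linearly by the effective global batch size (per-device batch size x
# gradient accumulation steps x number of processes).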
# Initialize the optimizer
if cfg.solver.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError as exc:
raise ImportError(
"Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
) from exc
optimizer_cls = bnb.optim.AdamW8bit
else:
optimizer_cls = torch.optim.AdamW
trainable_params = list(
filter(lambda p: p.requires_grad, net.parameters()))
logger.info(f"Total trainable params {len(trainable_params)}")
optimizer = optimizer_cls(
trainable_params,
lr=learning_rate,
betas=(cfg.solver.adam_beta1, cfg.solver.adam_beta2),
weight_decay=cfg.solver.adam_weight_decay,
eps=cfg.solver.adam_epsilon,
)
# Scheduler
lr_scheduler = get_scheduler(
cfg.solver.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=cfg.solver.lr_warmup_steps
* cfg.solver.gradient_accumulation_steps,
num_training_steps=cfg.solver.max_train_steps
* cfg.solver.gradient_accumulation_steps,
)
# get data loader
train_dataset = TalkingVideoDataset(
img_size=(cfg.data.train_width, cfg.data.train_height),
sample_rate=cfg.data.sample_rate,
n_sample_frames=cfg.data.n_sample_frames,
n_motion_frames=cfg.data.n_motion_frames,
audio_margin=cfg.data.audio_margin,
data_meta_paths=cfg.data.train_meta_paths,
wav2vec_cfg=cfg.wav2vec_config,
)
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=cfg.data.train_bs, shuffle=True, num_workers=16
)
# Prepare everything with our `accelerator`.
(
net,
optimizer,
train_dataloader,
lr_scheduler,
) = accelerator.prepare(
net,
optimizer,
train_dataloader,
lr_scheduler,
)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(
len(train_dataloader) / cfg.solver.gradient_accumulation_steps
)
# Afterwards we recalculate our number of training epochs
num_train_epochs = math.ceil(
cfg.solver.max_train_steps / num_update_steps_per_epoch
)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initializes automatically on the main process.
if accelerator.is_main_process:
run_time = datetime.now().strftime("%Y%m%d-%H%M")
accelerator.init_trackers(
exp_name,
init_kwargs={"mlflow": {"run_name": run_time}},
)
# dump config file
mlflow.log_dict(
OmegaConf.to_container(
cfg), "config.yaml"
)
logger.info(f"save config to {save_dir}")
OmegaConf.save(
cfg, os.path.join(save_dir, "config.yaml")
)
# Train!
total_batch_size = (
cfg.data.train_bs
* accelerator.num_processes
* cfg.solver.gradient_accumulation_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {cfg.data.train_bs}")
logger.info(
f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}"
)
logger.info(
f" Gradient Accumulation steps = {cfg.solver.gradient_accumulation_steps}"
)
logger.info(f" Total optimization steps = {cfg.solver.max_train_steps}")
global_step = 0
first_epoch = 0
# Potentially load in the weights and states from a previous save
if cfg.resume_from_checkpoint:
logger.info(f"Loading checkpoint from {checkpoint_dir}")
global_step = load_checkpoint(cfg, checkpoint_dir, accelerator)
first_epoch = global_step // num_update_steps_per_epoch
# Only show the progress bar once on each machine.
progress_bar = tqdm(
range(global_step, cfg.solver.max_train_steps),
disable=not accelerator.is_local_main_process,
)
progress_bar.set_description("Steps")
for _ in range(first_epoch, num_train_epochs):
train_loss = 0.0
t_data_start = time.time()
for _, batch in enumerate(train_dataloader):
t_data = time.time() - t_data_start
with accelerator.accumulate(net):
# Convert videos to latent space
pixel_values_vid = batch["pixel_values_vid"].to(weight_dtype)
pixel_values_face_mask = batch["pixel_values_face_mask"]
pixel_values_face_mask = get_attention_mask(
pixel_values_face_mask, weight_dtype
)
pixel_values_lip_mask = batch["pixel_values_lip_mask"]
pixel_values_lip_mask = get_attention_mask(
pixel_values_lip_mask, weight_dtype
)
pixel_values_full_mask = batch["pixel_values_full_mask"]
pixel_values_full_mask = get_attention_mask(
pixel_values_full_mask, weight_dtype
)
with torch.no_grad():
video_length = pixel_values_vid.shape[1]
pixel_values_vid = rearrange(
pixel_values_vid, "b f c h w -> (b f) c h w"
)
latents = vae.encode(pixel_values_vid).latent_dist.sample()
latents = rearrange(
latents, "(b f) c h w -> b c f h w", f=video_length
)
latents = latents * 0.18215
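# 0.18215 is the Stable Diffusion VAE latent scaling factor
# (vae.config.scaling_factor), applied so the latents have roughly unit
# variance before noise is added.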
noise = torch.randn_like(latents)
if cfg.noise_offset > 0:
noise += cfg.noise_offset * torch.randn(
(latents.shape[0], latents.shape[1], 1, 1, 1),
device=latents.device,
)
bsz = latents.shape[0]
# Sample a random timestep for each video
timesteps = torch.randint(
0,
train_noise_scheduler.num_train_timesteps,
(bsz,),
device=latents.device,
)
timesteps = timesteps.long()
# mask for face locator
pixel_values_mask = (
batch["pixel_values_mask"].unsqueeze(
1).to(dtype=weight_dtype)
)
pixel_values_mask = repeat(
pixel_values_mask,
"b f c h w -> b (repeat f) c h w",
repeat=video_length,
)
pixel_values_mask = pixel_values_mask.transpose(
1, 2)
uncond_img_fwd = random.random() < cfg.uncond_img_ratio
uncond_audio_fwd = random.random() < cfg.uncond_audio_ratio
start_frame = random.random() < cfg.start_ratio
pixel_values_ref_img = batch["pixel_values_ref_img"].to(
dtype=weight_dtype
)
# initialize the motion frames as zero maps
if start_frame:
pixel_values_ref_img[:, 1:] = 0.0
ref_img_and_motion = rearrange(
pixel_values_ref_img, "b f c h w -> (b f) c h w"
)
with torch.no_grad():
ref_image_latents = vae.encode(
ref_img_and_motion
).latent_dist.sample()
ref_image_latents = ref_image_latents * 0.18215
image_prompt_embeds = batch["face_emb"].to(
dtype=imageproj.dtype, device=imageproj.device
)
# add noise
noisy_latents = train_noise_scheduler.add_noise(
latents, noise, timesteps
)
# Get the target for loss depending on the prediction type
if train_noise_scheduler.prediction_type == "epsilon":
target = noise
elif train_noise_scheduler.prediction_type == "v_prediction":
target = train_noise_scheduler.get_velocity(
latents, noise, timesteps
)
else:
raise ValueError(
f"Unknown prediction type {train_noise_scheduler.prediction_type}"
)
# ---- Forward!!! -----
model_pred = net(
noisy_latents=noisy_latents,
timesteps=timesteps,
ref_image_latents=ref_image_latents,
face_emb=image_prompt_embeds,
mask=pixel_values_mask,
full_mask=pixel_values_full_mask,
face_mask=pixel_values_face_mask,
lip_mask=pixel_values_lip_mask,
audio_emb=batch["audio_tensor"].to(
dtype=weight_dtype),
uncond_img_fwd=uncond_img_fwd,
uncond_audio_fwd=uncond_audio_fwd,
)
if cfg.snr_gamma == 0:
loss = F.mse_loss(
model_pred.float(),
target.float(),
reduction="mean",
)
else:
snr = compute_snr(train_noise_scheduler, timesteps)
if train_noise_scheduler.config.prediction_type == "v_prediction":
# Velocity objective requires that we add one to SNR values before we divide by them.
snr = snr + 1
mse_loss_weights = (
torch.stack(
[snr, cfg.snr_gamma * torch.ones_like(timesteps)], dim=1
).min(dim=1)[0]
/ snr
)
# Keep per-element losses (reduction="none") so the min-SNR weights above
# (min(SNR, gamma) / SNR, as in min-SNR weighting) are applied per sample
# before the final mean.
loss = F.mse_loss(
model_pred.float(),
target.float(),
reduction="none",
)
loss = (
loss.mean(dim=list(range(1, len(loss.shape))))
* mse_loss_weights
).mean()
# Gather the losses across all processes for logging (if we use distributed training).
avg_loss = accelerator.gather(
loss.repeat(cfg.data.train_bs)).mean()
train_loss += avg_loss.item() / cfg.solver.gradient_accumulation_steps
# Backpropagate
accelerator.backward(loss)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(
trainable_params,
cfg.solver.max_grad_norm,
)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
if accelerator.sync_gradients:
reference_control_reader.clear()
reference_control_writer.clear()
progress_bar.update(1)
global_step += 1
accelerator.log({"train_loss": train_loss}, step=global_step)
train_loss = 0.0
if global_step % cfg.val.validation_steps == 0 or global_step == 1:
if accelerator.is_main_process:
generator = torch.Generator(device=accelerator.device)
generator.manual_seed(cfg.seed)
log_validation(
accelerator=accelerator,
vae=vae,
net=net,
scheduler=val_noise_scheduler,
width=cfg.data.train_width,
height=cfg.data.train_height,
clip_length=cfg.data.n_sample_frames,
cfg=cfg,
save_dir=validation_dir,
global_step=global_step,
times=cfg.single_inference_times if cfg.single_inference_times is not None else None,
face_analysis_model_path=cfg.face_analysis_model_path
)
logs = {
"step_loss": loss.detach().item(),
"lr": lr_scheduler.get_last_lr()[0],
"td": f"{t_data:.2f}s",
}
t_data_start = time.time()
progress_bar.set_postfix(**logs)
if (
global_step % cfg.checkpointing_steps == 0
or global_step == cfg.solver.max_train_steps
):
# save model
save_path = os.path.join(
checkpoint_dir, f"checkpoint-{global_step}")
if accelerator.is_main_process:
delete_additional_ckpt(checkpoint_dir, 30)
accelerator.wait_for_everyone()
accelerator.save_state(save_path)
# save model weight
unwrap_net = accelerator.unwrap_model(net)
if accelerator.is_main_process:
save_checkpoint(
unwrap_net,
module_dir,
"net",
global_step,
total_limit=30,
)
if global_step >= cfg.solver.max_train_steps:
break
# Create the pipeline using the trained modules and save it.
accelerator.wait_for_everyone()
accelerator.end_training()
def load_config(config_path: str) -> dict:
"""
Loads the configuration file.
Args:
config_path (str): Path to the configuration file.
Returns:
dict: The configuration dictionary.
"""
if config_path.endswith(".yaml"):
return OmegaConf.load(config_path)
if config_path.endswith(".py"):
return import_filename(config_path).cfg
raise ValueError("Unsupported format for config file")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--config", type=str, default="./configs/train/stage2.yaml"
)
args = parser.parse_args()
try:
config = load_config(args.config)
train_stage2_process(config)
except Exception as e:
logging.error("Failed to execute the training process: %s", e)
| 9,334
|
4d47c87601970edb30a6004ea1f343f7e1b3fffcbef50adb7d2729a39fc672af
| 36.686176
| 126
| 0.590891
| 4.001178
| false
| false
| false
| false
|
microsoft/TRELLIS
|
dataset_toolkits/datasets/HSSD.py
| 3,654
| 0
|
MIT License
|
import os
import re
import argparse
import tarfile
from concurrent.futures import ThreadPoolExecutor
from tqdm import tqdm
import pandas as pd
import huggingface_hub
from utils import get_file_hash
def add_args(parser: argparse.ArgumentParser):
pass
def get_metadata(**kwargs):
metadata = pd.read_csv("hf://datasets/JeffreyXiang/TRELLIS-500K/HSSD.csv")
return metadata
def download(metadata, output_dir, **kwargs):
os.makedirs(os.path.join(output_dir, 'raw'), exist_ok=True)
# check login
try:
huggingface_hub.whoami()
except Exception:
print("\033[93m")
print("Haven't logged in to the Hugging Face Hub.")
print("Visit https://huggingface.co/settings/tokens to get a token.")
print("\033[0m")
huggingface_hub.login()
try:
huggingface_hub.hf_hub_download(repo_id="hssd/hssd-models", filename="README.md", repo_type="dataset")
except Exception:
print("\033[93m")
print("Error downloading HSSD dataset.")
print("Check if you have access to the HSSD dataset.")
print("Visit https://huggingface.co/datasets/hssd/hssd-models for more information")
print("\033[0m")
downloaded = {}
metadata = metadata.set_index("file_identifier")
with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor, \
tqdm(total=len(metadata), desc="Downloading") as pbar:
def worker(instance: str) -> str:
try:
huggingface_hub.hf_hub_download(repo_id="hssd/hssd-models", filename=instance, repo_type="dataset", local_dir=os.path.join(output_dir, 'raw'))
sha256 = get_file_hash(os.path.join(output_dir, 'raw', instance))
pbar.update()
return sha256
except Exception as e:
pbar.update()
print(f"Error extracting for {instance}: {e}")
return None
sha256s = executor.map(worker, metadata.index)
executor.shutdown(wait=True)
for k, sha256 in zip(metadata.index, sha256s):
if sha256 is not None:
if sha256 == metadata.loc[k, "sha256"]:
downloaded[sha256] = os.path.join('raw', k)
else:
print(f"Error downloading {k}: sha256s do not match")
return pd.DataFrame(downloaded.items(), columns=['sha256', 'local_path'])
def foreach_instance(metadata, output_dir, func, max_workers=None, desc='Processing objects') -> pd.DataFrame:
import os
from concurrent.futures import ThreadPoolExecutor
from tqdm import tqdm
# load metadata
metadata = metadata.to_dict('records')
# processing objects
records = []
max_workers = max_workers or os.cpu_count()
try:
with ThreadPoolExecutor(max_workers=max_workers) as executor, \
tqdm(total=len(metadata), desc=desc) as pbar:
def worker(metadatum):
try:
local_path = metadatum['local_path']
sha256 = metadatum['sha256']
file = os.path.join(output_dir, local_path)
record = func(file, sha256)
if record is not None:
records.append(record)
pbar.update()
except Exception as e:
print(f"Error processing object {metadatum.get('sha256', '<unknown>')}: {e}")
pbar.update()
executor.map(worker, metadata)
executor.shutdown(wait=True)
except Exception as e:
print(f"Error happened during processing: {e}")
return pd.DataFrame.from_records(records)
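# Hypothetical usage sketch (the call chain below is illustrative; only the
# functions defined above are real):
#
#   metadata = get_metadata()
#   table = download(metadata, output_dir="./datasets/HSSD")
#   sizes = foreach_instance(
#       table, "./datasets/HSSD",
#       lambda path, sha256: {"sha256": sha256, "bytes": os.path.getsize(path)},
#   )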
| 941
|
2941f745453ea7f0e0e270ce24debab54a8e2721fc0a4b92b0a5a14fbb63bf49
| 34.475728
| 158
| 0.594964
| 3.883103
| false
| false
| false
| false
|