| """ | |
| Verifier Agent: Checks if the current plan and implementation sufficiently answer the query. | |
| This agent evaluates whether the work done so far is enough to answer the original question. | |
| """ | |
| from langchain_core.messages import AIMessage | |
| from ..utils.formatters import format_plan, gemini_text | |
| from ..utils.state import DSStarState | |


def verifier_node(state: DSStarState) -> dict:
    """
    Verifier Agent Node: Determines if the plan sufficiently answers the query.

    Analyzes:
        - Original query
        - Current plan
        - Code implementation
        - Execution results

    Args:
        state: Current DSStarState

    Returns:
        Dictionary with updated state fields:
            - is_sufficient: Boolean indicating if work is complete
            - messages: Agent communication messages
            - next: "finalyzer" if sufficient, "router" if not
    """
    print("=" * 60)
    print("VERIFIER AGENT STARTING...")
    print("=" * 60)

    plan_text = format_plan(state["plan"])

    prompt = f"""You are an expert data analyst verifier.

Original Question: {state["query"]}

Current Plan:
{plan_text}

Implementation Code:
{state["current_code"]}

Execution Result:
{state["execution_result"][:1000]}

Task: Verify if this plan and implementation are SUFFICIENT to fully answer the question.

Consider:
- Does the plan address all aspects of the question?
- Does the execution result contain the answer?
- Is any additional analysis needed?

Answer with ONLY one word: "Yes" or "No"
- "Yes" if sufficient to answer the question
- "No" if more analysis is needed"""

    try:
        # Get LLM response
        response = state["llm"].invoke(prompt)

        # Handle different response formats (Gemini may return a list of content parts)
        if hasattr(response, "content") and isinstance(response.content, list):
            response_text = gemini_text(response)
        elif hasattr(response, "content"):
            response_text = response.content
        else:
            response_text = str(response)

        response_lower = response_text.strip().lower()
        # Lenient check: any response containing "yes" counts as sufficient
        is_sufficient = "yes" in response_lower

        status = "SUFFICIENT ✓" if is_sufficient else "INSUFFICIENT ✗"
        print(f"\nVerification Result: {status}")
        print("=" * 60)

        next_node = "finalyzer" if is_sufficient else "router"

        return {
            "is_sufficient": is_sufficient,
            "messages": [
                AIMessage(
                    content=f"Verification: {'Sufficient' if is_sufficient else 'Insufficient'}"
                )
            ],
            "next": next_node,
        }

    except Exception as e:
        # On error, assume insufficient and continue
        print(f"\n✗ Verifier error: {str(e)}")
        print("Defaulting to insufficient, continuing...")
        return {
            "is_sufficient": False,
            "messages": [AIMessage(content=f"Verifier error: {str(e)}, continuing...")],
            "next": "router",
        }
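

# --- Illustrative graph wiring (sketch, not part of the original module) ---
# The "next" value returned by verifier_node is intended to drive conditional
# routing in a LangGraph StateGraph. The helper below is a hedged sketch of how
# that wiring could look; the "router" and "finalyzer" nodes here are
# hypothetical stubs standing in for the real agents defined elsewhere in this
# package, and the real graph would loop back through planning/coding nodes
# rather than ending at the stub router.
def build_example_verifier_graph():
    from langgraph.graph import END, StateGraph

    builder = StateGraph(DSStarState)
    builder.add_node("verifier", verifier_node)
    # Hypothetical placeholders for the real router/finalyzer agents.
    builder.add_node("router", lambda state: {})
    builder.add_node("finalyzer", lambda state: {})

    builder.set_entry_point("verifier")
    # Route on the "next" field produced by verifier_node.
    builder.add_conditional_edges(
        "verifier",
        lambda state: state["next"],
        {"finalyzer": "finalyzer", "router": "router"},
    )
    # Both stubs terminate here to keep the sketch loop-free.
    builder.add_edge("router", END)
    builder.add_edge("finalyzer", END)
    return builder.compile()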


# Standalone test function
def test_verifier(llm, query: str, plan: list, code: str, execution_result: str):
    """
    Test the verifier agent independently.

    Args:
        llm: LLM instance
        query: User query
        plan: List of plan steps
        code: Generated code
        execution_result: Result from code execution

    Returns:
        Dictionary with verifier results
    """
    # Create minimal test state
    test_state = {
        "llm": llm,
        "query": query,
        "data_descriptions": {},
        "plan": plan,
        "current_code": code,
        "execution_result": execution_result,
        "is_sufficient": False,
        "router_decision": "",
        "iteration": 0,
        "max_iterations": 20,
        "messages": [],
        "next": "verifier",
    }

    result = verifier_node(test_state)

    print("\n" + "=" * 60)
    print("VERIFIER TEST RESULTS")
    print("=" * 60)
    print(f"Is Sufficient: {result.get('is_sufficient', False)}")
    print(f"Next Node: {result.get('next', 'unknown')}")

    return result
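

# --- Illustrative standalone run (sketch, not part of the original module) ---
# Assumptions: a Gemini chat model from the langchain-google-genai package and a
# GOOGLE_API_KEY in the environment; any LangChain chat model exposing .invoke()
# should work the same way. Because of the relative imports above, run this with
# "python -m <package>.<module>" rather than as a plain script. The sample plan,
# code, and execution result below are made-up inputs purely for illustration,
# and the plain-string plan steps assume format_plan accepts them.
if __name__ == "__main__":
    from langchain_google_genai import ChatGoogleGenerativeAI

    llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash", temperature=0)
    test_verifier(
        llm=llm,
        query="What is the average sale price per region?",
        plan=["Load sales.csv", "Group sales by region", "Compute the mean price"],
        code="df.groupby('region')['price'].mean()",
        execution_result="region\nEast    102.4\nWest     98.7",
    )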