# tf_code/packages/tf-sync/test_tools.py
# Snapshot metadata: 2026-03-27 08:42:09 +11:00 · 310 lines · 11 KiB · Python
"""
End-to-end tests for ToothFairyAI agent sync and prompt injection.
Tests the entire flow from Python sync to TypeScript agent loading to prompt building.
"""
import json
import os
import tempfile
from pathlib import Path
# Test 1: Verify parse_agent extracts all required fields
def test_parse_agent_extracts_all_fields():
    """Check that parse_agent copies every agent field onto the parsed tool.

    Builds a mock agent carrying all fields the sync layer reads, then
    asserts each one round-trips onto the SyncedTool unchanged.
    Returns True on success; raises AssertionError naming the bad field.
    """
    print("\n" + "=" * 80)
    print("TEST 1: Verify parse_agent extracts all required fields")
    print("=" * 80)
    from tf_sync.tools import parse_agent, ToolType

    # Mock agent record carrying every field parse_agent is expected to read.
    class MockAgent:
        id = "test-agent-123"
        label = "Code Reviewer"
        description = "Reviews code for quality and best practices"
        interpolation_string = "You are a code reviewer. Always check for bugs and suggest improvements."
        goals = "Review code thoroughly. Provide actionable feedback. Ensure code quality."
        temperature = 0.3
        max_tokens = 4096
        llm_base_model = "claude-3-5-sonnet"
        llm_provider = "toothfairyai"
        mode = "coder"

    agent = MockAgent()
    result = parse_agent(agent)
    print("\nParsed tool:")
    print(f" id: {result.id}")
    print(f" name: {result.name}")
    print(f" description: {result.description}")
    print(f" tool_type: {result.tool_type}")
    print(f" interpolation_string: {result.interpolation_string}")
    print(f" goals: {result.goals}")
    print(f" temperature: {result.temperature}")
    print(f" max_tokens: {result.max_tokens}")
    print(f" llm_base_model: {result.llm_base_model}")
    print(f" llm_provider: {result.llm_provider}")
    # Verify all fields are populated
    assert result.id == "test-agent-123", f"Expected id='test-agent-123', got '{result.id}'"
    assert result.name == "Code Reviewer", f"Expected name='Code Reviewer', got '{result.name}'"
    assert result.tool_type == ToolType.CODER_AGENT, f"Expected CODER_AGENT, got {result.tool_type}"
    assert result.interpolation_string == "You are a code reviewer. Always check for bugs and suggest improvements.", \
        "interpolation_string not set correctly"
    assert result.goals == "Review code thoroughly. Provide actionable feedback. Ensure code quality.", \
        "goals not set correctly"
    assert result.temperature == 0.3, f"Expected temperature=0.3, got {result.temperature}"
    assert result.max_tokens == 4096, f"Expected max_tokens=4096, got {result.max_tokens}"
    assert result.llm_base_model == "claude-3-5-sonnet", "llm_base_model not set correctly"
    assert result.llm_provider == "toothfairyai", "llm_provider not set correctly"
    print("\n✅ TEST 1 PASSED: parse_agent extracts all fields correctly")
    return True
# Test 2: Verify model mapping for different llm_provider values
def test_model_mapping_for_tf_providers():
    """Check that parse_agent preserves llm_provider verbatim for each variant.

    The TypeScript side maps None / "toothfairyai" / "tf" onto the
    ToothFairyAI provider and leaves external providers (e.g. "anthropic")
    alone, so the Python side must pass the raw value through untouched.
    Returns True on success.
    """
    print("\n" + "=" * 80)
    print("TEST 2: Verify model mapping for different llm_provider values")
    print("=" * 80)
    from tf_sync.tools import parse_agent

    def _mock_agent(agent_id, label, base_model, provider):
        # Minimal agent record; only id/label/model/provider vary per case.
        return type("MockAgent", (), {
            "id": agent_id,
            "label": label,
            "description": "Test",
            "interpolation_string": "Test prompt",
            "goals": "Test goals",
            "temperature": 0.7,
            "max_tokens": 2048,
            "llm_base_model": base_model,
            "llm_provider": provider,
            "mode": "coder",
        })()

    cases = {
        # None provider (should map to toothfairyai in TypeScript)
        "None provider": _mock_agent("test-agent-none", "Agent None", "gpt-4", None),
        "toothfairyai provider": _mock_agent("test-agent-tf", "Agent TF", "gpt-4", "toothfairyai"),
        "tf provider": _mock_agent("test-agent-tf-short", "Agent TF Short", "gpt-4", "tf"),
        # External provider (should NOT map to toothfairyai)
        "anthropic provider": _mock_agent("test-agent-external", "Agent External", "claude-3-5-sonnet", "anthropic"),
    }
    results = {name: parse_agent(agent) for name, agent in cases.items()}
    for name, result in results.items():
        print(f"\n{name}:")
        print(f" llm_provider: {result.llm_provider}")
        print(f" llm_base_model: {result.llm_base_model}")
    # The TypeScript code will check if llm_provider is None, "toothfairyai", or "tf"
    # and map to toothfairyai provider. Here we just verify the values are preserved.
    assert results["None provider"].llm_provider is None
    assert results["toothfairyai provider"].llm_provider == "toothfairyai"
    assert results["tf provider"].llm_provider == "tf"
    assert results["anthropic provider"].llm_provider == "anthropic"
    print("\n✅ TEST 2 PASSED: Provider values are preserved correctly for TypeScript mapping")
    return True
# Test 3: Verify SyncedTool serializes correctly to JSON
def test_synced_tool_json_serialization():
    """Round-trip a parsed agent through the tfcode.js-style dict and a JSON dump.

    Returns True on success; raises AssertionError if any serialized field
    differs from the mock agent's values.
    """
    banner = "=" * 80
    print("\n" + banner)
    print("TEST 3: Verify SyncedTool serializes correctly to JSON")
    print(banner)
    from tf_sync.tools import parse_agent

    class MockAgent:
        id = "test-agent-json"
        label = "JSON Test Agent"
        description = "Test JSON serialization"
        interpolation_string = "You are a JSON test agent."
        goals = "Test JSON output."
        temperature = 0.5
        max_tokens = 8192
        llm_base_model = "gpt-4-turbo"
        llm_provider = "toothfairyai"
        mode = "coder"

    result = parse_agent(MockAgent())
    # Simulate what tfcode.js does: enum members contribute their .value,
    # everything else is copied through unchanged.
    tool_data = {
        "id": result.id,
        "name": result.name,
        "description": result.description,
        "tool_type": result.tool_type.value,
        "request_type": result.request_type.value if result.request_type else None,
    }
    for field in ("url", "auth_via", "interpolation_string", "goals",
                  "temperature", "max_tokens", "llm_base_model", "llm_provider"):
        tool_data[field] = getattr(result, field)
    print("\nSerialized JSON:")
    print(json.dumps(tool_data, indent=2))
    # Verify all fields are present in JSON with the expected values.
    expected = {
        "id": "test-agent-json",
        "name": "JSON Test Agent",
        "tool_type": "coder_agent",
        "interpolation_string": "You are a JSON test agent.",
        "goals": "Test JSON output.",
        "temperature": 0.5,
        "max_tokens": 8192,
        "llm_base_model": "gpt-4-turbo",
        "llm_provider": "toothfairyai",
    }
    for key, want in expected.items():
        assert tool_data[key] == want
    print("\n✅ TEST 3 PASSED: SyncedTool serializes correctly to JSON")
    return True
# Test 4: Create a mock tools.json and verify TypeScript can parse it
def test_tools_json_format():
    """Verify a representative tools.json payload has the shape tfcode.js expects.

    Uses a hand-built fixture (no tf_sync import needed), so the structural
    contract is pinned even when the sync backend is unavailable.
    Returns True on success.
    """
    print("\n" + "=" * 80)
    print("TEST 4: Verify tools.json format matches TypeScript expectations")
    print("=" * 80)
    # Create a mock tools.json content
    mock_tools = {
        "success": True,
        "tools": [
            {
                "id": "coder-agent-1",
                "name": "Code Reviewer",
                "description": "Reviews code for quality and best practices",
                "tool_type": "coder_agent",
                "request_type": None,
                "url": None,
                "auth_via": "tf_agent",
                "interpolation_string": "You are a code reviewer. Your job is to review code thoroughly and provide actionable feedback.",
                "goals": "Review all code changes. Identify bugs. Suggest improvements. Ensure best practices.",
                "temperature": 0.3,
                "max_tokens": 4096,
                "llm_base_model": "claude-3-5-sonnet",
                "llm_provider": "toothfairyai",
            },
            {
                "id": "coder-agent-2",
                "name": "Test Writer",
                "description": "Writes comprehensive tests",
                "tool_type": "coder_agent",
                "request_type": None,
                "url": None,
                "auth_via": "tf_agent",
                "interpolation_string": "You are a test writer. Write comprehensive tests for all code.",
                "goals": "Write unit tests. Write integration tests. Ensure code coverage.",
                "temperature": 0.5,
                "max_tokens": 8192,
                "llm_base_model": None,
                "llm_provider": None,  # Should map to toothfairyai in TypeScript
            },
        ],
        "by_type": {
            "coder_agent": 2,
        },
    }
    print("\nMock tools.json content:")
    print(json.dumps(mock_tools, indent=2))
    # Keys the TypeScript loader reads from every tool entry.
    required_keys = (
        "id", "name", "tool_type", "interpolation_string", "goals",
        "temperature", "max_tokens", "llm_base_model", "llm_provider",
    )
    # Verify the structure matches what TypeScript expects
    assert mock_tools["success"] is True
    assert len(mock_tools["tools"]) == 2
    for tool in mock_tools["tools"]:
        for key in required_keys:
            assert key in tool, f"missing required key: {key}"
        assert tool["tool_type"] == "coder_agent"
    print("\n✅ TEST 4 PASSED: tools.json format matches TypeScript expectations")
    return True
def run_all_tests():
    """Run every test function in order and report a pass/fail summary.

    Returns True when all tests pass, False otherwise. Exceptions from a
    test are caught, reported, and counted as failures.
    """
    banner = "=" * 80
    print("\n" + banner)
    print("RUNNING ALL PYTHON TESTS")
    print(banner)
    tests = (
        test_parse_agent_extracts_all_fields,
        test_model_mapping_for_tf_providers,
        test_synced_tool_json_serialization,
        test_tools_json_format,
    )
    passed = failed = 0
    for test in tests:
        try:
            ok = test()
        except Exception as e:
            print(f"\n❌ TEST FAILED: {test.__name__}")
            print(f" Error: {e}")
            failed += 1
        else:
            if ok:
                passed += 1
            else:
                failed += 1
    print("\n" + banner)
    print(f"PYTHON TEST RESULTS: {passed} passed, {failed} failed")
    print(banner)
    return failed == 0
if __name__ == "__main__":
    import sys

    # Exit code 0 when every test passed, 1 otherwise (CI-friendly).
    sys.exit(0 if run_all_tests() else 1)