Skip to content

Commit 247ec0a

Browse files
author
Dylan Huang
committed
Refactor testing configuration and clean up project files
- Removed pytest configuration from pyproject.toml.
- Updated pytest.ini to include additional test paths and file patterns.
- Adjusted VSCode settings to use pytest.ini for test arguments.
- Minor code adjustments in langfuse.py and llm_judge_openai_responses.py for consistency and clarity.
1 parent 6e9f7af commit 247ec0a

File tree

5 files changed

+9
-16
lines changed

5 files changed

+9
-16
lines changed

.vscode/settings.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
{
22
"python.testing.unittestEnabled": false,
33
"python.testing.pytestEnabled": true,
4-
"python.testing.pytestArgs": ["tests", "examples", "-s", "--tb=short"],
4+
"python.testing.pytestArgs": ["-c", "pytest.ini"],
55
"python.testing.autoTestDiscoverOnSaveEnabled": true,
66
"python.defaultInterpreterPath": "./.venv/bin/python",
77
"python.testing.cwd": "${workspaceFolder}",

eval_protocol/adapters/langfuse.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ def __call__(
5656

5757

5858
def convert_trace_to_evaluation_row(
59-
trace: TraceWithFullDetails, include_tool_calls: bool = True, span_name: Optional[str] = None
59+
trace: "TraceWithFullDetails", include_tool_calls: bool = True, span_name: Optional[str] = None
6060
) -> Optional[EvaluationRow]:
6161
"""Convert a Langfuse trace to EvaluationRow format.
6262

eval_protocol/quickstart/llm_judge_openai_responses.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -40,8 +40,8 @@
4040
)
4141

4242

43-
@pytest.mark.skipif(os.environ.get("CI") == "true", reason="Skip in CI") # pyright: ignore[reportAttributeAccessIssue]
44-
@pytest.mark.asyncio # pyright: ignore[reportAttributeAccessIssue]
43+
@pytest.mark.skipif(os.environ.get("CI") == "true", reason="Skip in CI")
44+
@pytest.mark.asyncio
4545
@evaluation_test(
4646
input_rows=[input_rows],
4747
completion_params=[

pyproject.toml

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -152,15 +152,6 @@ langgraph_tools = [
152152
"langchain-fireworks>=0.3.0",
153153
]
154154

155-
[tool.pytest.ini_options]
156-
addopts = "-q"
157-
testpaths = [
158-
"examples",
159-
]
160-
plugins = [
161-
"eval_protocol.pytest.plugin",
162-
]
163-
164155
[project.scripts]
165156
fireworks-reward = "eval_protocol.cli:main"
166157
eval-protocol = "eval_protocol.cli:main"

pytest.ini

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3,12 +3,14 @@ markers =
33
asyncio
44
asyncio_mode = auto
55
asyncio_default_fixture_loop_scope = function
6-
testpaths = tests
7-
python_files = test_*.py
6+
testpaths = tests examples ./eval_protocol/quickstart
7+
python_files = test_*.py llm_judge_*.py
8+
plugins =
9+
eval_protocol.pytest.plugin
810
python_classes = Test*
911
python_functions = test_*
1012
# Configure stdout/stderr capture for debugging
11-
addopts = -s --tb=short
13+
addopts = -s --tb=short -q
1214
# Alternative: disable capture completely for debugging
1315
# addopts = -s --tb=short --capture=no
1416
filterwarnings =

0 commit comments

Comments (0)