Skip to content

Commit 3d142fc

Browse files
committed
Allow dataset preparation tests to run without openai
1 parent 0232312 commit 3d142fc

File tree

1 file changed

+102
-1
lines changed

1 file changed

+102
-1
lines changed

tests/pytest/test_dataset_preparation.py

Lines changed: 102 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,109 @@
1+
from __future__ import annotations
2+
3+
import importlib
4+
import sys
5+
import types
16
from typing import cast
27

38
import pytest
9+
from pydantic import BaseModel, ConfigDict
10+
11+
12+
def _install_dependency_stubs() -> None:
    """Register lightweight stubs for optional runtime dependencies.

    The dataset-preparation tests only need the code under test to *import*
    cleanly; they do not exercise the optional backends.  For each optional
    dependency that is missing from the environment, install a minimal
    placeholder module into ``sys.modules`` so imports succeed.  A real
    installation is always preferred: a stub is registered only after
    ``importlib.import_module`` raises ``ModuleNotFoundError``.

    Returns:
        None.  Side effect: may add entries to ``sys.modules``.
    """

    def _ensure_module(name: str, **attrs) -> None:
        # Create and register a bare module exposing ``attrs``.
        # No-op if a module by that name is already registered.
        if name in sys.modules:
            return
        module = types.ModuleType(name)
        for key, value in attrs.items():
            setattr(module, key, value)
        sys.modules[name] = module

    try:  # pragma: no cover - prefer real dependency when available
        importlib.import_module("loguru")
    except ModuleNotFoundError:

        class _Logger:  # pragma: no cover - inert logging shim
            # Any attribute access (info, warning, ...) yields a no-op callable.
            def __getattr__(self, _name: str):
                def _noop(*_args, **_kwargs):
                    return None

                return _noop

        _ensure_module("loguru", logger=_Logger())

    def _noop_loader(*_args, **_kwargs):  # pragma: no cover - placeholder loader
        return {}

    # Attributes each stub module must expose so that ``from X import Y``
    # statements in the code under test resolve.
    optional_stub_attrs = {
        "toml": {"loads": _noop_loader, "load": _noop_loader},
        "datasets": {},
        "addict": {"Dict": dict},
        "deepdiff": {},
        "litellm": {},
        "peewee": {},
        "backoff": {},
    }

    for optional_module, attrs in optional_stub_attrs.items():
        try:
            importlib.import_module(optional_module)
        except ModuleNotFoundError:
            _ensure_module(optional_module, **attrs)

    # openai needs a richer stub (nested submodules plus pydantic models),
    # so it is handled separately below.  When the real package is present
    # there is nothing left to do.
    try:
        importlib.import_module("openai")
        return
    except ModuleNotFoundError:
        pass

    # Build the module tree that mirrors the openai package layout the
    # code under test imports from.
    openai_mod = types.ModuleType("openai")
    types_mod = types.ModuleType("openai.types")
    completion_usage_mod = types.ModuleType("openai.types.completion_usage")
    chat_mod = types.ModuleType("openai.types.chat")
    chat_message_mod = types.ModuleType("openai.types.chat.chat_completion_message")
    tool_call_mod = types.ModuleType("openai.types.chat.chat_completion_message_tool_call")

    # Minimal pydantic models matching the names the tests rely on.
    # ``extra="allow"`` keeps them permissive, like the real SDK models.
    class CompletionUsage(BaseModel):  # pragma: no cover - simple data container
        prompt_tokens: int | None = None
        completion_tokens: int | None = None
        total_tokens: int | None = None

        model_config = ConfigDict(extra="allow")

    class FunctionCall(BaseModel):  # pragma: no cover - simple data container
        name: str | None = None
        arguments: str | None = None

        model_config = ConfigDict(extra="allow")

    class ChatCompletionMessageToolCall(BaseModel):  # pragma: no cover - simple data container
        id: str | None = None
        type: str | None = None
        function: FunctionCall | None = None

        model_config = ConfigDict(extra="allow")

    # Expose the model classes at every import path the real SDK provides.
    types_mod.CompletionUsage = CompletionUsage
    completion_usage_mod.CompletionUsage = CompletionUsage
    chat_message_mod.FunctionCall = FunctionCall
    tool_call_mod.ChatCompletionMessageToolCall = ChatCompletionMessageToolCall

    # Wire up attribute access (``openai.types.chat`` etc.) ...
    openai_mod.types = types_mod
    types_mod.completion_usage = completion_usage_mod
    types_mod.chat = chat_mod
    chat_mod.chat_completion_message = chat_message_mod
    chat_mod.chat_completion_message_tool_call = tool_call_mod

    # ... and direct imports (``import openai.types.chat`` etc.).
    sys.modules["openai"] = openai_mod
    sys.modules["openai.types"] = types_mod
    sys.modules["openai.types.completion_usage"] = completion_usage_mod
    sys.modules["openai.types.chat"] = chat_mod
    sys.modules["openai.types.chat.chat_completion_message"] = chat_message_mod
    sys.modules["openai.types.chat.chat_completion_message_tool_call"] = tool_call_mod
# Install the stubs at import time, before the eval_protocol imports below
# pull in the optional dependencies.
_install_dependency_stubs()
6107

7108
from eval_protocol.models import EvaluationRow, Message
8109
from eval_protocol.pytest.dataset_preparation import load_and_prepare_rows

0 commit comments

Comments (0)