Skip to content

Commit e01dc36

Browse files
committed
style: format code with black
- Format all Python files using black formatter - Ensure consistent code style across the project - Fix formatting issues detected by CI
1 parent d13f7bf commit e01dc36

Some content is hidden

Large commits have some content hidden by default. Use the searchbox below for content that may be hidden.

42 files changed

+185
-350
lines changed

MANIFEST.in

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,3 +10,4 @@ global-exclude test-install
1010
global-exclude .venv
1111
global-exclude .git
1212

13+

examples/basic_usage.py

Lines changed: 10 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -10,19 +10,20 @@
1010
async def main():
1111
# Initialize SDK
1212
sdk = LessTokensSDK(
13-
api_key=os.getenv("LESSTOKENS_API_KEY", "your-less-tokens-api-key"),
14-
provider="openai"
13+
api_key=os.getenv("LESSTOKENS_API_KEY", "your-less-tokens-api-key"), provider="openai"
1514
)
1615

1716
# Process prompt
18-
response = await sdk.process_prompt({
19-
"prompt": "Explain what artificial intelligence is",
20-
"llm_config": {
21-
"api_key": os.getenv("OPENAI_API_KEY", "your-openai-api-key"),
22-
"model": "gpt-4",
23-
"temperature": 0.7,
17+
response = await sdk.process_prompt(
18+
{
19+
"prompt": "Explain what artificial intelligence is",
20+
"llm_config": {
21+
"api_key": os.getenv("OPENAI_API_KEY", "your-openai-api-key"),
22+
"model": "gpt-4",
23+
"temperature": 0.7,
24+
},
2425
}
25-
})
26+
)
2627

2728
print("Response:", response.content)
2829
print(f"Tokens saved: {response.usage.savings}%")
@@ -31,5 +32,3 @@ async def main():
3132

3233
if __name__ == "__main__":
3334
asyncio.run(main())
34-
35-

examples/multi_turn.py

Lines changed: 22 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -9,40 +9,41 @@
99

1010
async def main():
1111
sdk = LessTokensSDK(
12-
api_key=os.getenv("LESSTOKENS_API_KEY", "your-less-tokens-api-key"),
13-
provider="openai"
12+
api_key=os.getenv("LESSTOKENS_API_KEY", "your-less-tokens-api-key"), provider="openai"
1413
)
1514

1615
# First message
17-
response1 = await sdk.process_prompt({
18-
"prompt": "What is the capital of France?",
19-
"llm_config": {
20-
"api_key": os.getenv("OPENAI_API_KEY", "your-openai-api-key"),
21-
"model": "gpt-4",
16+
response1 = await sdk.process_prompt(
17+
{
18+
"prompt": "What is the capital of France?",
19+
"llm_config": {
20+
"api_key": os.getenv("OPENAI_API_KEY", "your-openai-api-key"),
21+
"model": "gpt-4",
22+
},
2223
}
23-
})
24+
)
2425

2526
print("User: What is the capital of France?")
2627
print(f"Assistant: {response1.content}\n")
2728

2829
# Second message with conversation history
29-
response2 = await sdk.process_prompt({
30-
"prompt": "What is its population?",
31-
"llm_config": {
32-
"api_key": os.getenv("OPENAI_API_KEY", "your-openai-api-key"),
33-
"model": "gpt-4",
34-
},
35-
"messages": [
36-
{"role": "user", "content": "What is the capital of France?"},
37-
{"role": "assistant", "content": response1.content},
38-
]
39-
})
30+
response2 = await sdk.process_prompt(
31+
{
32+
"prompt": "What is its population?",
33+
"llm_config": {
34+
"api_key": os.getenv("OPENAI_API_KEY", "your-openai-api-key"),
35+
"model": "gpt-4",
36+
},
37+
"messages": [
38+
{"role": "user", "content": "What is the capital of France?"},
39+
{"role": "assistant", "content": response1.content},
40+
],
41+
}
42+
)
4043

4144
print("User: What is its population?")
4245
print(f"Assistant: {response2.content}")
4346

4447

4548
if __name__ == "__main__":
4649
asyncio.run(main())
47-
48-

examples/streaming.py

Lines changed: 9 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -9,17 +9,18 @@
99

1010
async def main():
1111
sdk = LessTokensSDK(
12-
api_key=os.getenv("LESSTOKENS_API_KEY", "your-less-tokens-api-key"),
13-
provider="openai"
12+
api_key=os.getenv("LESSTOKENS_API_KEY", "your-less-tokens-api-key"), provider="openai"
1413
)
1514

16-
async for chunk in sdk.process_prompt_stream({
17-
"prompt": "Tell a story about a robot learning to paint",
18-
"llm_config": {
19-
"api_key": os.getenv("OPENAI_API_KEY", "your-openai-api-key"),
20-
"model": "gpt-4",
15+
async for chunk in sdk.process_prompt_stream(
16+
{
17+
"prompt": "Tell a story about a robot learning to paint",
18+
"llm_config": {
19+
"api_key": os.getenv("OPENAI_API_KEY", "your-openai-api-key"),
20+
"model": "gpt-4",
21+
},
2122
}
22-
}):
23+
):
2324
if chunk.done:
2425
print(f"\n\nTokens saved: {chunk.usage.savings}%")
2526
print(f"Total tokens: {chunk.usage.total_tokens}")
@@ -29,5 +30,3 @@ async def main():
2930

3031
if __name__ == "__main__":
3132
asyncio.run(main())
32-
33-

lesstokens_sdk/__init__.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,4 +34,3 @@
3434
"CompressedPrompt",
3535
"StreamChunk",
3636
]
37-

lesstokens_sdk/clients/__init__.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,4 +4,3 @@
44
from lesstokens_sdk.clients.llm_client import LLMClient
55

66
__all__ = ["LessTokensClient", "LLMClient"]
7-

lesstokens_sdk/clients/less_tokens_client.py

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -68,18 +68,15 @@ async def _perform_compression_request(
6868
data = response_data.get("data") or response_data
6969

7070
# Handle both API response formats
71-
compression_ratio = (
72-
data.get("compressionRatio") or data.get("ratio") or 1.0
73-
)
71+
compression_ratio = data.get("compressionRatio") or data.get("ratio") or 1.0
7472
original_tokens = data.get("originalTokens") or 0
7573
compressed_tokens = data.get("compressedTokens") or 0
7674

7775
# Always calculate savings percentage from original and compressed tokens
7876
# This ensures accuracy regardless of API response format
7977
if original_tokens > 0 and compressed_tokens >= 0:
8078
savings = (
81-
(float(original_tokens) - float(compressed_tokens))
82-
/ float(original_tokens)
79+
(float(original_tokens) - float(compressed_tokens)) / float(original_tokens)
8380
) * 100
8481
else:
8582
savings = 0.0
@@ -149,4 +146,3 @@ async def perform_request() -> CompressedPrompt:
149146
}
150147

151148
return await retry(perform_request, retry_config)
152-

lesstokens_sdk/clients/llm_client.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -14,9 +14,7 @@ class LLMClient:
1414
def __init__(self, provider: str, api_key: str, base_url: Optional[str] = None):
1515
self.provider = create_provider(provider, api_key, base_url)
1616

17-
async def chat(
18-
self, messages: List[Dict[str, str]], config: LLMConfig
19-
) -> LLMResponse:
17+
async def chat(self, messages: List[Dict[str, str]], config: LLMConfig) -> LLMResponse:
2018
"""Send a chat completion request"""
2119
return await self.provider.chat(messages, config)
2220

lesstokens_sdk/errors.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,4 +56,3 @@ def create_error(
5656
) -> LessTokensError:
5757
"""Create error from error code"""
5858
return LessTokensError(message, code, status_code, details)
59-

lesstokens_sdk/providers/__init__.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,4 +4,3 @@
44
from lesstokens_sdk.providers.factory import create_provider
55

66
__all__ = ["LLMProvider", "create_provider"]
7-

0 commit comments

Comments (0)