Complete reference for all classes, methods, and types in the deapi package.
- Client Classes
- Resource: images
- Resource: audio
- Resource: video
- Resource: transcription
- Resource: embeddings
- Resource: prompts
- Resource: models
- Job Polling
- Types & Models
- Exceptions
- Webhook Utilities
Synchronous client for the DeAPI platform.
from deapi import DeapiClient
client = DeapiClient(
*,
api_key: str | None = None, # Falls back to DEAPI_API_KEY env var
base_url: str | None = None, # Falls back to DEAPI_BASE_URL (default: "https://api.deapi.ai")
timeout: float | None = None, # Request timeout in seconds (default: 30.0)
max_retries: int | None = None, # Retry attempts for 429/5xx/network errors (default: 3)
api_version: str | None = None, # Falls back to DEAPI_API_VERSION (default: "v1")
)

Methods:
| Method | Returns | Description |
|---|---|---|
client.balance() |
Balance |
Get current account balance |
client.close() |
None |
Close the underlying HTTP connection |
Resource namespaces: client.images, client.audio, client.video, client.transcription, client.embeddings, client.models, client.prompts
Context manager:
with DeapiClient(api_key="sk-...") as client:
... # client.close() called automatically

Asynchronous client — identical API surface, all methods are async.
from deapi import AsyncDeapiClient
async with AsyncDeapiClient(api_key="sk-...") as client:
balance = await client.balance()

Access via client.images.
Generate an image from a text prompt (text-to-image).
job = client.images.generate(
*,
prompt: str, # Required
model: str, # Required
width: int, # Required
height: int, # Required
seed: int, # Required
negative_prompt: str | None = None,
loras: list[LoraWeight | dict] | None = None, # [{"name": "...", "weight": 0.8}]
guidance: float | None = None,
steps: int | None = None,
webhook_url: str | None = None,
) -> Job

Calculate the price for a text-to-image generation. Same parameters as generate() except webhook_url.
price = client.images.generate_price(...) -> PriceResult

Transform an existing image using a text prompt (image-to-image). Accepts file uploads.
job = client.images.transform(
*,
prompt: str, # Required
model: str, # Required
steps: int, # Required
seed: int, # Required
image: FileInput | None = None, # Single image (mutually exclusive with images)
images: list[FileInput] | None = None, # Multiple images
negative_prompt: str | None = None,
loras: list[LoraWeight | dict] | None = None,
width: int | None = None,
height: int | None = None,
guidance: float | None = None,
webhook_url: str | None = None,
) -> Job

FileInput accepts: str (path), Path, bytes, or BinaryIO (file-like object).
price = client.images.transform_price(
*, prompt: str, model: str, steps: int, seed: int,
loras: list | None = None, guidance: float | None = None,
) -> PriceResult

Upscale an image to higher resolution.
job = client.images.upscale(
*,
image: FileInput, # Required
model: str, # Required
webhook_url: str | None = None,
) -> Job

price = client.images.upscale_price(
*, model: str,
image: FileInput | None = None, # Provide image OR width+height
width: int | None = None,
height: int | None = None,
) -> PriceResult

Remove the background from an image.
job = client.images.remove_background(
*,
image: FileInput, # Required
model: str, # Required
webhook_url: str | None = None,
) -> Job

price = client.images.remove_background_price(
*, model: str,
image: FileInput | None = None, # Provide image OR width+height
width: int | None = None,
height: int | None = None,
) -> PriceResult

Access via client.audio.
Convert text to speech (TTS). Supports voice cloning via ref_audio.
job = client.audio.synthesize(
*,
text: str, # Required
model: str, # Required
lang: str, # Required — language code (e.g. "en-us")
format: str, # Required — "wav", "flac", or "mp3"
speed: float, # Required — speech speed (min 0.5)
sample_rate: float, # Required — Hz (model-specific)
mode: str | None = None, # "custom_voice", "voice_clone", or "voice_design"
voice: str | None = None, # Required when mode is "custom_voice"
ref_audio: FileInput | None = None, # Required for "voice_clone" mode
ref_text: str | None = None, # Optional reference text for voice cloning
instruct: str | None = None, # Required for "voice_design" mode
webhook_url: str | None = None,
) -> Job

price = client.audio.synthesize_price(
*, model: str, lang: str, format: str, speed: float, sample_rate: float,
text: str | None = None, # Provide text OR count_text
count_text: int | None = None, # Character count (skip sending full text)
mode: str | None = None,
voice: str | None = None,
instruct: str | None = None,
) -> PriceResult

Generate music from a text description.
job = client.audio.compose(
*,
caption: str, # Required — music description
model: str, # Required
duration: float, # Required — seconds (10–600)
inference_steps: int, # Required
guidance_scale: float, # Required
seed: int, # Required
format: str, # Required — "wav", "flac", or "mp3"
lyrics: str | None = None,
bpm: int | None = None, # 30–300
keyscale: str | None = None,
timesignature: int | None = None, # 2, 3, 4, or 6
vocal_language: str | None = None,
webhook_url: str | None = None,
) -> Job

price = client.audio.compose_price(
*, model: str, duration: float, inference_steps: int,
) -> PriceResult

Access via client.video.
Generate a video from a text prompt (text-to-video).
job = client.video.generate(
*,
prompt: str, # Required
model: str, # Required
width: int, # Required
height: int, # Required
steps: int, # Required
seed: int, # Required
frames: int, # Required
fps: int, # Required
negative_prompt: str | None = None,
guidance: float | None = None,
webhook_url: str | None = None,
) -> Job

Same parameters as generate() except webhook_url. Returns PriceResult.
Animate a still image into a video (image-to-video).
job = client.video.animate(
*,
prompt: str, # Required
model: str, # Required
first_frame_image: FileInput, # Required
seed: int, # Required
width: int, # Required
height: int, # Required
frames: int, # Required
fps: int, # Required
negative_prompt: str | None = None,
last_frame_image: FileInput | None = None,
guidance: float | None = None,
steps: int | None = None,
webhook_url: str | None = None,
) -> Job

price = client.video.animate_price(
*, model: str, width: int, height: int, frames: int, fps: int,
steps: int | None = None, guidance: float | None = None,
) -> PriceResult

Upscale a video to higher resolution.

job = client.video.upscale(*, video: FileInput, model: str, webhook_url: str | None = None) -> Job

price = client.video.upscale_price(
*, model: str, video: FileInput | None = None, width: int | None = None, height: int | None = None,
) -> PriceResult

Remove background from a video.

job = client.video.remove_background(*, video: FileInput, model: str, webhook_url: str | None = None) -> Job

price = client.video.remove_background_price(
*, model: str, video: FileInput | None = None, width: int | None = None, height: int | None = None,
) -> PriceResult

Access via client.transcription.
Unified transcription endpoint — auto-detects source type.
job = client.transcription.create(
*,
include_ts: bool, # Required — include timestamps
model: str, # Required
source_url: str | None = None, # URL (YouTube, Twitch, Twitter, etc.)
source_file: FileInput | None = None, # Audio or video file upload
return_result_in_response: bool | None = None, # Inline result in status response
webhook_url: str | None = None,
) -> Job

Provide either source_url or source_file, not both.
Auto-detection:
- Twitter Spaces URL → Audio transcription
- Other URL → Video transcription
- Audio MIME file → Audio file transcription
- Video MIME file → Video file transcription
price = client.transcription.create_price(
*, include_ts: bool, model: str,
source_url: str | None = None,
source_file: FileInput | None = None,
duration_seconds: float | None = None, # Skip source, use known duration
) -> PriceResult

These target specific endpoints directly:
| Method | Description |
|---|---|
from_video_url(*, video_url, include_ts, model, ...) |
Transcribe from video URL |
from_video_url_price(...) |
Price for video URL transcription |
from_audio_url(*, audio_url, include_ts, model, ...) |
Transcribe from audio URL (Twitter Spaces) |
from_audio_url_price(...) |
Price for audio URL transcription |
from_audio_file(*, audio, include_ts, model, ...) |
Transcribe from audio file upload |
from_audio_file_price(...) |
Price for audio file transcription |
from_video_file(*, video, include_ts, model, ...) |
Transcribe from video file upload |
from_video_file_price(...) |
Price for video file transcription |
All legacy methods return Job and their _price variants return PriceResult.
Access via client.embeddings.
Generate text embeddings (single or batch).
job = client.embeddings.create(
*,
input: str | list[str], # Required — single text or batch
model: str, # Required
return_result_in_response: bool | None = None,
webhook_url: str | None = None,
) -> Job

price = client.embeddings.create_price(
*, input: str | list[str], model: str,
return_result_in_response: bool | None = None,
) -> PriceResult

Access via client.prompts.
Prompt methods return results immediately — no job polling needed.
AI-enhance a text-to-image prompt.
result = client.prompts.enhance_image(
*, prompt: str, negative_prompt: str | None = None,
) -> EnhancePromptResult

AI-enhance a video generation prompt, optionally with a reference image.
result = client.prompts.enhance_video(
*, prompt: str, negative_prompt: str | None = None, image: FileInput | None = None,
) -> EnhancePromptResult

AI-enhance a text-to-speech prompt.
result = client.prompts.enhance_speech(
*, prompt: str, lang_code: str | None = None,
) -> EnhanceSpeechPromptResult

AI-enhance an image-to-image prompt with a reference image.
result = client.prompts.enhance_image2image(
*, prompt: str, image: FileInput, negative_prompt: str | None = None,
) -> EnhancePromptResult

Get a sample prompt for inspiration.
result = client.prompts.samples(
*, type: str, # "image" or "speech"
topic: str | None = None,
lang_code: str | None = None,
) -> SamplePromptResultEvery prompt method has a _price variant: enhance_image_price(), enhance_video_price(), enhance_speech_price(), enhance_image2image_price(), samples_price(). All return PriceResult.
Access via client.models.
List available AI models with their capabilities.
response = client.models.list(
*,
per_page: int = 15, # Max 50
page: int = 1,
inference_types: list[str] | None = None, # Filter, e.g. ["txt2img", "img2img"]
) -> ModelsResponse

All generation/submission methods return a Job (sync) or AsyncJob (async).
class Job:
request_id: str # The unique job ID
def status() -> JobResult # Poll once (makes an HTTP request)
def wait(
*,
poll_interval: float = 1.0, # Initial seconds between polls
max_wait: float = 300.0, # Total timeout in seconds
backoff_factor: float = 1.5, # Multiplier for backoff
max_interval: float = 10.0, # Cap on poll interval
) -> JobResult # Blocks until done/error
def is_done() -> bool # Convenience (makes an HTTP request)
def is_error() -> bool # Convenience (makes an HTTP request)

Note: is_done() and is_error() each make an HTTP request. For efficient polling, use status() once and check the result.
Same interface, all methods are async.
Returned by job.status() and job.wait().
class JobResult(BaseModel):
status: JobStatus # "pending", "processing", "done", "error"
preview: str | None # Preview URL (during processing)
result_url: str | None # Final output URL (when done)
results_alt_formats: dict[str, str] | None # {"jpg": "...", "webp": "..."} (image jobs)
result: str | None # Inline text result (when return_result_in_response=True)
progress: float # 0.0–100.0

class PriceResult(BaseModel):
price: float # Cost in credits

class Balance(BaseModel):
balance: float # Current account balance

class ModelsResponse(BaseModel):
data: list[ModelInfo]
links: PaginationLinks
meta: PaginationMeta
class ModelInfo(BaseModel):
name: str
inference_types: list[str]
specs: dict | None
class PaginationMeta(BaseModel):
current_page: int
last_page: int
per_page: int
total: int

class EnhancePromptResult(BaseModel):
prompt: str
negative_prompt: str | None

class EnhanceSpeechPromptResult(BaseModel):
prompt: str

class SamplePromptResult(BaseModel):
type: str
prompt: str

class JobStatus(str, Enum):
PENDING = "pending"
PROCESSING = "processing"
DONE = "done"
ERROR = "error"
class InferenceType(str, Enum):
TXT2IMG = "txt2img"
IMG2IMG = "img2img"
TXT2AUDIO = "txt2audio"
TXT2MUSIC = "txt2music"
# ... and more (see deapi._types for full list)

All exceptions inherit from DeapiError.
DeapiError # Base — attrs: message, status_code, body
├── AuthenticationError # 401 — invalid/missing API key
├── AccountSuspendedError # 403 — account suspended
├── NotFoundError # 404 — resource not found
├── ValidationError # 422 — invalid parameters
│ └── InsufficientBalanceError # 422 with balance error
├── RateLimitError # 429 — rate limited
├── ServerError # 5xx — server error
├── JobTimeoutError # Polling exceeded max_wait
└── NetworkError # Connection/timeout failure
class DeapiError(Exception):
message: str
status_code: int | None
body: dict | None

class ValidationError(DeapiError):
errors: dict[str, list[str]] # {"field": ["error message", ...]}

class RateLimitError(DeapiError):
retry_after: float # Seconds to wait (from Retry-After header)
limit_type: str # "minute" or "daily"

The client automatically retries on:
| Error | Behavior |
|---|---|
RateLimitError (429) |
Waits Retry-After seconds, retries up to max_retries |
ServerError (5xx) |
Exponential backoff (1s, 2s, 4s, 8s), up to max_retries |
NetworkError |
Exponential backoff (1s, 2s, 4s, 8s), up to max_retries |
from deapi.webhook import verify_signature, construct_event, InvalidSignatureError

Verify a webhook request's HMAC-SHA256 signature. Raises InvalidSignatureError on failure.
verify_signature(
*,
payload: bytes | str, # Raw request body
signature: str, # X-DeAPI-Signature header
timestamp: str, # X-DeAPI-Timestamp header
secret: str, # Your webhook signing secret
tolerance: int = 300, # Max age in seconds (0 = disable)
) -> None

Verify signature and parse the webhook payload into a typed object.
event = construct_event(
*,
payload: bytes | str,
signature: str,
timestamp: str,
secret: str,
tolerance: int = 300,
) -> WebhookEvent

class WebhookEvent(BaseModel):
event: str # "job.completed", "job.processing", "job.failed"
delivery_id: str
timestamp: str
data: WebhookEventData
@property
def type(self) -> str # Alias for event
class WebhookEventData(BaseModel):
job_request_id: str
status: str
previous_status: str | None
job_type: str | None
completed_at: str | None
result_url: str | None
processing_time_ms: int | None

class InvalidSignatureError(DeapiError):
... # Raised by verify_signature / construct_event

| Variable | Default | Description |
|---|---|---|
DEAPI_API_KEY |
(required) | API key for authentication |
DEAPI_BASE_URL |
https://api.deapi.ai |
API base URL |
DEAPI_API_VERSION |
v1 |
API version |