From 4929fd5b1e11db7ff8632e04b6d7c02990c82344 Mon Sep 17 00:00:00 2001 From: Abhishek Date: Fri, 13 Feb 2026 09:01:44 -0800 Subject: [PATCH] Add Telnyx integration example with AI services support This commit adds documentation and configuration examples for using Telnyx AI services with EchoKit Server. Why Telnyx: - OpenAI-compatible API enables drop-in integration - 53+ AI models including GPT-4, Claude, and Llama - Global edge network for low-latency inference - Unified API key for ASR, TTS, and LLM services - Pay-per-use pricing with transparent costs Changes: - examples/telnyx/config.toml: Ready-to-use configuration - examples/telnyx/README.md: Complete integration guide --- examples/telnyx/README.md | 141 ++++++++++++++++++++++++++++++++++++ examples/telnyx/config.toml | 64 ++++++++++++++++ 2 files changed, 205 insertions(+) create mode 100644 examples/telnyx/README.md create mode 100644 examples/telnyx/config.toml diff --git a/examples/telnyx/README.md b/examples/telnyx/README.md new file mode 100644 index 0000000..730c03d --- /dev/null +++ b/examples/telnyx/README.md @@ -0,0 +1,141 @@ +# Telnyx Integration for EchoKit Server + +This guide explains how to configure EchoKit Server to use Telnyx AI services. + +## Why Telnyx? + +Telnyx provides a comprehensive AI platform that integrates seamlessly with EchoKit: + +- **OpenAI-Compatible API**: All endpoints follow OpenAI specifications, enabling drop-in compatibility with EchoKit's architecture +- **53+ AI Models**: Access to GPT-4, Claude, Llama, Mistral, and many open-source models through a single API +- **Global Edge Network**: Low-latency inference from data centers worldwide +- **Unified Billing**: Single API key for ASR, TTS, and LLM services +- **Competitive Pricing**: Pay-per-use with transparent, per-token pricing + +## Prerequisites + +1. A Telnyx account ([sign up here](https://telnyx.com/sign-up)) +2. An API key from the [Telnyx Portal](https://portal.telnyx.com) + +## Quick Start + +### 1. 
Set Your API Key + +```bash +export TELNYX_API_KEY="your-api-key-here" +``` + +### 2. Use the Example Configuration + +```bash +cp examples/telnyx/config.toml config.toml +``` + +### 3. Build and Run + +```bash +cargo build --release +./target/release/echokit_server +``` + +## Configuration Reference + +### ASR (Speech Recognition) + +```toml +[asr] +platform = "openai" +url = "https://api.telnyx.com/v2/ai/transcriptions" +api_key = "${TELNYX_API_KEY}" +model = "whisper-1" +lang = "en" +``` + +Available models: +- `whisper-1` - OpenAI Whisper (recommended) + +### TTS (Text-to-Speech) + +```toml +[tts] +platform = "openai" +url = "https://api.telnyx.com/v2/ai/speech" +model = "tts-1" +api_key = "${TELNYX_API_KEY}" +voice = "alloy" +``` + +Available voices: +- `alloy`, `echo`, `fable`, `onyx`, `nova`, `shimmer` + +Available models: +- `tts-1` - Optimized for speed +- `tts-1-hd` - Higher quality audio + +### LLM (Language Model) + +```toml +[llm] +platform = "openai_chat" +url = "https://api.telnyx.com/v2/ai/chat/completions" +api_key = "${TELNYX_API_KEY}" +model = "gpt-4o-mini" +history = 5 +``` + +Popular model options: +- `gpt-4o` - Latest GPT-4 optimized +- `gpt-4o-mini` - Fast, cost-effective +- `claude-3-5-sonnet` - Anthropic Claude +- `llama-3.1-70b-instruct` - Open-source alternative +- `llama-3.1-8b-instruct` - Lightweight, fast inference + +See the [Telnyx AI documentation](https://developers.telnyx.com/docs/ai/introduction) for the complete model list. 
+
+## Using Telnyx LiteLLM Proxy
+
+For advanced use cases, Telnyx offers a LiteLLM proxy that provides:
+
+- Automatic fallback between models
+- Load balancing across providers
+- Unified rate limiting
+- Custom model aliases
+
+Point your `config.toml` at the chat completions endpoint and select any supported model:
+
+```toml
+[llm]
+platform = "openai_chat"
+url = "https://api.telnyx.com/v2/ai/chat/completions"
+# Use any supported model
+model = "claude-3-5-sonnet"
+```
+
+## Troubleshooting
+
+### Authentication Errors
+
+Verify your API key is set correctly:
+
+```bash
+echo $TELNYX_API_KEY
+```
+
+### Model Not Found
+
+Check available models in your Telnyx Portal or consult the [API documentation](https://developers.telnyx.com/docs/ai/introduction).
+
+### High Latency
+
+Telnyx routes requests to the nearest edge location automatically. If you experience latency issues, verify your network connection or contact Telnyx support.
+
+## Additional Resources
+
+- [Telnyx AI Documentation](https://developers.telnyx.com/docs/ai/introduction)
+- [Telnyx API Reference](https://developers.telnyx.com/docs/api/v2/overview)
+- [EchoKit Documentation](https://echokit.dev/docs/quick-start/)
+- [Telnyx Discord Community](https://discord.gg/telnyx)
+
+## License
+
+This integration example is provided under the same license as EchoKit Server.
diff --git a/examples/telnyx/config.toml b/examples/telnyx/config.toml
new file mode 100644
index 0000000..a9a0925
--- /dev/null
+++ b/examples/telnyx/config.toml
@@ -0,0 +1,64 @@
+# EchoKit Server Configuration with Telnyx
+#
+# This example demonstrates using Telnyx AI services with EchoKit Server.
+# Telnyx offers 53+ AI models via an OpenAI-compatible inference API,
+# making it a natural fit for EchoKit's OpenAI-spec architecture.
+# +# Key benefits: +# - Single API key for LLM, ASR, and TTS +# - Global edge network for low-latency inference +# - 53+ AI models including GPT, Claude, Llama, and open-source options +# - Pay-per-use pricing with no minimum commitments +# +# Setup: +# 1. Create a Telnyx account at https://telnyx.com +# 2. Generate an API key from the Portal +# 3. Set your TELNYX_API_KEY environment variable +# +# API Documentation: https://developers.telnyx.com + +addr = "0.0.0.0:8080" +hello_wav = "hello.wav" + +[asr] +platform = "openai" +url = "https://api.telnyx.com/v2/ai/transcriptions" +api_key = "${TELNYX_API_KEY}" +model = "whisper-1" +lang = "en" + +[tts] +platform = "openai" +url = "https://api.telnyx.com/v2/ai/speech" +model = "tts-1" +api_key = "${TELNYX_API_KEY}" +voice = "alloy" + +[llm] +platform = "openai_chat" +url = "https://api.telnyx.com/v2/ai/chat/completions" +api_key = "${TELNYX_API_KEY}" +model = "gpt-4o-mini" +history = 5 + +[[llm.sys_prompts]] +role = "system" +content = """ +You are a helpful assistant. Answer truthfully and concisely. Always answer in English. + +- NEVER use bullet points +- NEVER use tables +- Answer in complete English sentences as if you are in a conversation. + +""" + +# Alternative: Use Telnyx's LiteLLM proxy for unified access to 53+ models +# Uncomment the section below and comment out the [llm] section above +# +# [llm] +# platform = "openai_chat" +# url = "https://api.telnyx.com/v2/ai/chat/completions" +# api_key = "${TELNYX_API_KEY}" +# # Available models include: gpt-4o, claude-3-5-sonnet, llama-3.1-70b, and more +# model = "llama-3.1-70b-instruct" +# history = 5