//
//  Created by Firdavs Khaydarov on 01/08/2025.
//

/// Model type to define OpenAI models.
///
/// A `Model` is a plain string identifier (e.g. `"o1"`, `"o3-mini"`) so that
/// newly released model names can be used without a library update.
///
/// For more information about models and their capabilities visit
/// https://platform.openai.com/docs/models
public typealias Model = String

9 | 12 |
|
// MARK: - o-series

extension Model {
    /// o1 models think before they answer, producing a long internal chain of thought before responding to the user.
    ///
    /// * Context window: 200,000
    /// * Max output tokens: 100,000
    public static let o1: Model = "o1"

    /// The o1-pro model uses more compute to think harder and provide consistently better answers.
    ///
    /// o1-pro is available in the Responses API only to enable support for multi-turn model
    /// interactions before responding to API requests, and other advanced API features in the future.
    ///
    /// * Context window: 200,000
    /// * Max output tokens: 100,000
    public static let o1_pro: Model = "o1-pro"

    /// o3 is a well-rounded and powerful model across domains.
    ///
    /// Use it to think through multi-step problems that involve analysis across text, code, and images.
    ///
    /// * Context window: 200,000
    /// * Max output tokens: 100,000
    public static let o3: Model = "o3"

    /// The o3-pro model uses more compute to think harder and provide consistently better answers.
    ///
    /// o3-pro is available in the Responses API only to enable support for multi-turn model
    /// interactions before responding to API requests, and other advanced API features in the future.
    /// Since o3-pro is designed to tackle tough problems, some requests may take several minutes to
    /// finish. To avoid timeouts, try using background mode.
    ///
    /// * Context window: 200,000
    /// * Max output tokens: 100,000
    public static let o3_pro: Model = "o3-pro"

    /// o3-mini is our newest small reasoning model, providing high intelligence at the same cost
    /// and latency targets of o1-mini.
    ///
    /// o3-mini supports key developer features, like Structured Outputs, function calling, and Batch API.
    ///
    /// * Context window: 200,000
    /// * Max output tokens: 100,000
    public static let o3_mini: Model = "o3-mini"

    /// o3-deep-research is the most advanced model for deep research, designed to tackle complex,
    /// multi-step research tasks.
    ///
    /// It can search and synthesize information from across the internet as well as from your own
    /// data—brought in through MCP connectors.
    ///
    /// * Context window: 200,000
    /// * Max output tokens: 100,000
    public static let o3_deep_research: Model = "o3-deep-research"

    /// o4-mini is the latest small o-series model.
    ///
    /// It's optimized for fast, effective reasoning with exceptionally efficient performance in
    /// coding and visual tasks.
    ///
    /// * Context window: 200,000
    /// * Max output tokens: 100,000
    public static let o4_mini: Model = "o4-mini"

    /// o4-mini-deep-research is a faster, more affordable deep research model—ideal for tackling
    /// complex, multi-step research tasks.
    ///
    /// It can search and synthesize information from across the internet as well as from your own
    /// data, brought in through MCP connectors.
    ///
    /// * Context window: 200,000
    /// * Max output tokens: 100,000
    public static let o4_mini_deep_research: Model = "o4-mini-deep-research"
}


// MARK: - GPT-5

12 | 90 | extension Model { |
|