Skip to content
This repository was archived by the owner on Jul 4, 2025. It is now read-only.

Commit 8568f9a

Browse files
authored
chore: update benchmark using cortex client (cortex-node) (#787)
1 parent a5f04e1 commit 8568f9a

File tree

7 files changed

+12
-16
lines changed

7 files changed

+12
-16
lines changed

cortex-js/package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,6 @@
5353
"decompress": "^4.2.1",
5454
"js-yaml": "^4.1.0",
5555
"nest-commander": "^3.13.0",
56-
"openai": "^4.50.0",
5756
"readline": "^1.3.0",
5857
"reflect-metadata": "^0.2.0",
5958
"rxjs": "^7.8.1",
@@ -81,6 +80,7 @@
8180
"@typescript-eslint/eslint-plugin": "^6.0.0",
8281
"@typescript-eslint/parser": "^6.0.0",
8382
"bun": "^1.1.15",
83+
"cortexso-node": "^0.0.4",
8484
"cpx": "^1.5.0",
8585
"eslint": "^8.42.0",
8686
"eslint-config-prettier": "^9.0.0",

cortex-js/src/domain/models/assistant.interface.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
1-
import { Assistant as OpenAiAssistant } from 'openai/resources/beta/assistants';
2-
import { AssistantResponseFormatOption as OpenAIAssistantResponseFormatOption } from 'openai/resources/beta/threads/threads';
1+
import { Assistant as OpenAiAssistant } from 'cortexso-node/resources/beta/assistants';
2+
import { AssistantResponseFormatOption as OpenAIAssistantResponseFormatOption } from 'cortexso-node/resources/beta/threads/threads';
33

44
export interface Assistant extends OpenAiAssistant {
55
avatar?: string;

cortex-js/src/domain/models/message.interface.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ import {
22
Message as OpenAiMessage,
33
MessageContent as OpenAiMessageContent,
44
TextContentBlock as OpenAiTextContentBlock,
5-
} from 'openai/resources/beta/threads/messages';
5+
} from 'cortexso-node/resources/beta/threads/messages';
66

77
export interface Message extends OpenAiMessage {}
88

cortex-js/src/domain/models/model.interface.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
import { Model as OpenAiModel } from 'openai/resources/models';
1+
import { Model as OpenAiModel } from 'cortexso-node/resources/models';
22

33
export interface Model
44
extends OpenAiModel,

cortex-js/src/domain/models/thread.interface.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
import { Thread as OpenAiThread } from 'openai/resources/beta/threads/threads';
1+
import { Thread as OpenAiThread } from 'cortexso-node/resources/beta/threads/threads';
22
import { Assistant } from './assistant.interface';
33

44
export interface ThreadToolResources extends OpenAiThread.ToolResources {}

cortex-js/src/infrastructure/commanders/types/benchmark-config.interface.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
import { ChatCompletionMessageParam } from 'openai/resources';
1+
import { ChatCompletionMessageParam } from 'cortexso-node/resources';
22

33
export interface ApiConfig {
44
base_url: string;

cortex-js/src/infrastructure/commanders/usecases/benchmark.cli.usecases.ts

Lines changed: 5 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import { Injectable } from '@nestjs/common';
22
import si from 'systeminformation';
33
import fs, { existsSync, mkdirSync, readFileSync, writeFileSync } from 'fs';
4-
import OpenAI from 'openai';
4+
import Cortex from 'cortexso-node';
55
import { Presets, SingleBar } from 'cli-progress';
66
import yaml from 'js-yaml';
77
import { FileManagerService } from '@/infrastructure/services/file-manager/file-manager.service';
@@ -27,7 +27,7 @@ export class BenchmarkCliUsecases {
2727
) {}
2828

2929
config: BenchmarkConfig;
30-
openai?: OpenAI;
30+
cortexClient?: Cortex;
3131
/**
3232
* Benchmark and analyze the performance of a specific AI model using a variety of system resources
3333
*/
@@ -43,7 +43,7 @@ export class BenchmarkCliUsecases {
4343

4444
const model = params?.model ?? this.config.api.parameters.model;
4545
// TODO: Using OpenAI client or Cortex client to benchmark?
46-
this.openai = new OpenAI({
46+
this.cortexClient = new Cortex({
4747
apiKey: this.config.api.api_key,
4848
baseURL: this.config.api.base_url,
4949
timeout: 20 * 1000,
@@ -60,11 +60,7 @@ export class BenchmarkCliUsecases {
6060
.then(() =>
6161
this.psUsecases
6262
.getModels()
63-
.then((models) =>
64-
models.find(
65-
(e) => e.modelId === model,
66-
),
67-
),
63+
.then((models) => models.find((e) => e.modelId === model)),
6864
)
6965
.then((model) => {
7066
if (!model)
@@ -147,7 +143,7 @@ export class BenchmarkCliUsecases {
147143
let firstTokenTime = null;
148144

149145
try {
150-
const stream = await this.openai!.chat.completions.create({
146+
const stream = await this.cortexClient!.chat.completions.create({
151147
model: this.config.api.parameters.model,
152148
messages: this.config.api.parameters.messages,
153149
max_tokens: this.config.api.parameters.max_tokens,

0 commit comments

Comments (0)