Skip to content
This repository was archived by the owner on Jul 4, 2025. It is now read-only.

Commit e3dfea3

Browse files
committed
fix: e2e tests
1 parent 282aae3 commit e3dfea3

14 files changed

+35
-63
lines changed

docs/static/openapi/cortex.json

Lines changed: 6 additions & 6 deletions
Original file line number · Diff line number · Diff line change
@@ -2754,7 +2754,7 @@
27542754
},
27552755
"version": {
27562756
"type": "string",
2757-
"example": "0.1.35-28.10.24"
2757+
"example": "b4920"
27582758
}
27592759
}
27602760
}
@@ -2763,11 +2763,11 @@
27632763
{
27642764
"engine": "llama-cpp",
27652765
"name": "mac-arm64",
2766-
"version": "0.1.35-28.10.24"
2766+
"version": "b4920"
27672767
},
27682768
{
27692769
"engine": "llama-cpp",
2770-
"name": "linux-amd64-avx",
2770+
"name": "linux-avx-x64",
27712771
"version": "0.1.35-27.10.24"
27722772
}
27732773
]
@@ -2901,7 +2901,7 @@
29012901
"name": {
29022902
"type": "string",
29032903
"description": "The name of the variant, including OS, architecture, and capabilities",
2904-
"example": "linux-amd64-avx-cuda-11-7"
2904+
"example": "linux-avx-x64-cuda-11-7"
29052905
},
29062906
"created_at": {
29072907
"type": "string",
@@ -2973,7 +2973,7 @@
29732973
},
29742974
"name": {
29752975
"type": "string",
2976-
"example": "0.1.39-linux-amd64-avx-cuda-11-7"
2976+
"example": "llama-b4920-bin-linux-avx-cuda-cu11.7"
29772977
},
29782978
"size": {
29792979
"type": "integer",
@@ -3250,7 +3250,7 @@
32503250
},
32513251
"version": {
32523252
"type": "string",
3253-
"example": "0.1.35-28.10.24"
3253+
"example": "b4920"
32543254
}
32553255
}
32563256
}

engine/e2e-test/api/engines/test_api_engine.py

Lines changed: 6 additions & 6 deletions
Original file line number · Diff line number · Diff line change
@@ -28,14 +28,14 @@ def test_engines_get_llamacpp_should_be_successful(self):
2828

2929
# engines install
3030
def test_engines_install_llamacpp_specific_version_and_variant(self):
31-
data = {"version": "v0.1.40-b4354", "variant": "linux-amd64-avx"}
31+
data = {"version": "b4920", "variant": "linux-avx-x64"}
3232
response = requests.post(
3333
"http://localhost:3928/v1/engines/llama-cpp/install", json=data
3434
)
3535
assert response.status_code == 200
3636

3737
def test_engines_install_llamacpp_specific_version_and_null_variant(self):
38-
data = {"version": "v0.1.40-b4354"}
38+
data = {"version": "b4920"}
3939
response = requests.post(
4040
"http://localhost:3928/v1/engines/llama-cpp/install", json=data
4141
)
@@ -55,14 +55,14 @@ async def test_engines_install_uninstall_llamacpp_should_be_successful(self):
5555
@pytest.mark.asyncio
5656
async def test_engines_install_uninstall_llamacpp_with_only_version_should_be_failed(self):
5757
# install first
58-
data = {"variant": "mac-arm64"}
58+
data = {"variant": "linux-avx-x64"}
5959
install_response = requests.post(
6060
"http://127.0.0.1:3928/v1/engines/llama-cpp/install", json=data
6161
)
6262
await wait_for_websocket_download_success_event(timeout=120)
6363
assert install_response.status_code == 200
6464

65-
data = {"version": "v0.1.35"}
65+
data = {"version": "b4920"}
6666
response = requests.delete(
6767
"http://localhost:3928/v1/engines/llama-cpp/install", json=data
6868
)
@@ -72,7 +72,7 @@ async def test_engines_install_uninstall_llamacpp_with_only_version_should_be_fa
7272
@pytest.mark.asyncio
7373
async def test_engines_install_uninstall_llamacpp_with_variant_should_be_successful(self):
7474
# install first
75-
data = {"variant": "mac-arm64"}
75+
data = {"variant": "linux-avx-x64"}
7676
install_response = requests.post(
7777
"http://127.0.0.1:3928/v1/engines/llama-cpp/install", json=data
7878
)
@@ -85,7 +85,7 @@ async def test_engines_install_uninstall_llamacpp_with_variant_should_be_success
8585
def test_engines_install_uninstall_llamacpp_with_specific_variant_and_version_should_be_successful(
8686
self,
8787
):
88-
data = {"variant": "mac-arm64", "version": "v0.1.35"}
88+
data = {"variant": "linux-avx-x64", "version": "b4920"}
8989
# install first
9090
install_response = requests.post(
9191
"http://localhost:3928/v1/engines/llama-cpp/install", json=data

engine/e2e-test/api/engines/test_api_engine_install_nightly.py

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -2,7 +2,7 @@
22
import requests
33
from utils.test_runner import start_server, stop_server, get_latest_pre_release_tag
44

5-
latest_pre_release_tag = get_latest_pre_release_tag("menloresearch", "cortex.llamacpp")
5+
latest_pre_release_tag = get_latest_pre_release_tag("menloresearch", "llama.cpp")
66

77
class TestApiEngineInstall:
88

@@ -23,7 +23,7 @@ def test_engines_install_llamacpp_should_be_successful(self):
2323
assert response.status_code == 200
2424

2525
def test_engines_install_llamacpp_specific_version_and_variant(self):
26-
data = {"version": latest_pre_release_tag, "variant": "linux-amd64-avx"}
26+
data = {"version": latest_pre_release_tag, "variant": "linux-avx-x64"}
2727
response = requests.post(
2828
"http://localhost:3928/v1/engines/llama-cpp/install", json=data
2929
)

engine/e2e-test/api/engines/test_api_get_default_engine.py

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -24,8 +24,8 @@ def setup_and_teardown(self):
2424
def test_api_get_default_engine_successfully(self):
2525
# Data test
2626
engine= "llama-cpp"
27-
name= "linux-amd64-avx"
28-
version= "v0.1.35-27.10.24"
27+
name= "linux-avx-x64"
28+
version= "b4920"
2929

3030
data = {"version": version, "variant": name}
3131
post_install_url = f"http://localhost:3928/v1/engines/{engine}/install"

engine/e2e-test/api/engines/test_api_get_list_engine.py

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -24,8 +24,8 @@ def setup_and_teardown(self):
2424
def test_api_get_list_engines_successfully(self):
2525
# Data test
2626
engine= "llama-cpp"
27-
name= "linux-amd64-avx"
28-
version= "v0.1.35-27.10.24"
27+
name= "linux-avx-x64"
28+
version= "b4920"
2929

3030
post_install_url = f"http://localhost:3928/v1/engines/{engine}/install"
3131
response = requests.delete(

engine/e2e-test/api/engines/test_api_post_default_engine.py

Lines changed: 2 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -23,8 +23,8 @@ def setup_and_teardown(self):
2323
def test_api_set_default_engine_successfully(self):
2424
# Data test
2525
engine= "llama-cpp"
26-
name= "linux-amd64-avx"
27-
version= "v0.1.35-27.10.24"
26+
name= "linux-avx-x64"
27+
version= "b4920"
2828

2929
data = {"version": version, "variant": name}
3030
post_install_url = f"http://localhost:3928/v1/engines/{engine}/install"

engine/e2e-test/api/hardware/test_api_get_hardware.py

Lines changed: 0 additions & 20 deletions
Original file line number · Diff line number · Diff line change
@@ -88,25 +88,6 @@ def test_api_get_hardware_successfully(self):
8888
"example": True,
8989
"description": "Indicates if the GPU is currently activated."
9090
},
91-
"additional_information": {
92-
"type": "object",
93-
"properties": {
94-
"compute_cap": {
95-
"type": "string",
96-
"example": "8.6",
97-
"description": "The compute capability of the GPU."
98-
},
99-
"driver_version": {
100-
"type": "string",
101-
"example": "535.183",
102-
"description": "The version of the installed driver."
103-
}
104-
},
105-
"required": [
106-
"compute_cap",
107-
"driver_version"
108-
]
109-
},
11091
"free_vram": {
11192
"type": "integer",
11293
"example": 23983,
@@ -140,7 +121,6 @@ def test_api_get_hardware_successfully(self):
140121
},
141122
"required": [
142123
"activated",
143-
"additional_information",
144124
"free_vram",
145125
"id",
146126
"name",

engine/e2e-test/api/model/test_api_model.py

Lines changed: 1 addition & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -95,6 +95,7 @@ async def test_models_start_stop_should_be_successful(self):
9595
time.sleep(30)
9696

9797
print("Pull model")
98+
requests.delete("http://localhost:3928/v1/models/tinyllama:1b")
9899
json_body = {"model": "tinyllama:1b"}
99100
response = requests.post("http://localhost:3928/v1/models/pull", json=json_body)
100101
assert response.status_code == 200, f"Failed to pull model: tinyllama:1b"

engine/e2e-test/cli/engines/test_cli_engine_install.py

Lines changed: 1 addition & 17 deletions
Original file line number · Diff line number · Diff line change
@@ -31,25 +31,9 @@ def test_engines_install_llamacpp_should_be_successfully(self):
3131
assert len(response.json()) > 0
3232
assert exit_code == 0, f"Install engine failed with error: {error}"
3333

34-
@pytest.mark.skipif(reason="Ignore onnx-runtime test")
35-
def test_engines_install_onnx_on_macos_should_be_failed(self):
36-
exit_code, output, error = run(
37-
"Install Engine", ["engines", "install", "onnxruntime"]
38-
)
39-
assert "is not supported on" in output, "Should display error message"
40-
assert exit_code == 0, f"Install engine failed with error: {error}"
41-
42-
@pytest.mark.skipif(reason="Ignore tensorrt-llm test")
43-
def test_engines_install_onnx_on_tensorrt_should_be_failed(self):
44-
exit_code, output, error = run(
45-
"Install Engine", ["engines", "install", "tensorrt-llm"]
46-
)
47-
assert "is not supported on" in output, "Should display error message"
48-
assert exit_code == 0, f"Install engine failed with error: {error}"
49-
5034
@pytest.mark.skipif(platform.system() == "Windows", reason="Progress bar log issue on Windows")
5135
def test_engines_install_pre_release_llamacpp(self):
52-
engine_version = "v0.1.43"
36+
engine_version = "b4920"
5337
exit_code, output, error = run(
5438
"Install Engine",
5539
["engines", "install", "llama-cpp", "-v", engine_version],

engine/e2e-test/runner/cortex-llamacpp-e2e-nightly.py

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -21,7 +21,7 @@
2121
from api.engines.test_api_get_default_engine import TestApiDefaultEngine
2222
from api.engines.test_api_get_engine_release import TestApiEngineRelease
2323
from api.engines.test_api_get_engine_release_latest import TestApiEngineReleaseLatest
24-
from test_api_post_default_engine import TestApiSetDefaultEngine
24+
from api.engines.test_api_post_default_engine import TestApiSetDefaultEngine
2525
from api.model.test_api_model import TestApiModel
2626
from api.model.test_api_model_import import TestApiModelImport
2727
from api.files.test_api_create_file import TestApiCreateFile

0 commit comments

Comments (0)