api.py
import asyncio
import json
import logging
import os
import time
import traceback
from typing import Any, Dict, Optional

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel, Field
from llama_cpp import Llama, LlamaGrammar
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler("/root/claude_tests/NEODEMO4/server.log")
    ]
)
logger = logging.getLogger("inference-server")
app = FastAPI(title="Mistral-7B Structured Inference Server")
MODEL_PATH = "/root/claude_tests/NEODEMO4/model_assets/mistral-7b-v0.1.Q4_K_M.gguf"
# Global model instance
try:
    llm = Llama(
        model_path=MODEL_PATH,
        n_ctx=2048,  # Increased context
        n_threads=4,
        n_batch=512,
        verbose=False
    )
    logger.info("Model loaded successfully.")
except Exception as e:
    logger.error(f"Failed to load model: {e}")
    raise
class InferenceRequest(BaseModel):
    prompt: str
    priority: str = Field(default="batch", pattern="^(interactive|batch)$")
    max_tokens: int = 128
    temperature: float = 0.7
    format: str = Field(default="raw", pattern="^(raw|json)$")
    json_schema: Optional[Dict[str, Any]] = None

class InferenceResponse(BaseModel):
    text: str
    structured_data: Optional[Dict[str, Any]] = None
    metrics: Dict[str, Any]
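# Illustrative request body for POST /generate (a sketch only: the field names
# come from InferenceRequest above, but the prompt text and json_schema values
# below are assumptions, not part of this codebase):
#
# {
#   "prompt": "Extract the name and age from: 'Alice is 30.'",
#   "priority": "interactive",
#   "max_tokens": 64,
#   "temperature": 0.2,
#   "format": "json",
#   "json_schema": {
#     "type": "object",
#     "properties": {"name": {"type": "string"}, "age": {"type": "integer"}}
#   }
# }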
priority_queue = asyncio.PriorityQueue()
interactive_waiting = False
async def worker():
    global interactive_waiting
    loop = asyncio.get_running_loop()
    while True:
        priority, _, req, fut, t_start = await priority_queue.get()
        t_gen_start = time.time()
        try:
            def run_inference():
                p = req.prompt
                kwargs = {
                    "max_tokens": req.max_tokens,
                    "temperature": req.temperature,
                    "stop": ["</s>"],
                    "stream": True
                }
                if req.format == "json":
                    try:
                        if req.json_schema:
                            kwargs["grammar"] = LlamaGrammar.from_json_schema(json.dumps(req.json_schema))
                        else:
                            # Default highly-reliable JSON grammar
                            json_grammar = r'''
root ::= object
value ::= object | array | string | number | ("true" | "false" | "null") ws
object ::= "{" ws ( pair ( "," ws pair )* )? "}" ws
pair ::= string ":" ws value
array ::= "[" ws ( value ( "," ws value )* )? "]" ws
string ::= "\"" ( [^"\\\\] | "\\" ( ["\\\\/bfnrt] | "u" [0-9a-fA-F]{4} ) )* "\"" ws
number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [+-]? [0-9]+)? ws
ws ::= ([ \t\n\r]*)
'''
                            kwargs["grammar"] = LlamaGrammar.from_string(json_grammar)
                        if "json" not in p.lower():
                            p += "\nOutput JSON format:"
                    except Exception as ge:
                        logger.error(f"Grammar error: {ge}")
                gen = llm(prompt=p, **kwargs)
                full, count = "", 0
                for chunk in gen:
                    # Check for preemption between streamed chunks
                    if req.priority == "batch" and interactive_waiting:
                        logger.info("Batch request pre-empted by interactive request.")
                        full += " [PREEMPTED]"
                        break
                    text = chunk["choices"][0]["text"]
                    full += text
                    count += 1
                structured = None
                if req.format == "json":
                    try:
                        structured = json.loads(full)
                    except json.JSONDecodeError:
                        pass
                return {"text": full, "structured": structured, "count": count}

            res = await loop.run_in_executor(None, run_inference)
            fut.set_result({
                "text": res["text"],
                "structured": res["structured"],
                "tokens": res["count"],
                "g_t": time.time() - t_gen_start,
                "q_t": t_gen_start - t_start
            })
        except Exception as e:
            logger.error(f"Worker Error: {traceback.format_exc()}")
            fut.set_exception(e)
        finally:
            priority_queue.task_done()
            # Update waiting status from the queue's internal heap
            interactive_waiting = any(item[0] == 0 for item in priority_queue._queue)
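# Scheduling summary of the worker above: interactive requests enqueue with
# priority 0 and batch requests with priority 1; a running batch generation
# checks the interactive_waiting flag between streamed chunks and stops early
# (appending "[PREEMPTED]") so a waiting interactive request starts sooner.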
@app.on_event("startup")
async def startup_event():
asyncio.create_task(worker())
@app.post("/generate", response_model=InferenceResponse)
async def generate(req: InferenceRequest):
global interactive_waiting
# 0 for interactive (high priority), 1 for batch
priority_val = 0 if req.priority == "interactive" else 1
if priority_val == 0:
interactive_waiting = True
fut = asyncio.get_running_loop().create_future()
t_entry = time.time()
# PriorityQueue stores (priority, timestamp, req, future, t_entry)
await priority_queue.put((priority_val, t_entry, req, fut, t_entry))
try:
r = await fut
return {
"text": r["text"],
"structured_data": r["structured"],
"metrics": {
"queue_time": r["q_t"],
"generation_time": r["g_t"],
"tokens": r["tokens"],
"tps": r["tokens"] / r["g_t"] if r["g_t"] > 0 else 0
}
}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@app.get("/health")
async def health():
return {"status": "ok", "model": "Mistral-7B-v0.1-GGUF", "device": "CPU"}
if __name__ == "__main__":
import uvicorn
# Auto-shutdown pattern (5 mins)
import threading, os
def shutdown():
time.sleep(300)
logger.info("Auto-shutdown triggered.")
os._exit(0)
threading.Thread(target=shutdown, daemon=True).start()
uvicorn.run(app, host="0.0.0.0", port=8000)
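# Example client call (illustrative sketch; assumes the server is reachable on
# localhost:8000 -- adjust host/port to your deployment):
#
#   curl -s http://localhost:8000/generate \
#     -H "Content-Type: application/json" \
#     -d '{"prompt": "Say hello", "priority": "interactive", "max_tokens": 32}'
#
# The response contains "text", optional "structured_data", and a "metrics"
# block with queue_time, generation_time, tokens, and tps.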