@@ -14,6 +14,10 @@ import (
 	openai "github.com/sashabaranov/go-openai"
 )
 
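+// Shared test fixtures: a placeholder model ID and a base URL on the reserved .invalid TLD, which never resolves.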
+const testLMStudioModel = "test-org/test-model"
+const testBaseURL = "http://example.invalid/v1"
+
 // TestNewClient verifies that the LLM client is created correctly
 func TestNewClient(t *testing.T) {
 	tests := []struct {
@@ -27,7 +31,7 @@ func TestNewClient(t *testing.T) {
 	}{
 		{
 			name:       "valid client creation",
-			baseURL:    "http://localhost:1234/v1",
+			baseURL:    testBaseURL,
 			model:      "gpt-4",
 			apiKey:     "test-api-key",
 			retryCount: 3,
@@ -117,6 +121,10 @@ func TestSendRequest_CreatesRequest(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			client := NewClient(tt.baseURL, tt.model, tt.apiKey, tt.retry)
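+			// Stub the transport hook so the test never performs a real HTTP call.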
+			client.doRequest = func(_ context.Context, _ openai.ChatCompletionRequest) (openai.ChatCompletionResponse, error) {
+				return openai.ChatCompletionResponse{}, errors.New("mock transport failure")
+			}
 
 			messages := []openai.ChatCompletionMessage{
 				{
@@ -183,6 +191,11 @@ func TestSendRequest_ContextCancelled(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			client := NewClient(tt.baseURL, tt.model, tt.apiKey, tt.retry)
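+			// Block until the context is cancelled, then surface ctx.Err() as the request error.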
+			client.doRequest = func(ctx context.Context, _ openai.ChatCompletionRequest) (openai.ChatCompletionResponse, error) {
+				<-ctx.Done()
+				return openai.ChatCompletionResponse{}, ctx.Err()
+			}
 
 			messages := []openai.ChatCompletionMessage{
 				{
@@ -247,6 +260,10 @@ func TestSendRequest_ExhaustedRetries(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			client := NewClient(tt.baseURL, tt.model, tt.apiKey, tt.retry)
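+			// Fail on every attempt so the client exhausts its retry budget.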
+			client.doRequest = func(_ context.Context, _ openai.ChatCompletionRequest) (openai.ChatCompletionResponse, error) {
+				return openai.ChatCompletionResponse{}, errors.New("mock transport failure")
+			}
 
 			messages := []openai.ChatCompletionMessage{
 				{
@@ -471,7 +488,7 @@ func TestSendRequestWithMetrics_ExplicitModelOverridesDefault(t *testing.T) {
 }
 
 func TestNewEmbedder(t *testing.T) {
-	embedder := NewEmbedder("http://localhost:1234/v1", "embed-model", "key", 2)
+	embedder := NewEmbedder(testBaseURL, "embed-model", "key", 2)
 	if embedder.Model() != "embed-model" {
 		t.Fatalf("Model() = %q, want embed-model", embedder.Model())
 	}
@@ -601,10 +618,10 @@ func TestResolveAutoContextLength_UsesPathAPIModels(t *testing.T) {
 		_ = json.NewEncoder(w).Encode(map[string]any{
 			"data": []map[string]any{
 				{
-					"id": "qwen/qwen3.5-9b",
+					"id": testLMStudioModel,
 					"loaded_instances": []map[string]any{
 						{
-							"id": "qwen/qwen3.5-9b",
+							"id": testLMStudioModel,
 							"config": map[string]any{
 								"context_length": 32768,
 							},
@@ -622,7 +639,7 @@ func TestResolveAutoContextLength_UsesPathAPIModels(t *testing.T) {
 	}))
 	defer server.Close()
 
-	client := NewClient(server.URL+"/lm/v1", "qwen/qwen3.5-9b", "", 0)
+	client := NewClient(server.URL+"/lm/v1", testLMStudioModel, "", 0)
 	value, err := client.ResolveAutoContextLength(context.Background())
 	if err != nil {
 		t.Fatalf("ResolveAutoContextLength() error = %v", err)
@@ -651,10 +668,10 @@ func TestResolveAutoContextLength_FallsBackToRootAPIModels(t *testing.T) {
 		_ = json.NewEncoder(w).Encode(map[string]any{
 			"data": []map[string]any{
 				{
-					"id": "qwen/qwen3.5-9b",
+					"id": testLMStudioModel,
 					"loaded_instances": []map[string]any{
 						{
-							"id": "qwen/qwen3.5-9b",
+							"id": testLMStudioModel,
 							"config": map[string]any{
 								"context_length": 65536,
 							},
@@ -669,7 +686,7 @@ func TestResolveAutoContextLength_FallsBackToRootAPIModels(t *testing.T) {
 	}))
 	defer server.Close()
 
-	client := NewClient(server.URL+"/nested/v1", "qwen/qwen3.5-9b", "", 0)
+	client := NewClient(server.URL+"/nested/v1", testLMStudioModel, "", 0)
 	value, err := client.ResolveAutoContextLength(context.Background())
 	if err != nil {
 		t.Fatalf("ResolveAutoContextLength() error = %v", err)
@@ -687,18 +704,20 @@ func TestResolveAutoContextLength_FallsBackToRootAPIModels(t *testing.T) {
 
 func TestResolveAutoContextLength_WarmupWhenModelNotLoaded(t *testing.T) {
 	var modelsCalls int
-	client := NewClient("http://localhost:1234/v1", "qwen/qwen3.5-9b", "", 0)
+	client := NewClient(testBaseURL, testLMStudioModel, "", 0)
 
 	client.doHTTP = func(_ *http.Request) (*http.Response, error) {
 		modelsCalls++
 		if modelsCalls == 1 {
-			body := `{"models":[{"key":"qwen/qwen3.5-9b","loaded_instances":[]}]}`
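+			// First call: the model is listed but has no loaded instances, which should trigger a warmup.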
+			body := `{"models":[{"key":"` + testLMStudioModel + `","loaded_instances":[]}]}`
 			return &http.Response{
 				StatusCode: http.StatusOK,
 				Body:       ioNopCloser(strings.NewReader(body)),
 			}, nil
 		}
-		body := `{"models":[{"key":"qwen/qwen3.5-9b","loaded_instances":[{"id":"qwen/qwen3.5-9b","config":{"context_length":150000}}]}]}`
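+		// Subsequent calls: the instance is loaded and reports its configured context_length.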
+		body := `{"models":[{"key":"` + testLMStudioModel + `","loaded_instances":[{"id":"` + testLMStudioModel + `","config":{"context_length":150000}}]}]}`
 		return &http.Response{
 			StatusCode: http.StatusOK,
 			Body:       ioNopCloser(strings.NewReader(body)),
@@ -708,8 +727,8 @@ func TestResolveAutoContextLength_WarmupWhenModelNotLoaded(t *testing.T) {
 	var warmupCalls int
 	client.doRequest = func(_ context.Context, req openai.ChatCompletionRequest) (openai.ChatCompletionResponse, error) {
 		warmupCalls++
-		if req.Model != "qwen/qwen3.5-9b" {
-			t.Fatalf("warmup req.Model = %q, want qwen/qwen3.5-9b", req.Model)
+		if req.Model != testLMStudioModel {
+			t.Fatalf("warmup req.Model = %q, want %s", req.Model, testLMStudioModel)
 		}
 		return openai.ChatCompletionResponse{
 			Choices: []openai.ChatCompletionChoice{
@@ -744,10 +763,10 @@ func TestResolveAutoContextLength_ParsesModelsFieldAndKey(t *testing.T) {
 		_ = json.NewEncoder(w).Encode(map[string]any{
 			"models": []map[string]any{
 				{
-					"key": "qwen/qwen3.5-9b",
+					"key": testLMStudioModel,
 					"loaded_instances": []map[string]any{
 						{
-							"id": "qwen/qwen3.5-9b",
+							"id": testLMStudioModel,
 							"config": map[string]any{
 								"context_length": 150000,
 							},
@@ -759,7 +778,7 @@ func TestResolveAutoContextLength_ParsesModelsFieldAndKey(t *testing.T) {
 	}))
 	defer server.Close()
 
-	client := NewClient(server.URL+"/v1", "qwen/qwen3.5-9b", "", 0)
+	client := NewClient(server.URL+"/v1", testLMStudioModel, "", 0)
 	value, err := client.ResolveAutoContextLength(context.Background())
 	if err != nil {
 		t.Fatalf("ResolveAutoContextLength() error = %v", err)
@@ -770,7 +789,7 @@ func TestResolveAutoContextLength_ParsesModelsFieldAndKey(t *testing.T) {
 }
 
 func TestResolveAutoContextLength_UsesProbeFromError(t *testing.T) {
-	client := NewClient("http://localhost:1234/v1", "test-model", "", 0)
+	client := NewClient(testBaseURL, "test-model", "", 0)
 	client.doHTTP = func(_ *http.Request) (*http.Response, error) {
 		return &http.Response{
 			StatusCode: http.StatusNotFound,
@@ -791,7 +810,7 @@ func TestResolveAutoContextLength_UsesProbeFromError(t *testing.T) {
 }
 
 func TestResolveAutoContextLength_ReturnsErrorWhenUndetected(t *testing.T) {
-	client := NewClient("http://localhost:1234/v1", "test-model", "", 0)
+	client := NewClient(testBaseURL, "test-model", "", 0)
 	client.doHTTP = func(_ *http.Request) (*http.Response, error) {
 		return &http.Response{
 			StatusCode: http.StatusOK,