22from pathlib import Path
33import os
44
5- from .. src . openize .markitdown .converters import WordConverter , PDFConverter , ExcelConverter , PowerPointConverter
6- from .. src . openize .markitdown .factory import ConverterFactory
7- from .. src . openize .markitdown .llm_strategy import SaveLocally , LLMFactory , OpenAIClient , ClaudeClient
8- from .. src . openize .markitdown .processor import DocumentProcessor
5+ from openize .markitdown .converters import WordConverter , PDFConverter , ExcelConverter , PowerPointConverter
6+ from openize .markitdown .factory import ConverterFactory
7+ from openize .markitdown .llm_strategy import SaveLocally , LLMFactory , OpenAIClient , ClaudeClient , MistralClient , GeminiClient
8+ from openize .markitdown .processor import DocumentProcessor
99
1010
1111@pytest .fixture
@@ -60,14 +60,14 @@ def test_insert_into_llm_openai(mocker, sample_md_file):
6060 mocker .patch ("openai.ChatCompletion.create" , return_value = {
6161 "choices" : [{"message" : {"content" : "Mocked OpenAI Response" }}]
6262 })
63- strategy = OpenAIClient (provider = "openai" )
63+ strategy = OpenAIClient ()
6464 strategy .process (sample_md_file )
6565
def test_insert_into_llm_claude(mocker, sample_md_file):
    """ClaudeClient.process should route the markdown file through the mocked Anthropic client.

    Patches the Anthropic SDK entry point used by ``llm_strategy`` so no network
    call is made; the canned response stands in for a real completion.
    """
    mock_anthropic = mocker.patch("openize.markitdown.llm_strategy.Anthropic")
    mock_client = mock_anthropic.return_value
    mock_client.messages.create.return_value.content = "Mocked Claude Response"
    # Post-refactor constructor takes no ``provider`` argument (the old
    # ClaudeClient(provider="claude") form was removed in this change).
    strategy = ClaudeClient()
    strategy.process(sample_md_file)
7272
7373
@@ -76,7 +76,7 @@ def test_insert_into_llm_claude(mocker, sample_md_file):
def test_document_processor_local_conversion(mocker, sample_output_dir):
    """Converting a document with ``insert_into_llm=False`` writes ``<stem>.md`` to the output dir.

    The converter factory is patched so no real Word conversion backend is needed.
    """
    # Return value of mocker.patch was bound to an unused local in the original;
    # the patch itself is the only thing the test needs.
    mocker.patch(
        "openize.markitdown.factory.ConverterFactory.get_converter",
        return_value=WordConverter(),
    )
    processor = DocumentProcessor(output_dir=sample_output_dir)
    # NOTE(review): source had "test_input/ sample.docx" (space after the slash);
    # removed as an apparent typo — confirm against the test fixture layout.
    processor.process_document("test_input/sample.docx", insert_into_llm=False)
    output_file = sample_output_dir / "sample.md"
    assert output_file.exists()
8282
@@ -85,8 +85,8 @@ def test_document_processor_with_llm_openai(mocker, sample_output_dir):
8585 mocker .patch ("openai.ChatCompletion.create" , return_value = {
8686 "choices" : [{"message" : {"content" : "LLM Output" }}]
8787 })
88- processor = DocumentProcessor (output_dir = sample_output_dir )
89- processor .process_document ("sample.docx" , insert_into_llm = True , llm_provider = "openai" )
88+ processor = DocumentProcessor (output_dir = sample_output_dir , llm_client_name = "openai" )
89+ processor .process_document ("test_input/ sample.docx" , insert_into_llm = True )
9090 output_file = sample_output_dir / "sample.md"
9191 assert output_file .exists ()
9292
@@ -95,8 +95,44 @@ def test_document_processor_with_llm_claude(mocker, sample_output_dir):
9595 mock_anthropic = mocker .patch ("openize.markitdown.llm_strategy.Anthropic" )
9696 mock_client = mock_anthropic .return_value
9797 mock_client .messages .create .return_value .content = "LLM Claude Output"
98- processor = DocumentProcessor (output_dir = sample_output_dir )
99- processor .process_document ("sample.docx" , insert_into_llm = True , llm_provider = "claude" )
98+ processor = DocumentProcessor (output_dir = sample_output_dir , llm_client_name = "claude" )
99+ processor .process_document ("test_input/ sample.docx" , insert_into_llm = True )
100100 output_file = sample_output_dir / "sample.md"
101101 assert output_file .exists ()
102102
def test_insert_into_llm_gemini(mocker, sample_md_file):
    """GeminiClient.process should POST the markdown and parse the candidates payload.

    ``requests.post`` is stubbed with a canned Gemini-shaped JSON body and the
    required environment variables are injected, so no network access occurs.
    """
    fake_response = mocker.Mock()
    fake_response.raise_for_status.return_value = None
    fake_response.json.return_value = {
        "candidates": [
            {"content": {"parts": [{"text": "Mocked Gemini Response"}]}},
        ],
    }

    mocker.patch("requests.post", return_value=fake_response)
    mocker.patch.dict(
        os.environ,
        {"GEMINI_API_KEY": "dummy_key", "GEMINI_MODEL": "gemini-pro"},
    )

    client = GeminiClient()
    client.process(sample_md_file)
def test_insert_into_llm_mistral(mocker, sample_md_file):
    """MistralClient.process should POST the markdown and parse the choices payload.

    ``requests.post`` is stubbed with a canned Mistral-shaped JSON body and the
    required environment variables are injected, so no network access occurs.
    """
    fake_response = mocker.Mock()
    fake_response.raise_for_status.return_value = None
    fake_response.json.return_value = {
        "choices": [
            {"message": {"content": "Mocked Mistral Response"}},
        ],
    }

    mocker.patch("requests.post", return_value=fake_response)
    mocker.patch.dict(
        os.environ,
        {"MISTRAL_API_KEY": "dummy_key", "MISTRAL_MODEL": "mistral-medium"},
    )

    client = MistralClient()
    client.process(sample_md_file)
137+
138+
0 commit comments