Skip to content

Commit d341e55

Browse files
authored
Merge pull request #125 from vicsanity623/vicsanity623-patch-1
Vicsanity623 patch 1
2 parents a1e9f05 + 97b0e54 commit d341e55

3 files changed

Lines changed: 71 additions & 21 deletions

File tree

src/pyob/entrance.py

Lines changed: 29 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -492,22 +492,42 @@ def build_initial_analysis(self):
492492
proj_prompt, lambda t: len(t) > 5, context="Project Genesis"
493493
).strip()
494494
content = f"# Project Analysis\n\n**Project Summary:**\n{project_summary}\n\n---\n\n## 📂 File Directory\n\n"
495+
file_structures = {}
495496
for f_path in all_files:
496497
rel = os.path.relpath(f_path, self.target_dir)
497-
logger.info(f"Deep Symbolic Parsing: {rel}")
498498
with open(f_path, "r", encoding="utf-8", errors="ignore") as f:
499499
full_code = f.read()
500500
self.update_ledger_for_file(rel, full_code)
501-
structure_dropdowns = self.code_parser.generate_structure_dropdowns(
501+
file_structures[rel] = self.code_parser.generate_structure_dropdowns(
502502
f_path, full_code
503503
)
504-
sum_prompt = f"Provide a one-sentence plain text summary of what the file `{rel}` does. \n\nCRITICAL: Do NOT include any HTML tags, <details> blocks, or code signatures in your response. Just the sentence.\n\nFile Structure for context:\n{structure_dropdowns}"
505-
desc = self.llm_engine.get_valid_llm_response(
506-
sum_prompt, lambda t: "<details>" not in t and len(t) > 5, context=rel
507-
).strip()
508-
content += (
509-
f"### `{rel}`\n**Summary:** {desc}\n\n{structure_dropdowns}\n---\n"
510-
)
504+
505+
logger.info(f"Batching Deep Symbolic Parsing for {len(all_files)} files...")
506+
batch_prompt = "Provide a succinct one-sentence plain text summary for EACH of the following files based on their structure. CRITICAL: Output MUST be strictly in 'filepath: summary' format on each line. Do NOT include any HTML tags, markdown, <details> blocks, or code signatures.\n\n"
507+
for rel_path, struct in file_structures.items():
508+
batch_prompt += f"File `{rel_path}` Structure:\n{struct}\n\n"
509+
510+
batch_response = self.llm_engine.get_valid_llm_response(
511+
batch_prompt, lambda t: ":" in t and len(t) > 10, context="Batch Genesis"
512+
).strip()
513+
514+
summaries = {}
515+
for line in batch_response.splitlines():
516+
if ":" in line:
517+
path_part, summary_part = line.split(":", 1)
518+
path_clean = path_part.replace("`", "").replace("*", "").strip()
519+
summaries[path_clean] = summary_part.strip()
520+
521+
for f_path in all_files:
522+
rel = os.path.relpath(f_path, self.target_dir)
523+
structured_summary = summaries.get(rel, "No summary generated.")
524+
if structured_summary == "No summary generated.":
525+
for k, v in summaries.items():
526+
if k in rel or rel in k:
527+
structured_summary = v
528+
break
529+
content += f"### `{rel}`\n**Summary:** {structured_summary}\n\n{file_structures[rel]}\n---\n"
530+
511531
with open(self.analysis_path, "w", encoding="utf-8") as f:
512532
f.write(content)
513533
self.save_ledger()

src/pyob/get_valid_edit.py

Lines changed: 17 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,9 @@ def _fetch_llm_with_retries(
9090
self, prompt: str, display_name: str, attempts: int
9191
) -> tuple[str, int]:
9292
is_cloud = (
93-
os.environ.get("GITHUB_ACTIONS") == "true" or os.environ.get("CI") == "true"
93+
os.environ.get("GITHUB_ACTIONS") == "true"
94+
or os.environ.get("CI") == "true"
95+
or "GITHUB_RUN_ID" in os.environ
9496
)
9597
key_cooldowns = getattr(self, "key_cooldowns", {})
9698

@@ -128,15 +130,15 @@ def _fetch_llm_with_retries(
128130
prompt, key=None, context=display_name, gh_model=gh_model
129131
)
130132
else:
131-
logger.info("\n[Attempting Local Ollama]")
133+
logger.info("\n[All keys exhausted. Falling back to Local Ollama]")
132134
response = getattr(self, "_stream_single_llm")(
133135
prompt, key=None, context=display_name
134136
)
135137

136138
if response.startswith("ERROR_CODE_429"):
137139
if key:
138-
key_cooldowns[key] = time.time() + 60
139-
logger.warning("Key rate limited. Rotating instantly...")
140+
key_cooldowns[key] = time.time() + 120
141+
logger.warning("Key rate limited. Pivoting to next key...")
140142
attempts += 1
141143
continue
142144
elif "RateLimitReached" in response:
@@ -165,6 +167,17 @@ def _fetch_llm_with_retries(
165167
continue
166168

167169
if response.startswith("ERROR_CODE_") or not response.strip():
170+
if key and "429" not in (response or ""):
171+
key_cooldowns[key] = time.time() + 10
172+
173+
if available_keys:
174+
logger.warning(
175+
f"Engine failed with error: {str(response)[:60]}... Rotating..."
176+
)
177+
attempts += 1
178+
time.sleep(2)
179+
continue
180+
168181
logger.warning("API Error or Empty Response. Sleeping 60s...")
169182
time.sleep(60)
170183
attempts += 1

src/pyob/models.py

Lines changed: 25 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -259,19 +259,18 @@ def get_valid_llm_response_engine(
259259
or os.environ.get("CI") == "true"
260260
or "GITHUB_RUN_ID" in os.environ
261261
)
262-
all_keys = list(key_cooldowns.keys())
263-
264262
while True:
265263
key = None
266264
now = time.time()
267265
# available_keys are Gemini keys
268-
available_keys = [k for k in all_keys if now > key_cooldowns[k]]
266+
gemini_keys = [k for k in list(key_cooldowns.keys()) if "github" not in k]
267+
available_keys = [k for k in gemini_keys if now > key_cooldowns[k]]
269268
response_text = None
270269

271270
if available_keys:
272271
key = available_keys[attempts % len(available_keys)]
273272
logger.info(
274-
f"Attempting Gemini Key {attempts % len(available_keys) + 1}/{len(available_keys)}"
273+
f"Attempting Gemini Key {attempts % len(available_keys) + 1}/{len(gemini_keys)}"
275274
)
276275
response_text = stream_single_llm(prompt, key=key, context=context)
277276
elif is_cloud:
@@ -290,15 +289,18 @@ def get_valid_llm_response_engine(
290289
prompt, key=None, context=context, gh_model="Llama-3"
291290
)
292291
else:
293-
logger.info(" Using Local Ollama Engine...")
292+
logger.info(
293+
" All Gemini keys exhausted. Falling back to Local Ollama Engine..."
294+
)
294295
response_text = stream_single_llm(prompt, key=None, context=context)
295296

296297
# --- ERROR HANDLING BLOCK ---
297298
if not response_text or response_text.startswith("ERROR_CODE_"):
298299
# 1. Handle Gemini 429 (Minute limits)
299300
if key and response_text and "429" in response_text:
300-
key_cooldowns[key] = time.time() + 60
301-
logger.warning(f"Key {key[-4:]} rate-limited. Rotating...")
301+
key_cooldowns[key] = time.time() + 120
302+
logger.warning(f"Key {key[-4:]} rate-limited. Pivoting to next key...")
303+
attempts += 1
302304
# Immediate pivot attempt for this loop
303305
if is_cloud:
304306
logger.warning(
@@ -307,6 +309,8 @@ def get_valid_llm_response_engine(
307309
response_text = stream_single_llm(
308310
prompt, key=None, context=context, gh_model="Llama-3"
309311
)
312+
else:
313+
continue
310314

311315
# 2. Handle GitHub 429 (Daily Quota Limits)
312316
if (
@@ -345,8 +349,21 @@ def get_valid_llm_response_engine(
345349
prompt, key=None, context=context, gh_model="Phi-4"
346350
)
347351

348-
# 4. Final Fail-Safe Sleep
352+
# 4. Final Catch-All / Fail-Safe Sleep
349353
if not response_text or response_text.startswith("ERROR_CODE_"):
354+
if key and "429" not in (response_text or ""):
355+
key_cooldowns[key] = (
356+
time.time() + 10
357+
) # Short cooldown for unknown errors prevent tight loops
358+
359+
if available_keys:
360+
logger.warning(
361+
f"Engine failed with error: {str(response_text)[:60]}... Rotating..."
362+
)
363+
attempts += 1
364+
time.sleep(2)
365+
continue
366+
350367
wait = 90
351368
logger.warning(
352369
f"All Engines failed or exhausted. Sleeping {wait}s for refill..."

0 commit comments

Comments (0)