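"""Streamlit UI for the PA Readiness Copilot.

Deterministic administrative readiness review of synthetic prior-authorization
requests against versioned payer rules: no clinical judgment, no approval
prediction, no autonomous action.
"""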
from __future__ import annotations
import json
from pathlib import Path
import streamlit as st
from engine.config import load_app_config
from engine.demo_cases import expected_overall_status_for_demo_case, featured_demo_cases
from engine.rendering import export_evaluation_payload
from engine.schemas import EvaluationResult, PARequest
from engine.service import ReadinessService, ServiceError
from engine.test_suites import run_cases
BASE_DIR = Path(__file__).resolve().parent
st.set_page_config(page_title="PA Readiness Copilot", layout="wide")
st.markdown(
    """
    <style>
    :root {
        --ink: #1f2937;
        --muted: #5f6b7a;
        --line: #d8dee6;
        --panel: #f5f8fb;
        --ready: #166534;
        --warn: #9a3412;
        --stop: #991b1b;
        --info: #1d4ed8;
    }
    .hero {
        border: 1px solid var(--line);
        border-radius: 18px;
        padding: 1.2rem 1.25rem;
        background: linear-gradient(135deg, #f9fbfd 0%, #eef4f8 100%);
        margin-bottom: 1rem;
    }
    .hero h1 {
        margin: 0 0 0.35rem 0;
        color: var(--ink);
        font-size: 2rem;
    }
    .hero p {
        margin: 0.2rem 0;
        color: var(--muted);
    }
    .status-panel {
        border-radius: 16px;
        border: 1px solid var(--line);
        padding: 1rem 1.1rem;
        margin-bottom: 1rem;
    }
    .status-ready {
        background: #f1f8f2;
        border-left: 6px solid var(--ready);
    }
    .status-not-ready {
        background: #fff5ef;
        border-left: 6px solid var(--warn);
    }
    .status-cannot-determine {
        background: #fff4f4;
        border-left: 6px solid var(--stop);
    }
    .status-unknown {
        background: #f4f8ff;
        border-left: 6px solid var(--info);
    }
    .scope-panel {
        border-radius: 16px;
        border: 1px solid var(--line);
        background: var(--panel);
        padding: 1rem 1.1rem;
    }
    .eyebrow {
        font-size: 0.85rem;
        text-transform: uppercase;
        letter-spacing: 0.08em;
        color: var(--muted);
        margin-bottom: 0.3rem;
    }
    </style>
    """,
    unsafe_allow_html=True,
)
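
# Module-level cached handles: the readiness service (and its config) is built
# once per process; the synthetic regression-suite status is cached for five
# minutes (ttl=300).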
@st.cache_resource
def get_service() -> ReadinessService:
    return ReadinessService(load_app_config(BASE_DIR))


service = get_service()
config = service.config


@st.cache_data(ttl=300)
def get_synthetic_eval_status() -> tuple[int, int, list[dict]]:
    rows = run_cases(str(config.rules_path), str(config.synthetic_cases_path))
    passed = sum(1 for row in rows if row.get("pass") == "✅")
    return passed, len(rows), rows
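
# Session-state helpers: load a demo case into the widgets and build the
# PARequest that the readiness service evaluates.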
def load_case_into_session(case: dict) -> None:
    st.session_state["selected_demo_case_id"] = case["id"]
    st.session_state["payer"] = case["payer"]
    st.session_state["procedure_code"] = case["procedure_code"]
    st.session_state["dx_codes"] = ", ".join(case.get("dx_codes", []))
    st.session_state["site_of_care"] = case.get("site_of_care", "outpatient")
    st.session_state["specialty"] = case.get("specialty", "unknown")
    st.session_state["note_text"] = case.get("note_text", "")


def current_request() -> PARequest:
    dx_codes = [item.strip() for item in st.session_state.get("dx_codes", "").split(",") if item.strip()]
    return PARequest(
        payer=st.session_state["payer"],
        procedure_code=st.session_state["procedure_code"],
        dx_codes=dx_codes,
        site_of_care=st.session_state["site_of_care"],
        specialty=st.session_state["specialty"],
        note_text=st.session_state["note_text"],
    )
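
# Rendering helpers: overall-status banner, per-requirement reasoning cards,
# extracted-fact cards, and the static scope disclaimer.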
def status_panel(evaluation: EvaluationResult) -> None:
    status = evaluation.overall_status
    klass = {
        "READY": "status-ready",
        "NOT_READY": "status-not-ready",
        "CANNOT_DETERMINE": "status-cannot-determine",
    }.get(status, "status-unknown")
    summaries = {
        "READY": (
            "Administratively ready under the current versioned demo rules.",
            "All required elements were explicitly documented and met threshold.",
        ),
        "NOT_READY": (
            "Not ready to submit under the current versioned demo rules.",
            "At least one required element was documented but failed threshold.",
        ),
        "CANNOT_DETERMINE": (
            "Readiness cannot be determined from the documentation provided.",
            "At least one required element was missing or not explicit enough for deterministic extraction.",
        ),
    }
    headline, detail = summaries.get(
        status,
        ("Status unavailable.", "Unexpected status returned by the deterministic workflow."),
    )
    st.markdown(
        f"""
        <div class="status-panel {klass}">
            <div class="eyebrow">Decision</div>
            <strong>{status}</strong><br/>
            <span>{headline}</span><br/>
            <span>{detail}</span>
        </div>
        """,
        unsafe_allow_html=True,
    )
def render_requirement_result(result) -> None:
    default_open = result.status != "MET"
    icon = {"MET": "✅", "NOT_MET": "⚠️", "NOT_DOCUMENTED": "❌"}.get(result.status, "❓")
    with st.expander(f"{icon} {result.label}", expanded=default_open):
        c1, c2 = st.columns([1.2, 2])
        with c1:
            st.metric("Status", result.status)
        with c2:
            st.write(result.reason)
            if result.evidence:
                st.info(f"What the rule expects: {result.evidence}")
        if result.evidence_snippets:
            st.markdown("**Evidence found in the note**")
            for snippet in result.evidence_snippets[:5]:
                st.code(snippet, language="text")
        else:
            st.caption("No supporting snippet was captured for this requirement.")
        if result.evidence_spans:
            refs = [f"{span.start}-{span.end}" for span in result.evidence_spans[:5]]
            st.caption(f"Normalized evidence references: {', '.join(refs)}")
def render_fact_card(label: str, value: object, status: str) -> None:
    if value is None:
        display = "Missing from note"
    elif isinstance(value, bool):
        display = "Documented" if value else "Explicitly denied or absent"
    elif label.endswith("(weeks)"):
        display = f"{value} weeks"
    else:
        display = str(value)
    st.markdown(f"**{label}**")
    st.write(display)
    if status == "NOT_DOCUMENTED":
        st.caption("Missing or not explicit enough for deterministic extraction.")
    elif status == "NOT_MET":
        st.caption("Documented, but below the current rule threshold.")
    else:
        st.caption("Captured and used in deterministic evaluation.")


def render_scope_panel() -> None:
    st.markdown(
        """
        <div class="scope-panel">
            <div class="eyebrow">Scope</div>
            <strong>This product checks administrative readiness only.</strong>
            <p>It does not make clinical judgments, predict approval, review medical necessity, or take autonomous action.</p>
            <p>Synthetic demo inputs only. Human review still sits before any real submission workflow.</p>
        </div>
        """,
        unsafe_allow_html=True,
    )
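
# Registry data derived from the service: supported procedures grouped by
# payer, plus flat rows for the "Supported Procedure Registry" table.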
supported_procedures = service.list_supported_procedures()
payers = sorted({item.payer for item in supported_procedures})
procedures_by_payer = {payer: [item for item in supported_procedures if item.payer == payer] for payer in payers}

registry_rows = [
    {
        "payer": procedure.payer,
        "procedure_code": procedure.procedure_code,
        "display_name": procedure.display_name,
        "category": procedure.metadata.category,
        "rule_family": procedure.metadata.rule_family,
        "trust": procedure.policy_trust_level.upper(),
        "drift_monitored": "Yes" if procedure.monitored_for_drift else "No",
        "rule_source": procedure.provenance.rule_source_label or procedure.provenance.source_name or "n/a",
        "last_rule_update": procedure.metadata.last_rule_update or "n/a",
        "last_reviewed": procedure.provenance.last_reviewed or "n/a",
    }
    for procedure in supported_procedures
]
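
# Seed st.session_state so widgets and demo-case loading have stable defaults.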
if "last_eval_payload" not in st.session_state:
    st.session_state["last_eval_payload"] = None
if "letter_text" not in st.session_state:
    st.session_state["letter_text"] = ""
if "letter_meta" not in st.session_state:
    st.session_state["letter_meta"] = {}
if "ack_policy_drift" not in st.session_state:
    st.session_state["ack_policy_drift"] = False
if "selected_demo_case_id" not in st.session_state:
    st.session_state["selected_demo_case_id"] = None
if "payer" not in st.session_state:
    st.session_state["payer"] = payers[0]
if "procedure_code" not in st.session_state:
    st.session_state["procedure_code"] = procedures_by_payer[st.session_state["payer"]][0].procedure_code
if "dx_codes" not in st.session_state:
    st.session_state["dx_codes"] = ""
if "site_of_care" not in st.session_state:
    st.session_state["site_of_care"] = config.allowed_sites[0]
if "specialty" not in st.session_state:
    st.session_state["specialty"] = ""
if "note_text" not in st.session_state:
    st.session_state["note_text"] = ""
st.markdown(
    """
    <div class="hero">
        <h1>Prior Authorization Readiness Copilot</h1>
        <p>Deterministic administrative readiness review for versioned payer rules and synthetic demo cases.</p>
        <p>Narrow, explainable, auditable behavior. No clinical judgment. No approval prediction. No autonomous action.</p>
    </div>
    """,
    unsafe_allow_html=True,
)

hero_cols = st.columns(3)
with hero_cols[0]:
    st.metric("Supported procedures", len(supported_procedures))
with hero_cols[1]:
    st.metric("Monitored policy sources", len(service.policy_sources))
with hero_cols[2]:
    st.metric("Synthetic demo cases", len(service.demo_cases))

render_scope_panel()
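
# Sidebar: quality gates (synthetic eval suite), supported scope, and the
# governance monitors for policy drift and rulebook releases. The gate flags
# computed here (tests_healthy, policy_gate_block) disable evaluation below.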
with st.sidebar:
    st.header("Quality Gates")
    try:
        passed, total, synthetic_rows = get_synthetic_eval_status()
        if passed == total:
            st.success(f"Synthetic eval suite: {passed}/{total}")
        else:
            st.error(f"Synthetic eval suite: {passed}/{total}")
        st.caption("Coarse fixture-label regression. Exact output shapes are protected separately by acceptance snapshots.")
        with st.expander("View synthetic evaluation details"):
            failures = [row for row in synthetic_rows if row.get("pass") != "✅"]
            if failures:
                for failure in failures:
                    st.write(
                        f"- {failure['id']}: expected `{failure['expected']}`, got `{failure['predicted']}` ({failure['overall_status']})"
                    )
            else:
                st.write("All bundled synthetic cases matched expected labels.")
    except Exception as exc:  # pragma: no cover - defensive UI path
        passed, total = 0, 0
        synthetic_rows = []
        st.error(f"Synthetic eval suite unavailable: {exc}")
    tests_healthy = bool(total and passed == total)

    st.header("Supported Scope")
    for procedure in supported_procedures:
        monitored = "Yes" if procedure.monitored_for_drift else "No"
        st.caption(
            f"{procedure.payer} | {procedure.procedure_code} | {procedure.metadata.category} | "
            f"trust={procedure.policy_trust_level} | drift monitored={monitored}"
        )

    drift_report = service.get_drift_status()
    rulebook_status = service.get_rulebook_status()

    st.subheader("Governance Monitor")
    st.caption("Configured monitored sources only. Drift detection is governance-only and never changes rules automatically.")
    st.dataframe([source.model_dump(mode="json") for source in drift_report.sources], width="stretch")
    if drift_report.any_review_required:
        st.warning(
            "One or more monitored sources require governance review because a policy diff was detected "
            "or the monitoring baseline is stale or missing."
        )
        st.session_state["ack_policy_drift"] = st.checkbox(
            "I acknowledge governance issues may make related demo outputs stale.",
            value=st.session_state["ack_policy_drift"],
        )
    else:
        st.success("No monitored-source drift or stale/missing baselines currently require review.")
        st.session_state["ack_policy_drift"] = True
    if drift_report.stale_source_count:
        st.warning(
            f"{drift_report.stale_source_count} monitored source(s) are stale relative to the configured check frequency. "
            "This is a governance signal only; it does not auto-change rules."
        )
    policy_gate_block = drift_report.any_review_required and not st.session_state["ack_policy_drift"]
    if not tests_healthy:
        st.error("Evaluation is gated because the bundled synthetic regression suite is not fully green.")

    st.subheader("Rulebook Governance")
    st.caption("Versioned reviewed and active snapshots make rule promotion inspectable. Monitoring never promotes rules automatically.")
    rulebook_rows = [
        {
            "release_id": release.release_id,
            "stage": release.stage or "unassigned",
            "rules_version": release.rules_version or "n/a",
            "procedures": len(release.procedures),
            "reviewed_at": release.reviewed_at or "n/a",
            "runtime_match": "Yes" if release.runtime_matches else ("No" if release.runtime_matches is False else "n/a"),
        }
        for release in rulebook_status.releases
    ]
    st.dataframe(rulebook_rows, width="stretch")
    if rulebook_status.validation_errors:
        st.error("Rulebook validation errors detected.")
        for item in rulebook_status.validation_errors:
            st.write(f"- {item}")
    else:
        st.success(f"Active rulebook release: {rulebook_status.active_release_id or 'n/a'}")
    with st.expander("Promotion workflow", expanded=False):
        st.write("- Draft: candidate snapshot awaiting human review.")
        st.write("- Reviewed: validated snapshot kept for comparison and audit.")
        st.write("- Active: runtime rulebook intentionally promoted by a human. Drift monitoring never auto-promotes.")
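
# Main page: supported-procedure registry and featured demo cases. Loading a
# demo case also triggers an evaluation when both gates are clear.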
st.subheader("Supported Procedure Registry")
st.caption("Compact view of the current deterministic scope, rule family, provenance label, and drift coverage.")
st.dataframe(registry_rows, width="stretch")

featured_cases = featured_demo_cases(config)
st.subheader("Featured Demo Cases")
st.caption("Seeded examples for live demos. Each one remains fully editable after loading.")
showcase_submitted = False
showcase_message = None
for start in range(0, len(featured_cases), 2):
    cols = st.columns(2)
    for col, case in zip(cols, featured_cases[start : start + 2]):
        with col:
            st.markdown(f"#### {case.showcase.get('title', case.id)}")
            st.write(case.showcase.get("description", "Synthetic demo case."))
            expected_status = expected_overall_status_for_demo_case(case)
            if expected_status:
                st.caption(f"Expected overall status: {expected_status}")
            elif case.expected_label:
                st.caption(f"Fixture label: {case.expected_label}")
            if case.showcase.get("scenario_type"):
                st.caption(f"Scenario: {case.showcase.get('scenario_type')}")
            if case.showcase.get("tags"):
                st.caption(f"Tags: {', '.join(case.showcase.get('tags', []))}")
            st.caption(case.showcase.get("why_interesting", ""))
            if st.button("Load Demo Case", key=f"case_{case.id}", width="stretch"):
                load_case_into_session(case.model_dump(mode="json"))
                showcase_submitted = tests_healthy and not policy_gate_block
                if showcase_submitted:
                    showcase_message = (
                        "success",
                        f'Loaded "{case.showcase.get("title", case.id)}" and ran the evaluation.',
                    )
                elif policy_gate_block:
                    showcase_message = (
                        "info",
                        f'Loaded "{case.showcase.get("title", case.id)}". Acknowledge the drift gate to run it.',
                    )
                else:
                    showcase_message = (
                        "info",
                        f'Loaded "{case.showcase.get("title", case.id)}". Resolve the synthetic eval gate to run it.',
                    )
if showcase_message:
    getattr(st, showcase_message[0])(showcase_message[1])
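
# Evaluation form (left column) and the current rule summary (right column).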
st.subheader("Evaluate Request")
left, right = st.columns([1.5, 1])
with left:
    with st.form("evaluation_form", clear_on_submit=False):
        payer = st.selectbox("Payer", options=payers, key="payer")
        procedure_options = procedures_by_payer[payer]
        default_index = next(
            (index for index, item in enumerate(procedure_options) if item.procedure_code == st.session_state.get("procedure_code")),
            0,
        )
        selected_procedure = st.selectbox(
            "Procedure",
            options=procedure_options,
            index=default_index,
            format_func=lambda item: f"{item.procedure_code} | {item.display_name}",
            key="procedure_selectbox",
        )
        st.session_state["procedure_code"] = selected_procedure.procedure_code
        st.text_input("Diagnosis codes (comma-separated)", key="dx_codes", placeholder="e.g., M54.16, G47.33")
        st.selectbox("Site of care", options=config.allowed_sites, key="site_of_care")
        st.text_input("Ordering specialty", key="specialty", placeholder="e.g., Orthopedics")
        st.text_area(
            "Synthetic note text",
            key="note_text",
            height=220,
            placeholder="Paste or edit a synthetic note here.",
        )
        submitted = st.form_submit_button(
            "Run deterministic readiness review",
            disabled=(not tests_healthy) or policy_gate_block,
        )
with right:
    current_supported = service.get_supported_procedure(
        st.session_state["payer"],
        st.session_state["procedure_code"],
    )
    st.markdown("#### Current Rule Summary")
    st.caption(f"Category: {current_supported.metadata.category}")
    st.caption(f"Rule family: {current_supported.metadata.rule_family}")
    st.caption(f"Trust level: {current_supported.policy_trust_level.upper()}")
    st.caption(f"Rule source: {current_supported.provenance.rule_source_label or current_supported.provenance.source_name or 'n/a'}")
    st.caption(
        f"Rule last updated: {current_supported.metadata.last_rule_update or 'n/a'} | "
        f"Last reviewed: {current_supported.provenance.last_reviewed or 'n/a'}"
    )
    st.caption(
        f"Drift monitoring: {'Configured' if current_supported.monitored_for_drift else 'Not configured for this procedure'}"
    )
    if current_supported.monitored_for_drift:
        st.caption(
            f"Monitored source: {current_supported.provenance.monitored_source_name or current_supported.provenance.monitored_source_id}"
        )
    for requirement in current_supported.requirements:
        requirement_line = f"- {requirement.label} ({requirement.type})"
        if requirement.min is not None:
            requirement_line += f" | min={requirement.min:g}"
        if requirement.allowed:
            requirement_line += f" | allowed={', '.join(requirement.allowed)}"
        st.write(requirement_line)
    if current_supported.metadata.notes:
        with st.expander("Rule notes", expanded=False):
            for note in current_supported.metadata.notes:
                st.write(f"- {note}")
    with st.expander("Scope and limitations", expanded=False):
        st.write("- Synthetic demo inputs only")
        st.write("- Deterministic rule evaluation only")
        st.write("- No approval prediction")
        st.write("- No medical-necessity or clinical recommendation logic")
        st.write("- Human review remains required before any real submission")
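
# Run the deterministic evaluation when the form was submitted or a demo case
# was loaded with both gates clear; any prior letter draft is discarded first.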
should_run = submitted or showcase_submitted
if should_run:
    st.session_state["letter_text"] = ""
    st.session_state["letter_meta"] = {}
    try:
        evaluation = service.evaluate(current_request())
        st.session_state["last_eval_payload"] = evaluation.model_dump(mode="json")
    except ServiceError as exc:
        st.error(str(exc))
        st.session_state["last_eval_payload"] = None
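
# Results: status panel, blockers, requirement-level reasoning, extracted facts
# with evidence spans, and the audit/export tab (letters and JSON artifact).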
st.subheader("Results")
if not st.session_state["last_eval_payload"]:
    st.info("Run a demo case or submit synthetic input to inspect deterministic readiness results.")
else:
    evaluation = EvaluationResult.model_validate(st.session_state["last_eval_payload"])
    status_panel(evaluation)
    if evaluation.policy_trust_level != "verified":
        st.warning(
            "This procedure currently uses DEMO trust. "
            "The rule logic is still deterministic, but provenance remains curated for demonstration."
        )
    if evaluation.warnings:
        with st.expander("Evaluation warnings", expanded=True):
            for warning in evaluation.warnings:
                st.write(f"- {warning}")
    metric_cols = st.columns(4)
    with metric_cols[0]:
        st.metric("Overall status", evaluation.overall_status)
    with metric_cols[1]:
        st.metric("Readiness score", f"{evaluation.readiness_score}/100")
    with metric_cols[2]:
        st.metric("Missing requirements", len(evaluation.blockers.not_documented))
    with metric_cols[3]:
        st.metric("Documented failures", len(evaluation.blockers.not_met))

    tabs = st.tabs(["Overview", "Requirement Reasoning", "Facts and Evidence", "Audit and Export"])
    with tabs[0]:
        st.markdown("#### Blockers")
        if not evaluation.blockers.not_documented and not evaluation.blockers.not_met:
            st.success("No blockers detected under the current rules.")
        else:
            if evaluation.blockers.not_documented:
                st.markdown("**Missing documentation**")
                for blocker in evaluation.blockers.not_documented:
                    st.write(f"- {blocker.label}: {blocker.reason}")
            if evaluation.blockers.not_met:
                st.markdown("**Documented but below threshold**")
                for blocker in evaluation.blockers.not_met:
                    st.write(f"- {blocker.label}: {blocker.reason}")
        st.markdown("#### Procedure metadata")
        rule_source_label = (
            evaluation.supported_procedure.provenance.rule_source_label
            or evaluation.supported_procedure.provenance.source_name
            or "n/a"
        )
        monitored_source_label = (
            evaluation.supported_procedure.provenance.monitored_source_name
            or evaluation.supported_procedure.provenance.monitored_source_id
            or "n/a"
        )
        st.write(f"- Payer: {evaluation.request.payer}")
        st.write(f"- Procedure: {evaluation.request.procedure_code} ({evaluation.supported_procedure.display_name})")
        st.write(f"- Category: {evaluation.supported_procedure.metadata.category}")
        st.write(f"- Rule family: {evaluation.supported_procedure.metadata.rule_family}")
        st.write(f"- Site of care: {evaluation.request.site_of_care}")
        st.write(f"- Specialty: {evaluation.request.specialty}")
        st.write(f"- Policy trust level: {evaluation.policy_trust_level.upper()}")
        st.write(f"- Required field keys: {', '.join(evaluation.supported_procedure.required_field_keys)}")
        st.write(f"- Rule source: {rule_source_label}")
        st.write(f"- Last rule update: {evaluation.supported_procedure.metadata.last_rule_update or 'n/a'}")
        st.write(f"- Last reviewed: {evaluation.supported_procedure.provenance.last_reviewed or 'n/a'}")
        if evaluation.supported_procedure.monitored_for_drift:
            st.write(f"- Monitored source: {monitored_source_label}")
    with tabs[1]:
        st.caption("Requirement-level reasoning stays deterministic and traceable.")
        for result in evaluation.results:
            render_requirement_result(result)
    with tabs[2]:
        st.markdown("#### Extracted facts")
        for start in range(0, len(evaluation.results), 2):
            cols = st.columns(2)
            for col, result in zip(cols, evaluation.results[start : start + 2]):
                with col:
                    render_fact_card(result.label, evaluation.facts.get(result.key), result.status)
        st.markdown("#### Evidence map")
        for result in evaluation.results:
            with st.expander(result.label, expanded=False):
                spans = evaluation.evidence_map.get(result.key, [])
                if spans:
                    for span in spans:
                        st.code(span.text, language="text")
                        st.caption(f"Character offsets: {span.start}-{span.end}")
                else:
                    st.caption("No explicit evidence span was captured for this requirement.")
    with tabs[3]:
        st.markdown("#### Audit summary")
        audit_cols = st.columns(4)
        with audit_cols[0]:
            st.metric("Run ID", evaluation.audit_trail.run_id[:8])
        with audit_cols[1]:
            st.metric("Note hash", evaluation.audit_trail.note_hash)
        with audit_cols[2]:
            st.metric("Rules version", evaluation.audit_trail.rules_version or "n/a")
        with audit_cols[3]:
            st.metric("Submission ready", "YES" if evaluation.submission_readiness else "NO")
        if evaluation.audit_trail.invariant_errors:
            st.error("Invariant checks require review before trusting this run.")
            for item in evaluation.audit_trail.invariant_errors:
                st.write(f"- {item}")
        with st.expander("Structured provenance", expanded=False):
            st.json(evaluation.provenance)
        letter_type = st.selectbox(
            "Letter type",
            options=["submission_cover_letter", "missing_info_request", "appeal_template"],
            key="letter_type",
        )
        letter_cols = st.columns([1, 1])
        with letter_cols[0]:
            if st.button("Generate deterministic letter", width="stretch"):
                letter_text, letter_meta = service.generate_letter(evaluation, letter_type=letter_type)
                st.session_state["letter_text"] = letter_text
                st.session_state["letter_meta"] = letter_meta
        with letter_cols[1]:
            if st.button("Clear letter", width="stretch"):
                st.session_state["letter_text"] = ""
                st.session_state["letter_meta"] = {}
        if st.session_state["letter_text"]:
            st.markdown("**Letter draft**")
            st.text_area("Deterministic administrative letter", value=st.session_state["letter_text"], height=300)
            st.json(st.session_state["letter_meta"])
        export_payload = export_evaluation_payload(
            evaluation,
            letter_text=st.session_state.get("letter_text") or None,
            letter_meta=st.session_state.get("letter_meta") or None,
        )
        st.download_button(
            "Download JSON artifact",
            data=json.dumps(export_payload, indent=2, sort_keys=True),
            file_name=f"{evaluation.request.payer.lower()}_{evaluation.request.procedure_code.lower()}_{evaluation.audit_trail.run_id[:8]}.json",
            mime="application/json",
        )
        with st.expander("Raw evaluation payload", expanded=False):
            st.json(export_payload)