-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathverifier.py
More file actions
6937 lines (5828 loc) · 277 KB
/
verifier.py
File metadata and controls
6937 lines (5828 loc) · 277 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# Generative Logic: A deterministic reasoning and knowledge generation engine.
# Copyright (C) 2025-2026 Generative Logic UG (haftungsbeschränkt)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# ------------------------------------------------------------------------------
#
# This software is also available under a commercial license. For details,
# see: https://generative-logic.com/license
#
# Contributions to this project must be made under the terms of the
# Contributor License Agreement (CLA). See the project's CONTRIBUTING.md file.
import argparse
import os
import re
import sys
import itertools
from dataclasses import dataclass, field
from typing import Dict, Iterator, List, Set, Tuple, Optional
# ---------------------------------------------------------------------------
# Data structures
# ---------------------------------------------------------------------------
@dataclass
class ProofLine:
    """@brief One tab-separated row of a processed-proof-graph chapter file.

    @details
    Wraps the byte-accurate row layout emitted by
    ``process_proof_graphs.py`` into a structured form that every
    per-tag checker consumes:

    ``<expression> TAB <namespace> TAB <tag> TAB <rest[0]> TAB <rest[1]> TAB ...``

    Field semantics:
    - ``expression``: the row's LEFT-hand side — the derived fact the
      row claims. For most tags this is an MPL expression (e.g.
      ``(in[v1,N])``, ``(>[v1](in[v1,N])(in[(s[v1]),N]))``); for
      ``or branch proven`` and ``or disintegration`` it is the
      compiled OR ``(or<N>[…])`` itself.
    - ``namespace``: the validity scope at which the fact holds.
      ``"main"`` is the theorem root; descendants encode OR-branch
      and recursion scopes via the ``_boundary_`` suffix grammar
      (see ``docs/20_core_concepts/04_validity_stack.md``).
    - ``tag``: the proof-tag dispatch key. Maps to one of the 30
      entries in ``TAG_CHECKERS``. A missing tag becomes
      ``"<malformed>"`` so the dispatcher records a recognisable
      failure instead of crashing on a KeyError.
    - ``rest``: origin/justification fields, stored as a FLAT list but
      consumed by checkers in alternating ``(source_expr, source_ns)``
      PAIRS. A well-formed row always has even ``len(rest)``; odd
      length is itself a malformation many checkers reject up front
      (e.g. ``equality1`` requires ``len(rest) >= 4``).
    - ``raw``: the unparsed text line (kept for diagnostics so the
      report can echo the source on structural failures).
    - ``line_no``: 1-based row index inside the chapter file (matches
      how editors display the row, not 0-based).

    @invariant I-16 — verifier is sacred. Every ``ProofLine`` is
    treated as opaque input data; no field is mutated after
    construction. Tests that need a modified row produce a fresh
    instance via ``make_proof_line`` in the test harness.

    @see verify_chapter — chapter-level dispatcher that walks every
    ProofLine in a chapter and routes it via ``TAG_CHECKERS``.
    @see parse_chapter_file — on-disk reader that produces lists of
    ProofLine.
    """
    # LEFT-hand side of the row: the derived fact this row claims.
    expression: str
    # Validity scope the fact holds at ("main" = theorem root).
    namespace: str
    # Dispatch key into TAG_CHECKERS; "<malformed>" when the field was absent.
    tag: str
    # Origin/justification fields; flat list, consumed in (expr, ns) pairs.
    rest: List[str]
    # Unparsed original line, kept for diagnostics only.
    raw: str
    # 1-based row index inside the chapter file (editor-style numbering).
    line_no: int
@dataclass
class TagCounter:
    """@brief Per-tag success/failure tally surfaced in the final report.

    @details
    Every dispatched check — per-tag in ``TAG_CHECKERS`` plus every
    chapter-level meta counter (``self-reference``,
    ``anchor handling uniqueness``, ``anchor handling trace``,
    ``contradiction trace``, ``vacuous truth trace``, ``origin``,
    ``definition set consistency``, ``origin chain termination``) —
    aggregates its outcomes into one of these counters via
    ``state.counter_for(name).record(passed)``.

    ``print_report`` walks the registry in declaration order and emits
    one ``<tag-name> success M, failure N`` row per counter; a clean
    release shows ``failure 0`` everywhere (the airtight criterion,
    I-16). Counters are append-only within a run — no API resets one
    mid-run.
    """
    success: int = 0
    failure: int = 0

    def record(self, passed: bool):
        """@brief Bump one tally by the outcome of a single check.

        @details
        ``True`` increments ``success``; ``False`` increments
        ``failure`` — and a failure is, per I-16, never a false
        positive: it always flags a producer-side regression.

        @param passed True iff the checker accepted the row.
        """
        # Select the target tally by name, then bump it in place.
        bucket = "success" if passed else "failure"
        setattr(self, bucket, getattr(self, bucket) + 1)
@dataclass
class VerifierState:
    """@brief Global state for one verification run, threaded into every checker.

    @details
    Constructed once by ``run_verifier(base_dir)`` and passed as the
    third argument of every ``check_*`` function (after the row and
    the chapter's row list). Aggregates four categories of data:

    1. **Counters.** ``tag_counters`` (per-tag) plus the standalone
       ``goal_reached`` counter for ``check_theorem_goal_reached``.
       These are the only mutable fields touched by checkers.
    2. **Theorem registries.** ``global_theorems`` (dict keyed by
       expression, for fast membership) and ``global_theorem_list``
       (ordered, used by ``build_chapter_theorem_map`` to assign
       chapter files to theorems in registry order). ``external_theorems``
       holds expressions from ``external_theorems.txt`` (raw + renamed
       + mirror forms) consulted by ``check_externally_provided_theorem``
       and the ``origin`` meta-check.
    3. **Operator metadata** loaded from ``ConfigVisu.json``:
       ``output_indices`` (atomic op → 0-based output arg position),
       ``input_indices`` (atomic op → list of 0-based input positions),
       ``definition_sets`` (atomic op → ``{pos_str → [type, combinable]}``).
       Consumed by the mirror, reformulation, recursion, and
       anchor-handling checkers.
    4. **GL binaries** loaded from
       ``files/GL_binaries/GL_binary_*.json``: ``gl_binaries`` is
       ``tag → operator-dict``. Each binary holds per-batch spontaneous
       compact-operator allocations (``existence4``, ``or0``,
       ``implication26``, …) with shape
       ``{category, elements, signature, arity, definedSet}``. D-41's
       per-tag resolved-defset indices (``resolved_defsets_per_tag``
       plus the atomic-only fallback ``resolved_defsets_atomic_only``)
       are derived once in ``run_verifier`` via
       ``build_resolved_defsets_per_tag``.

    The transient fields ``current_chapter_thm``,
    ``current_chapter_type``, ``current_gl_binary``, and
    ``current_resolved_defsets`` are reset PER CHAPTER by
    ``verify_chapter`` so per-tag checkers see the chapter's anchor
    binary and theorem context without re-resolving them on every call.

    @invariant I-16 — verifier is sacred. Checkers MUST NOT mutate
    ``gl_binaries`` / ``output_indices`` / ``input_indices`` /
    ``definition_sets`` / ``resolved_defsets_*``. Read-only by
    convention; the Python test layer enforces this by sharing these
    fields by reference across test fixtures.
    @invariant I-29 — variable-port type consistency: the resolved
    defsets feed ``check_defset_consistency``, which independently
    verifies the compiler's same-type-at-shared-positions contract.

    @see run_verifier — bootstraps a fresh state from a
    processed-proof-graph directory.
    @see verify_chapter — sets per-chapter transient context and
    dispatches every row.
    """
    tag_counters: Dict[str, TagCounter] = field(default_factory=dict)
    goal_reached: TagCounter = field(default_factory=TagCounter)
    # Global theorem list: expression → { "type": ..., "ref": ... }
    global_theorems: Dict[str, dict] = field(default_factory=dict)
    # Ordered list of (expression, type, ref) for chapter mapping
    global_theorem_list: List[Tuple[str, str, str]] = field(default_factory=list)
    # Output indices: core_name → 0-based index of output argument
    output_indices: Dict[str, int] = field(default_factory=dict)
    # Input indices: core_name → list of 0-based indices of input arguments
    input_indices: Dict[str, List[int]] = field(default_factory=dict)
    # GL binaries: tag → { name → { "category", "elements", "signature", "definedSet", ... } }
    gl_binaries: Dict[str, dict] = field(default_factory=dict)
    # Definition sets: core_name → { "1-based-position" → [def_set_str, bool] }
    # FIX: this field used to be declared twice with two different annotations
    # (Dict[str, dict] here and Dict[str, Dict[str, list]] further down). The
    # duplicate is merged into this single declaration, keeping the more
    # precise annotation. Behavior is unchanged: a repeated dataclass
    # annotation keeps the first occurrence's position in __init__, so the
    # positional-argument order of the generated constructor is identical.
    definition_sets: Dict[str, Dict[str, list]] = field(default_factory=dict)
    # Transient: set per-chapter before dispatching line checkers
    current_chapter_thm: Optional[Tuple[str, str, str]] = None
    current_chapter_type: str = ""
    current_gl_binary: Optional[dict] = None  # the GL binary for the current chapter's batch
    # Resolved defset index — PER-TAG. tag → { core_name → { pos_str → type_label_str } }
    # Mirrors the compiler's per-batch ArgumentAnalyzer(this->coreExpressionMap)
    # construction at prover.hpp:3556 — each batch resolves composites against
    # its own binary, so the same compact name (e.g. `implication26`) can mean
    # different things in different batches without colliding. Per-chapter the
    # right tag is selected the same way `current_gl_binary` is (anchor substring
    # match on the chapter's theorem expression). See D-41.
    resolved_defsets_per_tag: Dict[str, Dict[str, Dict[str, str]]] = field(default_factory=dict)
    # Atomic-only fallback (no batch-specific composites) — used for chapters
    # whose theorem doesn't disclose an anchor and the per-tag selection
    # therefore fails. Composites unresolved → unknown-skip per check.
    resolved_defsets_atomic_only: Dict[str, Dict[str, str]] = field(default_factory=dict)
    # Per-chapter pointer set by verify_chapter; check_defset_consistency reads.
    current_resolved_defsets: Optional[Dict[str, Dict[str, str]]] = None
    # External theorems (raw + renamed forms)
    external_theorems: set = field(default_factory=set)

    def binaries_for_chapter(self) -> list:
        """@brief Return the GL binary(ies) the current chapter may consult.

        @details
        Two paths:
        1. ``current_gl_binary`` was resolved by anchor-substring match
           in ``verify_chapter``. The single resolved binary is
           authoritative and returned as a one-element list — no
           fallback is needed because we know exactly which batch
           created the operators this chapter uses.
        2. ``current_gl_binary`` is ``None`` (anchor not loaded as a
           binary, or no theorem). A fallback list is returned. When
           the chapter's theorem contains the literal substring
           ``AnchorIncubator`` (no tag literally named ``Incubator``
           is loaded — the tags are ``IncubatorPeano`` /
           ``IncubatorGauss`` / ``IncubatorGauss1``), the fallback is
           restricted to binaries whose tag starts with ``Incubator``.
           Otherwise every loaded binary is returned.

        **Why the chapter-context filter.** After the
        ``files/incubator/GL_binaries/`` merge into
        ``files/GL_binaries/`` (D-54), main-batch and incubator-batch
        allocations share one directory and their names collide on
        shape (e.g. ``existence4`` is arity 5 in ``Gauss`` but arity 4
        in ``IncubatorGauss1``). Without the filter, alphabetical
        fallback iteration could route a check against a same-named-
        but-different-shaped operator, generating bogus failures.
        Per I-16 this is a tightening, not a weakening.

        @return A list of GL binary dicts to consult, in order. Empty
                only if ``gl_binaries`` itself is empty.
        @see verify_chapter — sets ``current_gl_binary`` via the
        anchor-substring resolution this method falls back from.
        """
        if self.current_gl_binary is not None:
            return [self.current_gl_binary]
        thm = self.current_chapter_thm
        if thm is not None and "AnchorIncubator" in thm[0]:
            return [b for tag, b in self.gl_binaries.items()
                    if tag.startswith("Incubator")]
        return list(self.gl_binaries.values())

    def counter_for(self, tag: str) -> TagCounter:
        """@brief Lazily return the ``TagCounter`` for ``tag``.

        @details
        Creates a fresh zero-initialized ``TagCounter`` on first
        lookup and caches it in ``tag_counters``; subsequent calls
        return the same instance so successive ``record(...)`` calls
        accumulate into one tally. Used both by the per-tag dispatch
        in ``verify_chapter`` and by chapter-level meta-check counters
        (``self-reference``, ``origin``, etc.).

        @param tag Tag name or meta-counter name. Any string is
                   accepted; the report enumerates registered names
                   in dispatch order then trailing meta names in
                   insertion order.
        @return The (possibly freshly created) counter.
        """
        # Explicit membership test (not setdefault) so no throwaway
        # TagCounter is allocated on the hot already-registered path.
        if tag not in self.tag_counters:
            self.tag_counters[tag] = TagCounter()
        return self.tag_counters[tag]
# ---------------------------------------------------------------------------
# Parsing
# ---------------------------------------------------------------------------
def parse_chapter_file(filepath: str) -> List[ProofLine]:
    """@brief Read one chapter file off disk into a list of ``ProofLine``.

    @details
    Walks the file row-by-row, decoding each line as UTF-8. Blank and
    whitespace-only lines are skipped — they carry no semantics. Each
    remaining line is split on ``"\\t"`` and missing fields degrade to
    the empty string, so the resulting ``ProofLine`` is always
    well-formed at the Python level; structural validation lives
    downstream in the per-tag checkers, not here.

    Field-position mapping:
    - ``parts[0]`` → ``expression``
    - ``parts[1]`` → ``namespace`` (``""`` when absent)
    - ``parts[2]`` → ``tag``; an absent tag becomes ``"<malformed>"``
      so the dispatcher records a recognisable failure instead of
      crashing the verifier.
    - ``parts[3:]`` → ``rest`` (already a list, stride-2 indexable).
    - ``raw`` keeps the line with only the trailing ``\\r\\n`` stripped,
      for diagnostics.
    - ``line_no`` is 1-based (matches editor numbering).

    @param filepath Absolute or cwd-relative path to a chapter
                    ``.txt`` file under
                    ``files/processed_proof_graph/``.
    @return List of ``ProofLine`` in original file order; empty for
            empty / whitespace-only files.
    @pre ``filepath`` is readable as UTF-8.
    @post No I/O side effects beyond the read.
    @see verify_chapter — typical consumer of this output.
    """
    rows: List[ProofLine] = []
    with open(filepath, "r", encoding="utf-8") as fh:
        for row_no, text in enumerate(fh, start=1):
            text = text.rstrip("\r\n")
            if not text.strip():
                continue
            fields = text.split("\t")
            expression = fields[0] if fields else ""
            namespace = fields[1] if len(fields) > 1 else ""
            tag = fields[2] if len(fields) > 2 else "<malformed>"
            rows.append(ProofLine(
                expression=expression,
                namespace=namespace,
                tag=tag,
                rest=fields[3:],
                raw=text,
                line_no=row_no,
            ))
    return rows
def load_global_theorem_list(base_dir: str) -> Tuple[Dict[str, dict],
                                                     List[Tuple[str, str, str]]]:
    """@brief Load the chapter ordering manifest from ``global_theorem_list.txt``.

    @details
    The manifest is the contract between ``process_proof_graphs.py``
    (which assigns one or more chapter files per proved theorem) and
    the verifier (which walks the chapters in registry order). Each
    row is tab-separated:

    ``<theorem-expression> TAB <theorem-type> TAB <theorem-ref>``

    where ``<theorem-type>`` is one of ``direct`` / ``mirrored`` /
    ``reformulated`` / ``induction`` / ``or_theorem`` /
    ``incubator_back_reformulation`` and ``<theorem-ref>`` is the
    proof method OR — for induction rows — the induction variable
    name (consumed by ``check_recursion``).

    Returns two views over the same data:
    - **Dict** keyed by theorem expression, for fast membership
      queries (``check_theorem_tag``, the ``origin`` meta-check).
    - **Ordered list** of ``(expr, type, ref)`` triples, consumed by
      ``build_chapter_theorem_map``.

    A missing file yields ``({}, [])`` rather than raising — by
    design, since ``run_verifier`` may probe sibling directories
    (``extra_global_lists``) that legitimately lack a manifest.

    @param base_dir Directory containing ``global_theorem_list.txt``.
    @return ``(dict-by-expression, ordered-list)`` — both views are
            empty when the file is absent.
    @see build_chapter_theorem_map — primary consumer of the ordered
    list.
    @see check_theorem_tag — primary consumer of the dict.
    """
    by_expr: Dict[str, dict] = {}
    in_order: List[Tuple[str, str, str]] = []
    manifest = os.path.join(base_dir, "global_theorem_list.txt")
    if not os.path.isfile(manifest):
        return by_expr, in_order
    with open(manifest, "r", encoding="utf-8") as fh:
        for raw in fh:
            row = raw.rstrip("\r\n")
            if not row.strip():
                continue
            # Pad short rows so missing type/ref columns degrade to "".
            cols = row.split("\t")
            expr, thm_type, ref = (cols + ["", ""])[:3]
            by_expr[expr] = {"type": thm_type, "ref": ref}
            in_order.append((expr, thm_type, ref))
    return by_expr, in_order
# ---------------------------------------------------------------------------
# Build chapter → theorem mapping
#
# Walk global_theorem_list in order, consume chapter files sequentially.
# "induction" theorems consume 2 chapters (check_zero + check_induction_condition).
# All other types consume 1 chapter.
# ---------------------------------------------------------------------------
def build_chapter_theorem_map(
        chapter_files: List[str],
        theorem_list: List[Tuple[str, str, str]],
) -> Dict[str, Tuple[str, str, str]]:
    """@brief Assign every chapter file to the theorem it proves.

    @details
    Walks ``theorem_list`` and ``chapter_files`` in lockstep,
    consuming chapter files in declaration order:
    - **Non-induction theorems** consume exactly ONE chapter file
      (``direct_proof.txt`` / ``mirrored_statement.txt`` /
      ``reformulated_statement.txt`` / ``or_theorem.txt`` /
      ``back_reformulated_statement.txt``).
    - **Induction theorems** consume exactly THREE chapter files, in
      this order: ``*_induction_typing.txt`` (the typing gate), then
      ``*_check_zero.txt`` (zero case), then
      ``*_check_induction_condition.txt`` (successor step). Typing
      comes first because both later proofs cite ``(in[ind_var, N])``.

    Each induction theorem maps all THREE of its chapter files to the
    same ``(expr, type, ref)`` triple, so per-chapter context lookup
    in ``verify_chapter`` is a plain dict lookup.

    **Structural invariant.** Each induction row must have exactly
    three consecutive chapter files at the expected filename
    suffixes; a violation raises ``AssertionError`` — the verifier's
    "airtight or it fails loudly" stance (I-19).

    @param chapter_files Sorted list of chapter filenames (per
                         ``chapter_sort_key`` ordering).
    @param theorem_list Ordered theorem list from
                        ``load_global_theorem_list``.
    @return Map filename → ``(expr, type, ref)`` covering every
            consumed chapter. A tail of unmatched chapter files is
            silently dropped — they may exist in a partial pipeline
            run.
    @invariant I-19 — induction-triple structure asserted, not
    softened.
    """
    assigned: Dict[str, Tuple[str, str, str]] = {}
    cursor = 0
    total = len(chapter_files)
    for triple in theorem_list:
        if cursor >= total:
            break
        thm_expr, thm_type, _thm_ref = triple
        if thm_type != "induction":
            # Ordinary theorem: one chapter, then advance.
            assigned[chapter_files[cursor]] = triple
            cursor += 1
            continue
        # Induction: three consecutive chapters at the right suffixes,
        # in the order typing → check_zero → check_induction_condition.
        assert cursor + 2 < total, (
            f"induction theorem {thm_expr!r} missing three chapters "
            f"(available: {chapter_files[cursor:cursor+3]})"
        )
        assert chapter_files[cursor].endswith("_induction_typing.txt"), (
            f"induction chapter at index {cursor} must be *_induction_typing.txt, "
            f"got {chapter_files[cursor]!r}"
        )
        assert chapter_files[cursor + 1].endswith("_check_zero.txt"), (
            f"induction chapter at index {cursor + 1} must be *_check_zero.txt, "
            f"got {chapter_files[cursor + 1]!r}"
        )
        assert chapter_files[cursor + 2].endswith("_check_induction_condition.txt"), (
            f"induction chapter at index {cursor + 2} must be "
            f"*_check_induction_condition.txt, got {chapter_files[cursor + 2]!r}"
        )
        for offset in range(3):
            assigned[chapter_files[cursor + offset]] = triple
        cursor += 3
    return assigned
# ---------------------------------------------------------------------------
# Theorem goal reached check
# ---------------------------------------------------------------------------
def _find_matching_paren(expr: str, start: int) -> int:
"""@brief Find the index of the ``)`` that closes the ``(`` at ``start``.
@details
Linear scan with a depth counter — increments on every ``(``,
decrements on every ``)``. Returns the index of the closing
paren when depth hits zero, or ``-1`` if the parentheses are
unbalanced (the input runs off the end before depth reaches
zero). Single-pass, no allocations; safe to call on long MPL
strings inside hot disintegration loops.
The scan starts AT ``start`` (not after it), so the caller must
pass the index of the opening ``(`` itself. The function treats
``[``/``]`` as opaque — only ``(``/``)`` contribute to depth.
That is intentional: MPL nests parens for sub-expressions and
brackets for argument lists, and the two don't need to be
co-balanced for paren matching (e.g. the argument list of an
outer call may contain a sub-expression with its own
independent paren structure).
@param expr The expression to scan.
@param start Index of the opening ``(`` whose match is sought.
@return Index of the matching ``)``; ``-1`` on unbalanced input.
@pre ``expr[start] == '('``. The function does not check this;
callers responsible for positioning correctly.
"""
depth = 0
for i in range(start, len(expr)):
if expr[i] == '(':
depth += 1
elif expr[i] == ')':
depth -= 1
if depth == 0:
return i
return -1
def disintegrate_implication_head(expr: str) -> str:
    """@brief Return the innermost conclusion of a nested implication.

    @details
    Iteratively peels ``(>[vars](premise)(body))`` layers from the
    outside in. Each pass recognises ``(>[`` at the start, skips the
    bound-variable list, walks past the premise — ``(...)`` or the
    negated form ``!(...)`` — then descends into the body and loops.

    Stops when:
    - The current expression doesn't start with ``(>[``.
    - The body is a negated atom ``!(...)`` — returned verbatim with
      the leading ``!`` preserved (the only path that returns a
      negated head; positive heads are paren-wrapped without ``!``).
    - Bracket scanning fails (malformed input — the partial state is
      returned and rejected downstream as structurally wrong).

    Used by ``check_theorem_goal_reached`` for ``direct_proof`` /
    ``check_zero`` / ``check_induction_condition`` chapter types to
    extract the proof's target from the theorem, and by
    ``check_validity_name`` for ``expansion-for-integration`` rows.

    @param expr An MPL implication expression (possibly nested) OR a
                non-implication shape (returned unchanged).
    @return The innermost conclusion. Negated bodies retain the
            leading ``!``.
    @see disintegrate_implication_full — returns BOTH the premise
    list AND the head, when the caller needs both halves.
    """
    def _close_of(s: str, open_idx: int) -> int:
        # Local paren matcher (same contract as _find_matching_paren):
        # index of the ')' closing the '(' at open_idx, or -1.
        depth = 0
        for j in range(open_idx, len(s)):
            if s[j] == '(':
                depth += 1
            elif s[j] == ')':
                depth -= 1
                if depth == 0:
                    return j
        return -1

    while expr.startswith('(>['):
        binder_end = expr.index(']', 3)
        pos = binder_end + 1
        if pos >= len(expr):
            break
        # Skip past the premise, which may be negated: !(...) or (...).
        if expr[pos] == '!' and pos + 1 < len(expr) and expr[pos + 1] == '(':
            premise_close = _close_of(expr, pos + 1)
        elif expr[pos] == '(':
            premise_close = _close_of(expr, pos)
        else:
            break
        if premise_close < 0:
            break
        body = premise_close + 1
        if body >= len(expr):
            break
        if expr[body] == '!':
            # Negated body !(...): return it verbatim, '!' preserved.
            inner = body + 1
            if inner >= len(expr) or expr[inner] != '(':
                break
            inner_close = _close_of(expr, inner)
            if inner_close < 0:
                break
            return expr[body:inner_close + 1]
        if expr[body] != '(':
            break
        body_close = _close_of(expr, body)
        if body_close < 0:
            break
        # Descend into the positive body and keep peeling.
        expr = expr[body:body_close + 1]
    return expr
# ---------------------------------------------------------------------------
# Mirroring helpers (independent copy — no imports from create_expressions)
# ---------------------------------------------------------------------------
def _extract_args(expr: str) -> List[str]:
"""@brief Extract args from the FIRST top-level ``[...]`` group in ``expr``.
@details
Returns the comma-split contents of the first bracket group found
by simple regex (``\\[([^\\]]*)\\]``) — empty strings are filtered.
Examples:
- ``(in3[a,b,c,+])`` → ``["a", "b", "c", "+"]``
- ``(>[v1](in[v1,N])(in[v1,N]))`` → ``["v1"]`` (the binder)
- ``a_plain_string`` → ``[]`` (no bracket group)
**Limitation.** Only the FIRST bracket group is returned. Nested
groups (the arg list of a sub-expression inside an outer call's
arg list) are not visible. Use ``_extract_all_args`` when you
need the recursive form (the trace-back walker over compound
source expressions relies on it).
Used pervasively by the checkers to inspect argument shapes —
arity comparisons, var-name extraction, anchor-slot detection,
etc.
@param expr Expression with at least one ``[...]`` group.
@return List of comma-split args (empty strings dropped); ``[]``
if no bracket group is present.
"""
m = re.search(r'\[([^\]]*)\]', expr)
if not m:
return []
return [a for a in m.group(1).split(',') if a]
def _extract_all_args(expr: str) -> List[str]:
"""@brief Extract args from EVERY ``[...]`` group in ``expr`` (recursive).
@details
Differs from ``_extract_args`` by iterating ``re.findall`` over
EVERY bracket group, not just the first. Example:
``!(>[v6](in[v6,N])!(in2[v6,i1_copy,s]))``
decomposes to bracket groups ``[v6]``, ``[v6,N]``,
``[v6,i1_copy,s]``, returning the flat arg list
``["v6", "v6", "N", "v6", "i1_copy", "s"]``. Duplicates are
preserved (unlike a set); the caller decides whether to dedupe.
Used by the variable-copy trace-back to detect whether a copy
variable (e.g. ``i1_copy``) is present ANYWHERE inside a
compound source expression. ``_extract_args`` would miss it if
the copy lives in a nested sub-expression's arg list, which is
exactly the structure compound sources have (e.g. an implication
cited as a source).
@param expr Any expression (may have nested ``[...]`` groups).
@return Flat list of every comma-split arg across all groups,
preserving duplicates.
@see _extract_args — first-bracket-only variant for hot paths
that only need the outer arity.
"""
out: List[str] = []
for group in re.findall(r'\[([^\]]*)\]', expr):
for a in group.split(','):
if a:
out.append(a)
return out
def _extract_core_name(expr: str) -> str:
"""@brief Extract the operator core name from a leaf-call expression.
@details
Matches the leading ``(<core>[`` pattern with a word-character
core name. Returns the core name on a match, ``""`` otherwise.
Examples:
- ``(in3[a,b,c,+])`` → ``"in3"``
- ``(AnchorPeano[N,s,p,zero,one,two])`` → ``"AnchorPeano"``
- ``!(in[a,N])`` → ``""`` (leading ``!`` breaks the regex anchor)
- ``(>[v1](in[v1,N])(in[v1,N]))`` → ``""`` (the ``>`` is not a
word char)
- ``(=[a,b])`` → ``""`` (the ``=`` is not a word char; equality
checkers reach into the args via ``_extract_args`` and shape
checks against the literal prefix ``"(=["``)
Equality, negation, and implication shapes are deliberately NOT
returned by this function — they're recognized by literal prefix
checks at the call sites. Only true leaf operators (``in``,
``in2``, ``in3``, ``fXY``, ``existence4``, ``or0``,
``implication26``, ``Anchor*``, …) return a non-empty result.
@param expr An MPL expression.
@return Core operator name on a leaf-call match; ``""``
otherwise.
"""
m = re.match(r'\((\w+)\[', expr)
return m.group(1) if m else ""
def _trace_back_to(expr: str,
expr_to_lines: Dict[str, List["ProofLine"]],
is_target,
should_follow=None,
visited: set = None) -> bool:
"""@brief Generic recursive trace-back through a chapter's origin graph.
@details
The chapter-level meta-checks (``anchor handling trace``,
``contradiction trace``, ``vacuous truth trace``, ``variable
copy``) all share the same query shape: starting from some
expression ``expr``, can we reach a chapter row that satisfies
``is_target(row)`` by walking rest-field source edges? This
function is the shared engine.
Algorithm:
1. Look up ``expr`` in ``expr_to_lines`` (built once per chapter
in ``verify_chapter``: ``expression → list of chapter rows
whose left-hand side is that expression``). For each
matching row:
a. If ``is_target(row)`` returns True, the walk succeeds —
return True immediately.
b. Otherwise, iterate the row's ``rest`` fields in
alternating ``(source_expr, source_ns)`` PAIRS (stride-2
starting at index 0). For each source, optionally apply
the ``should_follow(src)`` predicate to prune edges
(callers like the variable-copy walker filter to edges
that still carry the copy variable).
c. Recurse on each surviving source.
2. Cycle-safe via the ``visited`` set — a node visited once is
never re-entered, so chapters with circular citations
terminate gracefully (returning False if no target was
reachable).
The function returns the BOOLEAN reachability answer, NOT the
path. Callers that need the path build it externally; in
practice no checker does (the trace counters record True/False
per call).
@param expr Starting expression for the walk.
@param expr_to_lines Map expression → list of chapter rows
whose LEFT-hand expression is that key.
Pre-computed once per chapter for O(1)
lookup inside the recursion.
@param is_target Predicate over a chapter line; True
short-circuits the walk with success.
@param should_follow Optional predicate over a source
expression; if False, that edge is not
followed. Default: every edge is followed.
@param visited Internal recursion guard. Callers pass
``None`` (default); the recursion fills it.
@return True iff at least one ``is_target``-satisfying row is
reachable from ``expr`` through follow-able edges.
"""
if visited is None:
visited = set()
if expr in visited:
return False
visited.add(expr)
for ch_line in expr_to_lines.get(expr, []):
if is_target(ch_line):
return True
for i in range(0, len(ch_line.rest), 2):
src = ch_line.rest[i]
if should_follow is not None and not should_follow(src):
continue
if _trace_back_to(src, expr_to_lines, is_target, should_follow, visited):
return True
return False
def disintegrate_implication_full(expr: str) -> Tuple[List[str], str]:
    """@brief Peel every implication layer; return ``(premises, head)``.
    @details
    The full-information cousin of ``disintegrate_implication_head``:
    instead of discarding intermediate premises and returning only
    the innermost conclusion, this builds a flat list of every
    premise encountered on the way down and returns it alongside the
    final head.
    Walks ``(>[vars](premise)(body))`` nesting from outside in:
      1. Recognise ``(>[`` at the start of the current expression.
      2. Skip past the bound-variable list to the start of the
         premise.
      3. Capture the premise as either ``(...)`` (positive,
         paren-balanced) or ``!(...)`` (negated, leading ``!``
         preserved). Append to the premise list.
      4. Advance to the body. If the body is ``!(...)``, treat it as
         the final negated head — append the LAST premise (already
         captured), record the head, and return.
      5. If the body is ``(...)``, replace ``expr`` with the body and
         loop.
    On malformed input (any bracket/paren scan failing) the loop
    breaks and returns the partial state captured so far — including
    a ``(>[`` with no closing ``]``, which is now handled via
    ``str.find`` instead of ``str.index`` so it breaks instead of
    raising ``ValueError``. Downstream checkers detect the
    malformation through structural mismatch (e.g. premise count
    differing from the actual row's rest length) and record a
    failure under the appropriate counter.
    Used by every checker that needs to compare a row's structure
    against a reference implication — ``check_implication``,
    ``check_equality1``, ``check_premise_element``,
    ``check_validity_name``, ``_check_mirror``,
    ``_check_reformulation``, ``check_equalize_variable``, etc.
    @param expr An MPL implication expression (possibly nested) OR
                a non-implication shape (in which case
                ``premises == []`` and ``head == expr``).
    @return ``(premises_list, head)`` — premises in outermost-first
            order, ``head`` is the innermost conclusion (with
            leading ``!`` preserved if negated).
    @see disintegrate_implication_head — head-only variant for
         callers that don't need the premises.
    """
    premises: List[str] = []
    while expr.startswith('(>['):
        # find() (not index()) so a missing ']' degrades to the
        # documented partial-state return instead of ValueError.
        bracket_close = expr.find(']', 3)
        if bracket_close < 0:
            break
        p_start = bracket_close + 1
        if p_start >= len(expr):
            break
        # Premise can be negated: !(...)
        if expr[p_start] == '!' and p_start + 1 < len(expr) and expr[p_start + 1] == '(':
            inner_end = _find_matching_paren(expr, p_start + 1)
            if inner_end < 0:
                break
            p_end = inner_end
            premise = expr[p_start:p_end + 1]
        elif expr[p_start] == '(':
            p_end = _find_matching_paren(expr, p_start)
            if p_end < 0:
                break
            premise = expr[p_start:p_end + 1]
        else:
            break
        b_start = p_end + 1
        if b_start >= len(expr):
            break
        if expr[b_start] == '!':
            # Negated body: !(...) — this is the final head.
            inner_start = b_start + 1
            if inner_start >= len(expr) or expr[inner_start] != '(':
                break
            inner_end = _find_matching_paren(expr, inner_start)
            if inner_end < 0:
                break
            premises.append(premise)
            expr = expr[b_start:inner_end + 1]
            return premises, expr
        if expr[b_start] != '(':
            break
        b_end = _find_matching_paren(expr, b_start)
        if b_end < 0:
            break
        premises.append(premise)
        # Positive body: descend one implication layer and loop.
        expr = expr[b_start:b_end + 1]
    return premises, expr
def _replace_arg_safe(expr: str, old: str, new: str) -> str:
"""@brief Rename one argument-level token inside ``[...]`` groups.
@details
Uses lookbehind/lookahead bracket guards
(``(?<=[\\[,]) ... (?=[\\],])``) so only full-token matches
inside argument lists are rewritten. Operator names, substrings
inside larger arg names, and tokens elsewhere in the expression
are NOT touched. Examples (rename ``v1 → x``):
- ``(in[v1,N])`` → ``(in[x,N])`` ✓
- ``(in3[v1,v1,+])`` → ``(in3[x,x,+])`` ✓
- ``(v1plus[a])`` → ``(v1plus[a])`` (untouched — operator name)
- ``(>[v1](in[v1,N])(in[v1,N]))`` → ``(>[v1](in[x,N])(in[x,N]))``
(binder list at ``>[v1]`` is NOT modified — it isn't bracketed
between a ``[`` or ``,`` on the left in the same way; the
caller treats binder vars separately)
Used by ``_check_reformulation`` to perform the
``u_i → actual_arg`` substitution when expanding a binary
signature, and by the recursion checker's
``_replace_arg_safe(expr, ind_var, x_var)`` rewrite of induction
premises.
@param expr Expression to rewrite.
@param old Argument name to replace (e.g. ``"v1"``).
@param new New argument name (e.g. ``"x"``).
@return Rewritten expression; identical to ``expr`` if no match.
@see _replace_arg_safe_multi — bulk variant doing many
substitutions in one pass.
"""
pattern = r'(?<=[\[,])' + re.escape(old) + r'(?=[\],])'
return re.sub(pattern, new, expr)
# ---------------------------------------------------------------------------
# Theorem-citation w/W -> v/V revert
#
# Counterpart of process_proof_graphs._v_to_w_in_theorem_citation. The
# processor renames non-anchor inner bvars of cited theorem-anchor
# implications from v/V to w/W (case-preserving letter swap). The
# verifier reverses that swap before exact-string comparison against
# state.global_theorems / state.external_theorems, which store the
# registry form (v/V).
#
# The fold _normalize_expr_list (already widened to [vVwW]\d+) provides
# a slower fallback that also works; the explicit reverse keeps the
# registry-membership fast path intact and documents the round-trip
# invariant `processor: v->w` <-> `verifier: w->v`.
# ---------------------------------------------------------------------------
# Full-token match for w/W-prefixed numbered bvars inside ``[...]``
# argument lists (same bracket guards as _replace_arg_safe):
# group(1) = the letter (case preserved), group(2) = the digits.
_WV_REVERT_PATTERN = re.compile(r'(?<=[\[,])([wW])(\d+)(?=[\],])')
def _is_theorem_anchor_impl_local(expr: str) -> bool:
"""@brief True when ``expr`` is a theorem-anchor implication.
@details
Local copy of ``process_proof_graphs.is_theorem_anchor_implication`` —
the verifier deliberately stays a single-file standalone with no
imports from ``process_proof_graphs`` (so it can be run against a
proof graph generated by any version of the processor without
cross-version coupling).
Returns True iff ``expr`` is an outer implication ``(>[...]...)`` and
its first premise (after disintegration) starts with ``(Anchor`` —
i.e. the first thing the rule requires is an anchor row, which is
the structural signature of a theorem-level rule (as opposed to
an internal rule used during proof search).
The distinction matters because non-anchor bvars in cited
theorem-anchor implications are subject to the ``v→w`` rename
applied by ``process_proof_graphs._v_to_w_in_theorem_citation``;
callers use this predicate to gate the inverse
``_revert_w_to_v_in_theorem_citation`` revert before exact-string
membership checks against the registry.