-
Notifications
You must be signed in to change notification settings - Fork 70
Expand file tree
/
Copy pathtest.py
More file actions
130 lines (103 loc) · 3.87 KB
/
test.py
File metadata and controls
130 lines (103 loc) · 3.87 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
import pathlib
import shutil
import subprocess
from html.parser import HTMLParser
import pytest
class HTMLMetaParser(HTMLParser):
    """Simple HTML parser to extract metadata from HTML files.

    After ``feed()``-ing a document, exposes:
      * ``title`` -- concatenated text found inside the <title> element
      * ``meta_description`` -- content of <meta name="description">, or None
      * ``links`` -- attribute dict of every <a> tag encountered
      * ``classes`` -- CSS classes seen on those <a> tags
    """

    def __init__(self):
        super().__init__()
        self.title = ""  # accumulated while self._in_title is True
        self._in_title = False
        self.meta_description = None  # stays None if no description meta tag
        self.links = []
        self.classes = []

    def handle_starttag(self, tag, attrs):
        attrs_dict = dict(attrs)
        if tag == "title":
            self._in_title = True
        elif tag == "meta" and attrs_dict.get("name") == "description":
            self.meta_description = attrs_dict.get("content", "")
        elif tag == "a":
            self.links.append(attrs_dict)
            # A valueless attribute (e.g. "<a class>") parses with a None
            # value; fall back to "" so .split() can't raise AttributeError.
            class_value = attrs_dict.get("class") or ""
            if class_value:
                self.classes.extend(class_value.split())

    def handle_data(self, data):
        if self._in_title:
            self.title += data

    def handle_endtag(self, tag):
        if tag == "title":
            self._in_title = False
def parse_html(path: pathlib.Path) -> HTMLMetaParser:
    """Parse the HTML file at *path* and return the populated parser.

    Reads the file as UTF-8 explicitly so results don't depend on the
    platform's default locale encoding.
    """
    parser = HTMLMetaParser()
    parser.feed(path.read_text(encoding="utf-8"))
    parser.close()  # flush any data still buffered by HTMLParser
    return parser
OUTPUT_DIR = pathlib.Path("output")
@pytest.fixture(scope="session", autouse=True)
def built_site():
    """Build the site once for the whole session, then clean up.

    Yields the output directory so tests can resolve generated files;
    the directory is deleted after the last test finishes.
    """
    build = subprocess.run(
        ["uv", "run", "render-engine", "build"],
        capture_output=True,
        text=True,
    )
    if build.returncode != 0:
        pytest.fail(f"Failed to build site: {build.stderr}")

    yield OUTPUT_DIR

    # Teardown: drop the generated output tree if the build produced one.
    if OUTPUT_DIR.exists():
        shutil.rmtree(OUTPUT_DIR)
# Maps a site route (the value supplied via the ``loaded_route`` fixture)
# to the HTML file the build is expected to emit under the output directory.
ROUTE_FILES = {
    "": "index.html",
    "blog": "blog/index.html",
    "about.html": "about.html",
    "support.html": "support.html",
}
def test_destination(loaded_route: str, built_site: pathlib.Path) -> None:
    """Each known route must produce its expected file in the build output."""
    expected = built_site / ROUTE_FILES[loaded_route]
    assert expected.exists(), f"Expected output file not found: {expected}"
@pytest.mark.parametrize(
    "title, file_path",
    (
        ("Black Python Devs", "index.html"),
        ("Blog | Black Python Devs", "blog/index.html"),
        ("About | Black Python Devs", "about.html"),
        ("Support | Black Python Devs", "support.html"),
    ),
)
def test_bpdevs_title_en(built_site: pathlib.Path, title: str, file_path: str) -> None:
    """Each rendered page's <title> must match the expected English title."""
    page = parse_html(built_site / file_path)
    assert page.title == title, f"Expected title '{title}', got '{page.title}'"
def test_mailto_bpdevs(built_site: pathlib.Path) -> None:
    """Check that the homepage has a mailto link to contact@blackpythondevs.com."""
    parsed = parse_html(built_site / "index.html")
    # One pass over the anchors: the original pre-filter on the "mailto:"
    # prefix was redundant, since the equality check below already implies it.
    assert any(
        link.get("href") == "mailto:contact@blackpythondevs.com"
        for link in parsed.links
    ), "Expected mailto:contact@blackpythondevs.com link not found"
def _blog_post_files() -> list[pathlib.Path]:
    """Get all blog post HTML files (excluding index and pagination pages)."""
    blog_dir = OUTPUT_DIR / "blog"
    if not blog_dir.exists():
        return []
    posts = []
    for candidate in sorted(blog_dir.glob("*.html")):
        if candidate.name == "index.html":
            continue
        # Pagination pages are named "blog..."; real posts are not.
        if candidate.stem.startswith("blog"):
            continue
        posts.append(candidate)
    return posts
@pytest.mark.parametrize("post_file", _blog_post_files(), ids=lambda p: p.stem)
def test_blog_post_has_meta_description(
    built_site: pathlib.Path, post_file: pathlib.Path
) -> None:
    """Every blog post page must carry a meta description tag."""
    page = parse_html(post_file)
    assert page.meta_description is not None, (
        f"Missing meta description in {post_file.name}"
    )