usaf-scrape.py
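"""Scrape PDF publication links from the Air Force e-publishing product index.

Uses Selenium to render the JavaScript-driven product table, BeautifulSoup to
parse each rendered page of results, and requests to download the PDFs found.
"""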
import os
import sys
import time
from urllib.parse import urljoin

try:
    import requests
    from selenium import webdriver
    from selenium.webdriver.chrome.options import Options
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC
    from bs4 import BeautifulSoup
except ImportError:
    print("Install required packages: pip install selenium beautifulsoup4 requests")
    sys.exit(1)
def extract_all_pdfs_from_table():
    """
    Load the page with Selenium and extract PDF links directly from the
    rendered table, paging through every page of results (178 pages at the
    time of writing).
    """
    chrome_options = Options()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36')
    driver = webdriver.Chrome(options=chrome_options)
    all_pdfs = []
    try:
        url = "https://www.e-publishing.af.mil/Product-Index/#/?view=pubs&orgID=10141&catID=1&series=-1&modID=449&tabID=131"
        print("Loading Air Force e-publishing page...")
        driver.get(url)
        # Wait for the results table to appear in the DOM
        WebDriverWait(driver, 20).until(
            EC.presence_of_element_located((By.ID, "data"))
        )
        # Trigger the data load via the site's own JavaScript
        print("Triggering data load...")
        driver.execute_script("epubs.SelectOrg(10141, 1, false, 'Departmental', 449, 131);")
        time.sleep(10)
        # Determine the total page count; fall back to 178 (the count
        # observed when this script was written)
        try:
            info = driver.find_element(By.CLASS_NAME, "dataTables_info").text
            print(f"Table info: {info}")
            # The highest-numbered pagination button is the last page
            pagination = driver.find_elements(By.CSS_SELECTOR, ".paginate_button")
            page_numbers = []
            for button in pagination:
                try:
                    text = button.text.strip()
                    if text.isdigit():
                        page_numbers.append(int(text))
                except Exception:
                    continue
            total_pages = max(page_numbers) if page_numbers else 178
            print(f"Total pages to process: {total_pages}\n")
        except Exception:
            total_pages = 178
            print("Defaulting to 178 pages\n")
        # Process each page of the table
        for page_num in range(1, total_pages + 1):
            print(f"[Page {page_num}/{total_pages}] Extracting PDFs...")
            # Wait for the page to stabilize before scraping it
            time.sleep(3)
            # Parse the rendered HTML
            page_html = driver.page_source
            soup = BeautifulSoup(page_html, 'html.parser')
            # Find the data table
            table = soup.find('table', {'id': 'data'})
            if table:
                tbody = table.find('tbody')
                if tbody:
                    rows = tbody.find_all('tr')
                    for row in rows:
                        cells = row.find_all('td')
                        if not cells:
                            continue
                        # First cell holds the product number
                        product_num = cells[0].get_text(strip=True)
                        # Collect every PDF link in the row
                        for link in row.find_all('a', href=True):
                            href = link['href']
                            if '.pdf' in href.lower():
                                full_url = urljoin(url, href)
                                # Skip URLs already recorded (avoid duplicates)
                                if not any(pdf['url'] == full_url for pdf in all_pdfs):
                                    all_pdfs.append({
                                        'url': full_url,
                                        'product': product_num
                                    })
                                    print(f"  ✓ {product_num}")
            # Navigate to the next page, except after the last one
            if page_num < total_pages:
                try:
                    # Try the DataTables JavaScript API first
                    driver.execute_script("$('#data').DataTable().page('next').draw('page');")
                    time.sleep(2)
                except Exception:
                    # Fallback: click the next button directly
                    try:
                        next_btn = driver.find_element(By.ID, "data_next")
                        if "disabled" not in next_btn.get_attribute("class"):
                            driver.execute_script("arguments[0].click();", next_btn)
                            time.sleep(2)
                    except Exception as e:
                        print(f"  ⚠ Navigation failed: {e}")
                        break
print(f"\n{'='*60}")
print(f"✓ Extraction Complete!")
print(f"Total PDFs found: {len(all_pdfs)}")
print(f"{'='*60}\n")
return all_pdfs
finally:
driver.quit()
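# Optional sketch: checkpoint the extracted list to disk so a long scrape can
# be inspected or resumed without re-driving the browser. This helper and the
# default filename 'pdf_list.json' are assumptions added for illustration,
# not part of the original script.
def save_pdf_list(pdf_list, path='pdf_list.json'):
    """Write the extracted PDF metadata to a JSON file (illustrative helper)."""
    import json
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(pdf_list, f, indent=2)
    print(f"Saved {len(pdf_list)} entries to {path}")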
def download_pdfs(pdf_list, output_dir='af_publications'):
    """Download every PDF in the list, skipping files that already exist."""
    os.makedirs(output_dir, exist_ok=True)
    session = requests.Session()
    session.headers.update({
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
        'Referer': 'https://www.e-publishing.af.mil/'
    })
    successful = 0
    failed = 0
    for idx, pdf in enumerate(pdf_list, 1):
        try:
            filename = os.path.basename(pdf['url'].split('?')[0])
            filepath = os.path.join(output_dir, filename)
            # Skip files that were already downloaded
            if os.path.exists(filepath):
                print(f"[{idx}/{len(pdf_list)}] ⊙ {pdf['product']}")
                successful += 1
                continue
            print(f"[{idx}/{len(pdf_list)}] ↓ {pdf['product']} - {filename}")
            response = session.get(pdf['url'], timeout=60)
            response.raise_for_status()
            with open(filepath, 'wb') as f:
                f.write(response.content)
            print(f"  ✓ {len(response.content) / 1024:.2f} KB")
            successful += 1
            # Be polite to the server between downloads
            time.sleep(0.5)
        except Exception as e:
            print(f"  ✗ {e}")
            failed += 1
    print(f"\n{'='*60}")
    print("Download Summary:")
    print(f"  Successful: {successful}")
    print(f"  Failed: {failed}")
    print(f"  Total: {len(pdf_list)}")
    print(f"{'='*60}")
if __name__ == "__main__":
    print("Air Force E-Publishing - Table Extraction Method")
    print("="*60 + "\n")
    output_dir = input("Output directory (default 'af_publications'): ").strip() or 'af_publications'
    # Extract PDF links from every page, then download them all
    pdf_list = extract_all_pdfs_from_table()
    if pdf_list:
        download_pdfs(pdf_list, output_dir)
    else:
        print("✗ No PDFs found")