-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapply_task2.py
More file actions
332 lines (290 loc) · 11.2 KB
/
apply_task2.py
File metadata and controls
332 lines (290 loc) · 11.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
import os
import re

# The cron worker written later lives in src/cron; create the directory up front.
os.makedirs("/home/volta/argus/argus-api/src/cron", exist_ok=True)

# Step 1: shared Redis client module (src/redis.js).
# Connects eagerly at require-time; REDIS_URL falls back to the compose-network host.
redis_client_js = """const { createClient } = require("redis");
const client = createClient({ url: process.env.REDIS_URL || "redis://redis:6379" });
client.on("error", (err) => console.error("Redis Client Error", err));
client.connect().catch(console.error);
module.exports = client;
"""
with open("/home/volta/argus/argus-api/src/redis.js", "w") as f:
    f.write(redis_client_js)
# Step 2: rewrite feeds.js so GET /news serves the Redis-cached payload while
# every route registered *before* the old /news handler is kept verbatim.
feeds_path = "/home/volta/argus/argus-api/src/routes/feeds.js"
with open(feeds_path, "r") as f:
    old_feeds = f.read()

# Capture everything from the express require up to (but excluding) the old
# /news route registration. Fail with a clear message instead of the opaque
# AttributeError that .group() on a failed re.search would raise.
proxy_match = re.search(
    r'(const express = require\("express"\);.*?)router\.get\("/news"',
    old_feeds,
    re.DOTALL,
)
if proxy_match is None:
    raise RuntimeError(
        f"Could not locate the /news route anchor in {feeds_path}; "
        "feeds.js layout has changed — aborting rewrite."
    )
proxy_routes = proxy_match.group(1)

# Replacement /news handler: read-only view of the cron-maintained cache.
# Returns an empty-but-well-formed payload when the cache key is absent.
new_news = """
const redis = require("../redis");
router.get("/news", async (_req, res) => {
  try {
    const data = await redis.get("argus:news");
    if (data) {
      return res.json(JSON.parse(data));
    }
    return res.json({ items: [], meta: { sourcesChecked: 0, fetchedAt: new Date().toISOString(), dedupedCount: 0 }, regions: {} });
  } catch (error) {
    console.error("Error reading news from Redis:", error);
    return res.status(500).json({ error: "Internal Server Error" });
  }
});
module.exports = router;
"""
with open(feeds_path, "w") as f:
    f.write(proxy_routes + new_news)
# Step 3: move the old /news ingest/parse/score pipeline into a cron worker
# (src/cron/news.js) that refreshes the "argus:news" Redis key every 10
# minutes, plus once shortly after startup.
#
# FIX: cleanText() previously contained no-op replacements — e.g.
# .replace(/&/g, "&") and .replace(/</g, "<") — because the HTML entity names
# had been lost. The entity-decoding steps are restored below; "&amp;" is
# decoded LAST so a literal "&amp;lt;" is not double-decoded into "<".
# ("&#39;" is the numeric apostrophe form feeds commonly emit; add "&apos;"
# too if feeds are seen using it — TODO confirm against real feed data.)
cron_news = """const cron = require("node-cron");
const { createHash } = require("node:crypto");
const redis = require("../redis");
const DEFAULT_NEWS_SOURCES = [
  { name: "BBC World", url: "https://feeds.bbci.co.uk/news/world/rss.xml", weight: 1.0 },
  { name: "Al Jazeera", url: "https://www.aljazeera.com/xml/rss/all.xml", weight: 0.95 },
  { name: "Guardian World", url: "https://www.theguardian.com/world/rss", weight: 0.92 },
  { name: "DW Top", url: "https://rss.dw.com/rdf/rss-en-top", weight: 0.9 },
  { name: "NPR World", url: "https://feeds.npr.org/1004/rss.xml", weight: 0.88 },
  { name: "Hacker News", url: "https://news.ycombinator.com/rss", weight: 0.74 },
  { name: "GDELT Blog", url: "https://blog.gdeltproject.org/feed/", weight: 0.82 },
];
const REGION_KEYWORDS = {
  CENTCOM: ["iran", "iraq", "syria", "israel", "gaza", "yemen", "red sea", "hormuz", "uae", "qatar", "kuwait", "bahrain", "saudi"],
  NORTHCOM: ["united states", "u.s.", "us ", "canada", "mexico", "north america", "homeland", "arctic"],
  SOUTHCOM: ["south america", "latin america", "caribbean", "brazil", "argentina", "colombia", "venezuela", "ecuador", "peru", "chile"],
  EUCOM: ["europe", "eu ", "nato", "ukraine", "russia", "germany", "france", "poland", "baltic", "balkan", "black sea"],
  AFRICOM: ["africa", "sahel", "sudan", "somalia", "ethiopia", "kenya", "nigeria", "mali", "chad", "libya"],
  INDOPACOM: ["indo-pacific", "pacific", "south china sea", "taiwan", "japan", "korea", "philippines", "india", "australia", "indonesia"],
};
const COMMAND_REGIONS = ["WORLDCOM", "CENTCOM", "NORTHCOM", "SOUTHCOM", "EUCOM", "AFRICOM", "INDOPACOM"];
const TAG_KEYWORDS = {
  CYBER: ["cyber", "malware", "ransomware", "hack", "ddos"],
  CONFLICT: ["war", "strike", "missile", "troop", "attack", "military"],
  INFRA: ["outage", "blackout", "pipeline", "port", "rail", "telecom", "cable"],
  ECON: ["tariff", "inflation", "oil", "sanction", "market", "gdp", "trade"],
  SPACE: ["satellite", "orbit", "launch", "space", "gps"],
};
function parseNewsSources() {
  const raw = process.env.NEWS_RSS_FEEDS?.trim();
  if (!raw) return DEFAULT_NEWS_SOURCES;
  const urls = raw.split(",").map((chunk) => chunk.trim()).filter(Boolean);
  if (urls.length === 0) return DEFAULT_NEWS_SOURCES;
  return urls.map((url, idx) => ({ name: `Feed ${idx + 1}`, url, weight: 0.8 }));
}
function cleanText(value) {
  return value
    .replace(/<!\\[CDATA\\[([\\s\\S]*?)\\]\\]>/g, "$1")
    .replace(/<[^>]+>/g, " ")
    .replace(/&lt;/g, "<")
    .replace(/&gt;/g, ">")
    .replace(/&quot;/g, '"')
    .replace(/&#39;/g, "'")
    .replace(/&amp;/g, "&")
    .replace(/\\s+/g, " ")
    .trim();
}
function extractTag(block, tags) {
  for (const tag of tags) {
    const re = new RegExp(`<${tag}[^>]*>([\\\\s\\\\S]*?)</${tag}>`, "i");
    const match = block.match(re);
    if (match?.[1]) return cleanText(match[1]);
  }
  return "";
}
function extractAtomLink(block) {
  const relAlt = block.match(/<link[^>]+rel=["']alternate["'][^>]*href=["']([^"']+)["'][^>]*\\/?>/i);
  if (relAlt?.[1]) return relAlt[1];
  const direct = block.match(/<link[^>]*href=["']([^"']+)["'][^>]*\\/?>/i);
  if (direct?.[1]) return direct[1];
  return extractTag(block, ["link"]);
}
function canonicalizeUrl(value) {
  try {
    const url = new URL(value.trim());
    [...url.searchParams.keys()].forEach((key) => {
      if (key.startsWith("utm_") || key === "ocid" || key === "cmpid") {
        url.searchParams.delete(key);
      }
    });
    url.hash = "";
    return url.toString();
  } catch {
    return value.trim();
  }
}
function titleSignature(title) {
  return title
    .toLowerCase()
    .replace(/[^a-z0-9\\s]/g, " ")
    .split(/\\s+/)
    .filter(Boolean)
    .slice(0, 10)
    .sort()
    .join("|");
}
function classifyTags(text) {
  const lower = text.toLowerCase();
  const tags = Object.entries(TAG_KEYWORDS)
    .filter(([, words]) => words.some((word) => lower.includes(word)))
    .map(([tag]) => tag);
  return tags.length ? tags : ["GENERAL"];
}
function classifyRegion(text) {
  const lower = text.toLowerCase();
  let best = "WORLDCOM";
  let score = 0;
  for (const [region, words] of Object.entries(REGION_KEYWORDS)) {
    const nextScore = words.reduce((acc, word) => acc + Number(lower.includes(word)), 0);
    if (nextScore > score) {
      best = region;
      score = nextScore;
    }
  }
  return best;
}
function parseFeed(xml, source) {
  const isAtom = /<entry\\b/i.test(xml);
  const blocks = [...xml.matchAll(new RegExp(isAtom ? "<entry\\\\b[\\\\s\\\\S]*?<\\\\/entry>" : "<item\\\\b[\\\\s\\\\S]*?<\\\\/item>", "gi"))];
  const entries = [];
  for (const [block] of blocks) {
    const title = extractTag(block, ["title"]);
    const url = canonicalizeUrl(isAtom ? extractAtomLink(block) : extractTag(block, ["link", "guid"]));
    if (!title || !url) continue;
    const publishedAt = extractTag(block, ["pubDate", "published", "updated"]) || new Date().toISOString();
    const summary = extractTag(block, ["description", "summary", "content:encoded", "content"]).slice(0, 320);
    entries.push({
      title,
      source,
      url,
      summary,
      publishedAt: new Date(publishedAt).toISOString(),
    });
  }
  return entries;
}
function computeScore(item, weight, tags) {
  const ageHours = Math.max(0, (Date.now() - new Date(item.publishedAt).getTime()) / 3_600_000);
  const recency = Math.max(0, 100 - ageHours * 5);
  const tagBoost = tags.includes("GENERAL") ? 2 : tags.length * 7;
  return Number((recency * 0.62 + weight * 25 + tagBoost).toFixed(2));
}
function buildRegionSummary(region, items) {
  if (!items.length) {
    return {
      posture: "STABLE",
      summary: `${region} has no fresh items in the current ingest window.`,
      keySignals: [],
      itemCount: 0,
    };
  }
  const counts = new Map();
  let riskPoints = 0;
  for (const item of items) {
    for (const tag of item.tags) {
      counts.set(tag, (counts.get(tag) ?? 0) + 1);
      if (tag === "CONFLICT" || tag === "CYBER") riskPoints += 2;
      else if (tag === "INFRA" || tag === "SPACE") riskPoints += 1;
    }
  }
  const keySignals = [...counts.entries()]
    .sort((a, b) => b[1] - a[1])
    .slice(0, 3)
    .map(([tag]) => tag);
  const posture = riskPoints >= 18 ? "HIGH" : riskPoints >= 8 ? "ELEVATED" : "STABLE";
  return {
    posture,
    summary: `${region} posture ${posture.toLowerCase()}. Lead item: ${items[0].title}. Primary signals: ${keySignals.join(", ") || "GENERAL"}.`,
    keySignals,
    itemCount: items.length,
  };
}
async function fetchNews() {
  console.log("Fetching news...");
  const sources = parseNewsSources();
  const maxItems = Math.max(10, Math.min(500, Number(process.env.NEWS_MAX_ITEMS ?? 100)));
  const feedResults = await Promise.all(
    sources.map(async (source) => {
      try {
        const response = await fetch(source.url, {
          cache: "no-store",
          headers: {
            Accept: "application/rss+xml, application/atom+xml, application/xml, text/xml;q=0.9",
            "User-Agent": "ArgusNewsBot/1.0",
          },
        });
        if (!response.ok) return { source, items: [] };
        const xml = await response.text();
        return { source, items: parseFeed(xml, source.name) };
      } catch {
        return { source, items: [] };
      }
    }),
  );
  const weightBySource = new Map(feedResults.map((entry) => [entry.source.name, entry.source.weight]));
  const deduped = [];
  const seenUrls = new Set();
  const seenSignatures = new Set();
  for (const item of feedResults.flatMap((entry) => entry.items)) {
    const url = canonicalizeUrl(item.url);
    if (!url || seenUrls.has(url)) continue;
    const sig = titleSignature(item.title);
    if (sig && seenSignatures.has(sig)) continue;
    const tags = classifyTags(`${item.title} ${item.summary}`);
    const region = classifyRegion(`${item.title} ${item.summary}`);
    const score = computeScore(item, weightBySource.get(item.source) ?? 0.75, tags);
    deduped.push({
      ...item,
      id: createHash("sha1").update(`${item.title}|${url}`).digest("hex").slice(0, 12),
      url,
      tags,
      region,
      score,
    });
    seenUrls.add(url);
    if (sig) seenSignatures.add(sig);
  }
  const items = deduped
    .sort((a, b) => b.score - a.score || b.publishedAt.localeCompare(a.publishedAt))
    .slice(0, maxItems);
  const buckets = {
    WORLDCOM: [...items],
    CENTCOM: [],
    NORTHCOM: [],
    SOUTHCOM: [],
    EUCOM: [],
    AFRICOM: [],
    INDOPACOM: [],
  };
  for (const item of items) {
    if (item.region !== "WORLDCOM" && buckets[item.region]) {
      buckets[item.region].push(item);
    }
  }
  const regions = {};
  for (const region of COMMAND_REGIONS) {
    regions[region] = buildRegionSummary(region, buckets[region].slice(0, 15));
  }
  const result = {
    items,
    meta: {
      sourcesChecked: sources.length,
      fetchedAt: new Date().toISOString(),
      dedupedCount: items.length,
    },
    regions,
  };
  await redis.set("argus:news", JSON.stringify(result));
  console.log(`News fetched and cached in Redis. ${items.length} items.`);
}
cron.schedule("*/10 * * * *", fetchNews);
// Run immediately on startup
setTimeout(fetchNews, 1000);
"""
with open("/home/volta/argus/argus-api/src/cron/news.js", "w") as f:
    f.write(cron_news)
# Step 4: wire the cron worker into the API entry point (src/index.js).
# The old code relied on str.replace, which silently does nothing when the
# anchor line is absent — the cron job would never be loaded. It also had no
# guard against inserting the require twice on a re-run.
index_path = "/home/volta/argus/argus-api/src/index.js"
with open(index_path, "r") as f:
    index_js = f.read()

anchor = 'const playbackRoutes = require("./routes/playback");'
cron_require = 'require("./cron/news");'
if cron_require in index_js:
    # Script re-run: index.js is already patched; leave it untouched.
    pass
elif anchor in index_js:
    index_js = index_js.replace(anchor, anchor + "\n" + cron_require)
    with open(index_path, "w") as f:
        f.write(index_js)
else:
    raise RuntimeError(
        f"Anchor line not found in {index_path}; cron job was NOT wired in."
    )

print("Task 2 applied.")