ap-parse.py
#!/usr/bin/env python3
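"""Parse scraped articles from a scraper database into the parser database.

Reads DB_URL (and the selected scraper's own connection-string variable) from
the environment via python-dotenv, then dispatches to one of two sub-commands:

    producer     parses producers in the parser db
    publication  parses publications in the parser db
"""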
from dotenv import load_dotenv
load_dotenv()

import os
import sys
import json
import traceback
import argparse
import logging
from uuid import UUID
from functools import partial

from articleparser.runners import run_batch, run_one_shot, DbGetter, DbSaver, JsonSaver
from articleparser import version
from articleparser import db
import articleparser.producer as producer
import articleparser.publication as publication
import articleparser.scraper as scraper

logging.basicConfig(level=os.getenv("LOG_LEVEL", "INFO"))
logger = logging.getLogger(__name__)


def get_all_unprocessed_articles(scraper_db, parser_db, args):
    """Return a DbGetter over snapshots newer than the last processed one."""
    publication.update_parser_info(parser_db)
    info = json.loads(parser_db.get_parser_info(parser_name=publication.name)["info"])
    later_than = info.get("last_processed_snapshot_at", 0)
    return DbGetter(
        scraper_db,
        scraper.get_snapshots,
        snapshot_at_later_than=later_than,
        first=args.first,
    )


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dump",
        action="store_true",
        help="dump parsed data to STDOUT instead of writing it to the db",
    )
    parser.add_argument(
        "--scraper-name", default="ZeroScraper", help="name of the upstream scraper to use"
    )
    cmds = parser.add_subparsers(title="sub command", dest="command", required=True)
    prod_cmd = cmds.add_parser("producer", help="parses producers in parser db")
    prod_cmd.add_argument(
        "id", type=UUID, help="id of the producer to parse in parser db", nargs="?"
    )
    prod_cmd.add_argument(
        "--site-id", type=int, help="id of the site to parse in news db", nargs="?"
    )
    pub_cmd = cmds.add_parser("publication", help="parses publications in parser db")
    pub_cmd.add_argument(
        "id", type=int, help="id of the publication to parse in parser db", nargs="?"
    )
    pub_cmd.add_argument(
        "--first",
        action="store_true",
        help="only parse the first snapshot of each article",
    )
    pub_cmd.add_argument(
        "--article-id", type=int, help="id of the article to parse in news db", nargs="?"
    )
    pub_cmd.add_argument("--url", help="url of the article to parse in news db", nargs="?")
    pub_cmd.add_argument(
        "--site-id", type=int, help="parse all articles of this site", nargs="?"
    )
    pub_cmd.add_argument(
        "--update",
        action="store_true",
        help="update publications; do not parse new articles",
    )
    pub_cmd.add_argument("--parser", type=str, help="parser to use", default="default")
    pub_cmd.add_argument(
        "--limit", type=int, default=100000, help="limit number of entries to parse"
    )
    return parser.parse_args()


def main(args):
    parser_db = db.module("queries")
    parser_db.connect(os.getenv("DB_URL"))
    try:
        # Look up the upstream scraper's connection info stored in the parser db.
        sc = parser_db.get_scraper_by_name(scraper_name=args.scraper_name)
        sc["data"] = db.to_json(sc["data"])
        scraper_db = scraper.ScraperDb(
            sc["scraper_name"], os.getenv(sc["data"]["db_url_var"]), sc["data"]
        )
        if args.command == "producer":
            if args.id is not None:
                p = db.to_producer(
                    parser_db.get_producer(producer_id=db.of_uuid(args.id))
                )
                data_getter = DbGetter(
                    scraper_db, scraper.get_site, site_id=p["site_id"]
                )
            elif args.site_id is not None:
                data_getter = DbGetter(
                    scraper_db, scraper.get_site, site_id=args.site_id
                )
            else:
                data_getter = DbGetter(scraper_db, scraper.get_sites)
            data_saver = (
                DbSaver(parser_db, producer.saver, scraper=sc)
                if not args.dump
                else JsonSaver()
            )
            run_one_shot(
                data_getter=data_getter,
                data_saver=data_saver,
                processor=producer.process_item,
            )
        elif args.command == "publication":
            if args.id is not None:
                raise RuntimeError("Unimplemented")
            elif args.article_id is not None:
                data_getter = DbGetter(
                    scraper_db,
                    scraper.get_snapshots,
                    article_id=args.article_id,
                    first=args.first,
                )
            elif args.url is not None:
                data_getter = DbGetter(
                    scraper_db, scraper.get_snapshots, url=args.url, first=args.first
                )
            elif args.site_id is not None:
                data_getter = DbGetter(
                    scraper_db,
                    scraper.get_snapshots,
                    site_id=args.site_id,
                    first=args.first,
                )
            elif args.update:
                raise RuntimeError("Unimplemented")
            else:
                data_getter = get_all_unprocessed_articles(
                    scraper_db, parser_db, args=args
                )
            run_batch(
                data_getter=data_getter,
                data_saver=DbSaver(parser_db, publication.saver, scraper=sc)
                if not args.dump
                else JsonSaver(),
                processor=partial(publication.process_item, parser=args.parser),
                batch_size=1000,
                limit=args.limit,
            )
        else:
            raise RuntimeError(f"Unknown command '{args.command}'")
        return 0
    except Exception:
        logger.error(traceback.format_exc())
        return -1
    finally:
        parser_db.disconnect()


if __name__ == "__main__":
    sys.exit(main(parse_args()))