PubMedScraper.py
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from bs4 import BeautifulSoup


class PubMedScraper:
    def __init__(self):
        self._is_driver = False
        self.abstracts = dict()
        self.open_webdriver(headless=True)

    # Add (url, abstract) to the cache
    def _cache_abstract(self, url, abstract):
        if url not in self.abstracts:
            self.abstracts[url] = abstract

    def open_webdriver(self, headless=True):
        assert not self._is_driver
        # JavaScript requires us to open the webpage with a webdriver
        options = Options()
        if headless:
            options.add_argument('--headless')
        # Selenium 4 style: the chromedriver path is passed via a Service object
        self._driver = webdriver.Chrome(service=Service('./chromedriver'), options=options)
        self._is_driver = True

    def close_webdriver(self):
        assert self._is_driver
        self._driver.quit()
        self._is_driver = False

    # Get the relevant abstract for a PubMed url
    def get_abstract(self, url):
        assert self._is_driver
        # Check whether we've previously scraped this abstract
        if url in self.abstracts:
            return self.abstracts[url]
        # Otherwise, extract the abstract from the PubMed url
        self._driver.get(url)
        soup = BeautifulSoup(self._driver.page_source, 'lxml')
        tag = soup.find('abstracttext')
        if tag is None:
            raise ValueError(f'No abstract found at {url}')
        abstract = tag.text
        self._cache_abstract(url, abstract)
        return abstract

    # Return a list of abstracts, one per url
    def get_all_abstracts(self, urls):
        assert self._is_driver
        return [self.get_abstract(url) for url in urls]
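

# A minimal usage sketch, assuming chromedriver is available at ./chromedriver.
# The PubMed URL below is a hypothetical placeholder, not a tested article id.
if __name__ == '__main__':
    scraper = PubMedScraper()  # opens a headless Chrome webdriver on construction
    try:
        # Calls are cached, so repeated urls trigger only one page load each
        urls = ['https://pubmed.ncbi.nlm.nih.gov/12345678/']
        for abstract in scraper.get_all_abstracts(urls):
            print(abstract[:200])
    finally:
        # Always release the browser process, even if scraping fails
        scraper.close_webdriver()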