forked from aryak007/GeeksForGeeks_article_extractor
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathGFG_article_extractor.py~
More file actions
executable file
·75 lines (48 loc) · 1.75 KB
/
GFG_article_extractor.py~
File metadata and controls
executable file
·75 lines (48 loc) · 1.75 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
#!/usr/bin/env python
'''
--------------------------------------------------
--------------------------------------------------
Name: GeeksForGeeks Article Extractor
Purpose: Download and save the articles filed under each requested tag on www.geeksforgeeks.org
--------------------------------------------------
--------------------------------------------------
'''
import urllib2
import os
import pdfkit
from bs4 import BeautifulSoup
def parse_options(argv=None):
    """Parse command-line options for the extractor.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
            case optparse falls back to sys.argv[1:] (the original behavior).

    Returns:
        (opts, args): the optparse Values object (with opts.inp_tag holding
        the -t/--tag value, default "") and the list of positional arguments.
    """
    # Imported locally: the original file used OptionParser without ever
    # importing it, which raised NameError as soon as this was called.
    from optparse import OptionParser

    usage = "usage: prog [options] (arg1, arg2, ... argn)"
    parser = OptionParser(usage=usage)
    parser.add_option("-t", "--tag",
                      type="string",
                      action="store",
                      dest="inp_tag",
                      default="",
                      help="input tags for downloading from the website")
    opts, args = parser.parse_args(argv)
    return opts, args
# Tags whose article listings will be crawled; one subdirectory is created per tag.
AllTags = ['payu']
# Output root for downloaded articles; a per-tag folder is created beneath it.
# NOTE(review): hard-coded to one user's home directory — adjust before running.
path = "/home/yodebu/Desktop/GeeksForGeeks_article_extractor/" # Specify your path here
def ExtractMainLinks(AllTags, path):
    """For each tag, fetch its GeeksForGeeks listing page, collect the
    article URLs, and hand them to Extract_And_Save_Page_Data for download.

    Args:
        AllTags: iterable of tag name strings (e.g. ['payu']).
        path: root directory (trailing slash expected); a subdirectory named
            after each tag is created under it.
    """
    for tag in AllTags:
        newpath = path + tag
        # Guard so a re-run doesn't crash with OSError on an existing folder
        # (the original called os.mkdir unconditionally).
        if not os.path.isdir(newpath):
            os.mkdir(newpath)
        url = "http://www.geeksforgeeks.org/tag/" + tag + "/"
        data = urllib2.urlopen(url).read()
        soup = BeautifulSoup(data)
        # Article headings on the listing page carry class "post-title";
        # each wraps a single <a href=...> pointing at the article.
        headings = soup.findAll("h2", class_="post-title")
        listofLinks = []
        for heading in headings:
            anchor = heading.find("a")
            # Read the href attribute directly instead of the original's
            # fragile str()/split() surgery on the serialized tag.
            if anchor is not None and anchor.get("href"):
                listofLinks.append(anchor["href"])
        Extract_And_Save_Page_Data(listofLinks, newpath, tag)
def Extract_And_Save_Page_Data(listofLinks, newpath, i):
    """Download every article URL and save its raw HTML under newpath.

    Args:
        listofLinks: list of absolute article URL strings.
        newpath: destination directory (must already exist).
        i: tag name used as the filename prefix; files are saved as
            "<tag> <N>.html" with N counting from 1.
    """
    for number, item in enumerate(listofLinks, start=1):
        pageData = urllib2.urlopen(item).read()
        # os.path.join instead of the original "//" concatenation.
        filePath = os.path.join(newpath, str(i) + " " + str(number) + ".html")
        with open(filePath, "wb") as f:
            # pageData is already a byte string; write it as-is
            # (the original's str() wrapper was a no-op in Python 2).
            f.write(pageData)
# Run the crawl only when executed as a script, not on import.
if __name__ == "__main__":
    ExtractMainLinks(AllTags, path)