Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
48 commits
Select commit Hold shift + click to select a range
5cc75a3
[new] - cut_highlights.py file created
ssw03270 Jul 8, 2020
5a04fae
[remove] - .idea directory deleted
Jul 8, 2020
ef40185
[add] - Updated cut_highlights.py
Jul 10, 2020
3c3fc36
[fix] - Fixed some resource file and cut_highlights path.
Jul 10, 2020
32a1dd5
[add] - added writing video part
ssw03270 Jul 26, 2020
3f17fce
[new] - searching_upperleft_banner.py created
wpdudH Jul 27, 2020
b6965cb
[new] - Created cognition_inGame.py file
tjswodud Jul 27, 2020
c0e8900
[update] - Update cognition_inGame.py
tjswodud Jul 30, 2020
e8263cf
[add] - Add minimap_templ.png
tjswodud Aug 2, 2020
5eb1c92
[update] - Update cognition_inGame.py
tjswodud Aug 2, 2020
84e5e31
[add] - cut_video() now prints progress
littlecsi Aug 2, 2020
6b461a3
[update] - Update cognition_inGame.py (increase writing rate)
tjswodud Aug 2, 2020
de0791f
Merge remote-tracking branch 'origin/feature-standardization-minimap'…
tjswodud Aug 2, 2020
d573a73
[update] - Update cognition_inGame.py (increase writing rate)
tjswodud Aug 2, 2020
298c33a
[fix] - cutting highlights error fixed
ssw03270 Aug 2, 2020
a90aa47
[fix] - deleted visualization code
ssw03270 Aug 2, 2020
0abe4d0
[update] - Update cognition_inGame.py (increase writing rate)
tjswodud Aug 4, 2020
d71e709
[fix] - cutting frame error fixed
wpdudH Aug 8, 2020
72e0332
[fix] - Remove unnecessary code and improve performance
wpdudH Aug 9, 2020
e77ae6e
[update] - Update cognition_inGame.py
tjswodud Aug 10, 2020
ccaf4b4
[update] - Update cognition_inGame.py
tjswodud Aug 15, 2020
97db2a9
[add] add match_template, sift_algorithm, create_capture function
tjswodud Aug 18, 2020
dca2490
[fix] - fixed main function
wpdudH Aug 23, 2020
223c1ca
[new] - Created cognition_inGame.py file
tjswodud Jul 27, 2020
43ab9ad
[update] - Update cognition_inGame.py
tjswodud Jul 30, 2020
9335ce9
[add] - Add minimap_templ.png
tjswodud Aug 2, 2020
70b80d2
[update] - Update cognition_inGame.py
tjswodud Aug 2, 2020
2f42429
[update] - Update cognition_inGame.py (increase writing rate)
tjswodud Aug 2, 2020
414a16c
Merge remote-tracking branch 'origin/feature-standardization-minimap'…
tjswodud Aug 31, 2020
8e9bbfb
[add] - add highlight image
wpdudH Sep 1, 2020
ac62766
[add] - add code cutting highlight
wpdudH Sep 1, 2020
f4d01e5
[add] - add pro view image
wpdudH Sep 7, 2020
6c0bde3
[add] - add code cutting pro view screen
wpdudH Sep 7, 2020
6064f4c
[add] add cognition_pause.py, cutting the frames that are paused.
tjswodud Sep 21, 2020
eed7a9b
[add] - cut_video() now prints progress
littlecsi Aug 2, 2020
d231aa2
[update] - Update cognition_inGame.py
tjswodud Aug 4, 2020
2a42473
[update] - Update cognition_inGame.py
tjswodud Aug 10, 2020
134cfc7
[update] - Update cognition_inGame.py
tjswodud Aug 15, 2020
d0f75b7
[add] add match_template, sift_algorithm, create_capture function
tjswodud Aug 18, 2020
be3022c
[add] add cognition_pause.py, cutting the frames that are paused.
tjswodud Sep 21, 2020
327e0b0
[Update] update cognition_inGame.py, cognition_pause.py
tjswodud Sep 21, 2020
df72df9
[Update] update two files.
tjswodud Sep 21, 2020
56de58a
Merge remote-tracking branch 'origin/feature-standardization-minimap'…
tjswodud Sep 24, 2020
a6ebf18
[Update] update two files.
tjswodud Sep 24, 2020
7612b49
[Update] Edit description of cognition_pause.py
tjswodud Sep 24, 2020
3a18d9d
Merge branch 'feature-standardization-upperleft' of https://github.co…
tjswodud Sep 28, 2020
8c56f55
[Add] add searching_upperleft_banner.py, cut_not_inGame.py
tjswodud Sep 28, 2020
45b57bc
[Update] update cognition_inGame.py, cut_not_inGame.py
tjswodud Oct 13, 2020
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions cut-highlights/checker.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
[30.0, 0]
[60.0, 0]
[90.0, 0]
[120.0, 0]
[150.0, 0]
[180.0, 0]
[210.0, 0]
[240.0, 0]
[270.0, 0]
[300.0, 0]
[330.0, 0]
186 changes: 186 additions & 0 deletions cut-highlights/cut_highlights.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,186 @@
# File Name: cut_highlights.py
# Team: standardization
# Programmer: ssw03270
# Start Date: 07/08/20
# Last Update: August 2, 2020
# Purpose: Almost every highlight video contains 3 games.
#          So we have to cut it to compare with our highlights.
# This program help to do it.

# 1620, 780 (1920, 1080) : minimap start point in edit video, raw video size
# If you want to see visual working process, erase #(notes) under the code.

import cv2 as cv
import numpy as np
import os
from matplotlib import pyplot as plt


def matching(video_file: str, video_capture: np.ndarray, video_path: str, compare_image: np.ndarray) -> None:
    """
    Compare each frame of the video with an image and write the matching
    (in-game) segments out as separate per-game video files.

    Args:
        video_file: Output file-name stem (the video file name without extension)
        video_capture: cv.VideoCapture handle of the video to scan
        video_path: Video's path that user input (currently unused here)
        compare_image: Grayscale minimap image used as the SIFT template

    Returns:
        N/A

    Raises:
        N/A
    """
    checker = []
    is_writing = False
    game_set = ["_GAME1", "_GAME2", "_GAME3", "_GAME4", "_GAME5"]
    game_num = -1
    # BUG FIX: 'out' was only bound inside the loop when a matching frame was
    # found; a video with no in-game frames raised UnboundLocalError at
    # out.release(). Initialize it to None and guard the release.
    out = None

    width = int(video_capture.get(cv.CAP_PROP_FRAME_WIDTH))
    height = int(video_capture.get(cv.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv.VideoWriter_fourcc(*"mp4v")
    fps = video_capture.get(cv.CAP_PROP_FPS)

    sift_ans = False
    while True:
        if not sift_ans:
            is_writing = False

        else:
            if not is_writing:
                # Transition not-in-game -> in-game: start a new game file.
                # NOTE(review): a video with more than len(game_set) detected
                # games would raise IndexError here — assumed <= 5 games.
                is_writing = True
                game_num += 1
                out = cv.VideoWriter(video_file + game_set[game_num] + ".mp4", fourcc, fps, (width, height), 1)

            out.write(frame_color)
            # cv.imshow("EditedFrame", frame_color)

        ret, frame_color = video_capture.read()
        if not ret:
            break
        frame_gray = cv.cvtColor(frame_color, cv.COLOR_BGR2GRAY)
        width_end, height_end = frame_gray.shape

        # Minimap region scales with resolution: anchored at (1620, 780) on a
        # 1920x1080 frame, extending to the bottom-right corner.
        width_start = round(780 / 1080 * width_end)
        height_start = round(1620 / 1920 * height_end)

        frame_resize = frame_gray[width_start: width_end, height_start: height_end]

        # Showing video.
        # cv.imshow("VideoFrame", frame_gray)

        # Only re-run the (expensive) SIFT comparison about once per second.
        # NOTE(review): fps is a float; for non-integer frame rates (e.g.
        # 29.97) the modulo may rarely hit 0 — confirm source videos use
        # integral fps.
        if video_capture.get(cv.CAP_PROP_POS_FRAMES) % fps == 0:
            print("start comparing..." + str(video_capture.get(cv.CAP_PROP_POS_FRAMES)))
            sift_ans = sift_algorithm(frame_resize, compare_image)
            checker.append([video_capture.get(cv.CAP_PROP_POS_FRAMES), sift_ans])

        # Stopping video.
        # if cv.waitKey(1) > 0:
        # break

    # write_txt(checker)
    if out is not None:
        out.release()
    video_capture.release()
    cv.destroyAllWindows()


def sift_algorithm(frame_resize: np.ndarray, compare_image: np.ndarray) -> bool:
    """
    Using sift algorithm to compare video's capture and image

    Args:
        frame_resize: Resized frame for compare
        compare_image: Image file for compare

    Returns:
        True if the good-match list has more than 15 entries.
        (It means this frame is ingame)
        False otherwise. (It means this frame isn't ingame)

    Raises:
        N/A
    """
    sift = cv.xfeatures2d.SIFT_create()

    keypoint_1, descriptor_1 = sift.detectAndCompute(frame_resize, None)
    keypoint_2, descriptor_2 = sift.detectAndCompute(compare_image, None)

    # Brute-force matcher; for each frame descriptor take the 2 nearest
    # template descriptors so Lowe's ratio test can be applied below.
    bf = cv.BFMatcher()
    matches = bf.knnMatch(descriptor_1, descriptor_2, 2)

    # Lowe's ratio test: keep a match only when it is clearly better than
    # the second-best candidate.
    good = []
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            good.append([m])

    # Showing plt.
    # plt_image = cv.drawMatchesKnn(frame_resize, keypoint_1, compare_image, keypoint_2, good, None, flags=2)
    # plt.imshow(plt_image)
    # plt.show()

    if len(good) > 15:
        print("this frame is ingame.")
        return True
    else:
        print("this frame isn't ingame.")
        return False


def write_txt(checker: list) -> None:
    """
    Writing the value that the frame has, one entry per line, to checker.txt.
    Now, it wasn't needed - 2020/08/02

    Args:
        checker: This list has current frame number and that's status.

    Returns:
        N/A

    Raises:
        OSError: If checker.txt cannot be opened for writing.
    """
    print("start writing...")
    # 'with' guarantees the file is closed even if a write fails.
    with open("checker.txt", "w") as f:
        for data in checker:
            f.write(str(data) + "\n")


def make_resource(path: str, type: int) -> np.ndarray:
    """
    Making a resource with path, according to type.

    Args:
        path: File's local path.
        type: File's type, 0 is video file, 1 is image file.

    Returns:
        If type is 0, returns video capture.
        If type is 1, returns image loaded as grayscale.
        Any other type falls through and returns None.

    Raises:
        N/A
    """
    if type == 0:
        return cv.VideoCapture(path)
    elif type == 1:
        # BUG FIX: cv.COLOR_BGR2GRAY is a cvtColor conversion code, not an
        # imread flag, so the original call did NOT load the image as
        # grayscale. cv.IMREAD_GRAYSCALE is the correct single-channel flag,
        # matching the grayscale frames this image is compared against.
        return cv.imread(path, cv.IMREAD_GRAYSCALE)


def main() -> None:
    """
    Cut every sample video into per-game files.

    Walks the sample_video directory and runs matching() on each video,
    using the minimap template image as the in-game detector.
    """
    image_path = "../resources/standardization/sample_image"
    video_path = "../resources/standardization/sample_video"

    # The minimap template never changes, so load it once instead of
    # re-reading it from disk for every video in the loop.
    minimap_file = image_path + "/minimap.png"
    minimap_image = make_resource(minimap_file, 1)

    video_list = os.listdir(video_path)
    for video_file in video_list:
        new_video_path = video_path + "/" + video_file
        video_capture = make_resource(new_video_path, 0)

        # Strip the 4-character extension (".mp4") for the output file stem.
        matching(video_file[0:len(video_file) - 4], video_capture, video_path, minimap_image)


if __name__ == '__main__':
main()
127 changes: 127 additions & 0 deletions minimap/cognition_inGame.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,127 @@
"""
# File Name: cognition_inGame.py
# Team: standardization
# Programmer: tjswodud
# Start Date: 07/07/20
# Last Update: September 28, 2020
# Purpose: Full video of LCK will be given in this program.
# And compare frame and minimap image (template) per frame, using sift_algorithm.
# (if it success for compare, that frame is ingame, if not, that frame is not ingame.)
# Finally, this program will return edited video, except for frame that is not ingame.
"""

import cognition_pause as pause
import cv2 as cv
import numpy as np
import searching_upperleft_banner as upperleft

def match_template(video_capture: np.ndarray, template: np.ndarray, pause_image: np.ndarray, compare_images: list, video_file: str, video_path: str) -> None:
    """
    Compare captured video and template image (minimap image) with sift_algorithm
    , and then write video frame that is in_game
    Args:
        video_capture: captured video using VideoCapture in __main__
        template: a template image (minimap image) to compare with video_capture
        pause_image: template image used to detect the pause screen (center crop)
        compare_images: list of [replay, highlight, pro-view] banner templates
            checked via searching_upperleft_banner
        video_file: name of video file in string type
        video_path: path of output_video (output_video will be stored this path)
    Returns:
        None
    Raises:
        N/A
    """
    is_writing = False

    file_name = video_file.replace('.mp4', '')

    width = int(video_capture.get(cv.CAP_PROP_FRAME_WIDTH))
    height = int(video_capture.get(cv.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv.VideoWriter_fourcc(*"mp4v")
    fps = video_capture.get(cv.CAP_PROP_FPS)
    output = cv.VideoWriter((video_path + '/' + file_name + '_output' + '.mp4'), fourcc, fps, (width, height), 1)

    # Loop-invariant: the frame count never changes, so compute it once
    # instead of querying the capture on every iteration.
    total_frames = int(video_capture.get(cv.CAP_PROP_FRAME_COUNT))

    while True:
        ret, frame = video_capture.read()
        if not ret:
            break

        frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        width_end, height_end = frame_gray.shape  # 1080, 1920

        # Minimap region scales with resolution: anchored at (1620, 780) on
        # a 1920x1080 frame.
        width_start = round(780 / 1080 * width_end)  # 780
        height_start = round(1620 / 1920 * height_end)  # 1620

        frame_resize = frame_gray[width_start: width_end, height_start: height_end]
        # Fixed center crop used for pause-screen detection.
        # NOTE(review): hard-coded for 1080p input, unlike the scaled minimap
        # crop above — confirm source resolution.
        frame_resize_center = frame_gray[442: 638, 782: 1138]

        # Progress indicator, refreshed roughly once per second of video.
        if video_capture.get(cv.CAP_PROP_POS_FRAMES) % int(fps) == 0:
            current_frame = int(video_capture.get(cv.CAP_PROP_POS_FRAMES))
            percentage = (current_frame / total_frames) * 100
            print(end='\r')
            print('Processing... {}%\r'.format(round(percentage, 2)), end = '')

        # A frame is written only when it looks in-game AND every overlay
        # detector (pause / replay / highlight / pro-view) agrees.
        sift_ans = sift_algorithm(frame_resize, template)
        pause_ans = pause.sift_algorithm(frame_resize_center, pause_image)
        replay_ans = upperleft.check_algorithm(upperleft.frame_resize(frame_gray, 0), compare_images[0])
        highlight_ans = upperleft.check_algorithm(upperleft.frame_resize(frame_gray, 1), compare_images[1])
        proview_ans = upperleft.check_algorithm(upperleft.frame_resize(frame_gray, 2), compare_images[2])

        is_writing = sift_ans and pause_ans and replay_ans and highlight_ans and proview_ans

        if is_writing:
            output.write(frame)

    output.release()
    video_capture.release()
    cv.destroyAllWindows()

def sift_algorithm(frame_resize: np.ndarray, template: np.ndarray) -> bool:
    """
    Decide whether a frame is in-game by SIFT-matching it against the
    minimap template image.
    Args:
        frame_resize: each of video's frame that is resized to template image
        template: a template image (minimap image) to compare with video_capture
    Returns:
        [bool type]
        True when frame and template share more than 15 good matches
        (this frame is ingame); False otherwise (this frame is not ingame).
    Raises:
        N/A
    """
    detector = cv.xfeatures2d.SIFT_create()

    _, frame_descriptors = detector.detectAndCompute(frame_resize, None)
    _, template_descriptors = detector.detectAndCompute(template, None)

    # Two nearest neighbours per descriptor so Lowe's ratio test applies.
    matcher = cv.BFMatcher()
    candidate_pairs = matcher.knnMatch(frame_descriptors, template_descriptors, 2)

    # Keep a match only when it is clearly better than the runner-up.
    good_matches = [
        [best]
        for best, second in candidate_pairs
        if best.distance < second.distance * 0.75
    ]

    # plot_image = cv.drawMatchesKnn(frame_resize, keypoint_1, template, keypoint_2, success_match, None, flags=2)
    # plot.imshow(plot_image)
    # plot.show()

    return len(good_matches) > 15

def create_capture(path: str):
    """Open the video at *path* and return the cv.VideoCapture handle."""
    return cv.VideoCapture(path)
52 changes: 52 additions & 0 deletions minimap/cognition_pause.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
"""
# File Name: cognition_pause.py
# Team: standardization
# Programmer: tjswodud
# Start Date: 07/07/20
# Last Update: September 28, 2020
# Purpose: This file will be used module of cognition_inGame.py.
# compare every frame and pause_image, and then cut the frame that has a pause_image.
"""

import cv2 as cv
# import matplotlib.pyplot as plot
import numpy as np


def sift_algorithm(frame_resize: np.ndarray, pause_image: np.ndarray) -> bool:
    """
    Compare video's frame and pause image, using sift_algorithm

    Args:
        frame_resize: each of video's frame that is resized to template image
        pause_image: a image (pause_image) to compare with video_capture

    Returns:
        [bool type]
        if frame_resize and pause_image match more than 15 points, return False
        (this frame isn't ingame.)
        if not, return True (this frame is ingame. -> match with template_image)

    Raises:
        N/A
    """
    detector = cv.xfeatures2d.SIFT_create()

    _, frame_descriptors = detector.detectAndCompute(frame_resize, None)
    _, pause_descriptors = detector.detectAndCompute(pause_image, None)

    # Two nearest neighbours per descriptor so Lowe's ratio test applies.
    matcher = cv.BFMatcher()
    candidate_pairs = matcher.knnMatch(frame_descriptors, pause_descriptors, 2)

    # Count matches clearly better than their runner-up (ratio 0.75).
    good_count = sum(
        1
        for best, second in candidate_pairs
        if best.distance < second.distance * 0.75
    )

    # plot_image = cv.drawMatchesKnn(frame_resize, keypoint_1, pause_image, keypoint_2, success_match, None, flags=2)
    # plot.imshow(plot_image)
    # plot.show()

    # Many matches mean the pause overlay is on screen -> NOT in-game,
    # hence the inverted return relative to the minimap check.
    return good_count <= 15
Loading