forked from chris0piper/DotaClipzVideoCreator
-
Notifications
You must be signed in to change notification settings - Fork 0
/
commander.py
172 lines (135 loc) · 6 KB
/
commander.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
from video_analyser import VideoAnalyser
from youtube import Youtube
from video_editer import VideoEditer
from S3 import S3
import re
import cv2
import numpy as np
import requests
import urllib.request
from difflib import get_close_matches
import os
import time
import traceback
# Used to express sleep intervals in minutes.
SECONDS_PER_MINUTE = 60
# when creating each clip, the videos can occasionally become corrupted. One solution is just
# to recreate that specific clip, however if it fails x number of times, we just exit
MAX_VIDEO_EDIT_RETRY_COUNT = 5
# location of where the raw video comes from (YouTube channel identifier passed
# to Youtube.get30RecentVideos)
DOWNLOAD_LOCATION = "SpotnetDota2"
# Service/helper singletons used throughout the polling loop below.
yt = Youtube()
editor = VideoEditer()
dotaAnalyser = VideoAnalyser()
s3 = S3()
# grab the team names and logo URLs (presumably scraped from liquipedia -- see
# the get_close_matches usage below), plus the 30 most recent games videos
teamNames, logoUrls = yt.getAllTeamNamesAndLogos()
videoIds, videoTitles = yt.get30RecentVideos(DOWNLOAD_LOCATION)
# Videos already handled (or permanently skipped) this process lifetime; kept
# in memory only, so a restart reprocesses everything.
processedVideos = set()
# for videoId in videoIds:
#     processedVideos.add(videoId)
def cleanUpEditingDirectories():
    # Remove every leftover file from the intermediate editing directories so
    # the next video starts from a clean slate. Directories themselves are kept.
    for directory in ('videoclips', 'rawVideos'):
        for entry in os.listdir(directory):
            os.remove(directory + '/' + entry)
while(True):
    # Grab the 30 most recent games videos each polling cycle.
    videoIds, videoTitles = yt.get30RecentVideos(DOWNLOAD_LOCATION)
    # Walk the list oldest-first. NOTE(review): this range stops at 1, so
    # index 0 (the most recent video) is never processed -- confirm whether
    # that is intentional (e.g. skipping a possibly still-uploading video).
    for i in range(len(videoTitles) - 1, 0, -1):
        # Only create a compilation if this is a new video.
        watchId = videoIds[i]
        if(watchId in processedVideos):
            continue
        title = videoTitles[i]
        print('Proccessing: ' + title)
        try:
            # Only act on full length videos; highlight reels are skipped.
            if('highlight' in title.lower()):
                processedVideos.add(watchId)
                continue
            # Remove the Spotnet branding from the title.
            title = re.sub(r'\|?\s?Spotnet Dota 2\s?\|?', '', title)
            # Grab the game number from the title (e.g. "Game 3").
            matches = re.findall(r'Game \d+', title)
            gameNumber = matches[0].split()[1]
            title = re.sub(r'Game \d+', '', title)
            # Grab the number of games in the series (e.g. "Bo5").
            match = re.search(r'Bo(\d)', title)
            numberOfGamesInSeries = match.group(1)
            title = re.sub(r'Bo\d+', '', title)
            # Grab the two team names from the "<team1> vs <team2>" section.
            team1 = None
            team2 = None
            titleSections = title.split(' | ')
            for section in titleSections:
                if bool(re.search(r'\bvs\.?\s?\b', section, re.IGNORECASE)):
                    teams = re.split(r'\bvs\.?\s?\b', section, flags=re.IGNORECASE)
                    team1 = teams[0]
                    team2 = teams[1]
            # BUGFIX: previously team1/team2 could be left undefined here,
            # producing an uncaught NameError below (outside the try). Raise
            # inside the try so the handler marks the video processed.
            if(team1 is None or team2 is None):
                raise ValueError('could not find team names in title: ' + title)
        except Exception as e:
            traceback.print_exc()
            processedVideos.add(watchId)
            continue
        baseFileName = team1 + ' vs ' + team2 + ' Game ' + str(gameNumber) + ' of ' + str(numberOfGamesInSeries)
        # Predict which team name this title is referring to based on the names
        # scraped from liquipedia; these are used to get the logo URL.
        # cutoff=0.0 guarantees a (possibly poor) match is always returned.
        predictedTeam1Name = get_close_matches(team1, teamNames, cutoff = 0.00)[0]
        predictedTeam2Name = get_close_matches(team2, teamNames, cutoff = 0.00)[0]
        team1Url = 'https://liquipedia.net/' + logoUrls[teamNames.index(predictedTeam1Name)]
        team2Url = 'https://liquipedia.net/' + logoUrls[teamNames.index(predictedTeam2Name)]
        # Create the thumbnail and upload it to s3.
        thumbnailPath = 'thumbnailCreation/' + baseFileName + '.jpeg'
        editor.createThumbnail(team1Url, team2Url, gameNumber, thumbnailPath)
        s3.upload_file(thumbnailPath, 'pending-youtube-upload', 'dotaClipzThumbnails/{}.jpeg'.format(baseFileName))
        # Download the full video and begin video processing.
        videoUrl = 'https://www.youtube.com/watch?v={}'.format(watchId)
        yt.downloadVideo1080p(videoUrl)
        # Analyze the full video for timestamps of good clips + kill count.
        clips, killsPerClip = dotaAnalyser.findClips('rawVideos/video.mp4')
        if(len(clips) == 0):
            # BUGFIX: mark the video as processed so it is not re-downloaded
            # and re-analyzed on every polling cycle.
            processedVideos.add(watchId)
            continue
        # Create the video clips based on the timestamps, using a max retry
        # budget shared across the whole video.
        fileIdentifier = ''
        retryNumber = 0
        failedCreatingClips = False
        # BUGFIX: the original used "for i in range(len(clips))" with "i -= 1"
        # on failure, which is a no-op in Python (the for statement reassigns
        # i each iteration), so a failed clip was silently skipped instead of
        # retried -- and it also shadowed the outer loop variable i. A while
        # loop actually re-attempts the same clip.
        clipIndex = 0
        while(clipIndex < len(clips)):
            # If we hit the max retry count, clean up all clips and move on
            # to the next video.
            if(retryNumber >= MAX_VIDEO_EDIT_RETRY_COUNT):
                print("\nClip {} failed to be created!\n".format(fileIdentifier))
                cleanUpEditingDirectories()
                failedCreatingClips = True
                break
            timestamp = clips[clipIndex]
            try:
                editor.createClip(timestamp, fileIdentifier)
                fileIdentifier += '1'
                clipIndex += 1
            except Exception as e:
                print(e)
                retryNumber += 1
        # Combine all these subclips into a final video.
        if(not failedCreatingClips):
            try:
                finalVideoName = baseFileName + '.mp4'
                # Delete the file if it already exists from a previous run.
                try:
                    os.remove('finishedVideos/{}'.format(finalVideoName))
                except OSError:
                    pass
                # Combine all the individual clips together, then delete them.
                editor.combineClips(finalVideoName)
                # Upload this final clip to s3 to be processed by another host.
                s3.upload_file('finishedVideos/{}'.format(finalVideoName), 'pending-youtube-upload', 'dotaClipz/{}'.format(finalVideoName))
                try:
                    os.remove('finishedVideos/{}'.format(finalVideoName))
                except OSError:
                    pass
                cleanUpEditingDirectories()
            except Exception as e:
                print(e)
                cleanUpEditingDirectories()
                # Deliberately NOT marked processed: the combine/upload will
                # be retried on the next polling cycle.
                continue
        processedVideos.add(watchId)
    # Sleep 10 minutes between polling cycles.
    time.sleep(10 * SECONDS_PER_MINUTE)