Skip to content
This repository has been archived by the owner on Oct 16, 2020. It is now read-only.

Commit

Permalink
revised core data structures for better long-term maintenance and to …
Browse files Browse the repository at this point in the history
…support lottery flag
  • Loading branch information
markubiak committed Dec 28, 2017
1 parent 6096034 commit 3f8a61d
Show file tree
Hide file tree
Showing 3 changed files with 23 additions and 38 deletions.
10 changes: 5 additions & 5 deletions wpreddit/download.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,19 +58,19 @@ def set_image_title(img, title):
return img


# in - [string, string, string] - url, title, and permalink
# saves the url of the image to ~/.wallpaper/url.txt, the title of the image to ~/.wallpaper/title.txt,
# and the permalink to ~/.wallpaper/permalink.txt just for reference
def save_info(link):
    # link is a [url, title, permalink] triple (same shape as the entries
    # produced by reddit.get_links())
    # NOTE(review): reddit escapes unicode in its json; a manual re-encode step
    # (title.encode('utf-8').decode('unicode-escape')) was disabled here —
    # confirm titles with escaped unicode still render correctly
    with open(config.walldir + '/url.txt', 'w') as urlinfo:
        urlinfo.write(link[0])
    with open(config.walldir + '/title.txt', 'w') as titleinfo:
        # strip the [tag]/(tag) annotations from the title before saving
        titleinfo.write(remove_tags(link[1]))
    with open(config.walldir + '/permalink.txt', 'w') as linkinfo:
        linkinfo.write(link[2])


# in - string - title of the picture
Expand Down
14 changes: 6 additions & 8 deletions wpreddit/main.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import os
import random
import sys
from pkg_resources import resource_string
from subprocess import check_call, CalledProcessError
Expand Down Expand Up @@ -37,14 +38,11 @@ def run():
print("ERROR: You do not appear to be connected to Reddit. Exiting")
sys.exit(1)
links = reddit.get_links()
titles = links[1]
permalinks = links[2]
valid = reddit.choose_valid(links[0])
valid_url = valid[0]
title = titles[valid[1]]
permalink = permalinks[valid[1]]
download.download_image(valid_url, title)
download.save_info(valid_url, title, permalink)
if (config.lottery == True):
random.shuffle(links)
valid = reddit.choose_valid(links)
download.download_image(valid[0], valid[1])
download.save_info(valid)
wallpaper.set_wallpaper()
external_script()
except KeyboardInterrupt:
Expand Down
37 changes: 12 additions & 25 deletions wpreddit/reddit.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@


# in - string[] - list of subreddits to get links from
# out - string[], string[], string[] - a list of links from the subreddits and their respective titles and permalinks
# out - [string, string, string][] - a list of links from the subreddits and their respective titles and permalinks
# takes in subreddits, converts them to a reddit json url, and then picks out urls and their titles
def get_links():
print("searching for valid images...")
Expand All @@ -35,31 +35,24 @@ def get_links():
sys.exit(0)
response.close()
links = []
titles = []
permalinks = []
for i in data["data"]["children"]:
links.append(i["data"]["url"])
titles.append(i["data"]["title"])
permalinks.append("http://reddit.com" + i["data"]["permalink"])
return links, titles, permalinks
links.append([i["data"]["url"],
i["data"]["title"],
"http://reddit.com" + i["data"]["permalink"]])
return links


# in - string[] - list of links to check
# out - string, int - first link to match all criteria and its index (for matching it with a title)
# in - [string, string, string][] - list of links to check
# out - [string, string, string] - first link to match all criteria with title and permalink
# takes in a list of links and attempts to find the first one that is a direct image link,
# is within the proper dimensions, and is not blacklisted
def choose_valid(links):
if len(links) == 0:
print("No links were returned from any of those subreddits. Are they valid?")
sys.exit(1)

if (config.lottery == True):
random.shuffle(links)

for i, origlink in enumerate(links):
origlink = links[i]
config.log("checking link # {0}: {1}".format(i, origlink))
link = origlink
link = origlink[0]
config.log("checking link # {0}: {1}".format(i, link))
if not (link[-4:] == '.png' or link[-4:] == '.jpg' or link[-5:] == '.jpeg'):
if re.search('(imgur\.com)(?!/a/)', link):
link = link.replace("/gallery", "")
Expand All @@ -73,18 +66,13 @@ def check_same_url(link):
with open(config.walldir + '/url.txt', 'r') as f:
currlink = f.read()
if currlink == link:
if (config.lottery == False):
print("current wallpaper is the most recent, will not re-download the same wallpaper.")
sys.exit(0)
else:
return False
print("current wallpaper is the most recent, will not re-download the same wallpaper.")
sys.exit(0)
else:
return True
return False

if config.force_dl or not (os.path.isfile(config.walldir + '/url.txt')) or check_same_url(link):
return link, i

return [link, origlink[1], origlink[2]]
print("No valid links were found from any of those subreddits. Try increasing the maxlink parameter.")
sys.exit(0)

Expand Down Expand Up @@ -112,7 +100,6 @@ def check_dimensions(url):
# in: string[] - list of subreddits
# out: the name of a random subreddit
# will pick a random sub from a list of subreddits
def pick_random(subreddits):
    # random.choice is the idiomatic equivalent of
    # subreddits[random.randint(0, len(subreddits) - 1)]; no explicit
    # random.seed() needed — the module seeds itself on first use.
    # (raises IndexError on an empty list, where the randint form raised
    # ValueError — both are programming errors, not caught by callers here)
    return random.choice(subreddits)

Expand Down

0 comments on commit 3f8a61d

Please sign in to comment.