Skip to content
This repository has been archived by the owner on Oct 16, 2020. It is now read-only.

Commit

Permalink
Merge branch 'cdglitch-master'
Browse files Browse the repository at this point in the history
  • Loading branch information
markubiak committed Dec 28, 2017
2 parents de122e7 + 3f8a61d commit a2e6ee6
Show file tree
Hide file tree
Showing 4 changed files with 32 additions and 27 deletions.
8 changes: 8 additions & 0 deletions wpreddit/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,7 @@ def parse_config():
global startupattempts
global savedir
global randomsub
global lottery
if config.get('Title Overlay', 'titlegravity', fallback=None) is not None:
print("You are using an old (pre v3) configuration file. Please delete your config file at " + confdir +
" and let the program create a new one.")
Expand All @@ -102,6 +103,7 @@ def parse_config():
minheight = config.getint('Options', 'minheight', fallback=1080)
resize = config.getboolean('Options', 'resize', fallback=True)
randomsub = config.getboolean('Options', 'random', fallback=False)
lottery = config.getboolean('Options', 'lottery', fallback=False)
setcmd = config.get('SetCommand', 'setcommand', fallback='')
settitle = config.getboolean('Title Overlay', 'settitle', fallback=False)
titlesize = config.getint('Title Overlay', 'titlesize', fallback=24)
Expand Down Expand Up @@ -145,6 +147,8 @@ def parse_args():
help="will pick a random subreddit from the ones provided instead of turning them into a multireddit",
action="store_true")
parser.add_argument("--settitle", help="write title over the image", action="store_true")
parser.add_argument("--lottery", help="select a random image from a subreddit instead of the newest", action="store_true")

args = parser.parse_args()
global subs
global verbose
Expand All @@ -156,6 +160,7 @@ def parse_args():
global settitle
global randomsub
global blacklistcurrent
global lottery
if not args.subreddits == []:
subs = args.subreddits
verbose = args.verbose
Expand All @@ -171,10 +176,13 @@ def parse_args():
randomsub = True
if args.blacklist:
blacklistcurrent = True
if args.lottery:
lottery = True


# in - string - messages to print
# takes a string and will print it as output if verbose
def log(info):
    if not verbose:
        return
    print(info)

15 changes: 8 additions & 7 deletions wpreddit/download.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,10 @@ def download_image(url, title):
if config.settitle:
img = set_image_title(img, title)
if config.opsys == "Windows":
img.save(config.walldir + '\\wallpaper.bmp', "BMP")
img.convert('RGB').save(config.walldir + '\\wallpaper.bmp', "BMP")
else:
img.save(config.walldir + '/wallpaper.jpg', "JPEG")
img.convert('RGB').save(config.walldir + '/wallpaper.jpg', "JPEG")

except IOError:
print("Error saving image!")
sys.exit(1)
Expand Down Expand Up @@ -57,19 +58,19 @@ def set_image_title(img, title):
return img


# in - [string, string, string] - url, title, and permalink
# saves the url of the image to ~/.wallpaper/url.txt, the title of the image to ~/.wallpaper/title.txt,
# and the permalink to ~/.wallpaper/permalink.txt just for reference
def save_info(link):
    # Reddit escapes the unicode in json, so when the json is downloaded, the info has to be manually re-encoded
    # and have the unicode characters reprocessed
    # title = title.encode('utf-8').decode('unicode-escape')
    with open(config.walldir + '/url.txt', 'w') as urlinfo:
        urlinfo.write(link[0])
    with open(config.walldir + '/title.txt', 'w') as titleinfo:
        titleinfo.write(remove_tags(link[1]))
    with open(config.walldir + '/permalink.txt', 'w') as linkinfo:
        linkinfo.write(link[2])


# in - string - title of the picture
Expand Down
14 changes: 6 additions & 8 deletions wpreddit/main.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import os
import random
import sys
from pkg_resources import resource_string
from subprocess import check_call, CalledProcessError
Expand Down Expand Up @@ -37,14 +38,11 @@ def run():
print("ERROR: You do not appear to be connected to Reddit. Exiting")
sys.exit(1)
links = reddit.get_links()
titles = links[1]
permalinks = links[2]
valid = reddit.choose_valid(links[0])
valid_url = valid[0]
title = titles[valid[1]]
permalink = permalinks[valid[1]]
download.download_image(valid_url, title)
download.save_info(valid_url, title, permalink)
if (config.lottery == True):
random.shuffle(links)
valid = reddit.choose_valid(links)
download.download_image(valid[0], valid[1])
download.save_info(valid)
wallpaper.set_wallpaper()
external_script()
except KeyboardInterrupt:
Expand Down
22 changes: 10 additions & 12 deletions wpreddit/reddit.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@


# in - string[] - list of subreddits to get links from
# out - string[], string[], string[] - a list of links from the subreddits and their respective titles and permalinks
# out - [string, string, string][] - a list of links from the subreddits and their respective titles and permalinks
# takes in subreddits, converts them to a reddit json url, and then picks out urls and their titles
def get_links():
print("searching for valid images...")
Expand All @@ -35,26 +35,24 @@ def get_links():
sys.exit(0)
response.close()
links = []
titles = []
permalinks = []
for i in data["data"]["children"]:
links.append(i["data"]["url"])
titles.append(i["data"]["title"])
permalinks.append("http://reddit.com" + i["data"]["permalink"])
return links, titles, permalinks
links.append([i["data"]["url"],
i["data"]["title"],
"http://reddit.com" + i["data"]["permalink"]])
return links


# in - string[] - list of links to check
# out - string, int - first link to match all criteria and its index (for matching it with a title)
# in - [string, string, string][] - list of links to check
# out - [string, string, string] - first link to match all criteria with title and permalink
# takes in a list of links and attempts to find the first one that is a direct image link,
# is within the proper dimensions, and is not blacklisted
def choose_valid(links):
if len(links) == 0:
print("No links were returned from any of those subreddits. Are they valid?")
sys.exit(1)
for i, origlink in enumerate(links):
config.log("checking link # {0}: {1}".format(i, origlink))
link = origlink
link = origlink[0]
config.log("checking link # {0}: {1}".format(i, link))
if not (link[-4:] == '.png' or link[-4:] == '.jpg' or link[-5:] == '.jpeg'):
if re.search('(imgur\.com)(?!/a/)', link):
link = link.replace("/gallery", "")
Expand All @@ -74,7 +72,7 @@ def check_same_url(link):
return True

if config.force_dl or not (os.path.isfile(config.walldir + '/url.txt')) or check_same_url(link):
return link, i
return [link, origlink[1], origlink[2]]
print("No valid links were found from any of those subreddits. Try increasing the maxlink parameter.")
sys.exit(0)

Expand Down

0 comments on commit a2e6ee6

Please sign in to comment.