diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 06283ebf4..0c095b28b 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -1,15 +1,15 @@ -* Ripme version: -* Java version: -* Operating system: - -* Exact URL you were trying to rip when the problem occurred: -* Please include any additional information about how to reproduce the problem: - -## Expected Behavior - -Detail the expected behavior here. - -## Actual Behavior - -Detail the actual (incorrect) behavior here. You can post log snippets or attach log files to your issue report. +* Ripme version: +* Java version: +* Operating system: + +* Exact URL you were trying to rip when the problem occurred: +* Please include any additional information about how to reproduce the problem: + +## Expected Behavior + +Detail the expected behavior here. + +## Actual Behavior + +Detail the actual (incorrect) behavior here. You can post log snippets or attach log files to your issue report. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 8810800c7..56d0dd294 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,27 +1,27 @@ -# Category - -This change is exactly one of the following (please change `[ ]` to `[x]`) to indicate which: -* [ ] a bug fix (Fix #...) -* [ ] a new Ripper -* [ ] a refactoring -* [ ] a style change/fix -* [ ] a new feature - - -# Description - -Please add details about your change here. - - -# Testing - -Required verification: -* [ ] I've verified that there are no regressions in `mvn test` (there are no new failures or errors). -* [ ] I've verified that this change works as intended. - * [ ] Downloads all relevant content. - * [ ] Downloads content from multiple pages (as necessary or appropriate). - * [ ] Saves content at reasonable file names (e.g. page titles or content IDs) to help easily browse downloaded content. 
-* [ ] I've verified that this change did not break existing functionality (especially in the Ripper I modified). - -Optional but recommended: -* [ ] I've added a unit test to cover my change. +# Category + +This change is exactly one of the following (please change `[ ]` to `[x]`) to indicate which: +* [ ] a bug fix (Fix #...) +* [ ] a new Ripper +* [ ] a refactoring +* [ ] a style change/fix +* [ ] a new feature + + +# Description + +Please add details about your change here. + + +# Testing + +Required verification: +* [ ] I've verified that there are no regressions in `mvn test` (there are no new failures or errors). +* [ ] I've verified that this change works as intended. + * [ ] Downloads all relevant content. + * [ ] Downloads content from multiple pages (as necessary or appropriate). + * [ ] Saves content at reasonable file names (e.g. page titles or content IDs) to help easily browse downloaded content. +* [ ] I've verified that this change did not break existing functionality (especially in the Ripper I modified). + +Optional but recommended: +* [ ] I've added a unit test to cover my change. 
diff --git a/.github/workflows/gradle.yml b/.github/workflows/gradle.yml new file mode 100644 index 000000000..d88a5e969 --- /dev/null +++ b/.github/workflows/gradle.yml @@ -0,0 +1,65 @@ +name: CI + release + +on: + pull_request: + push: + branches: + - '**' + tags: + - '!**' + +jobs: + build: + + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, windows-latest, macOS-latest] + java: [22] + include: # test old java on one os only, upload from ubuntu java-17 + - os: ubuntu-latest + java: 21 + upload: true + + steps: + + - uses: actions/checkout@v1 + + - name: Set environment CI_ variables + id: ci-env + uses: FranzDiebold/github-env-vars-action@v2 + + - name: Set up java + uses: actions/setup-java@v4.2.1 + with: + java-version: ${{ matrix.java }} + distribution: zulu + cache: gradle + + - name: Build with Gradle + run: gradle clean build -PjavacRelease=${{ matrix.java }} + + - name: SHA256 + if: matrix.upload + run: shasum -a 256 build/libs/*.jar + + - name: upload jar as asset + if: matrix.upload + uses: actions/upload-artifact@v4 + with: + name: zipped-ripme-jar + path: build/libs/*.jar + + - name: create pre-release + id: create-pre-release + if: matrix.upload + uses: "marvinpinto/action-automatic-releases@latest" + with: + repo_token: "${{ secrets.GITHUB_TOKEN }}" + automatic_release_tag: "latest-${{ env.CI_REF_NAME_SLUG }}" + prerelease: true + title: "development build ${{ env.CI_REF_NAME }}" + files: | + build/libs/*.jar + +# vim:set ts=2 sw=2 et: diff --git a/.github/workflows/maven.yml b/.github/workflows/maven.yml deleted file mode 100644 index b48244ede..000000000 --- a/.github/workflows/maven.yml +++ /dev/null @@ -1,25 +0,0 @@ -name: Java CI - -on: [push, pull_request] - -jobs: - build: - - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ubuntu-latest, windows-latest, macOS-latest] - java: [1.8] - include: # test newest java on one os only - - os: ubuntu-latest - java: 1.15 - - - steps: - - uses: actions/checkout@v1 - - name: 
Set up JDK - uses: actions/setup-java@v1 - with: - java-version: ${{ matrix.java }} - - name: Build with Maven - run: mvn package --file pom.xml diff --git a/.gitignore b/.gitignore index e7813bc7f..fe1e80c6e 100644 --- a/.gitignore +++ b/.gitignore @@ -80,6 +80,12 @@ buildNumber.properties # Avoid ignoring Maven wrapper jar file (.jar files are usually ignored) !/.mvn/wrapper/maven-wrapper.jar +### gradle ### +/.gradle +/build +# Avoid ignoring gradle wrapper jar file (.jar files are usually ignored) +!/gradle/wrapper/gradle-wrapper.jar + ### Windows ### # Windows thumbnail cache files Thumbs.db @@ -105,6 +111,11 @@ $RECYCLE.BIN/ .vscode .idea .project +local.properties + +### Build files +.gradle/ +build/ ### Ripme ### ripme.log @@ -112,7 +123,6 @@ rips/ .history ripme.jar.update *.swp -*.properties !LabelsBundle*.properties history.json *.iml diff --git a/.project b/.project deleted file mode 100644 index 894074570..000000000 --- a/.project +++ /dev/null @@ -1,23 +0,0 @@ - - - ripme - - - - - - org.eclipse.jdt.core.javabuilder - - - - - org.eclipse.m2e.core.maven2Builder - - - - - - org.eclipse.jdt.core.javanature - org.eclipse.m2e.core.maven2Nature - - diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 0fd1b17f5..000000000 --- a/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: java - -matrix: - include: - - jdk: openjdk9 - before_install: - - rm "${JAVA_HOME}/lib/security/cacerts" - - ln -s /etc/ssl/certs/java/cacerts "${JAVA_HOME}/lib/security/cacerts" - - jdk: openjdk8 - -after_success: - - mvn clean test jacoco:report coveralls:report diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index e26479b65..000000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "files.exclude": { - "target/**": true, - "**/.git": true, - "**/.DS_Store": true, - "**/*.class": true, - "**/rips/**": true - }, - "java.configuration.updateBuildConfiguration": "automatic" -} diff --git a/README.md b/README.md 
index be7739873..f6f0868d1 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,11 @@ -# RipMe [![Licensed under the MIT License](https://img.shields.io/badge/License-MIT-blue.svg)](https://github.com/RipMeApp/ripme/blob/master/LICENSE.txt) [![Join the chat at https://gitter.im/RipMeApp/Lobby](https://badges.gitter.im/RipMeApp/Lobby.svg)](https://gitter.im/RipMeApp/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Subreddit](https://img.shields.io/badge/discuss-on%20reddit-blue.svg)](https://www.reddit.com/r/ripme/) - -[![Build Status](https://travis-ci.org/RipMeApp/ripme.svg?branch=master)](https://travis-ci.org/RipMeApp/ripme) +# RipMe +[![Licensed under the MIT License](https://img.shields.io/badge/License-MIT-blue.svg)](/LICENSE.txt) +[![Join the chat at https://gitter.im/RipMeApp/Lobby](https://badges.gitter.im/RipMeApp/Lobby.svg)](https://gitter.im/RipMeApp/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Subreddit](https://img.shields.io/badge/discuss-on%20reddit-blue.svg)](https://www.reddit.com/r/ripme/) +![alt Badge Status](https://github.com/ripmeapp2/ripme/actions/workflows/gradle.yml/badge.svg) [![Coverage Status](https://coveralls.io/repos/github/RipMeApp/ripme/badge.svg?branch=master)](https://coveralls.io/github/RipMeApp/ripme?branch=master) -# Contribute - -RipMe is maintained with ♥️ and in our limited free time by **[@MetaPrime](https://github.com/metaprime)**, **[@cyian-1756](https://github.com/cyian-1756)** and **[@kevin51jiang](https://github.com/kevin51jiang)**. If you'd like to contribute but aren't good with code, help keep us happy with a small contribution! +RipMe is maintained with ♥️ and in our limited free time by **[@MetaPrime](https://github.com/metaprime)**, **[@cyian-1756](https://github.com/cyian-1756)** and **[@kevin51jiang](https://github.com/kevin51jiang)**. If you'd like to contribute but aren't good with code, help keep us happy with a small contribution! 
Chat on [gitter](https://gitter.im/RipMeApp/Lobby). [![Tip with PayPal](https://img.shields.io/badge/PayPal-Buy_us...-lightgrey.svg)](https://www.paypal.me/ripmeapp) [![Tip with PayPal](https://img.shields.io/badge/coffee-%245-green.svg)](https://www.paypal.com/paypalme/ripmeapp/send?amount=5.00¤cyCode=USD&locale.x=en_US&country.x=US) @@ -16,17 +16,20 @@ RipMe is maintained with ♥️ and in our limited free time by **[@MetaPrime](h # About -RipMe is an album ripper for various websites. It is a cross-platform tool that runs on your computer, and requires Java 8. RipMe has been tested and confirmed working on Windows, Linux and MacOS. +RipMe is an album ripper for various websites. It is a cross-platform tool that runs on your computer, and +requires Java 17. RipMe has been tested and confirmed working on Windows, Linux and MacOS. ![Screenshot](https://i.imgur.com/UCQNjeg.png) ## Downloads -Download `ripme.jar` from the [latest release](https://github.com/ripmeapp/ripme/releases). - -**Note: If you're currently using version 1.2.x, 1.3.x or 1.7.49, you will not automatically get updates to the newest versions. We recommend downloading the latest version from the link above.** +Download `ripme.jar` from the [latest release](https://github.com/ripmeapp2/ripme/releases). For information about running the `.jar` file, see +[the How To Run wiki](https://github.com/ripmeapp/ripme/wiki/How-To-Run-RipMe). -For information about running the `.jar` file, see [the How To Run wiki](https://github.com/ripmeapp/ripme/wiki/How-To-Run-RipMe). +The version number like ripme-1.7.94-17-2167aa34-feature_auto_release.jar contains a release number (1.7.94), given by +a person, the number of commits since this version (17). The commit SHA (2167aa34) uniquely references the +source code ripme was built from. If it is not built from the main branch, the branch name (feature/auto-release) is +given. 
## Installation @@ -37,7 +40,7 @@ brew install --cask ripme && xattr -d com.apple.quarantine /Applications/ripme.j ## Changelog -[Changelog](https://github.com/ripmeapp/ripme/blob/master/ripme.json) **(ripme.json)** +[Changelog](/ripme.json) **(ripme.json)** # Features @@ -77,40 +80,49 @@ If you're a developer, you can add your own Ripper by following the wiki guide: # Compiling & Building -The project uses [Gradle](https://gradle.org) or [Maven](http://maven.apache.org/). -Therefor both commands are given. To build the .jar file, navigate to the root -project directory and run: +The project uses [Gradle](https://gradle.org). To build the .jar file, +navigate to the root project directory and run at least the test you +change, e.g. Xhamster. test execution can also excluded completely: ```bash -mvn clean compile assembly:single -mvn -B package assembly:single -Dmaven.test.skip=true -``` -```bash -./gradlew clean build +./gradlew clean build testAll --tests XhamsterRipperTest.testXhamster2Album ./gradlew clean build -x test --warning-mode all ``` -This will include all dependencies in the JAR. One can skip executing the tests -as well. +The generated JAR (java archive) in build/libs will include all +dependencies. # Running Tests -Tests can be marked as beeing slow, or flaky. Default is to run all but the flaky tests. Slow tests can be excluded to -run. slow and flaky tests can be run on its own. After building you can run tests, quoting might be necessary depending -on your shell: +Tests can be tagged as beeing slow, or flaky. The gradle build reacts to +the following combinations of tags: -```bash -mvn test -mvn test -DexcludedGroups= -Dgroups=flaky,slow -mvn test '-Dgroups=!slow' -``` +- default is to run all tests without tag. +- testAll runs all tests. +- testFlaky runs tests with tag "flaky". +- testSlow runs tests with tag "slow". +- tests can be run by test class, or single test. Use "testAll" so it does + not matter if a test is tagged or not. 
```bash ./gradlew test -./gradlew test -DexcludeTags= -DincludeTags=flaky,slow -./gradlew test '-DincludeTags=!slow' +./gradlew testAll +./gradlew testFlaky +./gradlew testSlow +./gradlew testAll --tests XhamsterRipperTest +./gradlew testAll --tests XhamsterRipperTest.testXhamster2Album ``` -Please note that some tests may fail as sites change and our rippers become out of date. -Start by building and testing a released version of RipMe -and then ensure that any changes you make do not cause more tests to break. +Please note that some tests may fail as sites change and our rippers +become out of date. Start by building and testing a released version +of RipMe and then ensure that any changes you make do not cause more +tests to break. + +# New GUI - compose-jb +As Java Swing will go away in future, a new GUI technology should be used. One of the +candidates is [Jetpack Compose for Desktop](https://github.com/JetBrains/compose-jb/). + +The library leverages the compose library for android and provides it for android, +desktop and web. The navigation library is not available for desktop, so Arkadii Ivanov +implemented +[decompose](https://proandroiddev.com/a-comprehensive-hundred-line-navigation-for-jetpack-desktop-compose-5b723c4f256e). diff --git a/build.bat b/build.bat index 7c2aa6c3b..f6bf32a62 100755 --- a/build.bat +++ b/build.bat @@ -1,2 +1 @@ -mvn clean compile assembly:single -mvn io.github.zlika:reproducible-build-maven-plugin:0.6:strip-jar \ No newline at end of file +./gradlew clean build -x test diff --git a/build.gradle.kts b/build.gradle.kts index cc164234f..e4c08af60 100644 --- a/build.gradle.kts +++ b/build.gradle.kts @@ -1,4 +1,13 @@ +// the build derives a version with the jgitver plugin out of a tag in the git history. when there is no +// git repo, the jgitver default would be 0.0.0. one can override this version with a parameter. 
also, permit +// to start the build setting the javac release parameter, no parameter means build for java-17: +// gradle clean build -PjavacRelease=21 +// gradle clean build -PcustomVersion=1.0.0-10-asdf +val customVersion = (project.findProperty("customVersion") ?: "") as String +val javacRelease = (project.findProperty("javacRelease") ?: "21") as String + plugins { + id("fr.brouillard.oss.gradle.jgitver") version "0.9.1" id("jacoco") id("java") id("maven-publish") @@ -10,36 +19,57 @@ repositories { } dependencies { - implementation("org.java-websocket:Java-WebSocket:1.5.1") - implementation("org.jsoup:jsoup:1.8.1") - implementation("org.json:json:20190722") - implementation("commons-configuration:commons-configuration:1.7") - implementation("log4j:log4j:1.2.17") - implementation("commons-cli:commons-cli:1.2") - implementation("commons-io:commons-io:1.3.2") - implementation("org.apache.httpcomponents:httpclient:4.3.6") - implementation("org.apache.httpcomponents:httpmime:4.3.3") - implementation("org.graalvm.js:js:20.1.0") - testImplementation(enforcedPlatform("org.junit:junit-bom:5.6.2")) + implementation("com.lmax:disruptor:3.4.4") + implementation("org.java-websocket:Java-WebSocket:1.5.3") + implementation("org.jsoup:jsoup:1.16.1") + implementation("org.json:json:20211205") + implementation("com.j2html:j2html:1.6.0") + implementation("commons-configuration:commons-configuration:1.10") + implementation("commons-cli:commons-cli:1.5.0") + implementation("commons-io:commons-io:2.13.0") + implementation("org.apache.httpcomponents:httpclient:4.5.14") + implementation("org.apache.httpcomponents:httpmime:4.5.14") + implementation("org.apache.logging.log4j:log4j-api:2.20.0") + implementation("org.apache.logging.log4j:log4j-core:2.20.0") + implementation("com.squareup.okhttp3:okhttp:4.12.0") + implementation("org.graalvm.js:js:22.3.2") + testImplementation(enforcedPlatform("org.junit:junit-bom:5.10.0")) testImplementation("org.junit.jupiter:junit-jupiter") - 
testImplementation("junit:junit:4.13") + testRuntimeOnly("org.junit.platform:junit-platform-launcher") } group = "com.rarchives.ripme" version = "1.7.94" description = "ripme" -java { - sourceCompatibility = JavaVersion.VERSION_1_8 - targetCompatibility = JavaVersion.VERSION_1_8 +jacoco { + toolVersion = "0.8.11" +} + +jgitver { + gitCommitIDLength = 8 + nonQualifierBranches = "main,master" + useGitCommitID = true +} + +afterEvaluate { + if (customVersion != "") { + project.version = customVersion + } +} + +tasks.compileJava { + options.release.set(Integer.parseInt(javacRelease)) } tasks.withType { duplicatesStrategy = DuplicatesStrategy.INCLUDE manifest { attributes["Main-Class"] = "com.rarchives.ripme.App" + attributes["Implementation-Version"] = archiveVersion + attributes["Multi-Release"] = "true" } - + // To add all of the dependencies otherwise a "NoClassDefFoundError" error from(sourceSets.main.get().output) @@ -59,9 +89,14 @@ publishing { tasks.withType { options.encoding = "UTF-8" + val compilerArgs = options.compilerArgs + compilerArgs.addAll(listOf("-Xlint:deprecation")) } tasks.test { + testLogging { + showStackTraces = true + } useJUnitPlatform { // gradle-6.5.1 not yet allows passing this as parameter, so exclude it excludeTags("flaky","slow") @@ -71,24 +106,44 @@ tasks.test { finalizedBy(tasks.jacocoTestReport) // report is always generated after tests run } -tasks.register("slowTests") { +tasks.register("testAll") { + useJUnitPlatform { + includeTags("any()", "none()") + } +} + +tasks.register("testFlaky") { + useJUnitPlatform { + includeTags("flaky") + } +} + +tasks.register("testSlow") { useJUnitPlatform { includeTags("slow") } } +tasks.register("testTagged") { + useJUnitPlatform { + includeTags("any()") + } +} + // make all archive tasks in the build reproducible tasks.withType().configureEach { isPreserveFileTimestamps = false isReproducibleFileOrder = true } +println("Build directory: ${file(layout.buildDirectory)}") + tasks.jacocoTestReport { 
dependsOn(tasks.test) // tests are required to run before generating the report reports { - xml.isEnabled = false - csv.isEnabled = false - html.destination = file("${buildDir}/jacocoHtml") + xml.required.set(false) + csv.required.set(false) + html.outputLocation.set(file("${file(layout.buildDirectory)}/jacocoHtml")) } } diff --git a/build.sh b/build.sh index 2f044cde4..d4dbe3b83 100755 --- a/build.sh +++ b/build.sh @@ -1,4 +1,2 @@ #!/usr/bin/env bash -mvn clean compile assembly:single -# Strip the jar of any non-reproducible metadata such as timestamps -mvn io.github.zlika:reproducible-build-maven-plugin:0.6:strip-jar \ No newline at end of file +./gradlew clean build -x test diff --git a/deploy.bat b/deploy.bat deleted file mode 100644 index 388ece447..000000000 --- a/deploy.bat +++ /dev/null @@ -1,2 +0,0 @@ -@echo off -powershell -c ".\deploy.ps1 -source (Join-Path target (Get-Item -Path .\target\* -Filter *.jar)[0].Name) -dest ripme.jar" diff --git a/deploy.ps1 b/deploy.ps1 deleted file mode 100644 index 9124c2416..000000000 --- a/deploy.ps1 +++ /dev/null @@ -1,16 +0,0 @@ -Param ( - [Parameter(Mandatory=$True)] - [string]$source, - [Parameter(Mandatory=$True)] - [string]$dest -) - -Copy-Item -Path $source -Destination $dest - -$sourceHash = (Get-FileHash $source -algorithm MD5).Hash -$destHash = (Get-FileHash $dest -algorithm MD5).Hash -if ($sourceHash -eq $destHash) { - Write-Output 'Deployed successfully.' -} else { - Write-Output 'Hash Mismatch: did you close ripme before deploying?' 
-} diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 62d4c0535..a4b76b953 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index d8442f554..df97d72b8 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,5 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.0-bin.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.10.2-bin.zip +networkTimeout=10000 +validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME -zipStorePath=wrapper/dists \ No newline at end of file +zipStorePath=wrapper/dists diff --git a/gradlew b/gradlew index fbd7c5158..f5feea6d6 100755 --- a/gradlew +++ b/gradlew @@ -1,7 +1,7 @@ -#!/usr/bin/env sh +#!/bin/sh # -# Copyright 2015 the original author or authors. +# Copyright © 2015-2021 the original authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,69 +15,104 @@ # See the License for the specific language governing permissions and # limitations under the License. # +# SPDX-License-Identifier: Apache-2.0 +# ############################################################################## -## -## Gradle start up script for UN*X -## +# +# Gradle start up script for POSIX generated by Gradle. +# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. 
If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». +# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. +# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. +# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. +# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. +# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/HEAD/platforms/jvm/plugins-application/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. +# +# You can find Gradle at https://github.com/gradle/gradle/. +# ############################################################################## # Attempt to set APP_HOME + # Resolve links: $0 may be a link -PRG="$0" -# Need this for relative symlinks. 
-while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG=`dirname "$PRG"`"/$link" - fi +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac done -SAVED="`pwd`" -cd "`dirname \"$PRG\"`/" >/dev/null -APP_HOME="`pwd -P`" -cd "$SAVED" >/dev/null -APP_NAME="Gradle" -APP_BASE_NAME=`basename "$0"` - -# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' +# This is normally unused +# shellcheck disable=SC2034 +APP_BASE_NAME=${0##*/} +# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) +APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s +' "$PWD" ) || exit # Use the maximum available, or set MAX_FD != -1 to use that value. -MAX_FD="maximum" +MAX_FD=maximum warn () { echo "$*" -} +} >&2 die () { echo echo "$*" echo exit 1 -} +} >&2 # OS specific support (must be 'true' or 'false'). 
cygwin=false msys=false darwin=false nonstop=false -case "`uname`" in - CYGWIN* ) - cygwin=true - ;; - Darwin* ) - darwin=true - ;; - MINGW* ) - msys=true - ;; - NONSTOP* ) - nonstop=true - ;; +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; esac CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar @@ -87,9 +122,9 @@ CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar if [ -n "$JAVA_HOME" ] ; then if [ -x "$JAVA_HOME/jre/sh/java" ] ; then # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" + JAVACMD=$JAVA_HOME/jre/sh/java else - JAVACMD="$JAVA_HOME/bin/java" + JAVACMD=$JAVA_HOME/bin/java fi if [ ! -x "$JAVACMD" ] ; then die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME @@ -98,88 +133,120 @@ Please set the JAVA_HOME variable in your environment to match the location of your Java installation." fi else - JAVACMD="java" - which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + JAVACMD=java + if ! command -v java >/dev/null 2>&1 + then + die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. Please set the JAVA_HOME variable in your environment to match the location of your Java installation." + fi fi # Increase the maximum file descriptors if we can. -if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then - MAX_FD_LIMIT=`ulimit -H -n` - if [ $? -eq 0 ] ; then - if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then - MAX_FD="$MAX_FD_LIMIT" - fi - ulimit -n $MAX_FD - if [ $? -ne 0 ] ; then - warn "Could not set maximum file descriptor limit: $MAX_FD" - fi - else - warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" - fi +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + # In POSIX sh, ulimit -H is undefined. 
That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac fi -# For Darwin, add options to specify how the application appears in the dock -if $darwin; then - GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" -fi +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. # For Cygwin or MSYS, switch paths to Windows format before running java -if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then - APP_HOME=`cygpath --path --mixed "$APP_HOME"` - CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` - - JAVACMD=`cygpath --unix "$JAVACMD"` - - # We build the pattern for arguments to be converted via cygpath - ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` - SEP="" - for dir in $ROOTDIRSRAW ; do - ROOTDIRS="$ROOTDIRS$SEP$dir" - SEP="|" - done - OURCYGPATTERN="(^($ROOTDIRS))" - # Add a user-defined pattern to the cygpath arguments - if [ "$GRADLE_CYGPATTERN" != "" ] ; then - OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" - fi +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + # Now convert the arguments - kludge to limit ourselves to /bin/sh - i=0 - for arg in "$@" ; do - CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` - CHECK2=`echo "$arg"|egrep -c "^-"` ### 
Determine if an option - - if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition - eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` - else - eval `echo args$i`="\"$arg\"" + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) fi - i=`expr $i + 1` + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg done - case $i in - 0) set -- ;; - 1) set -- "$args0" ;; - 2) set -- "$args0" "$args1" ;; - 3) set -- "$args0" "$args1" "$args2" ;; - 4) set -- "$args0" "$args1" "$args2" "$args3" ;; - 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; - 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; - 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; - 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; - 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; - esac fi -# Escape application args -save () { - for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done - echo " " -} -APP_ARGS=`save "$@"` -# Collect all arguments for the java command, following the shell quoting and substitution rules -eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" +# Add default JVM options here. 
You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Collect all arguments for the java command: +# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# and any embedded shellness will be escaped. +# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be +# treated as '${Hostname}' itself on the command line. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Stop when "xargs" is not available. +if ! command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" +fi + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. +# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# + +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' exec "$JAVACMD" "$@" diff --git a/gradlew.bat b/gradlew.bat index a9f778a7a..9b42019c7 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -13,8 +13,10 @@ @rem See the License for the specific language governing permissions and @rem limitations under the License. 
@rem +@rem SPDX-License-Identifier: Apache-2.0 +@rem -@if "%DEBUG%" == "" @echo off +@if "%DEBUG%"=="" @echo off @rem ########################################################################## @rem @rem Gradle startup script for Windows @@ -25,7 +27,8 @@ if "%OS%"=="Windows_NT" setlocal set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. +if "%DIRNAME%"=="" set DIRNAME=. +@rem This is normally unused set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% @@ -40,13 +43,13 @@ if defined JAVA_HOME goto findJavaFromJavaHome set JAVA_EXE=java.exe %JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto init +if %ERRORLEVEL% equ 0 goto execute -echo. -echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. +echo. 1>&2 +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 1>&2 goto fail @@ -54,31 +57,16 @@ goto fail set JAVA_HOME=%JAVA_HOME:"=% set JAVA_EXE=%JAVA_HOME%/bin/java.exe -if exist "%JAVA_EXE%" goto init +if exist "%JAVA_EXE%" goto execute -echo. -echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. +echo. 1>&2 +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 1>&2 goto fail -:init -@rem Get command-line arguments, handling Windows variants - -if not "%OS%" == "Windows_NT" goto win9xME_args - -:win9xME_args -@rem Slurp the command line arguments. 
-set CMD_LINE_ARGS= -set _SKIP=2 - -:win9xME_args_slurp -if "x%~1" == "x" goto execute - -set CMD_LINE_ARGS=%* - :execute @rem Setup the command line @@ -86,17 +74,19 @@ set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar @rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* :end @rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd +if %ERRORLEVEL% equ 0 goto mainEnd :fail rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of rem the _cmd.exe /c_ return code! -if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 -exit /b 1 +set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% :mainEnd if "%OS%"=="Windows_NT" endlocal diff --git a/java b/java deleted file mode 100644 index e69de29bb..000000000 diff --git a/patch.py b/patch.py deleted file mode 100644 index aa53755dc..000000000 --- a/patch.py +++ /dev/null @@ -1,86 +0,0 @@ -import json -import subprocess -from hashlib import sha256 - -# This script will: -# - read current version -# - increment patch version -# - update version in a few places -# - insert new line in ripme.json with message -# - build ripme -# - add the hash of the latest binary to ripme.json -# - commit all changes -message = input('message: ') - -# Strip any spaces that might've been entered before the message -message.lstrip() - - -def get_ripme_json(): - with open('ripme.json') as dataFile: - ripmeJson = json.load(dataFile) - return ripmeJson - - -def update_hash(current_hash): - ripmeJson = get_ripme_json() - with open('ripme.json', 'w') as dataFile: - ripmeJson["currentHash"] = 
current_hash - print(ripmeJson["currentHash"]) - json.dump(ripmeJson, dataFile, indent=4) - - -def update_change_list(message): - ripmeJson = get_ripme_json() - with open('ripme.json', 'w') as dataFile: - ripmeJson["changeList"].insert(0, message) - json.dump(ripmeJson, dataFile, indent=4) - - -currentVersion = get_ripme_json()["latestVersion"] - -print('Current version ' + currentVersion) - -versionFields = currentVersion.split('.') -patchCur = int(versionFields[2]) -patchNext = patchCur + 1 -majorMinor = versionFields[:2] -majorMinor.append(str(patchNext)) -nextVersion = '.'.join(majorMinor) - -print('Updating to ' + nextVersion) - -substrExpr = 's/' + currentVersion + '/' + nextVersion + '/' -subprocess.call(['sed', '-i', '-e', substrExpr, 'src/main/java/com/rarchives/ripme/ui/UpdateUtils.java']) -subprocess.call(['git', 'grep', 'DEFAULT_VERSION.*' + nextVersion, - 'src/main/java/com/rarchives/ripme/ui/UpdateUtils.java']) - -substrExpr = 's/\\\"latestVersion\\\": \\\"' + currentVersion + '\\\"/\\\"latestVersion\\\": \\\"' + \ - nextVersion + '\\\"/' -subprocess.call(['sed', '-i', '-e', substrExpr, 'ripme.json']) -subprocess.call(['git', 'grep', 'latestVersion', 'ripme.json']) - -substrExpr = 's/' + currentVersion + '/' + nextVersion + '/' -subprocess.call(['sed', '-i', '-e', substrExpr, 'pom.xml']) -subprocess.call(['git', 'grep', '' + nextVersion + '', 'pom.xml']) - -commitMessage = nextVersion + ': ' + message - -update_change_list(commitMessage) - - -print("Building ripme") -subprocess.call(["mvn", "clean", "compile", "assembly:single"]) -print("Stripping jar") -subprocess.call(["mvn", "io.github.zlika:reproducible-build-maven-plugin:0.6:strip-jar"]) -print("Hashing .jar file") -openedFile = open("./target/ripme-{}-jar-with-dependencies.jar".format(nextVersion), "rb") -readFile = openedFile.read() -file_hash = sha256(readFile).hexdigest() -print("Hash is: {}".format(file_hash)) -print("Updating hash") -update_hash(file_hash) -subprocess.call(['git', 'add', 
'-u']) -subprocess.call(['git', 'commit', '-m', commitMessage]) -subprocess.call(['git', 'tag', nextVersion]) -print("Remember to run `git push origin master` before release.py") diff --git a/pom.xml b/pom.xml deleted file mode 100644 index 0d8364e41..000000000 --- a/pom.xml +++ /dev/null @@ -1,171 +0,0 @@ - - 4.0.0 - com.rarchives.ripme - ripme - jar - 1.7.95 - ripme - http://rip.rarchives.com - - flaky - UTF-8 - - - - - org.junit - junit-bom - 5.6.2 - pom - import - - - - - - org.junit.jupiter - junit-jupiter-api - test - - - org.junit.jupiter - junit-jupiter-engine - test - - - org.junit.vintage - junit-vintage-engine - test - - - - org.jsoup - jsoup - 1.8.1 - - - org.graalvm.js - js - 20.1.0 - - - org.json - json - 20190722 - - - commons-configuration - commons-configuration - 1.7 - - - log4j - log4j - 1.2.17 - - - commons-cli - commons-cli - 1.2 - - - commons-io - commons-io - 1.3.2 - - - org.apache.httpcomponents - httpclient - 4.3.6 - - - org.apache.httpcomponents - httpmime - 4.3.3 - - - org.java-websocket - Java-WebSocket - 1.5.1 - - - - - - org.apache.maven.plugins - maven-site-plugin - 3.7.1 - - - io.github.zlika - reproducible-build-maven-plugin - 0.6 - - - maven-assembly-plugin - - - - com.rarchives.ripme.App - true - true - - - ./config - - - - jar-with-dependencies - - - - - org.apache.maven.plugins - maven-compiler-plugin - 3.1 - - 1.8 - 1.8 - - - - org.eluder.coveralls - coveralls-maven-plugin - 4.3.0 - - - - org.jacoco - jacoco-maven-plugin - 0.8.6 - - - prepare-agent - - prepare-agent - - - - - - maven-surefire-plugin - 3.0.0-M5 - - ${excludedGroups} - - - - - - - - org.apache.maven.plugins - maven-surefire-report-plugin - 3.0.0-M5 - - false - - - - - diff --git a/release.py b/release.py deleted file mode 100755 index ad099badc..000000000 --- a/release.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python3 - -import re - -import os - -import sys -from hashlib import sha256 -from github import Github -import json -import argparse - -parser = 
argparse.ArgumentParser(description="Make a new ripme release on github") -parser.add_argument("-f", "--file", help="Path to the version of ripme to release") -parser.add_argument("-t", "--token", help="Your github personal access token") -parser.add_argument("-d", "--debug", help="Run in debug mode", action="store_true") -parser.add_argument("-n", "--non-interactive", help="Do not ask for any input from the user", action="store_true") -parser.add_argument("--test", help="Perform a dry run (Do everything but upload new release)", action="store_true") -parser.add_argument("--skip-hash-check", help="Skip hash check (This should only be used for testing)", action="store_true") -args = parser.parse_args() - -try: - # This binds input to raw_input on python2, we do this because input acts like eval on python2 - input = raw_input -except NameError: - pass - - -# Make sure the file the user selected is a jar -def isJar(filename): - if debug: - print("Checking if {} is a jar file".format(filename)) - return filename.endswith("jar") - - -# Returns true if last entry to the "changeList" section of ripme.json is in the format of $number.$number.$number: and -# false if not -def isValidCommitMessage(message): - if debug: - print(r"Checking if {} matches pattern ^\d+\.\d+\.\d+:".format(message)) - pattern = re.compile(r"^\d+\.\d+\.\d+:") - return re.match(pattern, message) - - -# Checks if the update has the name ripme.jar, if not it renames the file -def checkAndRenameFile(path): - """Check if path (a string) points to a ripme.jar. 
Returns the possibly renamed file path""" - if not path.endswith("ripme.jar"): - print("Specified file is not named ripme.jar, renaming") - new_path = os.path.join(os.path.dirname(path), "ripme.jar") - os.rename(path, new_path) - return new_path - return path - - -ripmeJson = json.loads(open("ripme.json").read()) -fileToUploadPath = checkAndRenameFile(args.file) -InNoninteractiveMode = args.non_interactive -commitMessage = ripmeJson.get("changeList")[0] -releaseVersion = ripmeJson.get("latestVersion") -debug = args.debug -accessToken = args.token -repoOwner = "ripmeapp" -repoName = "ripme" - -if not os.path.isfile(fileToUploadPath): - print("[!] Error: {} does not exist".format(fileToUploadPath)) - sys.exit(1) - -if not isJar(fileToUploadPath): - print("[!] Error: {} is not a jar file!".format(fileToUploadPath)) - sys.exit(1) - -if not isValidCommitMessage(commitMessage): - print("[!] Error: {} is not a valid commit message as it does not start with a version".format(fileToUploadPath)) - sys.exit(1) - - -if not args.skip_hash_check: - if debug: - print("Reading file {}".format(fileToUploadPath)) - ripmeUpdate = open(fileToUploadPath, mode='rb').read() - - # The actual hash of the file on disk - actualHash = sha256(ripmeUpdate).hexdigest() - - # The hash that we expect the update to have - expectedHash = ripmeJson.get("currentHash") - - # Make sure that the hash of the file we're uploading matches the hash in ripme.json. These hashes not matching will - # cause ripme to refuse to install the update for all users who haven't disabled update hash checking - if expectedHash != actualHash: - print("[!] Error: expected hash of file and actual hash differ") - print("[!] Expected hash is {}".format(expectedHash)) - print("[!] 
Actual hash is {}".format(actualHash)) - sys.exit(1) -else: - print("[*] WARNING: SKIPPING HASH CHECK") -# Ask the user to review the information before we precede -# This only runs in we're in interactive mode -if not InNoninteractiveMode: - print("File path: {}".format(fileToUploadPath)) - print("Release title: {}".format(commitMessage)) - print("Repo: {}/{}".format(repoOwner, repoName)) - input("\nPlease review the information above and ensure it is correct and then press enter") - -if not args.test: - print("Accessing github using token") - g = Github(accessToken) - - print("Creating release") - release = g.get_user(repoOwner).get_repo(repoName).create_git_release(releaseVersion, commitMessage, "") - - print("Uploading file") - release.upload_asset(fileToUploadPath, "ripme.jar") -else: - print("Not uploading release being script was run with --test flag") diff --git a/ripme.json b/ripme.json index dea957c0f..c3a90ca0a 100644 --- a/ripme.json +++ b/ripme.json @@ -1,6 +1,23 @@ { - "currentHash": "008201e406f401b27248277a4188f26203bb9da0170872de900125f8a6c8b558", + "latestVersion": "2.1.10-21-c94a9543", + "currentHash": "782ffec29bd14cfde6d714fa6f76980b3fd7cf96723b1121976134a6a5057e68", "changeList": [ + "2.1.10-21-c94a9543, Imagebam, Unify colons in UI, Motherless, right click menu, rgif fixed", + "2.1.9-7-22e915df, HistoryMenuMouseListener right click menu, Imagefap retry logic for getFullSizedImage(), EightmusesRipper fixed", + "2.1.8-1-f5153de8: jpg3 add, java-21 adjustments.", + "2.1.7-29-b080faae: luciousripper fix, java-21 adjustments.", + "2.1.6-1-68189f27: erome fix.", + "2.1.5-8-ba51d7b: ripme running with java-17.", + "2.1.4-38-836a7494: fixed imagefap ripper.", + "2.1.3-15-1b83dc68: relative path now from working dir to subfolder, allowing images to be put in subfolder with same filename, sanatize reddit titles saved as files, additional logging in AbstractHTMLRipper.", + "2.1.2-23-e5438e85: caching of first page, retry sleep time, nhentai fixed", + 
"2.1.2-3-ea90b172: better sanitize filenames for windows, save config on update value. reddit, print exceptions in loops and continue.", + "2.1.1-3-536339dd: java-11+ necessary to run, work around non existing working directory.", + "2.0.4-13-03e32cb7: fix vsco, add danbooru.", + "2.0.3: Check new version against ripme2app.", + "2.0.2: Add greek translation, fixed reddit, redgif.", + "2.0.1: Fixed reddit, tujigu, xhamster, imagebam, erome; marked some tests as flaky.", + "2.0.0: Fixed Zizki, WordpressComics, Imagebam; marked some tests as flaky ", "1.7.95: Added porncomixinfo.net; Fixed ripper for HentaiNexus; move l option to before r and R; marked some tests as flaky ", "1.7.94: Added reddit gallery support; Fixed AllporncomicRipper; Fix imagefap ripper; instagramRipper, replaced Nashorn with GraalVM.js", "1.7.93: Fixed Motherless ripper; Fixed e621 ripper; Updated pt_PT translation; Implemented redgifs Ripper; added missing translation to Korean/KR; Fixed elecx ripper; Added ripper for HentaiNexus", @@ -266,6 +283,5 @@ "1.0.4: Fixed spaces-in-directory bug", "1.0.3: Added VK.com ripper", "1.0.1: Added auto-update functionality" - ], - "latestVersion": "1.7.95" -} \ No newline at end of file + ] +} diff --git a/settings.gradle.kts b/settings.gradle.kts index 25d894516..5528f49df 100644 --- a/settings.gradle.kts +++ b/settings.gradle.kts @@ -1 +1,9 @@ +pluginManagement { + repositories { + mavenLocal() + gradlePluginPortal() + // TODO: remove after new build of compose-jb is published + maven("https://maven.pkg.jetbrains.space/public/p/compose/dev") + } +} rootProject.name = "ripme" diff --git a/src/main/java/com/rarchives/ripme/App.java b/src/main/java/com/rarchives/ripme/App.java index 1952fdda0..2c37fdc95 100644 --- a/src/main/java/com/rarchives/ripme/App.java +++ b/src/main/java/com/rarchives/ripme/App.java @@ -1,13 +1,30 @@ package com.rarchives.ripme; +import com.rarchives.ripme.ripper.AbstractRipper; +import com.rarchives.ripme.ui.History; +import 
com.rarchives.ripme.ui.HistoryEntry; +import com.rarchives.ripme.ui.MainWindow; +import com.rarchives.ripme.ui.UpdateUtils; +import com.rarchives.ripme.utils.Proxy; +import com.rarchives.ripme.utils.RipUtils; +import com.rarchives.ripme.utils.Utils; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.DefaultParser; +import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; +import org.apache.commons.lang.SystemUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import javax.swing.*; import java.awt.*; -import java.io.File; -import java.io.IOException; import java.io.BufferedReader; -import java.io.FileReader; import java.io.FileNotFoundException; - +import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; import java.net.URL; import java.nio.file.Files; import java.nio.file.Path; @@ -15,25 +32,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.Date; - -import javax.swing.SwingUtilities; - -import org.apache.commons.cli.BasicParser; -import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.HelpFormatter; -import org.apache.commons.cli.Options; -import org.apache.commons.cli.ParseException; -import org.apache.commons.lang.SystemUtils; -import org.apache.log4j.Logger; - -import com.rarchives.ripme.ripper.AbstractRipper; -import com.rarchives.ripme.ui.History; -import com.rarchives.ripme.ui.HistoryEntry; -import com.rarchives.ripme.ui.MainWindow; -import com.rarchives.ripme.ui.UpdateUtils; -import com.rarchives.ripme.utils.Proxy; -import com.rarchives.ripme.utils.RipUtils; -import com.rarchives.ripme.utils.Utils; +import java.util.stream.Stream; /** * Entry point to application. 
@@ -44,7 +43,7 @@ */ public class App { - public static final Logger logger = Logger.getLogger(App.class); + public static final Logger logger = LogManager.getLogger(App.class); public static String stringToAppendToFoldername = null; private static final History HISTORY = new History(); @@ -54,11 +53,11 @@ public class App { * * @param args Array of command line arguments. */ - public static void main(String[] args) throws MalformedURLException { + public static void main(String[] args) throws IOException { CommandLine cl = getArgs(args); if (args.length > 0 && cl.hasOption('v')){ - logger.info(UpdateUtils.getThisJarVersion()); + System.out.println(UpdateUtils.getThisJarVersion()); System.exit(0); } @@ -113,7 +112,7 @@ private static void rip(URL url) throws Exception { entry.dir = ripper.getWorkingDir().getAbsolutePath(); try { entry.title = ripper.getAlbumTitle(ripper.getURL()); - } catch (MalformedURLException e) { } + } catch (MalformedURLException ignored) { } HISTORY.add(entry); } } @@ -122,7 +121,7 @@ private static void rip(URL url) throws Exception { * For dealing with command-line arguments. * @param args Array of Command-line arguments */ - private static void handleArguments(String[] args) { + private static void handleArguments(String[] args) throws IOException { CommandLine cl = getArgs(args); //Help (list commands) @@ -185,7 +184,7 @@ private static void handleArguments(String[] args) { } for (HistoryEntry entry : HISTORY.toList()) { try { - URL url = new URL(entry.url); + URL url = new URI(entry.url).toURL(); rip(url); } catch (Exception e) { logger.error("[!] Failed to rip URL " + entry.url, e); @@ -214,7 +213,7 @@ private static void handleArguments(String[] args) { if (entry.selected) { added++; try { - URL url = new URL(entry.url); + URL url = new URI(entry.url).toURL(); rip(url); } catch (Exception e) { logger.error("[!] 
Failed to rip URL " + entry.url, e); @@ -253,9 +252,9 @@ private static void handleArguments(String[] args) { //Read URLs from File if (cl.hasOption('f')) { - String filename = cl.getOptionValue('f'); + Path urlfile = Paths.get(cl.getOptionValue('f')); - try (BufferedReader br = new BufferedReader(new FileReader(filename))) { + try (BufferedReader br = Files.newBufferedReader(urlfile)) { String url; while ((url = br.readLine()) != null) { if (url.startsWith("//") || url.startsWith("#")) { @@ -288,11 +287,11 @@ private static void handleArguments(String[] args) { /** * Attempt to rip targetURL. * @param targetURL URL to rip - * @param saveConfig Whether or not you want to save the config (?) + * @param saveConfig Whether you want to save the config (?) */ private static void ripURL(String targetURL, boolean saveConfig) { try { - URL url = new URL(targetURL); + URL url = new URI(targetURL).toURL(); rip(url); saveHistory(); } catch (MalformedURLException e) { @@ -337,7 +336,7 @@ private static Options getOptions() { * @return CommandLine object containing arguments. */ private static CommandLine getArgs(String[] args) { - BasicParser parser = new BasicParser(); + var parser = new DefaultParser(); try { return parser.parse(getOptions(), args, false); } catch (ParseException e) { @@ -349,19 +348,18 @@ private static CommandLine getArgs(String[] args) { /** * Loads history from history file into memory. 
- * @see MainWindow.loadHistory */ - private static void loadHistory() { - File historyFile = new File(Utils.getConfigDir() + File.separator + "history.json"); + private static void loadHistory() throws IOException { + Path historyFile = Paths.get(Utils.getConfigDir() + "/history.json"); HISTORY.clear(); - if (historyFile.exists()) { + if (Files.exists(historyFile)) { try { - logger.info("Loading history from " + historyFile.getCanonicalPath()); - HISTORY.fromFile(historyFile.getCanonicalPath()); + logger.info("Loading history from " + historyFile); + HISTORY.fromFile(historyFile.toString()); } catch (IOException e) { logger.error("Failed to load history from file " + historyFile, e); logger.warn( - "RipMe failed to load the history file at " + historyFile.getAbsolutePath() + "\n\n" + + "RipMe failed to load the history file at " + historyFile + "\n\n" + "Error: " + e.getMessage() + "\n\n" + "Closing RipMe will automatically overwrite the contents of this file,\n" + "so you may want to back the file up before closing RipMe!"); @@ -372,16 +370,18 @@ private static void loadHistory() { if (HISTORY.toList().isEmpty()) { // Loaded from config, still no entries. 
// Guess rip history based on rip folder - String[] dirs = Utils.getWorkingDirectory().list((dir, file) -> new File(dir.getAbsolutePath() + File.separator + file).isDirectory()); - for (String dir : dirs) { - String url = RipUtils.urlFromDirectoryName(dir); + Stream stream = Files.list(Utils.getWorkingDirectory()) + .filter(Files::isDirectory); + + stream.forEach(dir -> { + String url = RipUtils.urlFromDirectoryName(dir.toString()); if (url != null) { // We found one, add it to history HistoryEntry entry = new HistoryEntry(); entry.url = url; HISTORY.add(entry); } - } + }); } } } @@ -390,7 +390,7 @@ private static void loadHistory() { * @see MainWindow.saveHistory */ private static void saveHistory() { - Path historyFile = Paths.get(Utils.getConfigDir() + File.separator + "history.json"); + Path historyFile = Paths.get(Utils.getConfigDir() + "/history.json"); try { if (!Files.exists(historyFile)) { Files.createDirectories(historyFile.getParent()); diff --git a/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java b/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java index 3e3fdb189..8c26c903b 100644 --- a/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java @@ -2,29 +2,37 @@ import java.io.File; import java.io.FileOutputStream; -import java.io.FileWriter; import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; +import java.io.UnsupportedEncodingException; +import java.net.*; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; + import org.jsoup.nodes.Document; import com.rarchives.ripme.ui.RipStatusMessage.STATUS; import com.rarchives.ripme.utils.Utils; import 
com.rarchives.ripme.ui.MainWindow; import com.rarchives.ripme.ui.RipStatusMessage; +import com.rarchives.ripme.utils.Http; /** * Simplified ripper, designed for ripping from sites by parsing HTML. */ public abstract class AbstractHTMLRipper extends AbstractRipper { - private Map itemsPending = Collections.synchronizedMap(new HashMap()); - private Map itemsCompleted = Collections.synchronizedMap(new HashMap()); - private Map itemsErrored = Collections.synchronizedMap(new HashMap()); + private final Map itemsPending = Collections.synchronizedMap(new HashMap<>()); + private final Map itemsCompleted = Collections.synchronizedMap(new HashMap<>()); + private final Map itemsErrored = Collections.synchronizedMap(new HashMap<>()); + Document cachedFirstPage; protected AbstractHTMLRipper(URL url) throws IOException { super(url); @@ -33,11 +41,21 @@ protected AbstractHTMLRipper(URL url) throws IOException { protected abstract String getDomain(); public abstract String getHost(); - protected abstract Document getFirstPage() throws IOException; - public Document getNextPage(Document doc) throws IOException { + protected Document getFirstPage() throws IOException, URISyntaxException { + return Http.url(url).get(); + } + + protected Document getCachedFirstPage() throws IOException, URISyntaxException { + if (cachedFirstPage == null) { + cachedFirstPage = getFirstPage(); + } + return cachedFirstPage; + } + + public Document getNextPage(Document doc) throws IOException, URISyntaxException { return null; } - protected abstract List getURLsFromPage(Document page); + protected abstract List getURLsFromPage(Document page) throws UnsupportedEncodingException; protected List getDescriptionsFromPage(Document doc) throws IOException { throw new IOException("getDescriptionsFromPage not implemented"); // Do I do this or make an abstract function? 
} @@ -56,7 +74,7 @@ public boolean canRip(URL url) { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { return url; } protected boolean hasDescriptionSupport() { @@ -86,12 +104,12 @@ protected boolean pageContainsAlbums(URL url) { } @Override - public void rip() throws IOException { + public void rip() throws IOException, URISyntaxException { int index = 0; int textindex = 0; LOGGER.info("Retrieving " + this.url); sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm()); - Document doc = getFirstPage(); + var doc = getCachedFirstPage(); if (hasQueueSupport() && pageContainsAlbums(this.url)) { List urls = getAlbumsToQueue(doc); @@ -104,11 +122,28 @@ public void rip() throws IOException { LOGGER.debug("Adding items from " + this.url + " to queue"); } + List doclocation = new ArrayList<>(); + + LOGGER.info("Got doc location " + doc.location()); + while (doc != null) { + + LOGGER.info("Processing a doc..."); + + // catch if we saw a doc location already, save the ones seen in a list + if (doclocation.contains(doc.location())) { + LOGGER.info("Already processed location " + doc.location() + " breaking"); + break; + } + doclocation.add(doc.location()); + if (alreadyDownloadedUrls >= Utils.getConfigInteger("history.end_rip_after_already_seen", 1000000000) && !isThisATest()) { sendUpdate(STATUS.DOWNLOAD_COMPLETE_HISTORY, "Already seen the last " + alreadyDownloadedUrls + " images ending rip"); break; } + + LOGGER.info("retrieving urls from doc"); + List imageURLs = getURLsFromPage(doc); // If hasASAPRipping() returns true then the ripper will handle downloading the files // if not it's done in the following block of code @@ -126,9 +161,9 @@ public void rip() throws IOException { for (String imageURL : imageURLs) { index += 1; - LOGGER.debug("Found image url #" + index + ": " + imageURL); + LOGGER.debug("Found image url #" + index + ": '" + imageURL + "'"); 
downloadURL(new URL(imageURL), index); - if (isStopped()) { + if (isStopped() || isThisATest()) { break; } } @@ -139,7 +174,7 @@ public void rip() throws IOException { if (!textURLs.isEmpty()) { LOGGER.debug("Found description link(s) from " + doc.location()); for (String textURL : textURLs) { - if (isStopped()) { + if (isStopped() || isThisATest()) { break; } textindex += 1; @@ -195,7 +230,7 @@ public void rip() throws IOException { */ private String fileNameFromURL(URL url) { String saveAs = url.toExternalForm(); - if (saveAs.substring(saveAs.length() - 1) == "/") { saveAs = saveAs.substring(0,saveAs.length() - 1) ;} + if (saveAs.substring(saveAs.length() - 1).equals("/")) { saveAs = saveAs.substring(0,saveAs.length() - 1) ;} saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1); if (saveAs.indexOf('?') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('?')); } if (saveAs.indexOf('#') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('#')); } @@ -250,7 +285,7 @@ private boolean saveText(URL url, String subdirectory, String text, int index, S } LOGGER.debug("Downloading " + url + "'s description to " + saveFileAs); if (!saveFileAs.getParentFile().exists()) { - LOGGER.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent())); + LOGGER.info("[+] Creating directory: " + saveFileAs.getParent()); saveFileAs.getParentFile().mkdirs(); } return true; @@ -281,22 +316,22 @@ protected boolean allowDuplicates() { } @Override - /** - * Returns total amount of files attempted. + /* + Returns total amount of files attempted. */ public int getCount() { return itemsCompleted.size() + itemsErrored.size(); } @Override - /** - * Queues multiple URLs of single images to download from a single Album URL + /* + Queues multiple URLs of single images to download from a single Album URL */ - public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) { - // Only download one file if this is a test. 
- if (super.isThisATest() && - (itemsPending.size() > 0 || itemsCompleted.size() > 0 || itemsErrored.size() > 0)) { + public boolean addURLToDownload(URL url, Path saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) { + // Only download one file if this is a test. + if (isThisATest() && (itemsCompleted.size() > 0 || itemsErrored.size() > 0)) { stop(); + itemsPending.clear(); return false; } if (!allowDuplicates() @@ -307,20 +342,24 @@ public boolean addURLToDownload(URL url, File saveAs, String referrer, Map itemsPending = Collections.synchronizedMap(new HashMap()); - private Map itemsCompleted = Collections.synchronizedMap(new HashMap()); + private Map itemsCompleted = Collections.synchronizedMap(new HashMap()); private Map itemsErrored = Collections.synchronizedMap(new HashMap()); protected AbstractJSONRipper(URL url) throws IOException { @@ -31,8 +37,8 @@ protected AbstractJSONRipper(URL url) throws IOException { @Override public abstract String getHost(); - protected abstract JSONObject getFirstPage() throws IOException; - protected JSONObject getNextPage(JSONObject doc) throws IOException { + protected abstract JSONObject getFirstPage() throws IOException, URISyntaxException; + protected JSONObject getNextPage(JSONObject doc) throws IOException, URISyntaxException { throw new IOException("getNextPage not implemented"); } protected abstract List getURLsFromJSON(JSONObject json); @@ -51,12 +57,12 @@ public boolean canRip(URL url) { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { return url; } @Override - public void rip() throws IOException { + public void rip() throws IOException, URISyntaxException { int index = 0; LOGGER.info("Retrieving " + this.url); sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm()); @@ -98,7 +104,7 @@ public void rip() throws IOException { try { sendUpdate(STATUS.LOADING_RESOURCE, "next page"); json = 
getNextPage(json); - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { LOGGER.info("Can't get next page: " + e.getMessage()); break; } @@ -140,11 +146,11 @@ public int getCount() { /** * Queues multiple URLs of single images to download from a single Album URL */ - public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) { - // Only download one file if this is a test. - if (super.isThisATest() && - (itemsPending.size() > 0 || itemsCompleted.size() > 0 || itemsErrored.size() > 0)) { + public boolean addURLToDownload(URL url, Path saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) { + // Only download one file if this is a test. + if (super.isThisATest() && (itemsCompleted.size() > 0 || itemsErrored.size() > 0)) { stop(); + itemsPending.clear(); return false; } if (!allowDuplicates() @@ -155,20 +161,24 @@ public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies, + protected abstract boolean addURLToDownload(URL url, Path saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME); /** @@ -234,9 +251,10 @@ protected abstract boolean addURLToDownload(URL url, File saveAs, String referre */ protected boolean addURLToDownload(URL url, Map options, Map cookies) { // Bit of a hack but this lets us pass a bool using a map - boolean useMIME = options.getOrDefault("getFileExtFromMIME", "false").toLowerCase().equals("true"); - return addURLToDownload(url, options.getOrDefault("prefix", ""), options.getOrDefault("subdirectory", ""), options.getOrDefault("referrer", null), - cookies, options.getOrDefault("fileName", null), options.getOrDefault("extension", null), useMIME); + boolean useMIME = options.getOrDefault("getFileExtFromMIME", "false").equalsIgnoreCase("true"); + return addURLToDownload(url, options.getOrDefault("subdirectory", ""), options.getOrDefault("referrer", null), cookies, + options.getOrDefault("prefix", ""), 
options.getOrDefault("fileName", null), options.getOrDefault("extension", null), + useMIME); } @@ -274,7 +292,7 @@ protected boolean addURLToDownload(URL url, Map options) { * True if downloaded successfully * False if failed to download */ - protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map cookies, String fileName, String extension, Boolean getFileExtFromMIME) { + protected boolean addURLToDownload(URL url, String subdirectory, String referrer, Map cookies, String prefix, String fileName, String extension, Boolean getFileExtFromMIME) { // A common bug is rippers adding urls that are just "http:". This rejects said urls if (url.toExternalForm().equals("http:") || url.toExternalForm().equals("https:")) { LOGGER.info(url.toExternalForm() + " is a invalid url amd will be changed"); @@ -285,8 +303,8 @@ protected boolean addURLToDownload(URL url, String prefix, String subdirectory, if (url.toExternalForm().contains(" ")) { // If for some reason the url with all spaces encoded as %20 is malformed print an error try { - url = new URL(url.toExternalForm().replaceAll(" ", "%20")); - } catch (MalformedURLException e) { + url = new URI(url.toExternalForm().replaceAll(" ", "%20")).toURL(); + } catch (MalformedURLException | URISyntaxException e) { LOGGER.error("Unable to remove spaces from url\nURL: " + url.toExternalForm()); e.printStackTrace(); } @@ -305,34 +323,19 @@ protected boolean addURLToDownload(URL url, String prefix, String subdirectory, LOGGER.debug("Ripper has been stopped"); return false; } - LOGGER.debug("url: " + url + ", prefix: " + prefix + ", subdirectory" + subdirectory + ", referrer: " + referrer + ", cookies: " + cookies + ", fileName: " + fileName); - String saveAs = getFileName(url, fileName, extension); - File saveFileAs; + LOGGER.debug("url: " + url + ", subdirectory" + subdirectory + ", referrer: " + referrer + ", cookies: " + cookies + ", prefix: " + prefix + ", fileName: " + fileName); + Path 
saveAs; try { - if (!subdirectory.equals("")) { - subdirectory = Utils.filesystemSafe(subdirectory); - subdirectory = File.separator + subdirectory; - } - prefix = Utils.filesystemSanitized(prefix); - String topFolderName = workingDir.getCanonicalPath(); - if (App.stringToAppendToFoldername != null) { - topFolderName = topFolderName + App.stringToAppendToFoldername; + saveAs = getFilePath(url, subdirectory, prefix, fileName, extension); + LOGGER.debug("Downloading " + url + " to " + saveAs); + if (!Files.exists(saveAs.getParent())) { + LOGGER.info("[+] Creating directory: " + saveAs.getParent()); + Files.createDirectories(saveAs.getParent()); } - saveFileAs = new File( - topFolderName - + subdirectory - + File.separator - + prefix - + saveAs); } catch (IOException e) { LOGGER.error("[!] Error creating save file path for URL '" + url + "':", e); return false; } - LOGGER.debug("Downloading " + url + " to " + saveFileAs); - if (!saveFileAs.getParentFile().exists()) { - LOGGER.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent())); - saveFileAs.getParentFile().mkdirs(); - } if (Utils.getConfigBoolean("remember.url_history", true) && !isThisATest()) { LOGGER.info("Writing " + url.toExternalForm() + " to file"); try { @@ -341,11 +344,11 @@ protected boolean addURLToDownload(URL url, String prefix, String subdirectory, LOGGER.debug("Unable to write URL history file"); } } - return addURLToDownload(url, saveFileAs, referrer, cookies, getFileExtFromMIME); + return addURLToDownload(url, saveAs, referrer, cookies, getFileExtFromMIME); } protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer, Map cookies, String fileName, String extension) { - return addURLToDownload(url, prefix, subdirectory, referrer, cookies, fileName, extension, false); + return addURLToDownload(url, subdirectory, referrer, cookies, prefix, fileName, extension, false); } protected boolean addURLToDownload(URL url, String prefix, String 
subdirectory, String referrer, Map cookies, String fileName) { @@ -384,33 +387,53 @@ protected boolean addURLToDownload(URL url, String prefix) { return addURLToDownload(url, prefix, ""); } - public static String getFileName(URL url, String fileName, String extension) { - String saveAs; - if (fileName != null) { - saveAs = fileName; - } else { - saveAs = url.toExternalForm(); - saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1); + public Path getFilePath(URL url, String subdir, String prefix, String fileName, String extension) throws IOException { + // construct the path: workingdir + subdir + prefix + filename + extension + // save into working dir + Path filepath = Paths.get(workingDir.getCanonicalPath()); + + if (null != App.stringToAppendToFoldername) + filepath = filepath.resolveSibling(filepath.getFileName() + App.stringToAppendToFoldername); + + if (null != subdir && !subdir.trim().isEmpty()) + filepath = filepath.resolve(Utils.filesystemSafe(subdir)); + + filepath = filepath.resolve(getFileName(url, prefix, fileName, extension)); + return filepath; + } + + public static String getFileName(URL url, String prefix, String fileName, String extension) { + // retrieve filename from URL if not passed + if (fileName == null || fileName.trim().isEmpty()) { + fileName = url.toExternalForm(); + fileName = fileName.substring(fileName.lastIndexOf('/')+1); + } + if (fileName.indexOf('?') >= 0) { fileName = fileName.substring(0, fileName.indexOf('?')); } + if (fileName.indexOf('#') >= 0) { fileName = fileName.substring(0, fileName.indexOf('#')); } + if (fileName.indexOf('&') >= 0) { fileName = fileName.substring(0, fileName.indexOf('&')); } + if (fileName.indexOf(':') >= 0) { fileName = fileName.substring(0, fileName.indexOf(':')); } + + // add prefix + if (prefix != null && !prefix.trim().isEmpty()) { + fileName = prefix + fileName; } - if (extension == null) { + + // retrieve extension from URL if not passed, no extension if nothing found + if (extension == null || 
extension.trim().isEmpty()) { // Get the extension of the file String[] lastBitOfURL = url.toExternalForm().split("/"); String[] lastBit = lastBitOfURL[lastBitOfURL.length - 1].split("."); if (lastBit.length != 0) { extension = lastBit[lastBit.length - 1]; - saveAs = saveAs + "." + extension; } } - - if (saveAs.indexOf('?') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('?')); } - if (saveAs.indexOf('#') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('#')); } - if (saveAs.indexOf('&') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('&')); } - if (saveAs.indexOf(':') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf(':')); } + // if extension is passed or found, add it if (extension != null) { - saveAs = saveAs + "." + extension; + fileName = fileName + "." + extension; } - return saveAs; + // make sure filename is not too long and has no unsupported chars + return Utils.sanitizeSaveAs(fileName); } @@ -443,20 +466,16 @@ public void retrievingSource(String url) { * @param saveAs * Where the downloaded file is stored. */ - public abstract void downloadCompleted(URL url, File saveAs); + public abstract void downloadCompleted(URL url, Path saveAs); /** * Notifies observers that a file could not be downloaded (includes a reason). - * @param url - * @param reason */ public abstract void downloadErrored(URL url, String reason); /** * Notify observers that a download could not be completed, * but was not technically an "error". - * @param url - * @param file */ - public abstract void downloadExists(URL url, File file); + public abstract void downloadExists(URL url, Path file); /** * @return Number of files downloaded. 
@@ -478,17 +497,17 @@ void checkIfComplete() { completed = true; LOGGER.info(" Rip completed!"); - RipStatusComplete rsc = new RipStatusComplete(workingDir, getCount()); + RipStatusComplete rsc = new RipStatusComplete(workingDir.toPath(), getCount()); RipStatusMessage msg = new RipStatusMessage(STATUS.RIP_COMPLETE, rsc); observer.update(this, msg); - Logger rootLogger = Logger.getRootLogger(); - FileAppender fa = (FileAppender) rootLogger.getAppender("FILE"); - if (fa != null) { - LOGGER.debug("Changing log file back to 'ripme.log'"); - fa.setFile("ripme.log"); - fa.activateOptions(); - } + // we do not care if the rollingfileappender is active, just change the logfile in case + // TODO - does not work. +// System.setProperty("logFilename", "ripme.log"); +// LOGGER.debug("Changing log file back to 'ripme.log'"); +// LoggerContext ctx = (LoggerContext) LogManager.getContext(false); +// ctx.reconfigure(); + if (Utils.getConfigBoolean("urls_only.save", false)) { String urlFile = this.workingDir + File.separator + "urls.txt"; try { @@ -519,7 +538,7 @@ public File getWorkingDir() { } @Override - public abstract void setWorkingDir(URL url) throws IOException; + public abstract void setWorkingDir(URL url) throws IOException, URISyntaxException; /** * @@ -532,8 +551,12 @@ public File getWorkingDir() { * @throws MalformedURLException * If any of those damned URLs gets malformed. */ - public String getAlbumTitle(URL url) throws MalformedURLException { - return getHost() + "_" + getGID(url); + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { + try { + return getHost() + "_" + getGID(url); + } catch (URISyntaxException e) { + throw new MalformedURLException(e.getMessage()); + } } /** @@ -572,7 +595,6 @@ public static AbstractRipper getRipper(URL url) throws Exception { * The package name. * @return * List of constructors for all eligible Rippers. 
- * @throws Exception */ public static List> getRipperConstructors(String pkg) throws Exception { List> constructors = new ArrayList<>(); @@ -586,8 +608,7 @@ public static List> getRipperConstructors(String pkg) throws Exce /** * Sends an update message to the relevant observer(s) on this ripper. - * @param status - * @param message + * @param status */ public void sendUpdate(STATUS status, Object message) { if (observer == null) { @@ -679,4 +700,18 @@ protected static boolean isThisATest() { protected boolean useByteProgessBar() { return false;} // If true ripme will try to resume a broken download for this ripper protected boolean tryResumeDownload() { return false;} -} + + protected boolean shouldIgnoreURL(URL url) { + final String[] ignoredExtensions = Utils.getConfigStringArray("download.ignore_extensions"); + if (ignoredExtensions == null || ignoredExtensions.length == 0) return false; // nothing ignored + String[] pathElements = url.getPath().split("\\."); + if (pathElements.length == 0) return false; // no extension, can't filter + String extension = pathElements[pathElements.length - 1]; + for (String ignoredExtension : ignoredExtensions) { + if (ignoredExtension.equalsIgnoreCase(extension)) { + return true; + } + } + return false; + } +} \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java b/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java index f433e77f8..f73dba7a2 100644 --- a/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java @@ -1,27 +1,33 @@ package com.rarchives.ripme.ripper; +import com.rarchives.ripme.ui.RipStatusMessage; +import com.rarchives.ripme.ui.RipStatusMessage.STATUS; +import com.rarchives.ripme.utils.Utils; + import java.io.File; -import java.io.FileWriter; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; +import 
java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; import java.util.Collections; import java.util.HashMap; import java.util.Map; -import com.rarchives.ripme.ui.RipStatusMessage; -import com.rarchives.ripme.ui.RipStatusMessage.STATUS; -import com.rarchives.ripme.utils.Utils; - // Should this file even exist? It does the same thing as abstractHTML ripper /**' * For ripping delicious albums off the interwebz. + * @deprecated Use AbstractHTMLRipper instead. */ public abstract class AlbumRipper extends AbstractRipper { private Map itemsPending = Collections.synchronizedMap(new HashMap()); - private Map itemsCompleted = Collections.synchronizedMap(new HashMap()); + private Map itemsCompleted = Collections.synchronizedMap(new HashMap()); private Map itemsErrored = Collections.synchronizedMap(new HashMap()); protected AlbumRipper(URL url) throws IOException { @@ -29,10 +35,10 @@ protected AlbumRipper(URL url) throws IOException { } public abstract boolean canRip(URL url); - public abstract URL sanitizeURL(URL url) throws MalformedURLException; + public abstract URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException; public abstract void rip() throws IOException; public abstract String getHost(); - public abstract String getGID(URL url) throws MalformedURLException; + public abstract String getGID(URL url) throws MalformedURLException, URISyntaxException; protected boolean allowDuplicates() { return false; @@ -50,11 +56,11 @@ public int getCount() { /** * Queues multiple URLs of single images to download from a single Album URL */ - public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) { - // Only download one file if this is a test. 
- if (super.isThisATest() && - (itemsPending.size() > 0 || itemsCompleted.size() > 0 || itemsErrored.size() > 0)) { + public boolean addURLToDownload(URL url, Path saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) { + // Only download one file if this is a test. + if (super.isThisATest() && (itemsCompleted.size() > 0 || itemsErrored.size() > 0)) { stop(); + itemsPending.clear(); return false; } if (!allowDuplicates() @@ -65,20 +71,24 @@ public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies = new HashMap<>(); - private URL url; + private final URL url; private File saveAs; - private String prettySaveAs; - private AbstractRipper observer; - private int retries; - private Boolean getFileExtFromMIME; + private final String prettySaveAs; + private final AbstractRipper observer; + private final int retries; + private final Boolean getFileExtFromMIME; private final int TIMEOUT; + private final int retrySleep; public DownloadFileThread(URL url, File saveAs, AbstractRipper observer, Boolean getFileExtFromMIME) { super(); this.url = url; this.saveAs = saveAs; - this.prettySaveAs = Utils.removeCWD(saveAs); + this.prettySaveAs = Utils.removeCWD(saveAs.toPath()); this.observer = observer; this.retries = Utils.getConfigInteger("download.retries", 1); this.TIMEOUT = Utils.getConfigInteger("download.timeout", 60000); + this.retrySleep = Utils.getConfigInteger("download.retry.sleep", 0); this.getFileExtFromMIME = getFileExtFromMIME; } @@ -61,12 +61,13 @@ public void setCookies(Map cookies) { * Attempts to download the file. Retries as needed. Notifies observers upon * completion/error/warn. 
*/ + @Override public void run() { // First thing we make sure the file name doesn't have any illegal chars in it saveAs = new File( saveAs.getParentFile().getAbsolutePath() + File.separator + Utils.sanitizeSaveAs(saveAs.getName())); long fileSize = 0; - int bytesTotal = 0; + int bytesTotal; int bytesDownloaded = 0; if (saveAs.exists() && observer.tryResumeDownload()) { fileSize = saveAs.length(); @@ -78,15 +79,15 @@ public void run() { return; } if (saveAs.exists() && !observer.tryResumeDownload() && !getFileExtFromMIME - || Utils.fuzzyExists(new File(saveAs.getParent()), saveAs.getName()) && getFileExtFromMIME + || Utils.fuzzyExists(Paths.get(saveAs.getParent()), saveAs.getName()) && getFileExtFromMIME && !observer.tryResumeDownload()) { if (Utils.getConfigBoolean("file.overwrite", false)) { logger.info("[!] " + Utils.getLocalizedString("deleting.existing.file") + prettySaveAs); - saveAs.delete(); + if (!saveAs.delete()) logger.error("could not delete existing file: " + saveAs.getAbsolutePath()); } else { logger.info("[!] " + Utils.getLocalizedString("skipping") + " " + url + " -- " + Utils.getLocalizedString("file.already.exists") + ": " + prettySaveAs); - observer.downloadExists(url, saveAs); + observer.downloadExists(url, saveAs.toPath()); return; } } @@ -95,8 +96,6 @@ public void run() { int tries = 0; // Number of attempts to download do { tries += 1; - InputStream bis = null; - OutputStream fos = null; try { logger.info(" Downloading file: " + urlToDownload + (tries > 0 ? 
" Retry #" + tries : "")); observer.sendUpdate(STATUS.DOWNLOAD_STARTED, url.toExternalForm()); @@ -119,14 +118,14 @@ public void run() { huc.setRequestProperty("Referer", referrer); // Sic } huc.setRequestProperty("User-agent", AbstractRipper.USER_AGENT); - String cookie = ""; + StringBuilder cookie = new StringBuilder(); for (String key : cookies.keySet()) { - if (!cookie.equals("")) { - cookie += "; "; + if (!cookie.toString().equals("")) { + cookie.append("; "); } - cookie += key + "=" + cookies.get(key); + cookie.append(key).append("=").append(cookies.get(key)); } - huc.setRequestProperty("Cookie", cookie); + huc.setRequestProperty("Cookie", cookie.toString()); if (observer.tryResumeDownload()) { if (fileSize != 0) { huc.setRequestProperty("Range", "bytes=" + fileSize + "-"); @@ -150,7 +149,7 @@ public void run() { redirected = true; } String location = huc.getHeaderField("Location"); - urlToDownload = new URL(location); + urlToDownload = new URI(location).toURL(); // Throw exception so download can be retried throw new IOException("Redirect status code " + statusCode + " - redirect to " + location); } @@ -184,6 +183,7 @@ public void run() { } // Save file + InputStream bis; bis = new BufferedInputStream(huc.getInputStream()); // Check if we should get the file ext from the MIME type @@ -209,6 +209,7 @@ public void run() { } } // If we're resuming a download we append data to the existing file + OutputStream fos = null; if (statusCode == 206) { fos = new FileOutputStream(saveAs, true); } else { @@ -235,9 +236,11 @@ public void run() { } else if (saveAs.getAbsolutePath().length() > 259 && Utils.isWindows()) { // This if is for when the file path has gone above 260 chars which windows does // not allow - fos = new FileOutputStream( + fos = Files.newOutputStream( Utils.shortenSaveAsWindows(saveAs.getParentFile().getPath(), saveAs.getName())); + assert fos != null: "After shortenSaveAsWindows: " + saveAs.getAbsolutePath(); } + assert fos != null: e.getStackTrace(); 
} } byte[] data = new byte[1024 * 256]; @@ -278,7 +281,7 @@ public void run() { "HTTP status code " + hse.getStatusCode() + " while downloading " + url.toExternalForm()); return; } - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { logger.debug("IOException", e); logger.error("[!] " + Utils.getLocalizedString("exception.while.downloading.file") + ": " + url + " - " + e.getMessage()); @@ -289,20 +292,6 @@ public void run() { Utils.getLocalizedString("failed.to.download") + " " + url.toExternalForm()); return; - }finally { - // Close any open streams - try { - if (bis != null) { - bis.close(); - } - } catch (IOException e) { - } - try { - if (fos != null) { - fos.close(); - } - } catch (IOException e) { - } } if (tries > this.retries) { logger.error("[!] " + Utils.getLocalizedString("exceeded.maximum.retries") + " (" + this.retries @@ -310,9 +299,13 @@ public void run() { observer.downloadErrored(url, Utils.getLocalizedString("failed.to.download") + " " + url.toExternalForm()); return; + } else { + if (retrySleep > 0) { + Utils.sleep(retrySleep); + } } } while (true); - observer.downloadCompleted(url, saveAs); + observer.downloadCompleted(url, saveAs.toPath()); logger.info("[+] Saved " + url + " as " + this.prettySaveAs); } diff --git a/src/main/java/com/rarchives/ripme/ripper/DownloadThreadPool.java b/src/main/java/com/rarchives/ripme/ripper/DownloadThreadPool.java index a811c98a2..8ae43743f 100644 --- a/src/main/java/com/rarchives/ripme/ripper/DownloadThreadPool.java +++ b/src/main/java/com/rarchives/ripme/ripper/DownloadThreadPool.java @@ -4,16 +4,16 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import org.apache.log4j.Logger; - import com.rarchives.ripme.utils.Utils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; /** * Simple wrapper around a FixedThreadPool. 
*/ public class DownloadThreadPool { - private static final Logger logger = Logger.getLogger(DownloadThreadPool.class); + private static final Logger logger = LogManager.getLogger(DownloadThreadPool.class); private ThreadPoolExecutor threadPool = null; public DownloadThreadPool() { @@ -35,10 +35,10 @@ private void initialize(String threadPoolName) { } /** * For adding threads to execution pool. - * @param t + * @param t * Thread to be added. */ - public void addThread(Thread t) { + public void addThread(Runnable t) { threadPool.execute(t); } diff --git a/src/main/java/com/rarchives/ripme/ripper/DownloadVideoThread.java b/src/main/java/com/rarchives/ripme/ripper/DownloadVideoThread.java index ef55e54ec..9430adce3 100644 --- a/src/main/java/com/rarchives/ripme/ripper/DownloadVideoThread.java +++ b/src/main/java/com/rarchives/ripme/ripper/DownloadVideoThread.java @@ -1,36 +1,36 @@ package com.rarchives.ripme.ripper; import java.io.BufferedInputStream; -import java.io.File; -import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.HttpURLConnection; import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; import javax.net.ssl.HttpsURLConnection; -import org.apache.log4j.Logger; - import com.rarchives.ripme.ui.RipStatusMessage.STATUS; import com.rarchives.ripme.utils.Utils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; /** * Thread for downloading files. * Includes retry logic, observer notifications, and other goodies. 
*/ -class DownloadVideoThread extends Thread { +class DownloadVideoThread implements Runnable { - private static final Logger logger = Logger.getLogger(DownloadVideoThread.class); + private static final Logger logger = LogManager.getLogger(DownloadVideoThread.class); - private URL url; - private File saveAs; - private String prettySaveAs; - private AbstractRipper observer; - private int retries; + private final URL url; + private final Path saveAs; + private final String prettySaveAs; + private final AbstractRipper observer; + private final int retries; - public DownloadVideoThread(URL url, File saveAs, AbstractRipper observer) { + public DownloadVideoThread(URL url, Path saveAs, AbstractRipper observer) { super(); this.url = url; this.saveAs = saveAs; @@ -43,6 +43,7 @@ public DownloadVideoThread(URL url, File saveAs, AbstractRipper observer) { * Attempts to download the file. Retries as needed. * Notifies observers upon completion/error/warn. */ + @Override public void run() { try { observer.stopCheck(); @@ -50,10 +51,14 @@ public void run() { observer.downloadErrored(url, "Download interrupted"); return; } - if (saveAs.exists()) { + if (Files.exists(saveAs)) { if (Utils.getConfigBoolean("file.overwrite", false)) { logger.info("[!] Deleting existing file" + prettySaveAs); - saveAs.delete(); + try { + Files.delete(saveAs); + } catch (IOException e) { + e.printStackTrace(); + } } else { logger.info("[!] 
Skipping " + url + " -- file already exists: " + prettySaveAs); observer.downloadExists(url, saveAs); @@ -100,7 +105,7 @@ public void run() { huc.connect(); // Check status code bis = new BufferedInputStream(huc.getInputStream()); - fos = new FileOutputStream(saveAs); + fos = Files.newOutputStream(saveAs); while ( (bytesRead = bis.read(data)) != -1) { try { observer.stopCheck(); @@ -122,10 +127,10 @@ public void run() { // Close any open streams try { if (bis != null) { bis.close(); } - } catch (IOException e) { } + } catch (IOException ignored) { } try { if (fos != null) { fos.close(); } - } catch (IOException e) { } + } catch (IOException ignored) { } } if (tries > this.retries) { logger.error("[!] Exceeded maximum retries (" + this.retries + ") for URL " + url); diff --git a/src/main/java/com/rarchives/ripme/ripper/RipperInterface.java b/src/main/java/com/rarchives/ripme/ripper/RipperInterface.java index 550209c08..824d639ec 100644 --- a/src/main/java/com/rarchives/ripme/ripper/RipperInterface.java +++ b/src/main/java/com/rarchives/ripme/ripper/RipperInterface.java @@ -2,6 +2,7 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; /** @@ -11,10 +12,10 @@ * (cheers!) 
*/ interface RipperInterface { - void rip() throws IOException; + void rip() throws IOException, URISyntaxException; boolean canRip(URL url); - URL sanitizeURL(URL url) throws MalformedURLException; - void setWorkingDir(URL url) throws IOException; + URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException; + void setWorkingDir(URL url) throws IOException, URISyntaxException; String getHost(); - String getGID(URL url) throws MalformedURLException; + String getGID(URL url) throws MalformedURLException, URISyntaxException; } \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java b/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java index 4fb0f32aa..014998fa0 100644 --- a/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/VideoRipper.java @@ -8,7 +8,9 @@ import java.io.FileWriter; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; +import java.nio.file.Path; import java.util.Map; @@ -21,7 +23,7 @@ protected VideoRipper(URL url) throws IOException { super(url); } - public abstract void rip() throws IOException; + public abstract void rip() throws IOException, URISyntaxException; public abstract String getHost(); @@ -43,10 +45,10 @@ public String getAlbumTitle(URL url) { } @Override - public boolean addURLToDownload(URL url, File saveAs) { + public boolean addURLToDownload(URL url, Path saveAs) { if (Utils.getConfigBoolean("urls_only.save", false)) { // Output URL to file - String urlFile = this.workingDir + File.separator + "urls.txt"; + String urlFile = this.workingDir + "/urls.txt"; try (FileWriter fw = new FileWriter(urlFile, true)) { fw.write(url.toExternalForm()); @@ -66,13 +68,17 @@ public boolean addURLToDownload(URL url, File saveAs) { this.url = url; return true; } + if (shouldIgnoreURL(url)) { + sendUpdate(STATUS.DOWNLOAD_SKIP, "Skipping " + url.toExternalForm() + " - 
ignored extension"); + return false; + } threadPool.addThread(new DownloadVideoThread(url, saveAs, this)); } return true; } @Override - public boolean addURLToDownload(URL url, File saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) { + public boolean addURLToDownload(URL url, Path saveAs, String referrer, Map cookies, Boolean getFileExtFromMIME) { return addURLToDownload(url, saveAs); } @@ -83,7 +89,9 @@ public boolean addURLToDownload(URL url, File saveAs, String referrer, Map getURLsFromPage(Document page) { List imageURLs = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/AllporncomicRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/AllporncomicRipper.java index a5fbbd0f2..da8c7bd74 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/AllporncomicRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/AllporncomicRipper.java @@ -12,7 +12,6 @@ import org.jsoup.nodes.Element; import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; public class AllporncomicRipper extends AbstractHTMLRipper { @@ -46,12 +45,6 @@ public String getGID(URL url) throws MalformedURLException { "allporncomic.com/TITLE/CHAPTER - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ArtStationRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ArtStationRipper.java index 66455861b..bc824769b 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ArtStationRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ArtStationRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; 
import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -50,7 +52,7 @@ public String getGID(URL url) throws MalformedURLException { try { // groupData = Http.url(albumURL.getLocation()).getJSON(); groupData = getJson(albumURL.getLocation()); - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { throw new MalformedURLException("Couldn't load JSON from " + albumURL.getLocation()); } return groupData.getString("title"); @@ -62,7 +64,7 @@ public String getGID(URL url) throws MalformedURLException { try { // groupData = Http.url(userInfoURL).getJSON(); groupData = getJson(userInfoURL); - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { throw new MalformedURLException("Couldn't load JSON from " + userInfoURL); } return groupData.getString("full_name"); @@ -74,7 +76,7 @@ public String getGID(URL url) throws MalformedURLException { } @Override - protected JSONObject getFirstPage() throws IOException { + protected JSONObject getFirstPage() throws IOException, URISyntaxException { if (albumURL.getType() == URL_TYPE.SINGLE_PROJECT) { // URL points to JSON of a single project, just return it // return Http.url(albumURL.getLocation()).getJSON(); @@ -90,7 +92,7 @@ protected JSONObject getFirstPage() throws IOException { if (albumContent.getInt("total_count") > 0) { // Get JSON of the first project and return it JSONObject projectInfo = albumContent.getJSONArray("data").getJSONObject(0); - ParsedURL projectURL = parseURL(new URL(projectInfo.getString("permalink"))); + ParsedURL projectURL = parseURL(new URI(projectInfo.getString("permalink")).toURL()); // return Http.url(projectURL.getLocation()).getJSON(); return getJson(projectURL.getLocation()); } @@ -100,7 +102,7 @@ protected JSONObject getFirstPage() throws IOException { } @Override - protected JSONObject getNextPage(JSONObject doc) throws IOException { + protected JSONObject getNextPage(JSONObject doc) throws IOException, URISyntaxException { if 
(albumURL.getType() == URL_TYPE.USER_PORTFOLIO) { // Initialize the page number if it hasn't been initialized already if (projectPageNumber == null) { @@ -117,7 +119,7 @@ protected JSONObject getNextPage(JSONObject doc) throws IOException { projectIndex = 0; } - Integer currentProject = ((projectPageNumber - 1) * 50) + (projectIndex + 1); + int currentProject = ((projectPageNumber - 1) * 50) + (projectIndex + 1); // JSONObject albumContent = Http.url(albumURL.getLocation() + "?page=" + // projectPageNumber).getJSON(); JSONObject albumContent = getJson(albumURL.getLocation() + "?page=" + projectPageNumber); @@ -125,7 +127,7 @@ protected JSONObject getNextPage(JSONObject doc) throws IOException { if (albumContent.getInt("total_count") > currentProject) { // Get JSON of the next project and return it JSONObject projectInfo = albumContent.getJSONArray("data").getJSONObject(projectIndex); - ParsedURL projectURL = parseURL(new URL(projectInfo.getString("permalink"))); + ParsedURL projectURL = parseURL(new URI(projectInfo.getString("permalink")).toURL()); projectIndex++; // return Http.url(projectURL.getLocation()).getJSON(); return getJson(projectURL.getLocation()); @@ -320,8 +322,8 @@ private JSONObject getJson(URL url) throws IOException { throw new IOException("Error fetching json. 
Status code:" + status); } - private JSONObject getJson(String url) throws IOException { - return getJson(new URL(url)); + private JSONObject getJson(String url) throws IOException, URISyntaxException { + return getJson(new URI(url).toURL()); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ArtstnRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ArtstnRipper.java index 82b6e97c0..1caeead48 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ArtstnRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ArtstnRipper.java @@ -1,58 +1,60 @@ -package com.rarchives.ripme.ripper.rippers; - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; - -import org.jsoup.Connection.Response; - -import com.rarchives.ripme.utils.Http; - -/* - * Ripper for ArtStation's short URL domain. - * Example URL: https://artstn.co/p/JlE15Z - */ - -public class ArtstnRipper extends ArtStationRipper { - public URL artStationUrl = null; - - public ArtstnRipper(URL url) throws IOException { - super(url); - } - - @Override - public boolean canRip(URL url) { - return url.getHost().endsWith("artstn.co"); - } - - @Override - public String getGID(URL url) throws MalformedURLException { - if (artStationUrl == null) { - // Run only once. 
- try { - artStationUrl = getFinalUrl(url); - if (artStationUrl == null) { - throw new IOException("Null url received."); - } - } catch (IOException e) { - LOGGER.error("Couldnt resolve URL.", e); - } - - } - return super.getGID(artStationUrl); - } - - public URL getFinalUrl(URL url) throws IOException { - if (url.getHost().endsWith("artstation.com")) { - return url; - } - - LOGGER.info("Checking url: " + url); - Response response = Http.url(url).connection().followRedirects(false).execute(); - if (response.statusCode() / 100 == 3 && response.hasHeader("location")) { - return getFinalUrl(new URL(response.header("location"))); - } else { - return null; - } - } -} +package com.rarchives.ripme.ripper.rippers; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; + +import org.jsoup.Connection.Response; + +import com.rarchives.ripme.utils.Http; + +/* + * Ripper for ArtStation's short URL domain. + * Example URL: https://artstn.co/p/JlE15Z + */ + +public class ArtstnRipper extends ArtStationRipper { + public URL artStationUrl = null; + + public ArtstnRipper(URL url) throws IOException { + super(url); + } + + @Override + public boolean canRip(URL url) { + return url.getHost().endsWith("artstn.co"); + } + + @Override + public String getGID(URL url) throws MalformedURLException { + if (artStationUrl == null) { + // Run only once. 
+ try { + artStationUrl = getFinalUrl(url); + if (artStationUrl == null) { + throw new IOException("Null url received."); + } + } catch (IOException | URISyntaxException e) { + LOGGER.error("Couldnt resolve URL.", e); + } + + } + return super.getGID(artStationUrl); + } + + public URL getFinalUrl(URL url) throws IOException, URISyntaxException { + if (url.getHost().endsWith("artstation.com")) { + return url; + } + + LOGGER.info("Checking url: " + url); + Response response = Http.url(url).connection().followRedirects(false).execute(); + if (response.statusCode() / 100 == 3 && response.hasHeader("location")) { + return getFinalUrl(new URI(response.header("location")).toURL()); + } else { + return null; + } + } +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java index 25491dfe6..8502e6b61 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java @@ -2,6 +2,7 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -13,7 +14,6 @@ import org.jsoup.nodes.Element; import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; public class BatoRipper extends AbstractHTMLRipper { @@ -70,10 +70,10 @@ public List getAlbumsToQueue(Document doc) { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title as GID - return getHost() + "_" + getGID(url) + "_" + getFirstPage().select("title").first().text().replaceAll(" ", "_"); + return getHost() + "_" + getGID(url) + "_" + getCachedFirstPage().select("title").first().text().replaceAll(" ", "_"); } catch (IOException e) { // Fall back to default 
album naming convention LOGGER.info("Unable to find title at " + url); @@ -94,11 +94,6 @@ public boolean canRip(URL url) { return m.matches(); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } @Override public List getURLsFromPage(Document doc) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/BcfakesRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/BcfakesRipper.java index 2a77f02da..2798b1ea8 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/BcfakesRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/BcfakesRipper.java @@ -47,11 +47,6 @@ public String getGID(URL url) throws MalformedURLException { + " Got: " + url); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { // Find next page diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/BlackbrickroadofozRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/BlackbrickroadofozRipper.java index cb5d4b14b..d99fe61d4 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/BlackbrickroadofozRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/BlackbrickroadofozRipper.java @@ -41,12 +41,6 @@ public String getGID(URL url) throws MalformedURLException { "www.blackbrickroadofoz.com/comic/PAGE - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { sleep(1000); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/BooruRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/BooruRipper.java index 7d6b17a67..974a00618 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/BooruRipper.java +++ 
b/src/main/java/com/rarchives/ripme/ripper/rippers/BooruRipper.java @@ -12,12 +12,14 @@ import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.log4j.Logger; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; public class BooruRipper extends AbstractHTMLRipper { - private static final Logger logger = Logger.getLogger(BooruRipper.class); + private static final Logger logger = LogManager.getLogger(BooruRipper.class); private static Pattern gidPattern = null; diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java index 8c7aea6b2..f1d41426a 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java @@ -2,10 +2,11 @@ import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.ripper.rippers.ripperhelpers.ChanSite; -import com.rarchives.ripme.utils.Http; +import com.rarchives.ripme.utils.Utils; import com.rarchives.ripme.utils.RipUtils; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.Arrays; @@ -13,7 +14,6 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import com.rarchives.ripme.utils.Utils; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; @@ -72,7 +72,7 @@ public static List getChansFromConfig(String rawChanString) { ); private ChanSite chanSite; - private Boolean generalChanSite = true; + private boolean generalChanSite = true; public ChanRipper(URL url) throws IOException { super(url); @@ -104,7 +104,7 @@ public String getHost() { public String getAlbumTitle(URL url) throws MalformedURLException { try { // Attempt to use album title as GID - Document doc = 
getFirstPage(); + Document doc = getCachedFirstPage(); try { String subject = doc.select(".post.op > .postinfo > .subject").first().text(); return getHost() + "_" + getGID(url) + "_" + subject; @@ -195,11 +195,9 @@ public String getDomain() { return this.url.getHost(); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(this.url).get(); + public Document getFirstPage() throws IOException, URISyntaxException { + return super.getFirstPage(); } - private boolean isURLBlacklisted(String url) { for (String blacklist_item : url_piece_blacklist) { if (url.contains(blacklist_item)) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java index 005ba5c7e..c66465eb4 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java @@ -2,6 +2,7 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.Arrays; @@ -50,10 +51,10 @@ public boolean canRip(URL url) { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title as GID - Element titleElement = getFirstPage().select("meta[property=og:title]").first(); + Element titleElement = getCachedFirstPage().select("meta[property=og:title]").first(); String title = titleElement.attr("content"); title = title.substring(title.lastIndexOf('/') + 1); return getHost() + "_" + title.trim(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ComicextraRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ComicextraRipper.java index 08b27a76d..e794e0725 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ComicextraRipper.java 
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ComicextraRipper.java @@ -1,173 +1,174 @@ -package com.rarchives.ripme.ripper.rippers; - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.ArrayList; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; -import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; - -/** - * @author Tushar - * - */ -public class ComicextraRipper extends AbstractHTMLRipper { - - private static final String FILE_NAME = "page"; - - private Pattern p1 = - Pattern.compile("https:\\/\\/www.comicextra.com\\/comic\\/([A-Za-z0-9_-]+)"); - private Pattern p2 = Pattern.compile( - "https:\\/\\/www.comicextra.com\\/([A-Za-z0-9_-]+)\\/([A-Za-z0-9_-]+)(?:\\/full)?"); - private UrlType urlType = UrlType.UNKNOWN; - private List chaptersList = null; - private int chapterIndex = -1; // index for the chaptersList, useful in getting the next page. - private int imageIndex = 0; // image index for each chapter images. - - public ComicextraRipper(URL url) throws IOException { - super(url); - } - - @Override - protected String getDomain() { - return "comicextra.com"; - } - - @Override - public String getHost() { - return "comicextra"; - } - - @Override - public String getGID(URL url) throws MalformedURLException { - Matcher m1 = p1.matcher(url.toExternalForm()); - if (m1.matches()) { - // URL is of comic( https://www.comicextra.com/comic/the-punisher-frank-castle-max). - urlType = UrlType.COMIC; - return m1.group(1); - } - - Matcher m2 = p2.matcher(url.toExternalForm()); - if (m2.matches()) { - // URL is of chapter( https://www.comicextra.com/the-punisher-frank-castle-max/chapter-75). 
- urlType = UrlType.CHAPTER; - return m2.group(1); - } - - throw new MalformedURLException( - "Expected comicextra.com url of type: https://www.comicextra.com/comic/some-comic-name\n" - + " or https://www.comicextra.com/some-comic-name/chapter-001 got " + url - + " instead"); - } - - @Override - protected Document getFirstPage() throws IOException { - Document doc = null; - - switch (urlType) { - case COMIC: - // For COMIC type url we extract the urls of each chapters and store them in chapters. - chaptersList = new ArrayList<>(); - Document comicPage = Http.url(url).get(); - Elements elements = comicPage.select("div.episode-list a"); - for (Element e : elements) { - chaptersList.add(getCompleteChapterUrl(e.attr("abs:href"))); - } - - // Set the first chapter from the chapterList as the doc. - chapterIndex = 0; - doc = Http.url(chaptersList.get(chapterIndex)).get(); - break; - case CHAPTER: - doc = Http.url(url).get(); - break; - case UNKNOWN: - default: - throw new IOException("Unknown url type encountered."); - } - - return doc; - } - - @Override - public Document getNextPage(Document doc) throws IOException { - if (urlType == UrlType.COMIC) { - ++chapterIndex; - imageIndex = 0; // Resetting the imagesIndex so that images prefix within each chapter starts from '001_'. 
- if (chapterIndex < chaptersList.size()) { - return Http.url(chaptersList.get(chapterIndex)).get(); - } - } - - return super.getNextPage(doc); - } - - @Override - protected List getURLsFromPage(Document page) { - List urls = new ArrayList<>(); - - if (urlType == UrlType.COMIC || urlType == UrlType.CHAPTER) { - Elements images = page.select("img.chapter_img"); - for (Element img : images) { - urls.add(img.attr("src")); - } - } - - return urls; - } - - @Override - protected void downloadURL(URL url, int index) { - String subdirectory = getSubDirectoryName(); - String prefix = getPrefix(++imageIndex); - - addURLToDownload(url, prefix, subdirectory, null, null, FILE_NAME, null, Boolean.TRUE); - } - - /* - * This function appends /full at the end of the chapters url to get all the images for the - * chapter in the same Document. - */ - private String getCompleteChapterUrl(String chapterUrl) { - if (!chapterUrl.endsWith("/full")) { - chapterUrl = chapterUrl + "/full"; - } - return chapterUrl; - } - - /* - * This functions returns sub folder name for the current chapter. - */ - private String getSubDirectoryName() { - String subDirectory = ""; - - if (urlType == UrlType.COMIC) { - Matcher m = p2.matcher(chaptersList.get(chapterIndex)); - if (m.matches()) { - subDirectory = m.group(2); - } - } - - if (urlType == UrlType.CHAPTER) { - Matcher m = p2.matcher(url.toExternalForm()); - if (m.matches()) { - subDirectory = m.group(2); - } - } - - return subDirectory; - } - - /* - * Enum to classify different types of urls. 
- */ - private enum UrlType { - COMIC, CHAPTER, UNKNOWN - } -} +package com.rarchives.ripme.ripper.rippers; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; +import org.jsoup.select.Elements; +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.utils.Http; + +/** + * @author Tushar + * + */ +public class ComicextraRipper extends AbstractHTMLRipper { + + private static final String FILE_NAME = "page"; + + private Pattern p1 = + Pattern.compile("https:\\/\\/www.comicextra.com\\/comic\\/([A-Za-z0-9_-]+)"); + private Pattern p2 = Pattern.compile( + "https:\\/\\/www.comicextra.com\\/([A-Za-z0-9_-]+)\\/([A-Za-z0-9_-]+)(?:\\/full)?"); + private UrlType urlType = UrlType.UNKNOWN; + private List chaptersList = null; + private int chapterIndex = -1; // index for the chaptersList, useful in getting the next page. + private int imageIndex = 0; // image index for each chapter images. + + public ComicextraRipper(URL url) throws IOException { + super(url); + } + + @Override + protected String getDomain() { + return "comicextra.com"; + } + + @Override + public String getHost() { + return "comicextra"; + } + + @Override + public String getGID(URL url) throws MalformedURLException { + Matcher m1 = p1.matcher(url.toExternalForm()); + if (m1.matches()) { + // URL is of comic( https://www.comicextra.com/comic/the-punisher-frank-castle-max). + urlType = UrlType.COMIC; + return m1.group(1); + } + + Matcher m2 = p2.matcher(url.toExternalForm()); + if (m2.matches()) { + // URL is of chapter( https://www.comicextra.com/the-punisher-frank-castle-max/chapter-75). 
+ urlType = UrlType.CHAPTER; + return m2.group(1); + } + + throw new MalformedURLException( + "Expected comicextra.com url of type: https://www.comicextra.com/comic/some-comic-name\n" + + " or https://www.comicextra.com/some-comic-name/chapter-001 got " + url + + " instead"); + } + + @Override + protected Document getFirstPage() throws IOException { + Document doc = null; + + switch (urlType) { + case COMIC: + // For COMIC type url we extract the urls of each chapters and store them in chapters. + chaptersList = new ArrayList<>(); + Document comicPage = Http.url(url).get(); + Elements elements = comicPage.select("div.episode-list a"); + for (Element e : elements) { + chaptersList.add(getCompleteChapterUrl(e.attr("abs:href"))); + } + + // Set the first chapter from the chapterList as the doc. + chapterIndex = 0; + doc = Http.url(chaptersList.get(chapterIndex)).get(); + break; + case CHAPTER: + doc = Http.url(url).get(); + break; + case UNKNOWN: + default: + throw new IOException("Unknown url type encountered."); + } + + return doc; + } + + @Override + public Document getNextPage(Document doc) throws IOException, URISyntaxException { + if (urlType == UrlType.COMIC) { + ++chapterIndex; + imageIndex = 0; // Resetting the imagesIndex so that images prefix within each chapter starts from '001_'. 
+ if (chapterIndex < chaptersList.size()) { + return Http.url(chaptersList.get(chapterIndex)).get(); + } + } + + return super.getNextPage(doc); + } + + @Override + protected List getURLsFromPage(Document page) { + List urls = new ArrayList<>(); + + if (urlType == UrlType.COMIC || urlType == UrlType.CHAPTER) { + Elements images = page.select("img.chapter_img"); + for (Element img : images) { + urls.add(img.attr("src")); + } + } + + return urls; + } + + @Override + protected void downloadURL(URL url, int index) { + String subdirectory = getSubDirectoryName(); + String prefix = getPrefix(++imageIndex); + + addURLToDownload(url, subdirectory, null, null, prefix, FILE_NAME, null, Boolean.TRUE); + } + + /* + * This function appends /full at the end of the chapters url to get all the images for the + * chapter in the same Document. + */ + private String getCompleteChapterUrl(String chapterUrl) { + if (!chapterUrl.endsWith("/full")) { + chapterUrl = chapterUrl + "/full"; + } + return chapterUrl; + } + + /* + * This functions returns sub folder name for the current chapter. + */ + private String getSubDirectoryName() { + String subDirectory = ""; + + if (urlType == UrlType.COMIC) { + Matcher m = p2.matcher(chaptersList.get(chapterIndex)); + if (m.matches()) { + subDirectory = m.group(2); + } + } + + if (urlType == UrlType.CHAPTER) { + Matcher m = p2.matcher(url.toExternalForm()); + if (m.matches()) { + subDirectory = m.group(2); + } + } + + return subDirectory; + } + + /* + * Enum to classify different types of urls. 
+ */ + private enum UrlType { + COMIC, CHAPTER, UNKNOWN + } +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/CoomerPartyRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/CoomerPartyRipper.java new file mode 100644 index 000000000..f990ae66e --- /dev/null +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/CoomerPartyRipper.java @@ -0,0 +1,180 @@ +package com.rarchives.ripme.ripper.rippers; +import com.rarchives.ripme.ripper.AbstractJSONRipper; +import com.rarchives.ripme.utils.Http; +import com.rarchives.ripme.utils.Utils; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.json.JSONArray; +import org.json.JSONException; +import org.json.JSONObject; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +/** + * See this link for the API schema. 
+ */ +public class CoomerPartyRipper extends AbstractJSONRipper { + private static final Logger LOGGER = LogManager.getLogger(CoomerPartyRipper.class); + private static final String IMG_URL_BASE = "https://c3.coomer.su/data"; + private static final String VID_URL_BASE = "https://c1.coomer.su/data"; + private static final Pattern IMG_PATTERN = Pattern.compile("^.*\\.(jpg|jpeg|png|gif|apng|webp|tif|tiff)$", Pattern.CASE_INSENSITIVE); + private static final Pattern VID_PATTERN = Pattern.compile("^.*\\.(webm|mp4|m4v)$", Pattern.CASE_INSENSITIVE); + + // just so we can return a JSONObject from getFirstPage + private static final String KEY_WRAPPER_JSON_ARRAY = "array"; + + private static final String KEY_FILE = "file"; + private static final String KEY_PATH = "path"; + private static final String KEY_ATTACHMENTS = "attachments"; + + // Posts Request Endpoint + private static final String POSTS_ENDPOINT = "https://coomer.su/api/v1/%s/user/%s?o=%d"; + + // Pagination is strictly 50 posts per page, per API schema. 
+ private Integer pageCount = 0; + private static final Integer postCount = 50; + + // "Service" of the page to be ripped: Onlyfans, Fansly, Candfans + private final String service; + + // Username of the page to be ripped + private final String user; + + + + public CoomerPartyRipper(URL url) throws IOException { + super(url); + List pathElements = Arrays.stream(url.getPath().split("/")) + .filter(element -> !element.isBlank()) + .collect(Collectors.toList()); + + service = pathElements.get(0); + user = pathElements.get(2); + + if (service == null || user == null || service.isBlank() || user.isBlank()) { + LOGGER.warn("service=" + service + ", user=" + user); + throw new MalformedURLException("Invalid coomer.party URL: " + url); + } + LOGGER.debug("Parsed service=" + service + " and user=" + user + " from " + url); + } + + @Override + protected String getDomain() { + return "coomer.party"; + } + + @Override + public String getHost() { + return "coomer.party"; + } + + @Override + public boolean canRip(URL url) { + String host = url.getHost(); + return host.endsWith("coomer.party") || host.endsWith("coomer.su"); + } + + @Override + public String getGID(URL url) { + return Utils.filesystemSafe(String.format("%s_%s", service, user)); + } + + private JSONObject getJsonPostsForOffset(Integer offset) throws IOException { + String apiUrl = String.format(POSTS_ENDPOINT, service, user, offset); + + String jsonArrayString = Http.url(apiUrl) + .ignoreContentType() + .response() + .body(); + JSONArray jsonArray = new JSONArray(jsonArrayString); + + // Ideally we'd just return the JSONArray from here, but we have to wrap it in a JSONObject + JSONObject wrapperObject = new JSONObject(); + wrapperObject.put(KEY_WRAPPER_JSON_ARRAY, jsonArray); + return wrapperObject; + } + + @Override + protected JSONObject getFirstPage() throws IOException { + return getJsonPostsForOffset(0); + } + + @Override + protected JSONObject getNextPage(JSONObject doc) throws IOException, 
URISyntaxException { + pageCount++; + Integer offset = postCount * pageCount; + return getJsonPostsForOffset(offset); + } + + + @Override + protected List getURLsFromJSON(JSONObject json) { + // extract the array from our wrapper JSONObject + JSONArray posts = json.getJSONArray(KEY_WRAPPER_JSON_ARRAY); + ArrayList urls = new ArrayList<>(); + for (int i = 0; i < posts.length(); i++) { + JSONObject post = posts.getJSONObject(i); + pullFileUrl(post, urls); + pullAttachmentUrls(post, urls); + } + LOGGER.debug("Pulled " + urls.size() + " URLs from " + posts.length() + " posts"); + return urls; + } + + @Override + protected void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index)); + } + + private void pullFileUrl(JSONObject post, ArrayList results) { + try { + JSONObject file = post.getJSONObject(KEY_FILE); + String path = file.getString(KEY_PATH); + if (isImage(path)) { + String url = IMG_URL_BASE + path; + results.add(url); + } else if (isVideo(path)) { + String url = VID_URL_BASE + path; + results.add(url); + } else { + LOGGER.error("Unknown extension for coomer.su path: " + path); + } + } catch (JSONException e) { + /* No-op */ + LOGGER.error("Unable to Parse FileURL " + e.getMessage()); + } + } + + private void pullAttachmentUrls(JSONObject post, ArrayList results) { + try { + JSONArray attachments = post.getJSONArray(KEY_ATTACHMENTS); + for (int i = 0; i < attachments.length(); i++) { + JSONObject attachment = attachments.getJSONObject(i); + pullFileUrl(attachment, results); + } + } catch (JSONException e) { + /* No-op */ + LOGGER.error("Unable to Parse AttachmentURL " + e.getMessage()); + } + } + + private boolean isImage(String path) { + Matcher matcher = IMG_PATTERN.matcher(path); + return matcher.matches(); + } + + private boolean isVideo(String path) { + Matcher matcher = VID_PATTERN.matcher(path); + return matcher.matches(); + } +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/CyberdropRipper.java 
b/src/main/java/com/rarchives/ripme/ripper/rippers/CyberdropRipper.java index f288592a1..81a39823a 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/CyberdropRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/CyberdropRipper.java @@ -1,60 +1,55 @@ -package com.rarchives.ripme.ripper.rippers; - -import java.io.IOException; -import java.net.*; -import java.util.ArrayList; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; - -public class CyberdropRipper extends AbstractHTMLRipper { - - public CyberdropRipper(URL url) throws IOException { - super(url); - } - - @Override - public String getHost() { - return "cyberdrop"; - } - - @Override - protected Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - - @Override - public String getDomain() { - return "cyberdrop.me"; - } - - @Override - public String getGID(URL url) throws MalformedURLException { - Pattern p = Pattern.compile("^https?://cyberdrop\\.me/a/([a-zA-Z0-9]+).*?$"); - Matcher m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(1); - } - throw new MalformedURLException("Expected cyberdrop.me URL format: " + - "https://cyberdrop.me/a/xxxxxxxx - got " + url + "instead"); - } - - @Override - public void downloadURL(URL url, int index) { - addURLToDownload(url, getPrefix(index)); - } - - @Override - protected List getURLsFromPage(Document page) { - ArrayList urls = new ArrayList<>(); - for (Element element: page.getElementsByClass("image")) { - urls.add(element.attr("href")); - } - return urls; - } +package com.rarchives.ripme.ripper.rippers; + +import java.io.IOException; +import java.net.*; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import 
com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.utils.Http; +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; + +public class CyberdropRipper extends AbstractHTMLRipper { + + public CyberdropRipper(URL url) throws IOException { + super(url); + } + + @Override + public String getHost() { + return "cyberdrop"; + } + + @Override + public String getDomain() { + return "cyberdrop.me"; + } + + @Override + public String getGID(URL url) throws MalformedURLException { + Pattern p = Pattern.compile("^https?://cyberdrop\\.me/a/([a-zA-Z0-9]+).*?$"); + Matcher m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return m.group(1); + } + throw new MalformedURLException("Expected cyberdrop.me URL format: " + + "https://cyberdrop.me/a/xxxxxxxx - got " + url + "instead"); + } + + @Override + public void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index)); + } + + @Override + protected List getURLsFromPage(Document page) { + ArrayList urls = new ArrayList<>(); + for (Element element: page.getElementsByClass("image")) { + urls.add(element.attr("href")); + } + return urls; + } } \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DanbooruRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DanbooruRipper.java index 8fdd55a1f..9496bb577 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/DanbooruRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/DanbooruRipper.java @@ -3,25 +3,35 @@ import com.rarchives.ripme.ripper.AbstractJSONRipper; import com.rarchives.ripme.utils.Http; import com.rarchives.ripme.utils.Utils; -import org.apache.log4j.Logger; +import okhttp3.OkHttpClient; +import okhttp3.Request; +import okhttp3.Response; +import org.jetbrains.annotations.Nullable; import org.json.JSONArray; import org.json.JSONObject; +import org.jsoup.Connection; +import java.io.BufferedReader; +import java.io.ByteArrayInputStream; import 
java.io.IOException; +import java.io.InputStreamReader; import java.net.MalformedURLException; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.zip.GZIPInputStream; public class DanbooruRipper extends AbstractJSONRipper { - private static final Logger logger = Logger.getLogger(DanbooruRipper.class); - private static final String DOMAIN = "danbooru.donmai.us", HOST = "danbooru"; + private final OkHttpClient client; private Pattern gidPattern = null; @@ -29,6 +39,10 @@ public class DanbooruRipper extends AbstractJSONRipper { public DanbooruRipper(URL url) throws IOException { super(url); + this.client = new OkHttpClient.Builder() + .readTimeout(60, TimeUnit.SECONDS) + .writeTimeout(60, TimeUnit.SECONDS) + .build(); } @Override @@ -45,29 +59,51 @@ private String getPage(int num) throws MalformedURLException { return "https://" + getDomain() + "/posts.json?page=" + num + "&tags=" + getTag(url); } + private final String userAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0"; @Override - protected JSONObject getFirstPage() throws IOException { - String newCompatibleJSON = "{ resources:" + Http.url(getPage(1)).getJSONArray() + " }"; - - return new JSONObject(newCompatibleJSON); + protected JSONObject getFirstPage() throws MalformedURLException { + return getCurrentPage(); } @Override protected JSONObject getNextPage(JSONObject doc) throws IOException { - currentPageNum++; - - JSONArray resourcesJSONArray = Http.url(getPage(currentPageNum)).getJSONArray(); - - int resourcesJSONArrayLength = resourcesJSONArray.length(); + return getCurrentPage(); + } - if (resourcesJSONArrayLength == 0) { - currentPageNum = 0; - throw new IOException("No more images in the next page"); + @Nullable + private 
JSONObject getCurrentPage() throws MalformedURLException { + Request request = new Request.Builder() + .url(getPage(currentPageNum)) + .header("User-Agent", "Mozilla/5.0 (iPhone; CPU iPhone OS 15_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.0 Mobile/15E148 Safari/604.1") + .header("Accept", "application/json,text/javascript,*/*;q=0.01") + .header("Accept-Language", "en-US,en;q=0.9") + .header("Sec-Fetch-Dest", "empty") + .header("Sec-Fetch-Mode", "cors") + .header("Sec-Fetch-Site", "same-origin") + .header("Referer", "https://danbooru.donmai.us/") + .header("X-Requested-With", "XMLHttpRequest") + .header("Connection", "keep-alive") + .build(); + Response response = null; + currentPageNum++; + try { + response = client.newCall(request).execute(); + if (!response.isSuccessful()) throw new IOException("Unexpected code " + response); + + String responseData = response.body().string(); + JSONArray jsonArray = new JSONArray(responseData); + if(!jsonArray.isEmpty()){ + String newCompatibleJSON = "{ \"resources\":" + jsonArray + " }"; + return new JSONObject(newCompatibleJSON); + } + } catch (IOException e) { + e.printStackTrace(); + } finally { + if(response !=null) { + response.body().close(); + } } - - String newCompatibleJSON = "{ resources:" + resourcesJSONArray + " }"; - - return new JSONObject(newCompatibleJSON); + return null; } @Override @@ -87,7 +123,7 @@ public String getGID(URL url) throws MalformedURLException { try { return Utils.filesystemSafe(new URI(getTag(url).replaceAll("([?&])tags=", "")).getPath()); } catch (URISyntaxException ex) { - logger.error(ex); + LOGGER.error(ex); } throw new MalformedURLException("Expected booru URL format: " + getDomain() + "/posts?tags=searchterm - got " + url + " instead"); @@ -99,7 +135,7 @@ protected void downloadURL(URL url, int index) { } private String getTag(URL url) throws MalformedURLException { - gidPattern = 
Pattern.compile("https?://danbooru.donmai.us/(posts)?.*([?&]tags=([a-zA-Z0-9$_.+!*'(),%-]+))(&|(#.*)?$)"); + gidPattern = Pattern.compile("https?://danbooru.donmai.us/(posts)?.*([?&]tags=([^&]*)(?:&z=([0-9]+))?$)"); Matcher m = gidPattern.matcher(url.toExternalForm()); if (m.matches()) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DerpiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DerpiRipper.java index 1feaf6924..a05386146 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/DerpiRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/DerpiRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -37,7 +39,7 @@ public String getDomain() { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { String u = url.toExternalForm(); String[] uu = u.split("\\?", 2); String newU = uu[0]; @@ -54,7 +56,7 @@ public URL sanitizeURL(URL url) throws MalformedURLException { newU += "&key=" + key; } - return new URL(newU); + return new URI(newU).toURL(); } @Override @@ -99,10 +101,10 @@ public JSONObject getFirstPage() throws IOException { } @Override - public JSONObject getNextPage(JSONObject doc) throws IOException { + public JSONObject getNextPage(JSONObject doc) throws IOException, URISyntaxException { currPage++; String u = currUrl.toExternalForm() + "&page=" + Integer.toString(currPage); - JSONObject json = Http.url(new URL(u)).getJSON(); + JSONObject json = Http.url(new URI(u).toURL()).getJSON(); JSONArray arr; if (json.has("images")) { arr = json.getJSONArray("images"); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java index 
99374ad13..9f26a2681 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java @@ -13,12 +13,13 @@ import java.io.ObjectOutputStream; import java.io.Serializable; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.regex.Matcher; @@ -28,7 +29,6 @@ import org.jsoup.Connection.Method; import org.jsoup.Connection.Response; import org.jsoup.HttpStatusException; -import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import org.jsoup.select.Elements; @@ -383,11 +383,11 @@ private URL urlWithParams(int offset) { try { String url = cleanURL(); if (this.usingCatPath) { - return (new URL(url + "?catpath=/&offset=" + offset)); + return (new URI(url + "?catpath=/&offset=" + offset)).toURL(); } else { - return (new URL(url + "?offset=" + offset)); + return (new URI(url + "?offset=" + offset).toURL()); } - } catch (MalformedURLException e) { + } catch (MalformedURLException | URISyntaxException e) { e.printStackTrace(); } return null; @@ -518,8 +518,8 @@ private boolean checkLogin() { * @author MrPlaygon * */ - private class DeviantartImageThread extends Thread { - private URL url; + private class DeviantartImageThread implements Runnable { + private final URL url; public DeviantartImageThread(URL url) { this.url = url; @@ -533,8 +533,6 @@ public void run() { /** * Get URL to Artwork and return fullsize URL with file ending. 
* - * @param page Like - * https://www.deviantart.com/apofiss/art/warmest-of-the-days-455668450 * @return URL like * https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/intermediary/f/07f7a6bb-2d35-4630-93fc-be249af22b3e/d7jak0y-d20e5932-df72-4d13-b002-5e122037b373.jpg * @@ -630,11 +628,11 @@ private void getFullSizeURL() { } String[] tmpParts = downloadString.split("\\."); //split to get file ending - addURLToDownload(new URL(downloadString), "", "", "", new HashMap(), + addURLToDownload(new URI(downloadString).toURL(), "", "", "", new HashMap(), title + "." + tmpParts[tmpParts.length - 1]); return; - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { e.printStackTrace(); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DribbbleRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DribbbleRipper.java index dc8cd77e1..c463f5a80 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/DribbbleRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/DribbbleRipper.java @@ -41,10 +41,6 @@ public String getGID(URL url) throws MalformedURLException { "dribbble.com/albumid - got " + url + "instead"); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } @Override public Document getNextPage(Document doc) throws IOException { // Find next page diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DuckmoviesRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DuckmoviesRipper.java deleted file mode 100644 index 48c1856c4..000000000 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/DuckmoviesRipper.java +++ /dev/null @@ -1,139 +0,0 @@ -package com.rarchives.ripme.ripper.rippers; - -import com.rarchives.ripme.ripper.AbstractRipper; -import com.rarchives.ripme.ripper.AbstractSingleFileRipper; -import com.rarchives.ripme.utils.Http; -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; - 
-import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -public class DuckmoviesRipper extends AbstractSingleFileRipper { - public DuckmoviesRipper(URL url) throws IOException { - super(url); - } - - @Override - public boolean hasQueueSupport() { - return true; - } - - @Override - public boolean pageContainsAlbums(URL url) { - Pattern pa = Pattern.compile("https?://[a-zA-Z0-9]+.[a-zA-Z]+/(models|category)/([a-zA-Z0-9_-])+/?"); - Matcher ma = pa.matcher(url.toExternalForm()); - if (ma.matches()) { - return true; - } - pa = Pattern.compile("https?://[a-zA-Z0-9]+.[a-zA-Z]+/(models|category)/([a-zA-Z0-9_-])+/page/\\d+/?"); - ma = pa.matcher(url.toExternalForm()); - if (ma.matches()) { - return true; - } - return false; - } - - @Override - public List getAlbumsToQueue(Document doc) { - List urlsToAddToQueue = new ArrayList<>(); - for (Element elem : doc.select(".post > li > div > div > a")) { - urlsToAddToQueue.add(elem.attr("href")); - } - return urlsToAddToQueue; - } - - - private static List explicit_domains = Arrays.asList( - "vidporntube.fun", - "pornbj.fun", - "iwantporn.fun", - "neoporn.fun", - "yayporn.fun", - "freshporn.co", - "palapaja.stream", - "freshporn.co", - "pornvidx.fun", - "palapaja.com" - ); - - @Override - public String getHost() { - return url.toExternalForm().split("/")[2]; - } - - @Override - public String getDomain() { - return url.toExternalForm().split("/")[2]; - } - - @Override - public boolean canRip(URL url) { - String url_name = url.toExternalForm(); - return explicit_domains.contains(url_name.split("/")[2]); - } - - @Override - public Document getFirstPage() throws IOException { - return Http.url(this.url).get(); - } - - @Override - public List getURLsFromPage(Document doc) { - List results = new ArrayList<>(); - String duckMoviesUrl = 
doc.select("iframe").attr("src"); - try { - Document duckDoc = Http.url(new URL(duckMoviesUrl)).get(); - String videoURL = duckDoc.select("source").attr("src"); - // remove any white spaces so we can download the movie without a 400 error - videoURL = videoURL.replaceAll(" ", "%20"); - results.add(videoURL); - } catch (MalformedURLException e) { - LOGGER.error(duckMoviesUrl + " is not a valid url"); - } catch (IOException e) { - LOGGER.error("Unable to load page " + duckMoviesUrl); - e.printStackTrace(); - } - return results; - } - - - @Override - public String getGID(URL url) throws MalformedURLException { - Pattern p = Pattern.compile("https://[a-zA-Z0-9]+\\.[a-zA-Z]+/([a-zA-Z0-9\\-_]+)/?"); - Matcher m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(1); - } - p = Pattern.compile("https?://[a-zA-Z0-9]+.[a-zA-Z]+/(category|models)/([a-zA-Z0-9_-])+/?"); - m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(1); - } - p = Pattern.compile("https?://[a-zA-Z0-9]+.[a-zA-Z]+/(category|models)/([a-zA-Z0-9_-])+/page/\\d+"); - m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(1); - } - - throw new MalformedURLException( - "Expected duckmovies format:" - + "domain.tld/Video-title" - + " Got: " + url); - } - - @Override - public void downloadURL(URL url, int index) { - addURLToDownload(url, "", "", null, null, AbstractRipper.getFileName(url, null, null).replaceAll("%20", "_")); - } - - @Override - public boolean tryResumeDownload() {return true;} -} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/DynastyscansRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/DynastyscansRipper.java index 37d3ad939..f8eaa72d3 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/DynastyscansRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/DynastyscansRipper.java @@ -42,12 +42,6 @@ public String getGID(URL url) throws MalformedURLException { "dynasty-scans.com/chapters/ID 
- got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { Element elem = doc.select("a[id=next_link]").first(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/E621Ripper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/E621Ripper.java index bac6b51fa..1d29a736b 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/E621Ripper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/E621Ripper.java @@ -10,6 +10,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.HashMap; @@ -17,7 +19,9 @@ import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.log4j.Logger; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import org.jsoup.select.Elements; @@ -25,7 +29,7 @@ public class E621Ripper extends AbstractHTMLRipper { - private static final Logger logger = Logger.getLogger(E621Ripper.class); + private static final Logger logger = LogManager.getLogger(E621Ripper.class); private static Pattern gidPattern = null; private static Pattern gidPattern2 = null; @@ -179,22 +183,22 @@ public String getGID(URL url) throws MalformedURLException { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { if (gidPattern2 == null) gidPattern2 = Pattern.compile( "^https?://(www\\.)?e621\\.net/post/search\\?tags=([a-zA-Z0-9$_.+!*'():,%-]+)(/.*)?(#.*)?$"); Matcher m = gidPattern2.matcher(url.toExternalForm()); if (m.matches()) - return new 
URL("https://e621.net/post/index/1/" + m.group(2).replace("+", "%20")); + return new URI("https://e621.net/post/index/1/" + m.group(2).replace("+", "%20")).toURL(); return url; } - public class E621FileThread extends Thread { + public class E621FileThread implements Runnable { - private URL url; - private String index; + private final URL url; + private final String index; public E621FileThread(URL url, String index) { this.url = url; @@ -206,9 +210,9 @@ public void run() { try { String fullSizedImage = getFullSizedImage(url); if (fullSizedImage != null && !fullSizedImage.equals("")) { - addURLToDownload(new URL(fullSizedImage), index); + addURLToDownload(new URI(fullSizedImage).toURL(), index); } - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { logger.error("Unable to get full sized image from " + url); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java index 3cdbae4ee..81f09aa49 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java @@ -1,9 +1,23 @@ package com.rarchives.ripme.ripper.rippers; -import java.io.File; +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.ripper.DownloadThreadPool; +import com.rarchives.ripme.ui.RipStatusMessage; +import com.rarchives.ripme.ui.RipStatusMessage.STATUS; +import com.rarchives.ripme.utils.Http; +import com.rarchives.ripme.utils.RipUtils; +import com.rarchives.ripme.utils.Utils; +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; +import org.jsoup.select.Elements; + import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -11,46 
+25,33 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import com.rarchives.ripme.ui.RipStatusMessage; -import com.rarchives.ripme.utils.RipUtils; -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; - -import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.ripper.DownloadThreadPool; -import com.rarchives.ripme.ui.RipStatusMessage.STATUS; -import com.rarchives.ripme.utils.Http; -import com.rarchives.ripme.utils.Utils; - public class EHentaiRipper extends AbstractHTMLRipper { // All sleep times are in milliseconds - private static final int PAGE_SLEEP_TIME = 3000; - private static final int IMAGE_SLEEP_TIME = 1500; - private static final int IP_BLOCK_SLEEP_TIME = 60 * 1000; - - private String lastURL = null; + private static final int PAGE_SLEEP_TIME = 3000; + private static final int IMAGE_SLEEP_TIME = 1500; + private static final int IP_BLOCK_SLEEP_TIME = 60 * 1000; + private static final Map cookies = new HashMap<>(); - // Thread pool for finding direct image links from "image" pages (html) - private DownloadThreadPool ehentaiThreadPool = new DownloadThreadPool("ehentai"); - @Override - public DownloadThreadPool getThreadPool() { - return ehentaiThreadPool; - } - - // Current HTML document - private Document albumDoc = null; - - private static final Map cookies = new HashMap<>(); static { cookies.put("nw", "1"); cookies.put("tip", "1"); } + private String lastURL = null; + // Thread pool for finding direct image links from "image" pages (html) + private final DownloadThreadPool ehentaiThreadPool = new DownloadThreadPool("ehentai"); + // Current HTML document + private Document albumDoc = null; + public EHentaiRipper(URL url) throws IOException { super(url); } + @Override + public DownloadThreadPool getThreadPool() { + return ehentaiThreadPool; + } + @Override public String getHost() { return "e-hentai"; @@ -61,7 +62,7 @@ public String getDomain() { return 
"e-hentai.org"; } - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title as GID if (albumDoc == null) { @@ -93,12 +94,6 @@ public String getGID(URL url) throws MalformedURLException { + " Got: " + url); } - /** - * Attempts to get page, checks for IP ban, waits. - * @param url - * @return Page document - * @throws IOException If page loading errors, or if retries are exhausted - */ private Document getPageWithRetries(URL url) throws IOException { Document doc; int retries = 3; @@ -106,9 +101,9 @@ private Document getPageWithRetries(URL url) throws IOException { sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm()); LOGGER.info("Retrieving " + url); doc = Http.url(url) - .referrer(this.url) - .cookies(cookies) - .get(); + .referrer(this.url) + .cookies(cookies) + .get(); if (doc.toString().contains("IP address will be automatically banned")) { if (retries == 0) { throw new IOException("Hit rate limit and maximum number of retries, giving up"); @@ -120,8 +115,7 @@ private Document getPageWithRetries(URL url) throws IOException { } catch (InterruptedException e) { throw new IOException("Interrupted while waiting for rate limit to subside"); } - } - else { + } else { return doc; } } @@ -155,7 +149,7 @@ public Document getFirstPage() throws IOException { } @Override - public Document getNextPage(Document doc) throws IOException { + public Document getNextPage(Document doc) throws IOException, URISyntaxException { // Check if we've stopped if (isStopped()) { throw new IOException("Ripping interrupted"); @@ -175,7 +169,7 @@ public Document getNextPage(Document doc) throws IOException { // Sleep before loading next page sleep(PAGE_SLEEP_TIME); // Load next page - Document nextPage = getPageWithRetries(new URL(nextURL)); + Document nextPage = getPageWithRetries(new URI(nextURL).toURL()); this.lastURL = nextURL; return nextPage; } 
@@ -193,27 +187,26 @@ public List getURLsFromPage(Document page) { @Override public void downloadURL(URL url, int index) { - EHentaiImageThread t = new EHentaiImageThread(url, index, this.workingDir); + EHentaiImageThread t = new EHentaiImageThread(url, index, this.workingDir.toPath()); ehentaiThreadPool.addThread(t); try { Thread.sleep(IMAGE_SLEEP_TIME); - } - catch (InterruptedException e) { + } catch (InterruptedException e) { LOGGER.warn("Interrupted while waiting to load next image", e); } } /** * Helper class to find and download images found on "image" pages - * + *

* Handles case when site has IP-banned the user. */ - private class EHentaiImageThread extends Thread { - private URL url; - private int index; - private File workingDir; + private class EHentaiImageThread implements Runnable { + private final URL url; + private final int index; + private final Path workingDir; - EHentaiImageThread(URL url, int index, File workingDir) { + EHentaiImageThread(URL url, int index, Path workingDir) { super(); this.url = url; this.index = index; @@ -246,22 +239,21 @@ private void fetchImage() { Matcher m = p.matcher(imgsrc); if (m.matches()) { // Manually discover filename from URL - String savePath = this.workingDir + File.separator; + String savePath = this.workingDir + "/"; if (Utils.getConfigBoolean("download.save_order", true)) { savePath += String.format("%03d_", index); } savePath += m.group(1); - addURLToDownload(new URL(imgsrc), new File(savePath)); - } - else { + addURLToDownload(new URI(imgsrc).toURL(), Paths.get(savePath)); + } else { // Provide prefix and let the AbstractRipper "guess" the filename String prefix = ""; if (Utils.getConfigBoolean("download.save_order", true)) { prefix = String.format("%03d_", index); } - addURLToDownload(new URL(imgsrc), prefix); + addURLToDownload(new URI(imgsrc).toURL(), prefix); } - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { LOGGER.error("[!] 
Exception while loading/parsing " + this.url, e); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java index 22968216f..7cfd568f3 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java @@ -1,8 +1,7 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; +import java.net.*; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -10,8 +9,6 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import com.rarchives.ripme.utils.Utils; -import org.json.JSONObject; import org.jsoup.Connection.Response; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; @@ -23,13 +20,7 @@ public class EightmusesRipper extends AbstractHTMLRipper { - private Document albumDoc = null; - private Map cookies = new HashMap<>(); - // TODO put up a wiki page on using maps to store titles - // the map for storing the title of each album when downloading sub albums - private Map urlTitles = new HashMap<>(); - - private Boolean rippingSubalbums = false; + private Map cookies = new HashMap<>(); public EightmusesRipper(URL url) throws IOException { super(url); @@ -61,10 +52,10 @@ public String getGID(URL url) throws MalformedURLException { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title as GID - Element titleElement = getFirstPage().select("meta[name=description]").first(); + Element titleElement = getCachedFirstPage().select("meta[name=description]").first(); String title = titleElement.attr("content"); title = title.replace("A huge collection of free porn comics for adults. 
Read", ""); title = title.replace("online for free at 8muses.com", ""); @@ -78,21 +69,18 @@ public String getAlbumTitle(URL url) throws MalformedURLException { @Override public Document getFirstPage() throws IOException { - if (albumDoc == null) { - Response resp = Http.url(url).response(); - cookies.putAll(resp.cookies()); - albumDoc = resp.parse(); - } - return albumDoc; + Response resp = Http.url(url).response(); + cookies.putAll(resp.cookies()); + return resp.parse(); } @Override public List getURLsFromPage(Document page) { List imageURLs = new ArrayList<>(); - int x = 1; // This contains the thumbnails of all images on the page Elements pageImages = page.getElementsByClass("c-tile"); - for (Element thumb : pageImages) { + for (int i = 0; i < pageImages.size(); i++) { + Element thumb = pageImages.get(i); // If true this link is a sub album if (thumb.attr("href").contains("/comics/album/")) { String subUrl = "https://www.8muses.com" + thumb.attr("href"); @@ -116,24 +104,14 @@ public List getURLsFromPage(Document page) { if (thumb.hasAttr("data-cfsrc")) { image = thumb.attr("data-cfsrc"); } else { - // Deobfustace the json data - String rawJson = deobfuscateJSON(page.select("script#ractive-public").html() - .replaceAll(">", ">").replaceAll("<", "<").replace("&", "&")); - JSONObject json = new JSONObject(rawJson); + Element imageElement = thumb.select("img").first(); + image = "https://comics.8muses.com" + imageElement.attr("data-src").replace("/th/", "/fl/"); try { - for (int i = 0; i != json.getJSONArray("pictures").length(); i++) { - image = "https://www.8muses.com/image/fl/" + json.getJSONArray("pictures").getJSONObject(i).getString("publicUri"); - URL imageUrl = new URL(image); - addURLToDownload(imageUrl, getPrefixShort(x), getSubdir(page.select("title").text()), this.url.toExternalForm(), cookies, "", null, true); - // X is our page index - x++; - if (isThisATest()) { - break; - } - } - return imageURLs; - } catch (MalformedURLException e) { + URL imageUrl 
= new URI(image).toURL(); + addURLToDownload(imageUrl, getSubdir(page.select("title").text()), this.url.toExternalForm(), cookies, getPrefixShort(i), "", null, true); + } catch (MalformedURLException | URISyntaxException e) { LOGGER.error("\"" + image + "\" is malformed"); + LOGGER.error(e.getMessage()); } } if (!image.contains("8muses.com")) { @@ -173,25 +151,4 @@ public String getPrefixLong(int index) { public String getPrefixShort(int index) { return String.format("%03d", index); } - - private String deobfuscateJSON(String obfuscatedString) { - StringBuilder deobfuscatedString = new StringBuilder(); - // The first char in one of 8muses obfuscated strings is always ! so we replace it - for (char ch : obfuscatedString.replaceFirst("!", "").toCharArray()){ - deobfuscatedString.append(deobfuscateChar(ch)); - } - return deobfuscatedString.toString(); - } - - private String deobfuscateChar(char c) { - if ((int) c == 32) { - return fromCharCode(32); - } - return fromCharCode(33 + (c + 14) % 94); - - } - - private static String fromCharCode(int... 
codePoints) { - return new String(codePoints, 0, codePoints.length); - } } \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java index d64e96005..0f77e03c5 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EroShareRipper.java @@ -7,6 +7,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -93,11 +95,11 @@ public Document getNextPage(Document doc) throws IOException { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { if (!is_profile(url)) { try { // Attempt to use album title as GID - Element titleElement = getFirstPage().select("meta[property=og:title]").first(); + Element titleElement = getCachedFirstPage().select("meta[property=og:title]").first(); String title = titleElement.attr("content"); title = title.substring(title.lastIndexOf('/') + 1); return getHost() + "_" + getGID(url) + "_" + title.trim(); @@ -119,7 +121,6 @@ public List getURLsFromPage(Document doc) { for (Element img : imgs) { if (img.hasClass("album-image")) { String imageURL = img.attr("src"); - imageURL = imageURL; URLs.add(imageURL); } } @@ -195,7 +196,7 @@ public String getGID(URL url) throws MalformedURLException { throw new MalformedURLException("eroshare album not found in " + url + ", expected https://eroshare.com/album or eroshae.com/album"); } - public static List getURLs(URL url) throws IOException{ + public static List getURLs(URL url) throws IOException, URISyntaxException { Response resp = Http.url(url) .ignoreContentType() @@ -209,7 +210,7 @@ public static List getURLs(URL url) throws IOException{ for 
(Element img : imgs) { if (img.hasClass("album-image")) { String imageURL = img.attr("src"); - URLs.add(new URL(imageURL)); + URLs.add(new URI(imageURL).toURL()); } } //Videos @@ -218,7 +219,7 @@ public static List getURLs(URL url) throws IOException{ if (vid.hasClass("album-video")) { Elements source = vid.getElementsByTag("source"); String videoURL = source.first().attr("src"); - URLs.add(new URL(videoURL)); + URLs.add(new URI(videoURL).toURL()); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ErofusRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ErofusRipper.java index dc535deaa..95528470c 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ErofusRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ErofusRipper.java @@ -9,6 +9,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.HashMap; @@ -48,11 +50,6 @@ public String getGID(URL url) throws MalformedURLException { return m.group(m.groupCount()); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document page) { LOGGER.info(page); @@ -94,8 +91,8 @@ public void ripAlbum(Document page) { Map opts = new HashMap(); opts.put("subdirectory", page.title().replaceAll(" \\| Erofus - Sex and Porn Comics", "").replaceAll(" ", "_")); opts.put("prefix", getPrefix(x)); - addURLToDownload(new URL(image), opts); - } catch (MalformedURLException e) { + addURLToDownload(new URI(image).toURL(), opts); + } catch (MalformedURLException | URISyntaxException e) { LOGGER.info(e.getMessage()); } x++; diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java index 7f056dc26..3035d7465 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java 
+++ b/src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java @@ -2,16 +2,19 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; +import com.rarchives.ripme.utils.Utils; import org.jsoup.Connection.Response; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.utils.Http; @@ -22,10 +25,8 @@ */ public class EromeRipper extends AbstractHTMLRipper { - private static final String EROME_REFERER = "https://www.erome.com/"; - boolean rippingProfile; - + private HashMap cookies = new HashMap<>(); public EromeRipper (URL url) throws IOException { super(url); @@ -33,17 +34,17 @@ public EromeRipper (URL url) throws IOException { @Override public String getDomain() { - return "erome.com"; + return "erome.com"; } @Override public String getHost() { - return "erome"; + return "erome"; } @Override public void downloadURL(URL url, int index) { - addURLToDownload(url, getPrefix(index), "", EROME_REFERER, null, null); + addURLToDownload(url, getPrefix(index), "", "erome.com", this.cookies); } @Override @@ -68,39 +69,40 @@ public List getAlbumsToQueue(Document doc) { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { - try { - // Attempt to use album title as GID - Element titleElement = getFirstPage().select("meta[property=og:title]").first(); - String title = titleElement.attr("content"); - title = title.substring(title.lastIndexOf('/') + 1); - return getHost() + "_" + getGID(url) + "_" + title.trim(); - } catch (IOException e) { - // Fall back to default album naming convention - LOGGER.info("Unable to find title at " + url); - } catch (NullPointerException e) { - return 
getHost() + "_" + getGID(url); - } - return super.getAlbumTitle(url); + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { + try { + // Attempt to use album title as GID + Element titleElement = getCachedFirstPage().select("meta[property=og:title]").first(); + String title = titleElement.attr("content"); + title = title.substring(title.lastIndexOf('/') + 1); + return getHost() + "_" + getGID(url) + "_" + title.trim(); + } catch (IOException e) { + // Fall back to default album naming convention + LOGGER.info("Unable to find title at " + url); + } catch (NullPointerException e) { + return getHost() + "_" + getGID(url); + } + return super.getAlbumTitle(url); } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { - return new URL(url.toExternalForm().replaceAll("https?://erome.com", "https://www.erome.com")); + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { + return new URI(url.toExternalForm().replaceAll("https?://erome.com", "https://www.erome.com")).toURL(); } @Override public List getURLsFromPage(Document doc) { - List URLs = new ArrayList<>(); return getMediaFromPage(doc); } @Override public Document getFirstPage() throws IOException { + this.setAuthCookie(); Response resp = Http.url(this.url) - .ignoreContentType() - .response(); + .cookies(cookies) + .ignoreContentType() + .response(); return resp.parse(); } @@ -126,18 +128,17 @@ public String getGID(URL url) throws MalformedURLException { private List getMediaFromPage(Document doc) { List results = new ArrayList<>(); for (Element el : doc.select("img.img-front")) { - if (el.hasAttr("src")) { - if (el.attr("src").startsWith("https:")) { - results.add(el.attr("src")); - } else { - results.add("https:" + el.attr("src")); - } - } else if (el.hasAttr("data-src")) { - //to add images that are not loaded( as all images are lasyloaded as we scroll). 
- results.add(el.attr("data-src")); - } - - } + if (el.hasAttr("data-src")) { + //to add images that are not loaded( as all images are lasyloaded as we scroll). + results.add(el.attr("data-src")); + } else if (el.hasAttr("src")) { + if (el.attr("src").startsWith("https:")) { + results.add(el.attr("src")); + } else { + results.add("https:" + el.attr("src")); + } + } + } for (Element el : doc.select("source[label=HD]")) { if (el.attr("src").startsWith("https:")) { results.add(el.attr("src")); @@ -154,7 +155,22 @@ private List getMediaFromPage(Document doc) { results.add("https:" + el.attr("src")); } } + + if (results.size() == 0) { + if (cookies.isEmpty()) { + LOGGER.warn("You might try setting erome.laravel_session manually " + + "if you think this page definitely contains media."); + } + } + return results; } + private void setAuthCookie() { + String sessionId = Utils.getConfigString("erome.laravel_session", null); + if (sessionId != null) { + cookies.put("laravel_session", sessionId); + } + } + } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ErotivRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ErotivRipper.java index 10e73346a..045110850 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ErotivRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ErotivRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -11,7 +13,6 @@ import org.jsoup.Connection.Response; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.utils.Http; @@ -57,8 +58,8 @@ public Document getFirstPage() throws IOException { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { - return new 
URL(url.toExternalForm().replaceAll("https?://www.erotiv.io", "https://erotiv.io")); + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { + return new URI(url.toExternalForm().replaceAll("https?://www.erotiv.io", "https://erotiv.io")).toURL(); } @Override diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FemjoyhunterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FemjoyhunterRipper.java index 1922002b1..2661d0559 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/FemjoyhunterRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FemjoyhunterRipper.java @@ -12,7 +12,6 @@ import org.jsoup.nodes.Element; import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; public class FemjoyhunterRipper extends AbstractHTMLRipper { @@ -41,12 +40,6 @@ public String getGID(URL url) throws MalformedURLException { "femjoyhunter.com/ID - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FitnakedgirlsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FitnakedgirlsRipper.java index de6fb73d8..51d5f15f8 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/FitnakedgirlsRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FitnakedgirlsRipper.java @@ -1,72 +1,66 @@ -package com.rarchives.ripme.ripper.rippers; - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.ArrayList; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; - -import 
com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; - -public class FitnakedgirlsRipper extends AbstractHTMLRipper { - - public FitnakedgirlsRipper(URL url) throws IOException { - super(url); - } - - @Override - public String getHost() { - return "fitnakedgirls"; - } - - @Override - public String getDomain() { - return "fitnakedgirls.com"; - } - - @Override - public String getGID(URL url) throws MalformedURLException { - Pattern p; - Matcher m; - - p = Pattern.compile("^.*fitnakedgirls\\.com/gallery/(.+)$"); - m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(1); - } - - throw new MalformedURLException( - "Expected fitnakedgirls.com gallery format: " + "fitnakedgirls.com/gallery/####" + " Got: " + url); - } - - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - - @Override - public List getURLsFromPage(Document doc) { - List imageURLs = new ArrayList<>(); - - Elements imgs = doc.select("div[class*=wp-tiles-tile-bg] > img"); - for (Element img : imgs) { - String imgSrc = img.attr("src"); - imageURLs.add(imgSrc); - } - - return imageURLs; - } - - @Override - public void downloadURL(URL url, int index) { - // Send referrer when downloading images - addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null); - } +package com.rarchives.ripme.ripper.rippers; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; +import org.jsoup.select.Elements; + +import com.rarchives.ripme.ripper.AbstractHTMLRipper; + +public class FitnakedgirlsRipper extends AbstractHTMLRipper { + + public FitnakedgirlsRipper(URL url) throws IOException { + super(url); + } + + @Override + public String getHost() { + return "fitnakedgirls"; + } + + 
@Override + public String getDomain() { + return "fitnakedgirls.com"; + } + + @Override + public String getGID(URL url) throws MalformedURLException { + Pattern p; + Matcher m; + + p = Pattern.compile("^.*fitnakedgirls\\.com/gallery/(.+)$"); + m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return m.group(1); + } + + throw new MalformedURLException( + "Expected fitnakedgirls.com gallery format: " + "fitnakedgirls.com/gallery/####" + " Got: " + url); + } + + @Override + public List getURLsFromPage(Document doc) { + List imageURLs = new ArrayList<>(); + + Elements imgs = doc.select("div[class*=wp-tiles-tile-bg] > img"); + for (Element img : imgs) { + String imgSrc = img.attr("src"); + imageURLs.add(imgSrc); + } + + return imageURLs; + } + + @Override + public void downloadURL(URL url, int index) { + // Send referrer when downloading images + addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null); + } } \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java index 6591dd011..bba284f14 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java @@ -1,10 +1,9 @@ package com.rarchives.ripme.ripper.rippers; -import java.io.File; import java.io.IOException; -import java.net.HttpURLConnection; -import java.net.MalformedURLException; -import java.net.URL; +import java.net.*; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; import java.util.regex.Matcher; @@ -163,8 +162,8 @@ private String getUserID(String username) throws IOException { } @Override - public JSONObject getFirstPage() throws IOException { - URL apiURL = new URL(baseURL + "&consumer_key=" + CONSUMER_KEY); + public JSONObject getFirstPage() throws IOException, URISyntaxException { + 
URL apiURL = new URI(baseURL + "&consumer_key=" + CONSUMER_KEY).toURL(); LOGGER.debug("apiURL: " + apiURL); JSONObject json = Http.url(apiURL).getJSON(); @@ -231,7 +230,7 @@ else if (baseURL.contains("/blogs?")) { } @Override - public JSONObject getNextPage(JSONObject json) throws IOException { + public JSONObject getNextPage(JSONObject json) throws IOException, URISyntaxException { if (isThisATest()) { return null; } @@ -248,9 +247,9 @@ public JSONObject getNextPage(JSONObject json) throws IOException { sleep(500); ++page; - URL apiURL = new URL(baseURL + URL apiURL = new URI(baseURL + "&page=" + page - + "&consumer_key=" + CONSUMER_KEY); + + "&consumer_key=" + CONSUMER_KEY).toURL(); return Http.url(apiURL).getJSON(); } @@ -295,14 +294,9 @@ public List getURLsFromJSON(JSONObject json) { } } } - if (imageURL == null) { - LOGGER.error("Failed to find image for photo " + photo.toString()); - } - else { - imageURLs.add(imageURL); - if (isThisATest()) { - break; - } + imageURLs.add(imageURL); + if (isThisATest()) { + break; } } return imageURLs; @@ -310,13 +304,13 @@ public List getURLsFromJSON(JSONObject json) { private boolean urlExists(String url) { try { - HttpURLConnection connection = (HttpURLConnection) new URL(url).openConnection(); + HttpURLConnection connection = (HttpURLConnection) new URI(url).toURL().openConnection(); connection.setRequestMethod("HEAD"); if (connection.getResponseCode() != 200) { throw new IOException("Couldn't find full-size image at " + url); } return true; - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { return false; } } @@ -330,8 +324,8 @@ public boolean keepSortOrder() { public void downloadURL(URL url, int index) { String u = url.toExternalForm(); String[] fields = u.split("/"); - String prefix = getPrefix(index) + fields[fields.length - 3]; - File saveAs = new File(getWorkingDir() + File.separator + prefix + ".jpg"); + String prefix = "/" + getPrefix(index) + fields[fields.length - 3]; + Path saveAs = 
Paths.get(getWorkingDir() + prefix + ".jpg"); addURLToDownload(url, saveAs, "", null, false); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java index 320884245..c58a7e717 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.*; import java.util.regex.Matcher; @@ -20,7 +22,6 @@ public class FlickrRipper extends AbstractHTMLRipper { - private Document albumDoc = null; private final DownloadThreadPool flickrThreadPool; private enum UrlType { @@ -63,7 +64,7 @@ public String getDomain() { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { String sUrl = url.toExternalForm(); // Strip out https sUrl = sUrl.replace("https://secure.flickr.com", "http://www.flickr.com"); @@ -74,7 +75,7 @@ public URL sanitizeURL(URL url) throws MalformedURLException { } sUrl += "pool"; } - return new URL(sUrl); + return new URI(sUrl).toURL(); } // FLickr is one of those sites what includes a api key in sites javascript // TODO let the user provide their own api key @@ -129,8 +130,8 @@ private JSONObject getJSON(String page, String apiKey) { String apiURL = null; try { apiURL = apiURLBuilder(getAlbum(url.toExternalForm()), page, apiKey); - pageURL = new URL(apiURL); - } catch (MalformedURLException e) { + pageURL = new URI(apiURL).toURL(); + } catch (MalformedURLException | URISyntaxException e) { LOGGER.error("Unable to get api link " + apiURL + " is malformed"); } try { @@ -172,13 +173,13 @@ private Album getAlbum(String url) throws MalformedURLException { } @Override - public String getAlbumTitle(URL url) 
throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { if (!url.toExternalForm().contains("/sets/")) { return super.getAlbumTitle(url); } try { // Attempt to use album title as GID - Document doc = getFirstPage(); + Document doc = getCachedFirstPage(); String user = url.toExternalForm(); user = user.substring(user.indexOf("/photos/") + "/photos/".length()); user = user.substring(0, user.indexOf("/")); @@ -228,13 +229,6 @@ public String getGID(URL url) throws MalformedURLException { + " Got: " + url); } - @Override - public Document getFirstPage() throws IOException { - if (albumDoc == null) { - albumDoc = Http.url(url).get(); - } - return albumDoc; - } @Override public List getURLsFromPage(Document doc) { @@ -268,7 +262,7 @@ public List getURLsFromPage(Document doc) { JSONObject data = (JSONObject) pictures.get(i); try { addURLToDownload(getLargestImageURL(data.getString("id"), apiKey)); - } catch (MalformedURLException e) { + } catch (MalformedURLException | URISyntaxException e) { LOGGER.error("Flickr MalformedURLException: " + e.getMessage()); } @@ -291,11 +285,11 @@ public void downloadURL(URL url, int index) { addURLToDownload(url, getPrefix(index)); } - private URL getLargestImageURL(String imageID, String apiKey) throws MalformedURLException { + private URL getLargestImageURL(String imageID, String apiKey) throws MalformedURLException, URISyntaxException { TreeMap imageURLMap = new TreeMap<>(); try { - URL imageAPIURL = new URL("https://www.flickr.com/services/rest/?method=flickr.photos.getSizes&api_key=" + apiKey + "&photo_id=" + imageID + "&format=json&nojsoncallback=1"); + URL imageAPIURL = new URI("https://www.flickr.com/services/rest/?method=flickr.photos.getSizes&api_key=" + apiKey + "&photo_id=" + imageID + "&format=json&nojsoncallback=1").toURL(); JSONArray imageSizes = new JSONObject(Http.url(imageAPIURL).ignoreContentType().get().text()).getJSONObject("sizes").getJSONArray("size"); 
for (int i = 0; i < imageSizes.length(); i++) { JSONObject imageInfo = imageSizes.getJSONObject(i); @@ -310,6 +304,6 @@ private URL getLargestImageURL(String imageID, String apiKey) throws MalformedUR LOGGER.error("IOException while looking at image sizes: " + e.getMessage()); } - return new URL(imageURLMap.lastEntry().getValue()); + return new URI(imageURLMap.lastEntry().getValue()).toURL(); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FooktubeRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FooktubeRipper.java index 3cda70b2c..fed1abe02 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/FooktubeRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FooktubeRipper.java @@ -10,16 +10,9 @@ import com.rarchives.ripme.ripper.AbstractSingleFileRipper; import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; - -import com.rarchives.ripme.ripper.VideoRipper; -import com.rarchives.ripme.utils.Http; public class FooktubeRipper extends AbstractSingleFileRipper { - private static final String HOST = "mulemax"; - public FooktubeRipper(URL url) throws IOException { super(url); } @@ -34,10 +27,6 @@ public String getDomain() { return "mulemax.com"; } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } @Override public boolean canRip(URL url) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FreeComicOnlineRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FreeComicOnlineRipper.java index e08d77fd2..a39d3b9b2 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/FreeComicOnlineRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FreeComicOnlineRipper.java @@ -44,12 +44,6 @@ public String getGID(URL url) throws MalformedURLException { "freecomiconline.me/TITLE/CHAPTER - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an 
instance field of the superclass - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { String nextPage = doc.select("div.select-pagination a").get(1).attr("href"); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java index 683c791b9..dbb46fe1c 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java @@ -1,10 +1,12 @@ package com.rarchives.ripme.ripper.rippers; -import java.io.File; -import java.io.FileOutputStream; import java.io.IOException; +import java.io.OutputStream; import java.net.MalformedURLException; import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -15,11 +17,10 @@ import com.rarchives.ripme.ui.RipStatusMessage; import com.rarchives.ripme.utils.Utils; import org.jsoup.Connection.Response; -import org.jsoup.HttpStatusException; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; -import org.jsoup.safety.Whitelist; +import org.jsoup.safety.Safelist; import org.jsoup.select.Elements; import com.rarchives.ripme.ripper.AbstractHTMLRipper; @@ -91,14 +92,13 @@ public Document getNextPage(Document doc) throws IOException { String nextUrl = urlBase + nextPageUrl.first().attr("href"); sleep(500); - Document nextPage = Http.url(nextUrl).cookies(cookies).get(); - return nextPage; + return Http.url(nextUrl).cookies(cookies).get(); } private String getImageFromPost(String url) { sleep(1000); - Document d = null; + Document d; try { d = Http.url(url).cookies(cookies).get(); Elements links = d.getElementsByTag("a"); @@ -125,6 +125,9 @@ public List getURLsFromPage(Document page) { urls.add(urlToAdd); } } + if (isStopped() || 
isThisATest()) { + break; + } } return urls; } @@ -164,7 +167,7 @@ public String getDescription(String page) { ele.select("br").append("\\n"); ele.select("p").prepend("\\n\\n"); LOGGER.debug("Returning description at " + page); - String tempPage = Jsoup.clean(ele.html().replaceAll("\\\\n", System.getProperty("line.separator")), "", Whitelist.none(), new Document.OutputSettings().prettyPrint(false)); + String tempPage = Jsoup.clean(ele.html().replaceAll("\\\\n", System.getProperty("line.separator")), "", Safelist.none(), new Document.OutputSettings().prettyPrint(false)); return documentz.select("meta[property=og:title]").attr("content") + "\n" + tempPage; // Overridden saveText takes first line and makes it the file name. } catch (IOException ioe) { LOGGER.info("Failed to get description " + page + " : '" + ioe.getMessage() + "'"); @@ -181,24 +184,22 @@ public boolean saveText(URL url, String subdirectory, String text, int index) { } String newText = ""; String saveAs = ""; - File saveFileAs; + Path saveFileAs; saveAs = text.split("\n")[0]; saveAs = saveAs.replaceAll("^(\\S+)\\s+by\\s+(.*)$", "$2_$1"); for (int i = 1;i < text.split("\n").length; i++) { newText = newText.replace("\\","").replace("/","").replace("~","") + "\n" + text.split("\n")[i]; } try { - if (!subdirectory.equals("")) { - subdirectory = File.separator + subdirectory; - } - saveFileAs = new File( - workingDir.getCanonicalPath() + saveFileAs = Paths.get( + workingDir + + "/" + subdirectory - + File.separator + + "/" + saveAs + ".txt"); // Write the file - FileOutputStream out = (new FileOutputStream(saveFileAs)); + OutputStream out = Files.newOutputStream(saveFileAs); out.write(text.getBytes()); out.close(); } catch (IOException e) { @@ -206,9 +207,13 @@ public boolean saveText(URL url, String subdirectory, String text, int index) { return false; } LOGGER.debug("Downloading " + url + "'s description to " + saveFileAs); - if (!saveFileAs.getParentFile().exists()) { + if 
(!Files.exists(saveFileAs.getParent())) { LOGGER.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent())); - saveFileAs.getParentFile().mkdirs(); + try { + Files.createDirectory(saveFileAs.getParent()); + } catch (IOException e) { + e.printStackTrace(); + } } return true; } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java index d88b16e87..62a60fccd 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -40,7 +42,7 @@ public String getDomain() { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { String u = url.toExternalForm(); if (u.contains("/thumbs/")) { u = u.replace("/thumbs/", "/full/"); @@ -48,7 +50,7 @@ public URL sanitizeURL(URL url) throws MalformedURLException { if (u.contains("/expanded/")) { u = u.replaceAll("/expanded/", "/full/"); } - return new URL(u); + return new URI(u).toURL(); } @Override diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/GfycatRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/GfycatRipper.java deleted file mode 100644 index c542c6dcf..000000000 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/GfycatRipper.java +++ /dev/null @@ -1,160 +0,0 @@ -package com.rarchives.ripme.ripper.rippers; - - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.ArrayList; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import com.rarchives.ripme.ripper.AbstractHTMLRipper; 
-import org.json.JSONArray; -import org.json.JSONObject; -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; - -import com.rarchives.ripme.utils.Http; - - -public class GfycatRipper extends AbstractHTMLRipper { - - private static final String HOST = "gfycat.com"; - String username = ""; - String cursor = ""; - String count = "30"; - String REFERRER = "www.reddit.com"; - - - - public GfycatRipper(URL url) throws IOException { - super(new URL(url.toExternalForm().split("-")[0].replace("thumbs.", ""))); - } - - @Override - public String getDomain() { - return "gfycat.com"; - } - - @Override - public String getHost() { - return "gfycat"; - } - - @Override - public boolean canRip(URL url) { - return url.getHost().endsWith(HOST); - } - - @Override - public URL sanitizeURL(URL url) throws MalformedURLException { - String sUrl = url.toExternalForm(); - sUrl = sUrl.replace("/gifs/detail", ""); - sUrl = sUrl.replace("/amp", ""); - return new URL(sUrl); - } - - public boolean isProfile() { - Pattern p = Pattern.compile("^https?://[wm.]*gfycat\\.com/@([a-zA-Z0-9\\.\\-\\_]+).*$"); - Matcher m = p.matcher(url.toExternalForm()); - return m.matches(); - } - - @Override - public Document getFirstPage() throws IOException { - if (!isProfile()) { - return Http.url(url).referrer(REFERRER).get(); - } else { - username = getGID(url); - return Http.url(new URL("https://api.gfycat.com/v1/users/" + username + "/gfycats")).referrer((REFERRER)).ignoreContentType().get(); - } - } - - @Override - public void downloadURL(URL url, int index) { - addURLToDownload(url, getPrefix(index)); - } - - @Override - public String getGID(URL url) throws MalformedURLException { - Pattern p = Pattern.compile("^https?://(?:thumbs\\.|[wm\\.]*)gfycat\\.com/@?([a-zA-Z0-9\\.\\-\\_]+).*$"); - Matcher m = p.matcher(url.toExternalForm()); - - if (m.matches()) - return m.group(1); - - throw new MalformedURLException( - "Expected gfycat.com format: " - + "gfycat.com/id 
or " - + "thumbs.gfycat.com/id.gif" - + " Got: " + url); - } - - private String stripHTMLTags(String t) { - t = t.replaceAll("\n" + - " \n" + - " ", ""); - t = t.replaceAll("\n" + - "", ""); - t = t.replaceAll("\n", ""); - t = t.replaceAll("=\"\"", ""); - return t; - } - - @Override - public Document getNextPage(Document doc) throws IOException { - if (cursor.equals("")) { - throw new IOException("No more pages"); - } - return Http.url(new URL("https://api.gfycat.com/v1/users/" + username + "/gfycats?count=" + count + "&cursor=" + cursor)).ignoreContentType().get(); - } - - @Override - public List getURLsFromPage(Document doc) { - List result = new ArrayList<>(); - if (isProfile()) { - JSONObject page = new JSONObject(stripHTMLTags(doc.html())); - JSONArray content = page.getJSONArray("gfycats"); - for (int i = 0; i < content.length(); i++) { - result.add(content.getJSONObject(i).getString("mp4Url")); - } - cursor = page.getString("cursor"); - } else { - Elements videos = doc.select("script"); - for (Element el : videos) { - String json = el.html(); - if (json.startsWith("{")) { - JSONObject page = new JSONObject(json); - result.add(page.getJSONObject("video").getString("contentUrl")); - } - } - } - return result; - } - - /** - * Helper method for retrieving video URLs. 
- * @param url URL to gfycat page - * @return URL to video - * @throws IOException - */ - public static String getVideoURL(URL url) throws IOException { - LOGGER.info("Retrieving " + url.toExternalForm()); - - //Sanitize the URL first - url = new URL(url.toExternalForm().replace("/gifs/detail", "")); - - Document doc = Http.url(url).get(); - Elements videos = doc.select("script"); - for (Element el : videos) { - String json = el.html(); - if (json.startsWith("{")) { - JSONObject page = new JSONObject(json); - return page.getJSONObject("video").getString("contentUrl"); - } - } - throw new IOException(); - } -} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/GfycatporntubeRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/GfycatporntubeRipper.java index fd8c292a7..bdb58ad2c 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/GfycatporntubeRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/GfycatporntubeRipper.java @@ -11,8 +11,6 @@ import com.rarchives.ripme.ripper.AbstractSingleFileRipper; import org.jsoup.nodes.Document; -import com.rarchives.ripme.utils.Http; - public class GfycatporntubeRipper extends AbstractSingleFileRipper { public GfycatporntubeRipper(URL url) throws IOException { @@ -40,12 +38,6 @@ public String getGID(URL url) throws MalformedURLException { "gfycatporntube.com/NAME - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java index 2afc79d16..49cbfc604 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java @@ -2,6 
+2,7 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -13,11 +14,8 @@ import org.jsoup.select.Elements; import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; public class GirlsOfDesireRipper extends AbstractHTMLRipper { - // Current HTML document - private Document albumDoc = null; public GirlsOfDesireRipper(URL url) throws IOException { super(url); @@ -32,10 +30,10 @@ public String getDomain() { return "girlsofdesire.org"; } - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title as GID - Document doc = getFirstPage(); + Document doc = getCachedFirstPage(); Elements elems = doc.select(".albumName"); return getHost() + "_" + elems.first().text(); } catch (Exception e) { @@ -62,14 +60,6 @@ public String getGID(URL url) throws MalformedURLException { + " Got: " + url); } - @Override - public Document getFirstPage() throws IOException { - if (albumDoc == null) { - albumDoc = Http.url(url).get(); - } - return albumDoc; - } - @Override public List getURLsFromPage(Document doc) { List imageURLs = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HbrowseRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HbrowseRipper.java index fd3b23c24..040ca9780 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/HbrowseRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HbrowseRipper.java @@ -2,6 +2,7 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -49,9 +50,9 @@ public Document getFirstPage() throws IOException { } @Override - public String getAlbumTitle(URL url) throws 
MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { - Document doc = getFirstPage(); + Document doc = getCachedFirstPage(); String title = doc.select("div[id=main] > table.listTable > tbody > tr > td.listLong").first().text(); return getHost() + "_" + title + "_" + getGID(url); } catch (Exception e) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java index cb5215233..2b8ac9674 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java @@ -2,6 +2,7 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -90,7 +91,7 @@ public Document getFirstPage() throws IOException { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { return getHost() + "_" + getGID(url); } catch (Exception e) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaiNexusRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaiNexusRipper.java index ca709418a..4d28f7a2a 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaiNexusRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaiNexusRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.Base64; @@ -80,16 +82,15 @@ protected List getURLsFromJSON(JSONObject json) throws JSONException { } @Override - protected JSONObject getFirstPage() throws IOException { + protected JSONObject getFirstPage() throws 
IOException, URISyntaxException { String jsonEncodedString = getJsonEncodedStringFromPage(); String jsonDecodedString = decodeJsonString(jsonEncodedString); return new JSONObject(jsonDecodedString); } - public String getJsonEncodedStringFromPage() throws MalformedURLException, IOException - { + public String getJsonEncodedStringFromPage() throws MalformedURLException, IOException, URISyntaxException { // Image data only appears on the /read/ page and not on the /view/ one. - URL readUrl = new URL(String.format("http://hentainexus.com/read/%s",getGID(url))); + URL readUrl = new URI(String.format("http://hentainexus.com/read/%s",getGID(url))).toURL(); Document document = Http.url(readUrl).response().parse(); for (Element scripts : document.getElementsByTag("script")) { @@ -143,7 +144,7 @@ The following code is a Java adaptation of the initRender() JavaScript function } magicByte = (byte) (magicByte & 0x7); - ArrayList newArray = new ArrayList(); + ArrayList newArray = new ArrayList<>(); for (int i = 0x0; i < 0x100; i++) { newArray.add(i); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaidudeRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaidudeRipper.java index 7950f0cf1..246258597 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaidudeRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaidudeRipper.java @@ -10,6 +10,7 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; import java.net.URL; import java.util.ArrayList; import java.util.HashMap; @@ -50,12 +51,6 @@ public String getGID(URL url) throws MalformedURLException { "Expected hqporner URL format: " + "hentaidude.com/VIDEO - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); @@ -84,7 
+79,7 @@ public DownloadThreadPool getThreadPool() { return hentaidudeThreadPool; } - private class HentaidudeDownloadThread extends Thread { + private class HentaidudeDownloadThread implements Runnable { private URL url; @@ -97,7 +92,7 @@ public HentaidudeDownloadThread(URL url, int index) { public void run() { try { Document doc = Http.url(url).get(); - URL videoSourceUrl = new URL(getVideoUrl(doc)); + URL videoSourceUrl = new URI(getVideoUrl(doc)).toURL(); addURLToDownload(videoSourceUrl, "", "", "", null, getVideoName(), "mp4"); } catch (Exception e) { LOGGER.error("Could not get video url for " + getVideoName(), e); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoxRipper.java index a4e5895d5..d6dba4190 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoxRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoxRipper.java @@ -2,6 +2,7 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -12,7 +13,6 @@ import org.jsoup.nodes.Element; import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; public class HentaifoxRipper extends AbstractHTMLRipper { @@ -41,12 +41,6 @@ public String getGID(URL url) throws MalformedURLException { "https://hentaifox.com/gallery/ID - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { LOGGER.info(doc); @@ -59,9 +53,9 @@ public List getURLsFromPage(Document doc) { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { - Document doc = 
getFirstPage(); + Document doc = getCachedFirstPage(); String title = doc.select("div.info > h1").first().text(); return getHost() + "_" + title + "_" + getGID(url); } catch (Exception e) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaiimageRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaiimageRipper.java index df7bfb963..45628e825 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/HentaiimageRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HentaiimageRipper.java @@ -52,13 +52,6 @@ public String getGID(URL url) throws MalformedURLException { "https://hentai-image.com/image/ID - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java index 3196c1394..d312b75b0 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -13,7 +15,6 @@ import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.utils.Http; -import org.jsoup.nodes.Element; public class HitomiRipper extends AbstractHTMLRipper { @@ -35,20 +36,20 @@ public String getDomain() { @Override public String getGID(URL url) throws MalformedURLException { - Pattern p = Pattern.compile("https://hitomi.la/galleries/([\\d]+).html"); + Pattern p = Pattern.compile("https://hitomi.la/(cg|doujinshi|gamecg|manga)/(.+).html"); Matcher m = p.matcher(url.toExternalForm()); 
if (m.matches()) { galleryId = m.group(1); return m.group(1); } throw new MalformedURLException("Expected hitomi URL format: " + - "https://hitomi.la/galleries/ID.html - got " + url + " instead"); + "https://hitomi.la/(cg|doujinshi|gamecg|manga)/ID.html - got " + url + " instead"); } @Override - public Document getFirstPage() throws IOException { + public Document getFirstPage() throws IOException, URISyntaxException { // if we go to /GALLERYID.js we get a nice json array of all images in the gallery - return Http.url(new URL(url.toExternalForm().replaceAll("hitomi", "ltn.hitomi").replaceAll(".html", ".js"))).ignoreContentType().get(); + return Http.url(new URI(url.toExternalForm().replaceAll("hitomi", "ltn.hitomi").replaceAll(".html", ".js")).toURL()).ignoreContentType().get(); } @@ -65,7 +66,7 @@ public List getURLsFromPage(Document doc) { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title and username as GID Document doc = Http.url(url).get(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HqpornerRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HqpornerRipper.java index 8d13f1138..0f69c75be 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/HqpornerRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HqpornerRipper.java @@ -11,6 +11,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -63,9 +65,8 @@ public String getGID(URL url) throws MalformedURLException { } @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); + public Document getFirstPage() throws IOException, URISyntaxException { + return 
super.getFirstPage(); } @Override @@ -130,7 +131,7 @@ public boolean useByteProgessBar() { return true; } - private class HqpornerDownloadThread extends Thread { + private class HqpornerDownloadThread implements Runnable { private URL hqpornerVideoPageUrl; //private int index; @@ -164,10 +165,10 @@ public void fetchVideo() { } if (downloadUrl != null) { - addURLToDownload(new URL(downloadUrl), "", subdirectory, "", null, getVideoName(), "mp4"); + addURLToDownload(new URI(downloadUrl).toURL(), "", subdirectory, "", null, getVideoName(), "mp4"); } - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { LOGGER.error("[!] Exception while downloading video.", e); } } @@ -215,7 +216,7 @@ private String getVideoFromUnknown(String videoPageurl) { try { logger.info("Trying to download from unknown video host " + videoPageurl); - URL url = new URL(videoPageurl); + URL url = new URI(videoPageurl).toURL(); Response response = Http.url(url).referrer(hqpornerVideoPageUrl).response(); Document doc = response.parse(); @@ -245,7 +246,7 @@ private String getVideoFromUnknown(String videoPageurl) { } } - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { logger.error("Unable to get video url using generic methods."); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java index 5b4812584..154206552 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java @@ -46,12 +46,6 @@ public String getGID(URL url) throws MalformedURLException { "hypnohub.net/pool/show/ID - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - private String ripPost(String url) throws IOException { LOGGER.info(url); Document doc = 
Http.url(url).get(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagearnRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagearnRipper.java deleted file mode 100644 index 062217b21..000000000 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagearnRipper.java +++ /dev/null @@ -1,112 +0,0 @@ -package com.rarchives.ripme.ripper.rippers; - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.ArrayList; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; - -import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; - -public class ImagearnRipper extends AbstractHTMLRipper { - - public ImagearnRipper(URL url) throws IOException { - super(url); - } - - @Override - public String getHost() { - return "imagearn"; - } - @Override - public String getDomain() { - return "imagearn.com"; - } - - @Override - public String getGID(URL url) throws MalformedURLException { - Pattern p = Pattern.compile("^.*imagearn.com/+gallery.php\\?id=([0-9]+).*$"); - Matcher m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(1); - } - throw new MalformedURLException( - "Expected imagearn.com gallery formats: " - + "imagearn.com/gallery.php?id=####..." - + " Got: " + url); - } - - public URL sanitizeURL(URL url) throws MalformedURLException { - Pattern p = Pattern.compile("^.*imagearn.com/+image.php\\?id=[0-9]+.*$"); - Matcher m = p.matcher(url.toExternalForm()); - if (m.matches()) { - // URL points to imagearn *image*, not gallery - try { - url = getGalleryFromImage(url); - } catch (Exception e) { - LOGGER.error("[!] 
" + e.getMessage(), e); - } - } - return url; - } - - private URL getGalleryFromImage(URL url) throws IOException { - Document doc = Http.url(url).get(); - for (Element link : doc.select("a[href~=^gallery\\.php.*$]")) { - LOGGER.info("LINK: " + link.toString()); - if (link.hasAttr("href") - && link.attr("href").contains("gallery.php")) { - url = new URL("http://imagearn.com/" + link.attr("href")); - LOGGER.info("[!] Found gallery from given link: " + url); - return url; - } - } - throw new IOException("Failed to find gallery at URL " + url); - } - - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - - @Override - public String getAlbumTitle(URL url) throws MalformedURLException { - try { - Document doc = getFirstPage(); - String title = doc.select("h3 > strong").first().text(); // profile name - return getHost() + "_" + title + "_" + getGID(url); - } catch (Exception e) { - // Fall back to default album naming convention - LOGGER.warn("Failed to get album title from " + url, e); - } - return super.getAlbumTitle(url); - } - - @Override - public List getURLsFromPage(Document doc) { - List imageURLs = new ArrayList<>(); - for (Element thumb : doc.select("div#gallery > div > a")) { - String imageURL = thumb.attr("href"); - try { - Document imagedoc = new Http("http://imagearn.com/" + imageURL).get(); - String image = imagedoc.select("a.thickbox").first().attr("href"); - imageURLs.add(image); - } catch (IOException e) { - LOGGER.warn("Was unable to download page: " + imageURL); - } - } - return imageURLs; - } - - @Override - public void downloadURL(URL url, int index) { - addURLToDownload(url, getPrefix(index)); - sleep(1000); - } -} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java index 3aca67cfc..0699273f1 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java +++ 
b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java @@ -6,20 +6,24 @@ import com.rarchives.ripme.utils.Utils; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; + +import org.apache.commons.lang.StringUtils; +import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import org.jsoup.select.Elements; public class ImagebamRipper extends AbstractHTMLRipper { - // Current HTML document - private Document albumDoc = null; - // Thread pool for finding direct image links from "image" pages (html) private DownloadThreadPool imagebamThreadPool = new DownloadThreadPool("imagebam"); @Override @@ -45,7 +49,7 @@ public String getGID(URL url) throws MalformedURLException { Pattern p; Matcher m; - p = Pattern.compile("^https?://[wm.]*imagebam.com/gallery/([a-zA-Z0-9]+).*$"); + p = Pattern.compile("^https?://[wm.]*imagebam.com/(gallery|view)/([a-zA-Z0-9]+).*$"); m = p.matcher(url.toExternalForm()); if (m.matches()) { return m.group(1); @@ -57,14 +61,6 @@ public String getGID(URL url) throws MalformedURLException { + " Got: " + url); } - @Override - public Document getFirstPage() throws IOException { - if (albumDoc == null) { - albumDoc = Http.url(url).get(); - } - return albumDoc; - } - @Override public Document getNextPage(Document doc) throws IOException { // Find next page @@ -80,7 +76,7 @@ public Document getNextPage(Document doc) throws IOException { @Override public List getURLsFromPage(Document doc) { List imageURLs = new ArrayList<>(); - for (Element thumb : doc.select("div > a[target=_blank]:not(.footera)")) { + for (Element thumb : doc.select("div > a[class=thumbnail]:not(.footera)")) { imageURLs.add(thumb.attr("href")); } return imageURLs; @@ -94,18 +90,15 @@ 
public void downloadURL(URL url, int index) { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title as GID - Elements elems = getFirstPage().select("legend"); + Elements elems = getCachedFirstPage().select("[id=gallery-name]"); String title = elems.first().text(); LOGGER.info("Title text: '" + title + "'"); - Pattern p = Pattern.compile("^(.*)\\s\\d* image.*$"); - Matcher m = p.matcher(title); - if (m.matches()) { - return getHost() + "_" + getGID(url) + " (" + m.group(1).trim() + ")"; + if (StringUtils.isNotBlank(title)) { + return getHost() + "_" + getGID(url) + " (" + title + ")"; } - LOGGER.info("Doesn't match " + p.pattern()); } catch (Exception e) { // Fall back to default album naming convention LOGGER.warn("Failed to get album title from " + url, e); @@ -118,9 +111,9 @@ public String getAlbumTitle(URL url) throws MalformedURLException { * * Handles case when site has IP-banned the user. */ - private class ImagebamImageThread extends Thread { - private URL url; //link to "image page" - private int index; //index in album + private class ImagebamImageThread implements Runnable { + private final URL url; //link to "image page" + private final int index; //index in album ImagebamImageThread(URL url, int index) { super(); @@ -138,19 +131,19 @@ public void run() { */ private void fetchImage() { try { - Document doc = Http.url(url).get(); + Map cookies = new HashMap<>(); + cookies.put("nsfw_inter", "1"); + Document doc = Jsoup.connect(url.toString()) + .cookies(cookies) + .get(); + // Find image Elements metaTags = doc.getElementsByTag("meta"); String imgsrc = "";//initialize, so no NullPointerExceptions should ever happen. - - for (Element metaTag: metaTags) { - //the direct link to the image seems to always be linked in the part of the html. 
- if (metaTag.attr("property").equals("og:image")) { - imgsrc = metaTag.attr("content"); - LOGGER.info("Found URL " + imgsrc); - break;//only one (useful) image possible for an "image page". - } + Elements elem = doc.select("img[class*=main-image]"); + if ((elem != null) && (elem.size() > 0)) { + imgsrc = elem.first().attr("src"); } //for debug, or something goes wrong. @@ -165,8 +158,8 @@ private void fetchImage() { prefix = String.format("%03d_", index); } - addURLToDownload(new URL(imgsrc), prefix); - } catch (IOException e) { + addURLToDownload(new URI(imgsrc).toURL(), prefix); + } catch (IOException | URISyntaxException e) { LOGGER.error("[!] Exception while loading/parsing " + this.url, e); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java index 14d21aa9f..4fcf22012 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java @@ -1,8 +1,13 @@ package com.rarchives.ripme.ripper.rippers; +import java.io.File; +import java.io.FileWriter; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; +import java.text.MessageFormat; import java.util.ArrayList; import java.util.List; import java.util.regex.Matcher; @@ -10,6 +15,7 @@ import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; +import org.jsoup.select.Elements; import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.ui.RipStatusMessage.STATUS; @@ -17,13 +23,11 @@ public class ImagefapRipper extends AbstractHTMLRipper { - private Document albumDoc = null; - private boolean isNewAlbumType = false; - private int callsMade = 0; private long startTime = System.nanoTime(); private static final int RETRY_LIMIT = 10; + private static final int HTTP_RETRY_LIMIT = 3; private static final int 
RATE_LIMIT_HOUR = 1000; // All sleep times are in milliseconds @@ -49,54 +53,40 @@ public String getDomain() { * Reformat given URL into the desired format (all images on single page) */ @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { String gid = getGID(url); - String newURL = "https://www.imagefap.com/gallery.php?"; - if (isNewAlbumType) { - newURL += "p"; - } - newURL += "gid=" + gid + "&view=2"; + String newURL = "https://www.imagefap.com/pictures/" + gid + "/random-string"; LOGGER.debug("Changed URL from " + url + " to " + newURL); - return new URL(newURL); + return new URI(newURL).toURL(); } @Override public String getGID(URL url) throws MalformedURLException { Pattern p; Matcher m; + // Old format (I suspect no longer supported) p = Pattern.compile("^.*imagefap.com/gallery.php\\?pgid=([a-f0-9]+).*$"); m = p.matcher(url.toExternalForm()); if (m.matches()) { - isNewAlbumType = true; return m.group(1); } + p = Pattern.compile("^.*imagefap.com/gallery.php\\?gid=([0-9]+).*$"); m = p.matcher(url.toExternalForm()); if (m.matches()) { return m.group(1); } - p = Pattern.compile("^.*imagefap.com/pictures/([0-9]+).*$"); - m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(1); - } - p = Pattern.compile("^.*imagefap.com/pictures/([a-f0-9]+).*$"); + p = Pattern.compile("^.*imagefap.com/gallery/([a-f0-9]+).*$"); m = p.matcher(url.toExternalForm()); if (m.matches()) { - isNewAlbumType = true; return m.group(1); } - p = Pattern.compile("^.*imagefap.com/gallery/([0-9]+).*$"); - m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(1); - } - p = Pattern.compile("^.*imagefap.com/gallery/([a-f0-9]+).*$"); + // most recent format + p = Pattern.compile("^.*imagefap.com/pictures/([a-f0-9]+).*$"); m = p.matcher(url.toExternalForm()); if (m.matches()) { - isNewAlbumType = true; return m.group(1); } @@ -109,18 +99,20 @@ public 
String getGID(URL url) throws MalformedURLException { @Override public Document getFirstPage() throws IOException { - if (albumDoc == null) { - albumDoc = getPageWithRetries(url); - } - return albumDoc; + + Document firstPage = getPageWithRetries(url); + + sendUpdate(STATUS.LOADING_RESOURCE, "Loading first page..."); + + return firstPage; } @Override - public Document getNextPage(Document doc) throws IOException { + public Document getNextPage(Document doc) throws IOException, URISyntaxException { String nextURL = null; for (Element a : doc.select("a.link3")) { if (a.text().contains("next")) { - nextURL = "https://imagefap.com/gallery.php" + a.attr("href"); + nextURL = this.sanitizeURL(this.url) + a.attr("href"); break; } } @@ -129,26 +121,50 @@ public Document getNextPage(Document doc) throws IOException { } // Sleep before fetching next page. sleep(PAGE_SLEEP_TIME); + + sendUpdate(STATUS.LOADING_RESOURCE, "Loading next page URL: " + nextURL); + LOGGER.info("Attempting to load next page URL: " + nextURL); // Load next page - Document nextPage = getPageWithRetries(new URL(nextURL)); + Document nextPage = getPageWithRetries(new URI(nextURL).toURL()); return nextPage; } @Override public List getURLsFromPage(Document doc) { + List imageURLs = new ArrayList<>(); + + LOGGER.debug("Trying to get URLs from document... "); + for (Element thumb : doc.select("#gallery img")) { if (!thumb.hasAttr("src") || !thumb.hasAttr("width")) { continue; } String image = getFullSizedImage("https://www.imagefap.com" + thumb.parent().attr("href")); + + if (image == null) { + for (int i = 0; i < HTTP_RETRY_LIMIT; i++) { + image = getFullSizedImage("https://www.imagefap.com" + thumb.parent().attr("href")); + if (image != null) { + break; + } + sleep(PAGE_SLEEP_TIME); + } + if (image == null) + throw new RuntimeException("Unable to extract image URL from single image page! 
Unable to continue"); + } + + LOGGER.debug("Adding imageURL: '" + image + "'"); + imageURLs.add(image); if (isThisATest()) { break; } } + LOGGER.debug("Adding " + imageURLs.size() + " URLs to download"); + return imageURLs; } @@ -159,10 +175,10 @@ public void downloadURL(URL url, int index) { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title as GID - String title = getFirstPage().title(); + String title = getCachedFirstPage().title(); title = title.replace("Porn Pics & Porn GIFs", ""); title = title.replace(" ", "_"); String toReturn = getHost() + "_" + title + "_" + getGID(url); @@ -177,9 +193,30 @@ private String getFullSizedImage(String pageURL) { // Sleep before fetching image. sleep(IMAGE_SLEEP_TIME); - Document doc = getPageWithRetries(new URL(pageURL)); - return doc.select("img#mainPhoto").attr("src"); - } catch (IOException e) { + Document doc = getPageWithRetries(new URI(pageURL).toURL()); + + String framedPhotoUrl = doc.select("img#mainPhoto").attr("data-src"); + + // we use a no query param version of the URL to reduce failure rate because of some query params that change between the li elements and the mainPhotoURL + String noQueryPhotoUrl = framedPhotoUrl.split("\\?")[0]; + + LOGGER.debug("noQueryPhotoUrl: " + noQueryPhotoUrl); + + // we look for a li > a element who's framed attribute starts with the noQueryPhotoUrl (only reference in the page to the full URL) + Elements selectedItem = doc.select("ul.thumbs > li > a[framed^='"+noQueryPhotoUrl+"']"); + + // the fullsize URL is in the href attribute + String fullSizedUrl = selectedItem.attr("href"); + + if("".equals(fullSizedUrl)) + throw new IOException("JSoup full URL extraction failed from '" + selectedItem.html() + "'"); + + LOGGER.debug("fullSizedUrl: " + fullSizedUrl); + + return fullSizedUrl; + + } catch (IOException | URISyntaxException e) 
{ + LOGGER.debug("Unable to get full size image URL from page: " + pageURL + " because: " + e.getMessage()); return null; } } @@ -191,9 +228,10 @@ private String getFullSizedImage(String pageURL) { * @throws IOException If page loading errors, or if retries are exhausted */ private Document getPageWithRetries(URL url) throws IOException { - Document doc; + Document doc = null; int retries = RETRY_LIMIT; while (true) { + sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm()); // For debugging rate limit checker. Useful to track wheter the timeout should be altered or not. @@ -201,15 +239,42 @@ private Document getPageWithRetries(URL url) throws IOException { checkRateLimit(); LOGGER.info("Retrieving " + url); - doc = Http.url(url) - .get(); + + boolean httpCallThrottled = false; + int httpAttempts = 0; - - if (doc.toString().contains("Your IP made too many requests to our servers and we need to check that you are a real human being")) { + // we attempt the http call, knowing it can fail for network reasons + while(true) { + httpAttempts++; + try { + doc = Http.url(url).get(); + } catch(IOException e) { + + LOGGER.info("Retrieving " + url + " error: " + e.getMessage()); + + if(e.getMessage().contains("404")) + throw new IOException("Gallery/Page not found!"); + + if(httpAttempts < HTTP_RETRY_LIMIT) { + sendUpdate(STATUS.DOWNLOAD_WARN, "HTTP call failed: " + e.getMessage() + " retrying " + httpAttempts + " / " + HTTP_RETRY_LIMIT); + + // we sleep for a few seconds + sleep(PAGE_SLEEP_TIME); + continue; + } else { + sendUpdate(STATUS.DOWNLOAD_WARN, "HTTP call failed too many times: " + e.getMessage() + " treating this as a throttle"); + httpCallThrottled = true; + } + } + // no errors, we exit + break; + } + + if (httpCallThrottled || (doc != null && doc.toString().contains("Your IP made too many requests to our servers and we need to check that you are a real human being"))) { if (retries == 0) { throw new IOException("Hit rate limit and maximum number of retries, 
giving up"); } - String message = "Hit rate limit while loading " + url + ", sleeping for " + IP_BLOCK_SLEEP_TIME + "ms, " + retries + " retries remaining"; + String message = "Probably hit rate limit while loading " + url + ", sleeping for " + IP_BLOCK_SLEEP_TIME + "ms, " + retries + " retries remaining"; LOGGER.warn(message); sendUpdate(STATUS.DOWNLOAD_WARN, message); retries--; @@ -218,8 +283,7 @@ private Document getPageWithRetries(URL url) throws IOException { } catch (InterruptedException e) { throw new IOException("Interrupted while waiting for rate limit to subside"); } - } - else { + } else { return doc; } } @@ -249,4 +313,5 @@ private long checkRateLimit() { return duration; } + } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java index f50a84a04..4691c7c63 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -56,11 +58,6 @@ public String getGID(URL url) throws MalformedURLException { + " Got: " + url); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - public List getURLsFromPage(Document doc) { List imageURLs = new ArrayList<>(); for (Element thumb : doc.select("a[target=_blank]")) { @@ -79,9 +76,9 @@ public void downloadURL(URL url, int index) { * * Handles case when site has IP-banned the user. 
*/ - private class ImagevenueImageThread extends Thread { - private URL url; - private int index; + private class ImagevenueImageThread implements Runnable { + private final URL url; + private final int index; ImagevenueImageThread(URL url, int index) { super(); @@ -113,8 +110,8 @@ private void fetchImage() { if (Utils.getConfigBoolean("download.save_order", true)) { prefix = String.format("%03d_", index); } - addURLToDownload(new URL(imgsrc), prefix); - } catch (IOException e) { + addURLToDownload(new URI(imgsrc).toURL(), prefix); + } catch (IOException | URISyntaxException e) { LOGGER.error("[!] Exception while loading/parsing " + this.url, e); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImgboxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImgboxRipper.java index f3050a13f..b32fcad44 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImgboxRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImgboxRipper.java @@ -40,10 +40,6 @@ public String getGID(URL url) throws MalformedURLException { "imgbox.com/g/albumid - got " + url + "instead"); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } @Override public List getURLsFromPage(Document doc) { List imageURLs = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java index 93cb809e7..4904ac60a 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java @@ -1,10 +1,14 @@ package com.rarchives.ripme.ripper.rippers; -import java.io.File; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; 
import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -15,15 +19,15 @@ import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; -import org.jsoup.safety.Whitelist; +import org.jsoup.safety.Safelist; import org.jsoup.select.Elements; -import com.rarchives.ripme.ripper.AlbumRipper; +import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.ui.RipStatusMessage.STATUS; import com.rarchives.ripme.utils.Http; import com.rarchives.ripme.utils.Utils; -public class ImgurRipper extends AlbumRipper { +public class ImgurRipper extends AbstractHTMLRipper { private static final String DOMAIN = "imgur.com", HOST = "imgur"; @@ -38,7 +42,6 @@ enum ALBUM_TYPE { USER_ALBUM, USER_IMAGES, SINGLE_IMAGE, - SERIES_OF_IMAGES, SUBREDDIT } @@ -58,6 +61,7 @@ public boolean allowDuplicates() { return albumType == ALBUM_TYPE.USER; } + @Override public boolean canRip(URL url) { if (!url.getHost().endsWith(DOMAIN)) { return false; @@ -71,7 +75,24 @@ public boolean canRip(URL url) { return true; } - public URL sanitizeURL(URL url) throws MalformedURLException { + @Override + protected String getDomain() { + return DOMAIN; + } + + @Override + protected void downloadURL(URL url, int index) { + // No-op as we override rip() method + } + + @Override + protected List getURLsFromPage(Document page) { + // No-op as we override rip() method + return Arrays.asList(); + } + + @Override + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { String u = url.toExternalForm(); if (u.indexOf('#') >= 0) { u = u.substring(0, u.indexOf('#')); @@ -79,11 +100,17 @@ public URL sanitizeURL(URL url) throws MalformedURLException { u = u.replace("imgur.com/gallery/", "imgur.com/a/"); u = u.replace("https?://m\\.imgur\\.com", "http://imgur.com"); u = u.replace("https?://i\\.imgur\\.com", "http://imgur.com"); - return new URL(u); + return new URI(u).toURL(); } + @Override public String getAlbumTitle(URL url) throws 
MalformedURLException { - String gid = getGID(url); + String gid = null; + try { + gid = getGID(url); + } catch (URISyntaxException e) { + throw new MalformedURLException(e.getMessage()); + } if (this.albumType == ALBUM_TYPE.ALBUM) { try { // Attempt to use album title as GID @@ -91,7 +118,7 @@ public String getAlbumTitle(URL url) throws MalformedURLException { albumDoc = Http.url(url).get(); } - Elements elems = null; + Elements elems; /* // TODO: Add config option for including username in album title. @@ -106,15 +133,13 @@ public String getAlbumTitle(URL url) throws MalformedURLException { } */ - String title = null; + String title; final String defaultTitle1 = "Imgur: The most awesome images on the Internet"; final String defaultTitle2 = "Imgur: The magic of the Internet"; LOGGER.info("Trying to get album title"); elems = albumDoc.select("meta[property=og:title]"); - if (elems != null) { - title = elems.attr("content"); - LOGGER.debug("Title is " + title); - } + title = elems.attr("content"); + LOGGER.debug("Title is " + title); // This is here encase the album is unnamed, to prevent // Imgur: The most awesome images on the Internet from being added onto the album name if (title.contains(defaultTitle1) || title.contains(defaultTitle2)) { @@ -124,27 +149,17 @@ public String getAlbumTitle(URL url) throws MalformedURLException { title = ""; LOGGER.debug("Trying to use title tag to get title"); elems = albumDoc.select("title"); - if (elems != null) { - if (elems.text().contains(defaultTitle1) || elems.text().contains(defaultTitle2)) { - LOGGER.debug("Was unable to get album title or album was untitled"); - } - else { - title = elems.text(); - } + if (elems.text().contains(defaultTitle1) || elems.text().contains(defaultTitle2)) { + LOGGER.debug("Was unable to get album title or album was untitled"); + } + else { + title = elems.text(); } } String albumTitle = "imgur_"; - /* - // TODO: Add config option (see above) - if (user != null) { - albumTitle += "user_" + 
user; - } - */ albumTitle += gid; - if (title != null) { - albumTitle += "_" + title; - } + albumTitle += "_" + title; return albumTitle; } catch (IOException e) { @@ -156,118 +171,83 @@ public String getAlbumTitle(URL url) throws MalformedURLException { @Override public void rip() throws IOException { - switch (albumType) { - case ALBUM: - // Fall-through - case USER_ALBUM: - LOGGER.info("Album type is USER_ALBUM"); - // Don't call getAlbumTitle(this.url) with this - // as it seems to cause the album to be downloaded to a subdir. - ripAlbum(this.url); - break; - case SERIES_OF_IMAGES: - LOGGER.info("Album type is SERIES_OF_IMAGES"); - ripAlbum(this.url); - break; - case SINGLE_IMAGE: - LOGGER.info("Album type is SINGLE_IMAGE"); - ripSingleImage(this.url); - break; - case USER: - LOGGER.info("Album type is USER"); - ripUserAccount(url); - break; - case SUBREDDIT: - LOGGER.info("Album type is SUBREDDIT"); - ripSubreddit(url); - break; - case USER_IMAGES: - LOGGER.info("Album type is USER_IMAGES"); - ripUserImages(url); - break; + try { + switch (albumType) { + case ALBUM: + // Fall-through + case USER_ALBUM: + LOGGER.info("Album type is USER_ALBUM"); + // Don't call getAlbumTitle(this.url) with this + // as it seems to cause the album to be downloaded to a subdir. 
+ ripAlbum(this.url); + break; + case SINGLE_IMAGE: + LOGGER.info("Album type is SINGLE_IMAGE"); + ripSingleImage(this.url); + break; + case USER: + LOGGER.info("Album type is USER"); + ripUserAccount(url); + break; + case SUBREDDIT: + LOGGER.info("Album type is SUBREDDIT"); + ripSubreddit(url); + break; + case USER_IMAGES: + LOGGER.info("Album type is USER_IMAGES"); + ripUserImages(url); + break; + } + } catch (URISyntaxException e) { + throw new IOException("Failed ripping " + this.url, e); } waitForThreads(); } - private void ripSingleImage(URL url) throws IOException { + private void ripSingleImage(URL url) throws IOException, URISyntaxException { String strUrl = url.toExternalForm(); - Document document = getDocument(strUrl); - Matcher m = getEmbeddedJsonMatcher(document); - if (m.matches()) { - JSONObject json = new JSONObject(m.group(1)).getJSONObject("image"); - addURLToDownload(extractImageUrlFromJson(json), ""); - } + var gid = getGID(url); + var json = getSingleImageData(String.format("https://api.imgur.com/post/v1/media/%s?include=media,adconfig,account", gid)); + var media = json.getJSONArray("media"); + if (media.length()==0) { + throw new IOException(String.format("Failed to fetch image for url %s", strUrl)); + } + if (media.length()>1) { + LOGGER.warn(String.format("Got multiple images for url %s", strUrl)); + } + addURLToDownload(extractImageUrlFromJson((JSONObject)media.get(0)), ""); } - private void ripAlbum(URL url) throws IOException { + private void ripAlbum(URL url) throws IOException, URISyntaxException { ripAlbum(url, ""); } - private void ripAlbum(URL url, String subdirectory) throws IOException { - int index = 0; + private void ripAlbum(URL url, String subdirectory) throws IOException, URISyntaxException { + int index; this.sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm()); index = 0; ImgurAlbum album = getImgurAlbum(url); for (ImgurImage imgurImage : album.images) { stopCheck(); - String saveAs = workingDir.getCanonicalPath(); - 
if (!saveAs.endsWith(File.separator)) { - saveAs += File.separator; - } + Path saveAs = workingDir.toPath(); if (subdirectory != null && !subdirectory.equals("")) { - saveAs += subdirectory; + saveAs = saveAs.resolve(subdirectory); } - if (!saveAs.endsWith(File.separator)) { - saveAs += File.separator; - } - File subdirFile = new File(saveAs); - if (!subdirFile.exists()) { - subdirFile.mkdirs(); + if (!Files.exists(saveAs)) { + Files.createDirectory(saveAs); } index += 1; + var imgPath = imgurImage.getSaveAs().replaceAll("\\?\\d", ""); if (Utils.getConfigBoolean("download.save_order", true)) { - saveAs += String.format("%03d_", index); - } - saveAs += imgurImage.getSaveAs(); - saveAs = saveAs.replaceAll("\\?\\d", ""); - addURLToDownload(imgurImage.url, new File(saveAs)); - } - } - - public static ImgurAlbum getImgurSeries(URL url) throws IOException { - Pattern p = Pattern.compile("^.*imgur\\.com/([a-zA-Z0-9,]*).*$"); - Matcher m = p.matcher(url.toExternalForm()); - ImgurAlbum album = new ImgurAlbum(url); - if (m.matches()) { - String[] imageIds = m.group(1).split(","); - for (String imageId : imageIds) { - // TODO: Fetch image with ID imageId - LOGGER.debug("Fetching image info for ID " + imageId); - try { - JSONObject json = Http.url("https://api.imgur.com/2/image/" + imageId + ".json").getJSON(); - if (!json.has("image")) { - continue; - } - JSONObject image = json.getJSONObject("image"); - if (!image.has("links")) { - continue; - } - JSONObject links = image.getJSONObject("links"); - if (!links.has("original")) { - continue; - } - String original = links.getString("original"); - ImgurImage theImage = new ImgurImage(new URL(original)); - album.addImage(theImage); - } catch (Exception e) { - LOGGER.error("Got exception while fetching imgur ID " + imageId, e); - } + saveAs = saveAs.resolve(String.format("%03d_%s", index, imgPath)); + } else { + saveAs = saveAs.resolve(imgPath); } + addURLToDownload(imgurImage.url, saveAs); } - return album; } - public static 
ImgurAlbum getImgurAlbum(URL url) throws IOException { + public static ImgurAlbum getImgurAlbum(URL url) throws IOException, URISyntaxException { String strUrl = url.toExternalForm(); if (!strUrl.contains(",")) { strUrl += "/all"; @@ -275,13 +255,11 @@ public static ImgurAlbum getImgurAlbum(URL url) throws IOException { LOGGER.info(" Retrieving " + strUrl); Document doc = getAlbumData("https://api.imgur.com/3/album/" + strUrl.split("/a/")[1]); // Try to use embedded JSON to retrieve images - LOGGER.info(Jsoup.clean(doc.body().toString(), Whitelist.none())); - try { - JSONObject json = new JSONObject(Jsoup.clean(doc.body().toString(), Whitelist.none())); + JSONObject json = new JSONObject(Jsoup.clean(doc.body().toString(), Safelist.none())); JSONArray jsonImages = json.getJSONObject("data").getJSONArray("images"); return createImgurAlbumFromJsonArray(url, jsonImages); - } catch (JSONException e) { + } catch (JSONException | URISyntaxException e) { LOGGER.debug("Error while parsing JSON at " + url + ", continuing", e); } @@ -309,54 +287,48 @@ public static ImgurAlbum getImgurAlbum(URL url) throws IOException { image = "http:" + thumb.select("img").attr("src"); } else { // Unable to find image in this div - LOGGER.error("[!] Unable to find image in div: " + thumb.toString()); + LOGGER.error("[!] 
Unable to find image in div: " + thumb); continue; } if (image.endsWith(".gif") && Utils.getConfigBoolean("prefer.mp4", false)) { image = image.replace(".gif", ".mp4"); } - ImgurImage imgurImage = new ImgurImage(new URL(image)); + ImgurImage imgurImage = new ImgurImage(new URI(image).toURL()); imgurAlbum.addImage(imgurImage); } return imgurAlbum; } - private static Matcher getEmbeddedJsonMatcher(Document doc) { - Pattern p = Pattern.compile("^.*widgetFactory.mergeConfig\\('gallery', (.*?)\\);.*$", Pattern.DOTALL); - return p.matcher(doc.body().html()); - } - - private static ImgurAlbum createImgurAlbumFromJsonArray(URL url, JSONArray jsonImages) throws MalformedURLException { + private static ImgurAlbum createImgurAlbumFromJsonArray(URL url, JSONArray jsonImages) throws MalformedURLException, URISyntaxException { ImgurAlbum imgurAlbum = new ImgurAlbum(url); int imagesLength = jsonImages.length(); for (int i = 0; i < imagesLength; i++) { JSONObject ob = jsonImages.getJSONObject(i); - imgurAlbum.addImage(new ImgurImage( new URL(ob.getString("link")))); + imgurAlbum.addImage(new ImgurImage( new URI(ob.getString("link")).toURL())); } return imgurAlbum; } - private static ImgurImage createImgurImageFromJson(JSONObject json) throws MalformedURLException { - return new ImgurImage(extractImageUrlFromJson(json)); - } - - private static URL extractImageUrlFromJson(JSONObject json) throws MalformedURLException { + private static URL extractImageUrlFromJson(JSONObject json) throws MalformedURLException, URISyntaxException { String ext = json.getString("ext"); + if (!ext.startsWith(".")) { + ext = "." 
+ ext; + } if (ext.equals(".gif") && Utils.getConfigBoolean("prefer.mp4", false)) { ext = ".mp4"; } - return new URL( - "http://i.imgur.com/" - + json.getString("hash") - + ext); + return new URI( + "https://i.imgur.com/" + + json.getString("id") + + ext).toURL(); } - private static Document getDocument(String strUrl) throws IOException { - return Jsoup.connect(strUrl) + private static JSONObject getSingleImageData(String strUrl) throws IOException { + return Http.url(strUrl) .userAgent(USER_AGENT) .timeout(10 * 1000) - .maxBodySize(0) - .get(); + .header("Authorization", "Client-ID " + Utils.getConfigString("imgur.client_id", "546c25a59c58ad7")) + .getJSON(); } private static Document getAlbumData(String strUrl) throws IOException { @@ -369,35 +341,71 @@ private static Document getAlbumData(String strUrl) throws IOException { .get(); } + private static JSONObject getUserData(String userUrl) throws IOException { + return Http.url(userUrl) + .userAgent(USER_AGENT) + .timeout(10 * 1000) + .header("Authorization", "Client-ID " + Utils.getConfigString("imgur.client_id", "546c25a59c58ad7")) + .getJSON(); + } + /** * Rips all albums in an imgur user's account. 
* @param url - * URL to imgur user account (http://username.imgur.com) - * @throws IOException + * URL to imgur user account (http://username.imgur.com | https://imgur.com/user/username) */ - private void ripUserAccount(URL url) throws IOException { + private void ripUserAccount(URL url) throws IOException, URISyntaxException { + int cPage = -1, cImage = 0; + String apiUrl = "https://api.imgur.com/3/account/%s/submissions/%d/newest?album_previews=1"; + // Strip 'user_' from username + var username = getGID(url).replace("user_", ""); LOGGER.info("Retrieving " + url); sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm()); - Document doc = Http.url(url).get(); - for (Element album : doc.select("div.cover a")) { - stopCheck(); - if (!album.hasAttr("href") - || !album.attr("href").contains("imgur.com/a/")) { - continue; + + while (true) { + cPage += 1; + var pageUrl = String.format(apiUrl, username, cPage); + var json = getUserData(pageUrl); + var success = json.getBoolean("success"); + var status = json.getInt("status"); + if (!success || status!=200) { + throw new IOException(String.format("Unexpected status code %d for url %s and page %d", status, url, cPage)); } - String albumID = album.attr("href").substring(album.attr("href").lastIndexOf('/') + 1); - URL albumURL = new URL("http:" + album.attr("href") + "/noscript"); - try { - ripAlbum(albumURL, albumID); - Thread.sleep(SLEEP_BETWEEN_ALBUMS * 1000); - } catch (Exception e) { - LOGGER.error("Error while ripping album: " + e.getMessage(), e); + var data = json.getJSONArray("data"); + if (data.isEmpty()) { + // Data array is empty for pages beyond the last page + break; + } + for (int i = 0; i < data.length(); i++) { + cImage += 1; + String prefixOrSubdir = ""; + if (Utils.getConfigBoolean("download.save_order", true)) { + prefixOrSubdir = String.format("%03d_", cImage); + } + var d = (JSONObject)data.get(i); + var l = d.getString("link"); + if (d.getBoolean("is_album")) { + // For album links with multiple 
images create a prefixed folder with album id + prefixOrSubdir += d.getString("id"); + ripAlbum(new URI(l).toURL(), prefixOrSubdir); + try { + Thread.sleep(SLEEP_BETWEEN_ALBUMS * 1000L); + } catch (InterruptedException e) { + LOGGER.error(String.format("Error! Interrupted ripping album %s for user account %s", l, username), e); + } + } else { + // For direct links + if (d.has("mp4") && Utils.getConfigBoolean("prefer.mp4", false)) { + l = d.getString("mp4"); + } + addURLToDownload(new URI(l).toURL(), prefixOrSubdir); + } } } } - private void ripUserImages(URL url) throws IOException { + private void ripUserImages(URL url) { int page = 0; int imagesFound = 0; int imagesTotal = 0; String jsonUrl = url.toExternalForm().replace("/all", "/ajax/images"); if (jsonUrl.contains("#")) { @@ -417,12 +425,12 @@ private void ripUserImages(URL url) throws IOException { for (int i = 0; i < images.length(); i++) { imagesFound++; JSONObject image = images.getJSONObject(i); - String imageUrl = "http://i.imgur.com/" + image.getString("hash") + image.getString("ext"); + String imageUrl = "https://i.imgur.com/" + image.getString("hash") + image.getString("ext"); String prefix = ""; if (Utils.getConfigBoolean("download.save_order", true)) { prefix = String.format("%03d_", imagesFound); } - addURLToDownload(new URL(imageUrl), prefix); + addURLToDownload(new URI(imageUrl).toURL(), prefix); } if (imagesFound >= imagesTotal) { break; @@ -435,7 +443,7 @@ private void ripUserImages(URL url) throws IOException { } } - private void ripSubreddit(URL url) throws IOException { + private void ripSubreddit(URL url) throws IOException, URISyntaxException { int page = 0; while (true) { stopCheck(); @@ -455,7 +463,7 @@ private void ripSubreddit(URL url) throws IOException { if (image.contains("b.")) { image = image.replace("b.", "."); } - URL imageURL = new URL(image); + URL imageURL = new URI(image).toURL(); addURLToDownload(imageURL); } if (imgs.isEmpty()) { @@ -477,29 +485,30 @@ public String 
getHost() { } @Override - public String getGID(URL url) throws MalformedURLException { - Pattern p = null; - Matcher m = null; + public String getGID(URL url) throws MalformedURLException, URISyntaxException { + Pattern p; + Matcher m; - p = Pattern.compile("^https?://(www\\.|m\\.)?imgur\\.com/(a|gallery)/([a-zA-Z0-9]{5,}).*$"); + p = Pattern.compile("^https?://(?:www\\.|m\\.)?imgur\\.com/gallery/(?:(?:[a-zA-Z0-9]*/)?.*-)?([a-zA-Z0-9]+)$"); m = p.matcher(url.toExternalForm()); if (m.matches()) { // Imgur album or gallery albumType = ALBUM_TYPE.ALBUM; String gid = m.group(m.groupCount()); - this.url = new URL("http://imgur.com/a/" + gid); + this.url = new URI("https://imgur.com/a/" + gid).toURL(); return gid; } - p = Pattern.compile("^https?://(www\\.|m\\.)?imgur\\.com/(a|gallery|t)/[a-zA-Z0-9]*/([a-zA-Z0-9]{5,}).*$"); + // Match urls with path /a + p = Pattern.compile("^https?://(?:www\\.|m\\.)?imgur\\.com/(?:a|t)/(?:(?:[a-zA-Z0-9]*/)?.*-)?([a-zA-Z0-9]+).*$"); m = p.matcher(url.toExternalForm()); if (m.matches()) { // Imgur album or gallery albumType = ALBUM_TYPE.ALBUM; String gid = m.group(m.groupCount()); - this.url = new URL("http://imgur.com/a/" + gid); + this.url = new URI("https://imgur.com/a/" + gid).toURL(); return gid; } - p = Pattern.compile("^https?://([a-zA-Z0-9\\-]{3,})\\.imgur\\.com/?$"); + p = Pattern.compile("^https?://([a-zA-Z0-9\\-]{4,})\\.imgur\\.com/?$"); m = p.matcher(url.toExternalForm()); if (m.matches()) { // Root imgur account @@ -510,6 +519,14 @@ public String getGID(URL url) throws MalformedURLException { albumType = ALBUM_TYPE.USER; return "user_" + gid; } + // Pattern for new imgur user url https://imgur.com/user/username + p = Pattern.compile("^https?://(?:www\\.|m\\.)?imgur\\.com/user/([a-zA-Z0-9]+).*$"); + m = p.matcher(url.toExternalForm()); + if (m.matches()) { + String gid = m.group(1); + albumType = ALBUM_TYPE.USER; + return "user_" + gid; + } p = Pattern.compile("^https?://([a-zA-Z0-9\\-]{3,})\\.imgur\\.com/all.*$"); m = 
p.matcher(url.toExternalForm()); if (m.matches()) { @@ -529,13 +546,13 @@ public String getGID(URL url) throws MalformedURLException { if (m.matches()) { // Imgur subreddit aggregator albumType = ALBUM_TYPE.SUBREDDIT; - String album = m.group(2); + StringBuilder album = new StringBuilder(m.group(2)); for (int i = 3; i <= m.groupCount(); i++) { if (m.group(i) != null) { - album += "_" + m.group(i).replace("/", ""); + album.append("_").append(m.group(i).replace("/", "")); } } - return album; + return album.toString(); } p = Pattern.compile("^https?://(i\\.|www\\.|m\\.)?imgur\\.com/r/(\\w+)/([a-zA-Z0-9,]{5,}).*$"); m = p.matcher(url.toExternalForm()); @@ -544,7 +561,7 @@ public String getGID(URL url) throws MalformedURLException { albumType = ALBUM_TYPE.ALBUM; String subreddit = m.group(m.groupCount() - 1); String gid = m.group(m.groupCount()); - this.url = new URL("http://imgur.com/r/" + subreddit + "/" + gid); + this.url = new URI("https://imgur.com/r/" + subreddit + "/" + gid).toURL(); return "r_" + subreddit + "_" + gid; } p = Pattern.compile("^https?://(i\\.|www\\.|m\\.)?imgur\\.com/([a-zA-Z0-9]{5,})$"); @@ -554,29 +571,14 @@ public String getGID(URL url) throws MalformedURLException { albumType = ALBUM_TYPE.SINGLE_IMAGE; return m.group(m.groupCount()); } - p = Pattern.compile("^https?://(i\\.|www\\.|m\\.)?imgur\\.com/([a-zA-Z0-9,]{5,}).*$"); - m = p.matcher(url.toExternalForm()); - if (m.matches()) { - // Series of imgur images - albumType = ALBUM_TYPE.SERIES_OF_IMAGES; - String gid = m.group(m.groupCount()); - if (!gid.contains(",")) { - throw new MalformedURLException("Imgur image doesn't contain commas"); - } - return gid.replaceAll(",", "-"); - } throw new MalformedURLException("Unsupported imgur URL format: " + url.toExternalForm()); } - public ALBUM_TYPE getAlbumType() { - return albumType; - } - public static class ImgurImage { String title = ""; String description = ""; - String extension = ""; - public URL url = null; + String extension; + public URL 
url; ImgurImage(URL url) { this.url = url; @@ -586,14 +588,7 @@ public static class ImgurImage { this.extension = this.extension.substring(0, this.extension.indexOf("?")); } } - ImgurImage(URL url, String title) { - this(url); - this.title = title; - } - public ImgurImage(URL url, String title, String description) { - this(url, title); - this.description = description; - } + String getSaveAs() { String saveAs = this.title; String u = url.toExternalForm(); @@ -613,7 +608,7 @@ String getSaveAs() { public static class ImgurAlbum { String title = null; - public URL url = null; + public URL url; public List images = new ArrayList<>(); ImgurAlbum(URL url) { this.url = url; diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/JabArchivesRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/JabArchivesRipper.java index e7af19bcf..84fad5055 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/JabArchivesRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/JabArchivesRipper.java @@ -55,12 +55,6 @@ public String getGID(URL url) throws MalformedURLException { "jabarchives.com/main/view/albumname - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { // Find next page diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java index d5df1fe5e..2f2d5c336 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -40,12 +42,6 @@ 
public String getGID(URL url) throws MalformedURLException { throw new MalformedURLException("Expected jagodibuja.com gallery formats hwww.jagodibuja.com/Comic name/ got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); @@ -62,8 +58,8 @@ public List getURLsFromPage(Document doc) { Element elem = comicPage.select("span.full-size-link > a").first(); LOGGER.info("Got link " + elem.attr("href")); try { - addURLToDownload(new URL(elem.attr("href")), ""); - } catch (MalformedURLException e) { + addURLToDownload(new URI(elem.attr("href")).toURL(), ""); + } catch (MalformedURLException | URISyntaxException e) { LOGGER.warn("Malformed URL"); e.printStackTrace(); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/Jpg3Ripper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/Jpg3Ripper.java new file mode 100644 index 000000000..c79e02bc4 --- /dev/null +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/Jpg3Ripper.java @@ -0,0 +1,70 @@ +package com.rarchives.ripme.ripper.rippers; + +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.utils.Http; +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; + +public class Jpg3Ripper extends AbstractHTMLRipper { + + public Jpg3Ripper(URL url) throws IOException { + super(url); + } + + @Override + public String getDomain() { + return "jpg3.su"; + } + + @Override + public String getHost() { + return "jpg3"; + } + + @Override + public List getURLsFromPage(Document page) { + List urls = new ArrayList<>(); + + for (Element el : page.select(".image-container > img")) 
{ + urls.add(el.attr("src").replaceAll("\\.md", "")); + } + + return urls; + } + + @Override + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { + String u = url.toExternalForm(); + u = u.replaceAll("https?://jpg3.su/a/([^/]+)/?.*", "https://jpg3.su/a/$1"); + LOGGER.debug("Changed URL from " + url + " to " + u); + return new URI(u).toURL(); + } + + @Override + public Document getNextPage(Document page) throws IOException, URISyntaxException { + String href = page.select("[data-pagination='next']").attr("href"); + if (!href.isEmpty()) { + return Http.url(href).get(); + } else { + return null; + } + } + + @Override + public String getGID(URL url) throws MalformedURLException { + return url.toString().split("/")[url.toString().split("/").length - 1]; + } + + @Override + protected void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null); + } +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/KingcomixRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/KingcomixRipper.java index 4876237e4..bb8194bcb 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/KingcomixRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/KingcomixRipper.java @@ -41,13 +41,6 @@ public String getGID(URL url) throws MalformedURLException { "kingcomix.com/COMIX - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ListalRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ListalRipper.java index 8986fd91b..408310a7a 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ListalRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ListalRipper.java @@ -1,234 
+1,236 @@ -package com.rarchives.ripme.ripper.rippers; - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; -import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.ripper.DownloadThreadPool; -import com.rarchives.ripme.utils.Http; - - - -/** - * @author Tushar - * - */ -public class ListalRipper extends AbstractHTMLRipper { - - private Pattern p1 = Pattern.compile("https:\\/\\/www.listal.com\\/list\\/([a-zA-Z0-9-]+)"); - private Pattern p2 = - Pattern.compile("https:\\/\\/www.listal.com\\/((?:(?:[a-zA-Z0-9-]+)\\/?)+)"); - private String listId = null; // listId to get more images via POST. - private String postUrl = "https://www.listal.com/item-list/"; //to load more images. - private UrlType urlType = UrlType.UNKNOWN; - - private DownloadThreadPool listalThreadPool = new DownloadThreadPool("listalThreadPool"); - - public ListalRipper(URL url) throws IOException { - super(url); - } - - @Override - public String getDomain() { - return "listal.com"; - } - - @Override - public String getHost() { - return "listal"; - } - - @Override - public Document getFirstPage() throws IOException { - Document doc = Http.url(url).get(); - if (urlType == UrlType.LIST) { - listId = doc.select("#customlistitems").first().attr("data-listid"); // Used for list types. 
- } - return doc; - } - - @Override - public List getURLsFromPage(Document page) { - if (urlType == UrlType.LIST) { - // for url of type LIST, https://www.listal.com/list/my-list - return getURLsForListType(page); - } else if (urlType == UrlType.FOLDER) { - // for url of type FOLDER, https://www.listal.com/jim-carrey/pictures - return getURLsForFolderType(page); - } - return null; - } - - @Override - public void downloadURL(URL url, int index) { - listalThreadPool.addThread(new ListalImageDownloadThread(url, index)); - } - - @Override - public String getGID(URL url) throws MalformedURLException { - Matcher m1 = p1.matcher(url.toExternalForm()); - if (m1.matches()) { - // Return the text contained between () in the regex - urlType = UrlType.LIST; - return m1.group(1); - } - - Matcher m2 = p2.matcher(url.toExternalForm()); - if (m2.matches()) { - // Return only gid from capturing group of type listal.com/tvOrSomething/dexter/pictures - urlType = UrlType.FOLDER; - return getFolderTypeGid(m2.group(1)); - } - - throw new MalformedURLException("Expected listal.com URL format: " - + "listal.com/list/my-list-name - got " + url + " instead."); - } - - @Override - public Document getNextPage(Document page) throws IOException { - Document nextPage = super.getNextPage(page); - switch (urlType) { - case LIST: - if (!page.select(".loadmoreitems").isEmpty()) { - // All items are not loaded. - // Load remaining items using postUrl. 
- - String offSet = page.select(".loadmoreitems").last().attr("data-offset"); - Map postParams = new HashMap<>(); - postParams.put("listid", listId); - postParams.put("offset", offSet); - try { - nextPage = Http.url(postUrl).data(postParams).retries(3).post(); - } catch (IOException e1) { - LOGGER.error("Failed to load more images after " + offSet, e1); - throw e1; - } - } - break; - - case FOLDER: - Elements pageLinks = page.select(".pages a"); - if (!pageLinks.isEmpty() && pageLinks.last().text().startsWith("Next")) { - String nextUrl = pageLinks.last().attr("abs:href"); - nextPage = Http.url(nextUrl).retries(3).get(); - } - break; - - case UNKNOWN: - default: - } - return nextPage; - } - - - @Override - public DownloadThreadPool getThreadPool() { - return listalThreadPool; - } - - /** - * Returns the image urls for UrlType LIST. - */ - private List getURLsForListType(Document page) { - List list = new ArrayList<>(); - for (Element e : page.select(".pure-g a[href*=viewimage]")) { - //list.add("https://www.listal.com" + e.attr("href") + "h"); - list.add(e.attr("abs:href") + "h"); - } - - return list; - } - - /** - * Returns the image urls for UrlType FOLDER. - */ - private List getURLsForFolderType(Document page) { - List list = new ArrayList<>(); - for (Element e : page.select("#browseimagescontainer .imagewrap-outer a")) { - list.add(e.attr("abs:href") + "h"); - } - return list; - } - - /** - * Returns the gid for url type listal.com/tvOrSomething/dexter/pictures - */ - public String getFolderTypeGid(String group) throws MalformedURLException { - String[] folders = group.split("/"); - try { - if (folders.length == 2 && folders[1].equals("pictures")) { - // Url is probably for an actor. - return folders[0]; - } - - if (folders.length == 3 && folders[2].equals("pictures")) { - // Url if for a folder(like movies, tv etc). 
- Document doc = Http.url(url).get(); - return doc.select(".itemheadingmedium").first().text(); - } - - } catch (Exception e) { - LOGGER.error(e); - } - throw new MalformedURLException("Unable to fetch the gid for given url."); - } - - private class ListalImageDownloadThread extends Thread { - - private URL url; - private int index; - - public ListalImageDownloadThread(URL url, int index) { - super(); - this.url = url; - this.index = index; - } - - @Override - public void run() { - getImage(); - } - - public void getImage() { - try { - Document doc = Http.url(url).get(); - - String imageUrl = doc.getElementsByClass("pure-img").attr("src"); - if (imageUrl != "") { - addURLToDownload(new URL(imageUrl), getPrefix(index), "", null, null, - getImageName()); - } else { - LOGGER.error("Couldnt find image from url: " + url); - } - } catch (IOException e) { - LOGGER.error("[!] Exception while downloading image: " + url, e); - } - } - - public String getImageName() { - // Returns the image number of the link if possible. 
- String name = this.url.toExternalForm(); - try { - name = name.substring(name.lastIndexOf("/") + 1); - } catch (Exception e) { - LOGGER.info("Failed to get name for the image."); - name = null; - } - // Listal stores images as .jpg - return name + ".jpg"; - } - } - - private static enum UrlType { - LIST, FOLDER, UNKNOWN - } -} +package com.rarchives.ripme.ripper.rippers; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; +import org.jsoup.select.Elements; +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.ripper.DownloadThreadPool; +import com.rarchives.ripme.utils.Http; + + + +/** + * @author Tushar + * + */ +public class ListalRipper extends AbstractHTMLRipper { + + private Pattern p1 = Pattern.compile("https:\\/\\/www.listal.com\\/list\\/([a-zA-Z0-9-]+)"); + private Pattern p2 = + Pattern.compile("https:\\/\\/www.listal.com\\/((?:(?:[a-zA-Z0-9-_%]+)\\/?)+)"); + private String listId = null; // listId to get more images via POST. + private String postUrl = "https://www.listal.com/item-list/"; //to load more images. + private UrlType urlType = UrlType.UNKNOWN; + + private DownloadThreadPool listalThreadPool = new DownloadThreadPool("listalThreadPool"); + + public ListalRipper(URL url) throws IOException { + super(url); + } + + @Override + public String getDomain() { + return "listal.com"; + } + + @Override + public String getHost() { + return "listal"; + } + + @Override + public Document getFirstPage() throws IOException { + Document doc = Http.url(url).get(); + if (urlType == UrlType.LIST) { + listId = doc.select("#customlistitems").first().attr("data-listid"); // Used for list types. 
+ } + return doc; + } + + @Override + public List getURLsFromPage(Document page) { + if (urlType == UrlType.LIST) { + // for url of type LIST, https://www.listal.com/list/my-list + return getURLsForListType(page); + } else if (urlType == UrlType.FOLDER) { + // for url of type FOLDER, https://www.listal.com/jim-carrey/pictures + return getURLsForFolderType(page); + } + return null; + } + + @Override + public void downloadURL(URL url, int index) { + listalThreadPool.addThread(new ListalImageDownloadThread(url, index)); + } + + @Override + public String getGID(URL url) throws MalformedURLException { + Matcher m1 = p1.matcher(url.toExternalForm()); + if (m1.matches()) { + // Return the text contained between () in the regex + urlType = UrlType.LIST; + return m1.group(1); + } + + Matcher m2 = p2.matcher(url.toExternalForm()); + if (m2.matches()) { + // Return only gid from capturing group of type listal.com/tvOrSomething/dexter/pictures + urlType = UrlType.FOLDER; + return getFolderTypeGid(m2.group(1)); + } + + throw new MalformedURLException("Expected listal.com URL format: " + + "listal.com/list/my-list-name - got " + url + " instead."); + } + + @Override + public Document getNextPage(Document page) throws IOException, URISyntaxException { + Document nextPage = super.getNextPage(page); + switch (urlType) { + case LIST: + if (!page.select(".loadmoreitems").isEmpty()) { + // All items are not loaded. + // Load remaining items using postUrl. 
+ + String offSet = page.select(".loadmoreitems").last().attr("data-offset"); + Map postParams = new HashMap<>(); + postParams.put("listid", listId); + postParams.put("offset", offSet); + try { + nextPage = Http.url(postUrl).data(postParams).retries(3).post(); + } catch (IOException e1) { + LOGGER.error("Failed to load more images after " + offSet, e1); + throw e1; + } + } + break; + + case FOLDER: + Elements pageLinks = page.select(".pages a"); + if (!pageLinks.isEmpty() && pageLinks.last().text().startsWith("Next")) { + String nextUrl = pageLinks.last().attr("abs:href"); + nextPage = Http.url(nextUrl).retries(3).get(); + } + break; + + case UNKNOWN: + default: + } + return nextPage; + } + + + @Override + public DownloadThreadPool getThreadPool() { + return listalThreadPool; + } + + /** + * Returns the image urls for UrlType LIST. + */ + private List getURLsForListType(Document page) { + List list = new ArrayList<>(); + for (Element e : page.select(".pure-g a[href*=viewimage]")) { + //list.add("https://www.listal.com" + e.attr("href") + "h"); + list.add(e.attr("abs:href") + "h"); + } + + return list; + } + + /** + * Returns the image urls for UrlType FOLDER. + */ + private List getURLsForFolderType(Document page) { + List list = new ArrayList<>(); + for (Element e : page.select("#browseimagescontainer .imagewrap-outer a")) { + list.add(e.attr("abs:href") + "h"); + } + return list; + } + + /** + * Returns the gid for url type listal.com/tvOrSomething/dexter/pictures + */ + public String getFolderTypeGid(String group) throws MalformedURLException { + String[] folders = group.split("/"); + try { + if (folders.length == 2 && folders[1].equals("pictures")) { + // Url is probably for an actor. + return folders[0]; + } + + if (folders.length == 3 && folders[2].equals("pictures")) { + // Url if for a folder(like movies, tv etc). 
+ Document doc = Http.url(url).get(); + return doc.select(".itemheadingmedium").first().text(); + } + + } catch (Exception e) { + LOGGER.error(e); + } + throw new MalformedURLException("Unable to fetch the gid for given url."); + } + + private class ListalImageDownloadThread implements Runnable { + + private final URL url; + private final int index; + + public ListalImageDownloadThread(URL url, int index) { + super(); + this.url = url; + this.index = index; + } + + @Override + public void run() { + getImage(); + } + + public void getImage() { + try { + Document doc = Http.url(url).get(); + + String imageUrl = doc.getElementsByClass("pure-img").attr("src"); + if (imageUrl != "") { + addURLToDownload(new URI(imageUrl).toURL(), getPrefix(index), "", null, null, + getImageName()); + } else { + LOGGER.error("Couldnt find image from url: " + url); + } + } catch (IOException | URISyntaxException e) { + LOGGER.error("[!] Exception while downloading image: " + url, e); + } + } + + public String getImageName() { + // Returns the image number of the link if possible. 
+ String name = this.url.toExternalForm(); + try { + name = name.substring(name.lastIndexOf("/") + 1); + } catch (Exception e) { + LOGGER.info("Failed to get name for the image."); + name = null; + } + // Listal stores images as .jpg + return name + ".jpg"; + } + } + + private static enum UrlType { + LIST, FOLDER, UNKNOWN + } +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java index 7eabfc6f0..de97c533b 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java @@ -1,26 +1,26 @@ package com.rarchives.ripme.ripper.rippers; +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.utils.Http; +import org.json.JSONArray; +import org.json.JSONObject; +import org.jsoup.Connection; +import org.jsoup.nodes.Document; + import java.io.IOException; +import java.io.UnsupportedEncodingException; import java.net.MalformedURLException; import java.net.URL; +import java.net.URLEncoder; import java.util.ArrayList; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; - -import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.ripper.DownloadThreadPool; -import com.rarchives.ripme.utils.Http; - public class LusciousRipper extends AbstractHTMLRipper { - private static final int RETRY_COUNT = 5; // Keeping it high for read timeout exception. 
+ private static String albumid; - private static final Pattern P = Pattern.compile("^https?:\\/\\/(?:members\\.|old\\.|www\\.)?luscious.net\\/albums\\/([-_.0-9a-zA-Z]+)\\/?"); - private DownloadThreadPool lusciousThreadPool = new DownloadThreadPool("lusciousThreadPool"); + private static final Pattern P = Pattern.compile("^https?://(?:members\\.|legacy\\.|www\\.)?luscious.net/albums/([-_.0-9a-zA-Z]+)/?"); public LusciousRipper(URL url) throws IOException { super(url); @@ -37,40 +37,48 @@ public String getHost() { } @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - Document page = Http.url(url).get(); - LOGGER.info("First page is " + url); - return page; - } - - @Override - public List getURLsFromPage(Document page) { + public List getURLsFromPage(Document page) { // gets urls for all pages through the api List urls = new ArrayList<>(); - Elements urlElements = page.select("div.item.thumbnail.ic_container > a"); - for (Element e : urlElements) { - urls.add(e.attr("abs:href")); - } + int totalPages = 1; + + for (int i = 1; i <= totalPages; i++) { + String APIStringWOVariables = 
"https://apicdn.luscious.net/graphql/nobatch/?operationName=PictureListInsideAlbum&query=%2520query%2520PictureListInsideAlbum%28%2524input%253A%2520PictureListInput%21%29%2520%257B%2520picture%2520%257B%2520list%28input%253A%2520%2524input%29%2520%257B%2520info%2520%257B%2520...FacetCollectionInfo%2520%257D%2520items%2520%257B%2520__typename%2520id%2520title%2520description%2520created%2520like_status%2520number_of_comments%2520number_of_favorites%2520moderation_status%2520width%2520height%2520resolution%2520aspect_ratio%2520url_to_original%2520url_to_video%2520is_animated%2520position%2520permissions%2520url%2520tags%2520%257B%2520category%2520text%2520url%2520%257D%2520thumbnails%2520%257B%2520width%2520height%2520size%2520url%2520%257D%2520%257D%2520%257D%2520%257D%2520%257D%2520fragment%2520FacetCollectionInfo%2520on%2520FacetCollectionInfo%2520%257B%2520page%2520has_next_page%2520has_previous_page%2520total_items%2520total_pages%2520items_per_page%2520url_complete%2520%257D%2520&variables="; + Connection con = Http.url(APIStringWOVariables + encodeVariablesPartOfURL(i, albumid)).method(Connection.Method.GET).retries(5).connection(); + con.ignoreHttpErrors(true); + con.ignoreContentType(true); + con.userAgent("Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0"); + Connection.Response res; + try { + res = con.execute(); + } catch (IOException e) { + throw new RuntimeException(e); + } + String body = res.body(); - return urls; - } + JSONObject jsonObject = new JSONObject(body); - @Override - public Document getNextPage(Document doc) throws IOException { - // luscious sends xhr requests to nextPageUrl and appends new set of images to the current page while in browser. - // Simply GET the nextPageUrl also works. Therefore, we do this... 
- Element nextPageElement = doc.select("div#next_page > div > a").first(); - if (nextPageElement == null) { - throw new IOException("No next page found."); + JSONObject data = jsonObject.getJSONObject("data"); + JSONObject picture = data.getJSONObject("picture"); + JSONObject list = picture.getJSONObject("list"); + JSONArray items = list.getJSONArray("items"); + JSONObject info = list.getJSONObject("info"); + totalPages = info.getInt("total_pages"); + + for (int j = 0; j < items.length(); j++) { + JSONObject item = items.getJSONObject(j); + String urlToOriginal = item.getString("url_to_original"); + urls.add(urlToOriginal); + } } - return Http.url(nextPageElement.attr("abs:href")).get(); + return urls; } @Override public String getGID(URL url) throws MalformedURLException { Matcher m = P.matcher(url.toExternalForm()); if (m.matches()) { + albumid = m.group(1).split("_")[m.group(1).split("_").length - 1]; return m.group(1); } throw new MalformedURLException("Expected luscious.net URL format: " @@ -78,79 +86,17 @@ public String getGID(URL url) throws MalformedURLException { } @Override - public void downloadURL(URL url, int index) { - lusciousThreadPool.addThread(new LusciousDownloadThread(url, index)); + protected void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null); } - @Override - public DownloadThreadPool getThreadPool() { - return lusciousThreadPool; - } - - @Override - public URL sanitizeURL(URL url) throws MalformedURLException { - // Sanitizes the url removing GET parameters and convert to old api url. 
- // "https://old.luscious.net/albums/albumname" + public static String encodeVariablesPartOfURL(int page, String albumId) { try { - Matcher m = P.matcher(url.toString()); - if (m.matches()) { - String sanitizedUrl = m.group(); - sanitizedUrl = sanitizedUrl.replaceFirst( - "^https?:\\/\\/(?:members\\.|old\\.|www\\.)?luscious.net", - "https://old.luscious.net"); - return new URL(sanitizedUrl); - } - - throw new Exception("ERROR: Unable to sanitize url."); - } catch (Exception e) { - LOGGER.info("Error sanitizing the url."); - LOGGER.error(e); - return super.sanitizeURL(url); - } - } - - @Override - public String normalizeUrl(String url) { - try { - return url.toString().replaceFirst( - "^https?:\\/\\/(?:members\\.|old\\.)?luscious.net", "https://www.luscious.net"); - } catch (Exception e) { - LOGGER.info("Error normalizing the url."); - LOGGER.error(e); - return super.normalizeUrl(url); - } - } - - public class LusciousDownloadThread extends Thread { - private URL url; - private int index; + String json = "{\"input\":{\"filters\":[{\"name\":\"album_id\",\"value\":\"" + albumId + "\"}],\"display\":\"rating_all_time\",\"items_per_page\":50,\"page\":" + page + "}}"; - public LusciousDownloadThread(URL url, int index) { - this.url = url; - this.index = index; + return URLEncoder.encode(json, "UTF-8"); + } catch (UnsupportedEncodingException e) { + throw new IllegalStateException("Could not encode variables"); } - - @Override - public void run() { - try { - Document page = Http.url(url).retries(RETRY_COUNT).get(); - - String downloadUrl = page.select(".icon-download").attr("abs:href"); - if (downloadUrl.equals("")) { - // This is here for pages with mp4s instead of images. - downloadUrl = page.select("div > video > source").attr("src"); - if (!downloadUrl.equals("")) { - throw new IOException("Could not find download url for image or video."); - } - } - - //If a valid download url was found. 
- addURLToDownload(new URL(downloadUrl), getPrefix(index)); - - } catch (IOException e) { - LOGGER.error("Error downloadiong url " + url, e); - } - } - } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MangadexRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MangadexRipper.java index ea8c45306..8c6c92271 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/MangadexRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MangadexRipper.java @@ -1,40 +1,42 @@ package com.rarchives.ripme.ripper.rippers; import com.rarchives.ripme.ripper.AbstractJSONRipper; -import com.rarchives.ripme.ui.History; import com.rarchives.ripme.ui.RipStatusMessage; import com.rarchives.ripme.utils.Http; -import com.rarchives.ripme.utils.Utils; -import org.apache.log4j.Logger; import org.json.JSONArray; import org.json.JSONObject; -import org.jsoup.Connection; -import org.jsoup.nodes.Document; -import java.io.File; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; -import java.util.*; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.TreeMap; import java.util.regex.Matcher; import java.util.regex.Pattern; public class MangadexRipper extends AbstractJSONRipper { - private String chapterApiEndPoint = "https://mangadex.org/api/chapter/"; - private String mangaApiEndPoint = "https://mangadex.org/api/manga/"; + private final String chapterApiEndPoint = "https://mangadex.org/api/chapter/"; + private final String mangaApiEndPoint = "https://mangadex.org/api/manga/"; private boolean isSingleChapter; - private String getImageUrl(String chapterHash, String imageName, String server) { - return server + chapterHash + "/" + imageName; - } public MangadexRipper(URL url) throws IOException { super(url); } + private String getImageUrl(String chapterHash, String imageName, String server) { 
+ return server + chapterHash + "/" + imageName; + } + @Override public String getHost() { return "mangadex"; } + @Override public String getDomain() { return "mangadex.org"; @@ -50,14 +52,12 @@ public String getGID(URL url) throws MalformedURLException { String capID = getChapterID(url.toExternalForm()); String mangaID = getMangaID(url.toExternalForm()); if (capID != null) { - isSingleChapter=true; + isSingleChapter = true; return capID; + } else if (mangaID != null) { + isSingleChapter = false; + return mangaID; } - else - if(mangaID!=null){ - isSingleChapter=false; - return mangaID; - } throw new MalformedURLException("Unable to get chapter ID from" + url); } @@ -69,10 +69,11 @@ private String getChapterID(String url) { } return null; } - private String getMangaID(String url){ + + private String getMangaID(String url) { Pattern p = Pattern.compile("https://mangadex.org/title/([\\d]+)/(.+)"); Matcher m = p.matcher(url); - if(m.matches()){ + if (m.matches()) { return m.group(1); } return null; @@ -80,20 +81,19 @@ private String getMangaID(String url){ @Override - public JSONObject getFirstPage() throws IOException { + public JSONObject getFirstPage() throws IOException, URISyntaxException { // Get the chapter ID String chapterID = getChapterID(url.toExternalForm()); String mangaID = getMangaID(url.toExternalForm()); - if(mangaID!=null){ - return Http.url(new URL(mangaApiEndPoint+mangaID)).getJSON(); - } - else - return Http.url(new URL(chapterApiEndPoint + chapterID)).getJSON(); + if (mangaID != null) { + return Http.url(new URI(mangaApiEndPoint + mangaID).toURL()).getJSON(); + } else + return Http.url(new URI(chapterApiEndPoint + chapterID).toURL()).getJSON(); } @Override protected List getURLsFromJSON(JSONObject json) { - if(isSingleChapter){ + if (isSingleChapter) { List assetURLs = new ArrayList<>(); JSONArray currentObject; String chapterHash; @@ -111,12 +111,12 @@ protected List getURLsFromJSON(JSONObject json) { JSONObject chaptersJSON = (JSONObject) 
json.get("chapter"); JSONObject temp; Iterator keys = chaptersJSON.keys(); - HashMap chapterIDs = new HashMap<>(); + HashMap chapterIDs = new HashMap<>(); while (keys.hasNext()) { - String keyValue = (String) keys.next(); - temp=(JSONObject)chaptersJSON.get(keyValue); - if(temp.getString("lang_name").equals("English")) { - chapterIDs.put(temp.getDouble("chapter"),keyValue); + String keyValue = keys.next(); + temp = (JSONObject) chaptersJSON.get(keyValue); + if (temp.getString("lang_name").equals("English")) { + chapterIDs.put(temp.getDouble("chapter"), keyValue); } } @@ -126,17 +126,16 @@ protected List getURLsFromJSON(JSONObject json) { String chapterHash; // Server is the cdn hosting the images. String server; - JSONObject chapterJSON=null; - TreeMap treeMap = new TreeMap<>(chapterIDs); - Iterator it = treeMap.keySet().iterator(); - while(it.hasNext()) { - double key =(double) it.next(); + JSONObject chapterJSON = null; + TreeMap treeMap = new TreeMap<>(chapterIDs); + for (Double aDouble : treeMap.keySet()) { + double key = (double) aDouble; try { - chapterJSON = Http.url(new URL(chapterApiEndPoint + treeMap.get(key))).getJSON(); - } catch (IOException e) { + chapterJSON = Http.url(new URI(chapterApiEndPoint + treeMap.get(key)).toURL()).getJSON(); + } catch (IOException | URISyntaxException e) { e.printStackTrace(); } - sendUpdate(RipStatusMessage.STATUS.LOADING_RESOURCE,"chapter "+key); + sendUpdate(RipStatusMessage.STATUS.LOADING_RESOURCE, "chapter " + key); chapterHash = chapterJSON.getString("hash"); server = chapterJSON.getString("server"); for (int i = 0; i < chapterJSON.getJSONArray("page_array").length(); i++) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java index f4325aa12..c5f6b1429 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ManganeloRipper.java @@ 
-48,12 +48,6 @@ public String getGID(URL url) throws MalformedURLException { "/manganelo.com/manga/ID - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { Element elem = doc.select("div.btn-navigation-chap > a.back").first(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MeituriRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MeituriRipper.java index 8bdd2b2fb..2c83ce7ed 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/MeituriRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MeituriRipper.java @@ -21,12 +21,12 @@ public MeituriRipper(URL url) throws IOException { @Override public String getHost() { - return "meituri"; + return "tujigu"; } @Override public String getDomain() { - return "meituri.com"; + return "tujigu.com"; } // To use in getting URLs @@ -35,23 +35,18 @@ public String getDomain() { @Override public String getGID(URL url) throws MalformedURLException { // without escape - // ^https?://[w.]*meituri\.com/a/([0-9]+)/([0-9]+\.html)*$ - // https://www.meituri.com/a/14449/ - // also matches https://www.meituri.com/a/14449/3.html etc. + // ^https?://[w.]*tujigu\.com/a/([0-9]+)/([0-9]+\.html)*$ + // https://www.tujigu.com/a/14449/ + // also matches https://www.tujigu.com/a/14449/3.html etc. 
// group 1 is 14449 - Pattern p = Pattern.compile("^https?://[w.]*meituri\\.com/a/([0-9]+)/([0-9]+\\.html)*$"); + Pattern p = Pattern.compile("^https?://[w.]*tujigu\\.com/a/([0-9]+)/([0-9]+\\.html)*$"); Matcher m = p.matcher(url.toExternalForm()); if (m.matches()) { albumID = m.group(1); return m.group(1); } throw new MalformedURLException( - "Expected meituri.com URL format: " + "meituri.com/a/albumid/ - got " + url + "instead"); - } - - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); + "Expected tujigu.com URL format: " + "tujigu.com/a/albumid/ - got " + url + "instead"); } @Override @@ -71,7 +66,7 @@ public List getURLsFromPage(Document doc) { } // Base URL: http://ii.hywly.com/a/1/albumid/imgnum.jpg - String baseURL = "http://ii.hywly.com/a/1/" + albumID + "/"; + String baseURL = "https://tjg.hywly.com/a/1/" + albumID + "/"; // Loop through and add images to the URL list for (int i = 1; i <= numOfImages; i++) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ModelxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ModelxRipper.java index 0b513b377..c2d6ed47d 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ModelxRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ModelxRipper.java @@ -41,11 +41,6 @@ public String getGID(URL url) throws MalformedURLException { throw new MalformedURLException("Expected URL format: http://www.modelx.org/[category (one or more)]/xxxxx got: " + url); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document page) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java index 598cf5d4f..d2af02a15 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java +++ 
b/src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -13,7 +15,6 @@ import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.ripper.DownloadThreadPool; -import com.rarchives.ripme.ui.RipStatusMessage.STATUS; import com.rarchives.ripme.utils.Http; import com.rarchives.ripme.utils.Utils; import org.jsoup.select.Elements; @@ -59,20 +60,21 @@ protected Document getFirstPage() throws IOException { if (!notHome) { StringBuilder newPath = new StringBuilder(path); newPath.insert(2, "M"); - firstURL = new URL(this.url, "https://" + DOMAIN + newPath); + firstURL = URI.create("https://" + DOMAIN + newPath).toURL(); LOGGER.info("Changed URL to " + firstURL); } return Http.url(firstURL).referrer("https://motherless.com").get(); } @Override - public Document getNextPage(Document doc) throws IOException { + public Document getNextPage(Document doc) throws IOException, URISyntaxException { + Elements nextPageLink = doc.head().select("link[rel=next]"); if (nextPageLink.isEmpty()) { throw new IOException("Last page reached"); } else { String referrerLink = doc.head().select("link[rel=canonical]").first().attr("href"); - URL nextURL = new URL(this.url, nextPageLink.first().attr("href")); + URL nextURL = this.url.toURI().resolve(nextPageLink.first().attr("href")).toURL(); return Http.url(nextURL).referrer(referrerLink).get(); } } @@ -109,7 +111,7 @@ protected List getURLsFromPage(Document page) { @Override protected void downloadURL(URL url, int index) { // Create thread for finding image at "url" page - MotherlessImageThread mit = new MotherlessImageThread(url, index); + MotherlessImageRunnable mit = new MotherlessImageRunnable(url, index); motherlessThreadPool.addThread(mit); try { Thread.sleep(IMAGE_SLEEP_TIME); @@ -148,15 
+150,19 @@ public String getGID(URL url) throws MalformedURLException { throw new MalformedURLException("Expected URL format: https://motherless.com/GIXXXXXXX, got: " + url); } - + @Override + protected DownloadThreadPool getThreadPool() { + return motherlessThreadPool; + } + /** * Helper class to find and download images found on "image" pages */ - private class MotherlessImageThread extends Thread { - private URL url; - private int index; + private class MotherlessImageRunnable implements Runnable { + private final URL url; + private final int index; - MotherlessImageThread(URL url, int index) { + MotherlessImageRunnable(URL url, int index) { super(); this.url = url; this.index = index; @@ -180,11 +186,11 @@ public void run() { if (Utils.getConfigBoolean("download.save_order", true)) { prefix = String.format("%03d_", index); } - addURLToDownload(new URL(file), prefix); + addURLToDownload(new URI(file).toURL(), prefix); } else { LOGGER.warn("[!] could not find '__fileurl' at " + url); } - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { LOGGER.error("[!] 
Exception while loading/parsing " + this.url, e); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MrCongRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MrCongRipper.java new file mode 100644 index 000000000..642c6417e --- /dev/null +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MrCongRipper.java @@ -0,0 +1,223 @@ +package com.rarchives.ripme.ripper.rippers; + +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.utils.Http; +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; + + +public class MrCongRipper extends AbstractHTMLRipper { + + private Document currDoc; + private int lastPageNum; + private int currPageNum; + private boolean tagPage = false; + + public MrCongRipper(URL url) throws IOException { + super(url); + currPageNum = 1; + } + + @Override + public String getHost() { + return "mrcong"; + } + + @Override + public String getDomain() { + return "mrcong.com"; + } + + @Override + public String getGID(URL url) throws MalformedURLException { + System.out.println(url.toExternalForm()); + Pattern p = Pattern.compile("^https?://mrcong\\.com/(\\S*)[0-9]+-anh(-[0-9]+-videos)?(|/|/[0-9]+)$"); + Pattern p2 = Pattern.compile("^https?://mrcong\\.com/tag/(\\S*)/$"); //Added 6-10-21 + Matcher m = p.matcher(url.toExternalForm()); + Matcher m2 = p2.matcher(url.toExternalForm()); //6-10-21 + if (m.matches()) { + return m.group(1); + } + else if(m2.matches()) { //Added 6-10-21 + tagPage = true; + System.out.println("tagPage = TRUE"); + return m2.group(1); + } + + throw new MalformedURLException("Expected mrcong.com URL format: " + + "mrcong.com/GALLERY_NAME(-anh OR -anh/ OR -anh/PAGE_NUMBER OR -anh/PAGE_NUMBER/) - got " + url 
+ " instead"); + } + + @Override + public Document getFirstPage() throws IOException { //returns the root gallery page regardless of actual page number + // "url" is an instance field of the superclass + String rootUrlStr; + URL rootUrl; + + if(!tagPage) { + rootUrlStr = url.toExternalForm().replaceAll("(|/|/[0-9]+/?)$", "/"); + } else { //6-10-21 + rootUrlStr = url.toExternalForm().replaceAll("(page/[0-9]+/)$", "page/1/"); + } + + rootUrl = URI.create(rootUrlStr).toURL(); + url = rootUrl; + currPageNum = 1; + currDoc = Http.url(url).get(); + getMaxPageNumber(currDoc); + return currDoc; + } + + @Override + public Document getNextPage(Document doc) throws IOException { + int pageNum = currPageNum; + String urlStr; + if(!tagPage) { + if (pageNum == 1 && lastPageNum > 1) { + urlStr = url.toExternalForm().concat((pageNum + 1) + ""); + System.out.printf("Old Str: %s New Str: %s\n", url.toExternalForm(), urlStr); + } else if (pageNum < lastPageNum) { + urlStr = url.toExternalForm().replaceAll("(/([0-9]*)/?)$", ("/" + (pageNum + 1) + "/")); + System.out.printf("Old Str: %s New Str: %s\n", url.toString(), urlStr); + } else { + //System.out.printf("Error: Page number provided goes past last valid page number\n"); + throw (new IOException("Error: Page number provided goes past last valid page number\n")); + } + } else { //6-10-21 + //if (pageNum == 1 && lastPageNum >= 1) { + if (pageNum == 1 && lastPageNum > 1) { //6-10-21 + urlStr = url.toExternalForm().concat("page/" + (pageNum + 1) + ""); + System.out.printf("Old Str: %s New Str: %s\n", url.toExternalForm(), urlStr); + } else if (pageNum < lastPageNum) { + urlStr = url.toExternalForm().replaceAll("(page/([0-9]*)/?)$", ("page/" + (pageNum + 1) + "/")); + System.out.printf("Old Str: %s New Str: %s\n", url.toString(), urlStr); + } else { + //System.out.printf("Error: Page number provided goes past last valid page number\n"); + System.out.print("Error: There is no next page!\n"); + return null; + //throw (new 
IOException("Error: Page number provided goes past last valid page number\n")); + } + } + + url = URI.create(urlStr).toURL(); + currDoc = Http.url(url).get(); + currPageNum ++;//hi + return currDoc; + } + + private int getMaxPageNumber(Document doc) { + if(!tagPage) { + try { + lastPageNum = Integer.parseInt(doc.select("div.page-link > a").last().text()); //gets the last possible page for the gallery + } catch(Exception e) { + return 1; + } + } else { + try { + lastPageNum = Integer.parseInt(doc.select("div.pagination > a").last().text()); //gets the last possible page for the gallery + System.out.println("The last page found for " + url + " was " + lastPageNum); + } catch(Exception e) { + return 1; + } + } + + return lastPageNum; + } + + private int getCurrentPageNum(Document doc) { + int currPage; //6-10-21 + + if(!tagPage) { + currPage = Integer.parseInt(doc.select("div.page-link > span").first().text()); + } else { + currPage = Integer.parseInt(doc.select("div.pagination > span").first().text()); + } + + System.out.println("The current page was found to be: " + currPage); + + return currPage; + } + + @Override + public List getURLsFromPage(Document doc) { //gets the urls of the images + List result = new ArrayList<>(); + + if(!tagPage) { + for (Element el : doc.select("p > img")) { + String imageSource = el.attr("src"); + result.add(imageSource); + } + + System.out.println("\n1.)Printing List: " + result + "\n"); + } else { //6-10-21 + //List gallery_set_list = new ArrayList<>(); + + for (Element el : doc.select("h2 > a")) { + String pageSource = el.attr("href"); + if(!pageSource.equals("https://mrcong.com/")) { + result.add(pageSource); + System.out.println("\n" + pageSource + " has been added to the list."); + } + } + + /*for (String el2 : gallery_set_list) { + try { + URL temp_urL = URI.create(el2).toURL(); + MrCongRipper mcr = new MrCongRipper(temp_urL); + System.out.println("URL being ripped: " + mcr.url.toString()); + 
result.addAll(mcr.getURLsFromPage(mcr.getFirstPage())); + + Document nextPg = mcr.getNextPage(mcr.currDoc); + while(nextPg != null) { + result.addAll(mcr.getURLsFromPage(nextPg)); + nextPg = mcr.getNextPage(mcr.currDoc); + } + } catch (IOException e) { + e.printStackTrace(); + } + + }*/ + + System.out.println("\n2.)Printing List: " + result + "\n"); + } + + return result; + } + + @Override + public void downloadURL(URL url, int index) { + //addURLToDownload(url, getPrefix(index)); + + if(!tagPage) { + addURLToDownload(url, getPrefix(index)); + } else { + try { + List ls = this.getURLsFromPage(this.currDoc); + Document np = this.getNextPage(this.currDoc); + + while(np != null) { //Creates a list of all sets to download + ls.addAll(this.getURLsFromPage(np)); + np = this.getNextPage(np); + } + + for(String urlStr : ls) { + MrCongRipper mcr = new MrCongRipper(URI.create(urlStr).toURL()); + mcr.setup(); + mcr.rip(); + } + + } catch (IOException | URISyntaxException e) { + e.printStackTrace(); + } + } + } +} \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MultpornRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MultpornRipper.java new file mode 100644 index 000000000..cdc873f2d --- /dev/null +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MultpornRipper.java @@ -0,0 +1,71 @@ +package com.rarchives.ripme.ripper.rippers; + +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.utils.Http; +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; +import org.jsoup.select.Elements; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class MultpornRipper extends AbstractHTMLRipper { + + public MultpornRipper(URL url) throws IOException { + super(url); + 
} + + @Override + protected String getDomain() { + return "multporn.net"; + } + + @Override + public String getHost() { + return "multporn"; + } + + @Override + public String getGID(URL url) throws MalformedURLException, URISyntaxException { + Pattern p = Pattern.compile("^https?://multporn\\.net/node/(\\d+)/.*$"); + Matcher m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return m.group(1); + } + + try { + String nodeHref = Http.url(url).get().select(".simple-mode-switcher").attr("href"); + p = Pattern.compile("/node/(\\d+)/.*"); + m = p.matcher(nodeHref); + if (m.matches()) { + this.url = new URI("https://multporn.net" + nodeHref).toURL(); + return m.group(1); + } + }catch (Exception ignored){}; + + throw new MalformedURLException("Expected multporn.net URL format: " + + "multporn.net/comics/comicid / multporn.net/node/id/* - got " + url + " instead"); + } + + @Override + protected List getURLsFromPage(Document page) { + List imageURLs = new ArrayList<>(); + Elements thumbs = page.select(".mfp-gallery-image .mfp-item"); + for (Element el : thumbs) { + imageURLs.add(el.attr("href")); + } + return imageURLs; + } + + @Override + protected void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null); + } +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java index 453826a39..deedfb888 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java @@ -4,6 +4,7 @@ import com.rarchives.ripme.utils.Http; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -13,8 +14,6 @@ import org.jsoup.nodes.Element; public class MyhentaicomicsRipper extends AbstractHTMLRipper { - 
private static boolean isTag; - public MyhentaicomicsRipper(URL url) throws IOException { super(url); } @@ -69,7 +68,6 @@ public boolean pageContainsAlbums(URL url) { Pattern pat = Pattern.compile("^https?://myhentaicomics.com/index.php/tag/([0-9]*)/?([a-zA-Z%0-9+?=:]*)?$"); Matcher mat = pat.matcher(url.toExternalForm()); if (mat.matches()) { - isTag = true; return true; } return false; @@ -85,9 +83,8 @@ public List getAlbumsToQueue(Document doc) { } @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); + public Document getFirstPage() throws IOException, URISyntaxException { + return super.getFirstPage(); } @Override diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaigalleryRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaigalleryRipper.java index d8422942c..c9f4c0bd9 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaigalleryRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaigalleryRipper.java @@ -40,12 +40,6 @@ public String getGID(URL url) throws MalformedURLException { + "myhentaigallery.com/gallery/thumbnails/ID - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/MyreadingmangaRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/MyreadingmangaRipper.java index 20a3cf2d9..30fab521d 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/MyreadingmangaRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/MyreadingmangaRipper.java @@ -41,12 +41,6 @@ public String getGID(URL url) throws MalformedURLException { + "myreadingmanga.info/title - got " + url + " instead"); } - @Override - public 
Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NatalieMuRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NatalieMuRipper.java index 952b434e8..8cf24fd8e 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/NatalieMuRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NatalieMuRipper.java @@ -79,11 +79,6 @@ public String getDomain() { return this.url.getHost(); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(this.url).get(); - } - @Override public List getURLsFromPage(Document page) { List imageURLs = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NewgroundsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NewgroundsRipper.java index b3ededc4b..a7be157a3 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/NewgroundsRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NewgroundsRipper.java @@ -53,7 +53,7 @@ public String getGID(URL url) throws MalformedURLException { @Override protected Document getFirstPage() throws IOException { - return Http.url("https://" + this.username + ".newgrounds.com/art").get(); + return Http.url("https://" + this.username + ".newgrounds.com/art").timeout(10*1000).get(); } @Override @@ -71,7 +71,7 @@ protected List getURLsFromPage(Document page) { List imageURLs = new ArrayList<>(); String documentHTMLString = page.toString().replaceAll(""", ""); - String findStr = "newgrounds.com\\/art\\/view\\/" + this.username; + String findStr = "newgrounds.com/art/view/" + this.username; int lastIndex = 0; // Index where findStr is found; each occasion contains the link to an image @@ -95,7 +95,7 @@ protected List getURLsFromPage(Document page) { if(i == indices.size() - 1){ s = 
documentHTMLString.substring(indices.get(i) + 2); } else{ - s = documentHTMLString.substring(indices.get(i) + 2, indices.get(i + 1)); + s = documentHTMLString.substring(indices.get(i) + 1, indices.get(i + 1)); } s = s.replaceAll("\n", "").replaceAll("\t", "") @@ -106,13 +106,14 @@ protected List getURLsFromPage(Document page) { if (m.lookingAt()) { String testURL = m.group(3) + "_" + this.username + "_" + m.group(1); + testURL = testURL.replace("_full", ""); // Open new document to get full sized image try { Document imagePage = Http.url(inLink + m.group(1)).get(); for(String extensions: this.ALLOWED_EXTENSIONS){ if(imagePage.toString().contains(testURL + "." + extensions)){ - imageUrl += m.group(2) + "/" + m.group(3) + "_" + this.username + "_" + m.group(1) + "." + extensions; + imageUrl += m.group(2) + "/" + m.group(3).replace("_full","") + "_" + this.username + "_" + m.group(1) + "." + extensions; imageURLs.add(imageUrl); break; } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NfsfwRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NfsfwRipper.java index 86079edc3..35a1f8add 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/NfsfwRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NfsfwRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -29,8 +31,6 @@ public class NfsfwRipper extends AbstractHTMLRipper { "https?://[wm.]*nfsfw.com/gallery/v/[^/]+/(.+)$" ); - // cached first page - private Document fstPage; // threads pool for downloading images from image pages private DownloadThreadPool nfsfwThreadPool; @@ -49,13 +49,6 @@ public String getHost() { return HOST; } - @Override - protected Document getFirstPage() throws IOException { - // cache the first page - this.fstPage = Http.url(url).get(); - return fstPage; - } - @Override public 
Document getNextPage(Document page) throws IOException { String nextURL = null; @@ -113,13 +106,13 @@ protected void downloadURL(URL url, int index) { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { // always start on the first page of an album // (strip the options after the '?') String u = url.toExternalForm(); if (u.contains("?")) { u = u.substring(0, u.indexOf("?")); - return new URL(u); + return new URI(u).toURL(); } else { return url; } @@ -157,9 +150,15 @@ public boolean hasQueueSupport() { @Override public boolean pageContainsAlbums(URL url) { - List imageURLs = getImagePageURLs(fstPage); - List subalbumURLs = getSubalbumURLs(fstPage); - return imageURLs.isEmpty() && !subalbumURLs.isEmpty(); + try { + final var fstPage = getCachedFirstPage(); + List imageURLs = getImagePageURLs(fstPage); + List subalbumURLs = getSubalbumURLs(fstPage); + return imageURLs.isEmpty() && !subalbumURLs.isEmpty(); + } catch (IOException | URISyntaxException e) { + LOGGER.error("Unable to load " + url, e); + return false; + } } @Override @@ -196,10 +195,10 @@ private List getSubalbumURLs(Document page){ /** * Helper class to find and download images found on "image" pages */ - private class NfsfwImageThread extends Thread { - private URL url; - private String subdir; - private int index; + private class NfsfwImageThread implements Runnable { + private final URL url; + private final String subdir; + private final int index; NfsfwImageThread(URL url, String subdir, int index) { super(); @@ -223,8 +222,8 @@ public void run() { if (file.startsWith("/")) { file = "http://nfsfw.com" + file; } - addURLToDownload(new URL(file), getPrefix(index), this.subdir); - } catch (IOException e) { + addURLToDownload(new URI(file).toURL(), getPrefix(index), this.subdir); + } catch (IOException | URISyntaxException e) { LOGGER.error("[!] 
Exception while loading/parsing " + this.url, e); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java index 49fc1d8a3..fe50f1f16 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java @@ -126,7 +126,7 @@ public List getURLsFromPage(Document page) { List imageURLs = new ArrayList<>(); Elements thumbs = page.select("a.gallerythumb > img"); for (Element el : thumbs) { - imageURLs.add(el.attr("data-src").replaceAll("t\\.n", "i.n").replaceAll("t\\.", ".")); + imageURLs.add(el.attr("data-src").replaceAll("://t", "://i").replaceAll("t\\.", ".")); } return imageURLs; } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NsfwXxxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NsfwXxxRipper.java new file mode 100644 index 000000000..7e26faa2a --- /dev/null +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NsfwXxxRipper.java @@ -0,0 +1,135 @@ +package com.rarchives.ripme.ripper.rippers; + +import com.rarchives.ripme.ripper.AbstractJSONRipper; +import com.rarchives.ripme.utils.Http; +import org.apache.commons.lang.StringEscapeUtils; +import org.json.JSONArray; +import org.json.JSONObject; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +public class NsfwXxxRipper extends AbstractJSONRipper { + + public NsfwXxxRipper(URL url) throws IOException { + super(url); + } + + @Override + protected String getDomain() { + return "nsfw.xxx"; + } + + @Override + public String getHost() { + return "nsfw_xxx"; + } + + + @Override + public URL sanitizeURL(URL url) throws 
MalformedURLException, URISyntaxException { + String u = url.toExternalForm(); + // https://nsfw.xxx/user/kelly-kat/foo -> https://nsfw.xxx/user/kelly-kat + // https://nsfw.xxx/user/kelly-kat -> https://nsfw.xxx/user/kelly-kat + // keep up to and including the username + u = u.replaceAll("https?://nsfw.xxx/user/([^/]+)/?.*", "https://nsfw.xxx/user/$1"); + if (!u.contains("nsfw.xxx/user")) { + throw new MalformedURLException("Invalid URL: " + url); + } + + return new URI(u).toURL(); + } + + String getUser() throws MalformedURLException { + return getGID(url); + } + + URL getPage(int page) throws MalformedURLException, URISyntaxException { + return new URI("https://nsfw.xxx/slide-page/" + page + "?nsfw%5B%5D=0&types%5B%5D=image&types%5B%5D=video&types%5B%5D=gallery&slider=1&jsload=1&user=" + getUser()).toURL(); + } + + + @Override + public String getGID(URL url) throws MalformedURLException { + Pattern p = Pattern.compile("https://nsfw.xxx/user/([^/]+)/?$"); + Matcher m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return m.group(1); + } + throw new MalformedURLException("Expected URL format: " + + "nsfw.xxx/user/USER - got " + url + " instead"); + } + + + int currentPage = 1; + + @Override + protected JSONObject getFirstPage() throws IOException, URISyntaxException { + return Http.url(getPage(1)).getJSON(); + } + + List descriptions = new ArrayList<>(); + + @Override + protected JSONObject getNextPage(JSONObject doc) throws IOException, URISyntaxException { + currentPage++; + JSONObject nextPage = Http.url(getPage(doc.getInt("page") + 1)).getJSON(); + JSONArray items = nextPage.getJSONArray("items"); + if (items.isEmpty()) { + throw new IOException("No more pages"); + } + return nextPage; + } + + class ApiEntry { + String srcUrl; + String author; + String title; + + public ApiEntry(String srcUrl, String author, String title) { + this.srcUrl = srcUrl; + this.author = author; + this.title = title; + } + } + + @Override + protected List 
getURLsFromJSON(JSONObject json) { + JSONArray items = json.getJSONArray("items"); + List data = IntStream + .range(0, items.length()) + .mapToObj(items::getJSONObject) + .map(o -> { + String srcUrl; + if(o.has("src")) { + srcUrl = o.getString("src"); + } else { + // video source + Pattern videoHtmlSrcPattern = Pattern.compile("src=\"([^\"]+)\""); + Matcher matches = videoHtmlSrcPattern.matcher(o.getString("html")); + matches.find(); + srcUrl = StringEscapeUtils.unescapeHtml(matches.group(1)); + } + + return new ApiEntry(srcUrl, o.getString("author"), o.getString("title")); + }) + .toList(); + + data.forEach(e -> descriptions.add(e.title)); + return data.stream().map(e -> e.srcUrl).collect(Collectors.toList()); + } + + @Override + protected void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index) + descriptions.get(index - 1) + "_" , "", "", null); + } +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/NudeGalsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/NudeGalsRipper.java index 3300da500..ea145aad3 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/NudeGalsRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/NudeGalsRipper.java @@ -16,8 +16,6 @@ import com.rarchives.ripme.utils.Http; public class NudeGalsRipper extends AbstractHTMLRipper { - // Current HTML document - private Document albumDoc = null; public NudeGalsRipper(URL url) throws IOException { super(url); @@ -50,14 +48,6 @@ public String getGID(URL url) throws MalformedURLException { + " Got: " + url); } - @Override - public Document getFirstPage() throws IOException { - if (albumDoc == null) { - albumDoc = Http.url(url).get(); - } - return albumDoc; - } - @Override public List getURLsFromPage(Document doc) { List imageURLs = new ArrayList<>(); @@ -77,4 +67,4 @@ public void downloadURL(URL url, int index) { // Send referrer when downloading images addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null); } 
-} \ No newline at end of file +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/OglafRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/OglafRipper.java index a51833972..e03d3bdcd 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/OglafRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/OglafRipper.java @@ -46,12 +46,6 @@ public String getAlbumTitle(URL url) throws MalformedURLException { return getDomain(); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { if (doc.select("div#nav > a > div#nx").first() == null) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PahealRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/PahealRipper.java index d2421f373..39d56b83e 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/PahealRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PahealRipper.java @@ -3,25 +3,28 @@ import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.utils.Http; import com.rarchives.ripme.utils.Utils; -import java.io.File; import java.io.IOException; import java.net.MalformedURLException; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.log4j.Logger; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.jsoup.nodes.Document; import org.jsoup.nodes.Element; import org.jsoup.select.Elements; public class PahealRipper extends AbstractHTMLRipper { - private static final Logger logger = Logger.getLogger(PahealRipper.class); + private static final Logger 
logger = LogManager.getLogger(PahealRipper.class); private static Map cookies = null; private static Pattern gidPattern = null; @@ -56,7 +59,7 @@ public Document getFirstPage() throws IOException { @Override public Document getNextPage(Document page) throws IOException { for (Element e : page.select("#paginator a")) { - if (e.text().toLowerCase().equals("next")) { + if (e.text().equalsIgnoreCase("next")) { return Http.url(e.absUrl("href")).cookies(getCookies()).get(); } } @@ -88,12 +91,12 @@ public void downloadURL(URL url, int index) { name = name.substring(0, name.length() - ext.length()); } - File outFile = new File(workingDir.getCanonicalPath() - + File.separator + Path outFile = Paths.get(workingDir + + "/" + Utils.filesystemSafe(new URI(name).getPath()) + ext); addURLToDownload(url, outFile); - } catch (IOException | URISyntaxException ex) { + } catch (URISyntaxException ex) { logger.error("Error while downloading URL " + url, ex); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PhotobucketRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/PhotobucketRipper.java index 680d2c09c..097fe2c05 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/PhotobucketRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PhotobucketRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -82,7 +84,7 @@ public String getHost() { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { LOGGER.info(url); String u = url.toExternalForm(); if (u.contains("?")) { @@ -93,11 +95,11 @@ public URL sanitizeURL(URL url) throws MalformedURLException { // append trailing slash u = u + "/"; } - return new URL(u); + return new URI(u).toURL(); } @Override - public String 
getGID(URL url) throws MalformedURLException { + public String getGID(URL url) throws MalformedURLException, URISyntaxException { Matcher m; URL sanitized = sanitizeURL(url); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PichunterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/PichunterRipper.java index e6c5d110e..bdb5f528c 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/PichunterRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PichunterRipper.java @@ -63,12 +63,6 @@ private boolean isPhotoSet(URL url) { return m.matches(); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { // We use comic-nav-next to the find the next page diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PicstatioRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/PicstatioRipper.java index 1bd103b54..65d43d397 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/PicstatioRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PicstatioRipper.java @@ -51,12 +51,6 @@ public String getGID(URL url) throws MalformedURLException { "www.picstatio.com//ID - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { if (doc.select("a.next_page") != null) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PorncomixRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/PorncomixRipper.java index b45796848..f021269f1 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/PorncomixRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PorncomixRipper.java @@ -41,12 +41,6 @@ public String getGID(URL 
url) throws MalformedURLException { "porncomix.info/comic - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PorncomixinfoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/PorncomixinfoRipper.java index 241ad5d7e..8aef59a62 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/PorncomixinfoRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PorncomixinfoRipper.java @@ -41,12 +41,6 @@ public String getGID(URL url) throws MalformedURLException { "porncomixinfo.net/chapter/CHAP/ID - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { // Find next page diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PornhubRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/PornhubRipper.java index 197bdcbd9..a2ce4a196 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/PornhubRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PornhubRipper.java @@ -1,9 +1,11 @@ package com.rarchives.ripme.ripper.rippers; -import java.io.File; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; +import java.nio.file.Path; import java.util.ArrayList; import java.util.List; import java.util.regex.Matcher; @@ -47,12 +49,12 @@ protected Document getFirstPage() throws IOException { } @Override - public Document getNextPage(Document page) throws IOException { + public Document getNextPage(Document page) throws IOException, URISyntaxException { Elements 
nextPageLink = page.select("li.page_next > a"); if (nextPageLink.isEmpty()){ throw new IOException("No more pages"); } else { - URL nextURL = new URL(this.url, nextPageLink.first().attr("href")); + URL nextURL = this.url.toURI().resolve(nextPageLink.first().attr("href")).toURL(); return Http.url(nextURL).get(); } } @@ -74,7 +76,7 @@ protected List getURLsFromPage(Document page) { @Override protected void downloadURL(URL url, int index) { - PornhubImageThread t = new PornhubImageThread(url, index, this.workingDir); + PornhubImageThread t = new PornhubImageThread(url, index, this.workingDir.toPath()); pornhubThreadPool.addThread(t); try { Thread.sleep(IMAGE_SLEEP_TIME); @@ -83,13 +85,13 @@ protected void downloadURL(URL url, int index) { } } - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { // always start on the first page of an album // (strip the options after the '?') String u = url.toExternalForm(); if (u.contains("?")) { u = u.substring(0, u.indexOf("?")); - return new URL(u); + return new URI(u).toURL(); } else { return url; } @@ -126,11 +128,11 @@ public boolean canRip(URL url) { * * Handles case when site has IP-banned the user. */ - private class PornhubImageThread extends Thread { - private URL url; - private int index; + private class PornhubImageThread implements Runnable { + private final URL url; + private final int index; - PornhubImageThread(URL url, int index, File workingDir) { + PornhubImageThread(URL url, int index, Path workingDir) { super(); this.url = url; this.index = index; @@ -159,10 +161,10 @@ private void fetchImage() { prefix = String.format("%03d_", index); } - URL imgurl = new URL(url, imgsrc); + URL imgurl = url.toURI().resolve(imgsrc).toURL(); addURLToDownload(imgurl, prefix); - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { LOGGER.error("[!] 
Exception while loading/parsing " + this.url, e); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/PornpicsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/PornpicsRipper.java index b779c480a..799f7294d 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/PornpicsRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/PornpicsRipper.java @@ -41,12 +41,6 @@ public String getGID(URL url) throws MalformedURLException { "www.pornpics.com/galleries/ID - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/RedditRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/RedditRipper.java index 09569fc74..dcfa14e77 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/RedditRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/RedditRipper.java @@ -1,14 +1,22 @@ package com.rarchives.ripme.ripper.rippers; -import java.io.File; import java.io.IOException; +import java.io.OutputStream; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Date; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; import com.rarchives.ripme.ui.RipStatusMessage; +import j2html.TagCreator; +import j2html.tags.ContainerTag; +import j2html.tags.specialized.DivTag; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; @@ -19,6 +27,9 @@ import com.rarchives.ripme.utils.Http; import com.rarchives.ripme.utils.RipUtils; import com.rarchives.ripme.utils.Utils; +import org.jsoup.Jsoup; + +import static j2html.TagCreator.*; public class RedditRipper 
extends AlbumRipper { @@ -47,19 +58,19 @@ public boolean canRip(URL url) { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { String u = url.toExternalForm(); // Strip '/u/' from URL u = u.replaceAll("reddit\\.com/u/", "reddit.com/user/"); - return new URL(u); + return new URI(u).toURL(); } - private URL getJsonURL(URL url) throws MalformedURLException { + private URL getJsonURL(URL url) throws MalformedURLException, URISyntaxException { // Convert gallery to post link and append ".json" Pattern p = Pattern.compile("^https?://[a-zA-Z0-9.]{0,4}reddit\\.com/gallery/([a-zA-Z0-9]+).*$"); Matcher m = p.matcher(url.toExternalForm()); if (m.matches()) { - return new URL("https://reddit.com/" +m.group(m.groupCount())+ ".json"); + return new URI("https://reddit.com/" +m.group(m.groupCount())+ ".json").toURL(); } // Append ".json" to URL in appropriate location. @@ -67,28 +78,32 @@ private URL getJsonURL(URL url) throws MalformedURLException { if (url.getQuery() != null) { result += "?" 
+ url.getQuery(); } - return new URL(result); + return new URI(result).toURL(); } @Override public void rip() throws IOException { - URL jsonURL = getJsonURL(this.url); - while (true) { - if (shouldAddURL()) { - sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_COMPLETE_HISTORY, "Already seen the last " + alreadyDownloadedUrls + " images ending rip"); - break; - } - jsonURL = getAndParseAndReturnNext(jsonURL); - if (jsonURL == null || isThisATest() || isStopped()) { - break; + try { + URL jsonURL = getJsonURL(this.url); + while (true) { + if (shouldAddURL()) { + sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_COMPLETE_HISTORY, "Already seen the last " + alreadyDownloadedUrls + " images ending rip"); + break; + } + jsonURL = getAndParseAndReturnNext(jsonURL); + if (jsonURL == null || isThisATest() || isStopped()) { + break; + } } + } catch (URISyntaxException e) { + new IOException(e.getMessage()); } waitForThreads(); } - private URL getAndParseAndReturnNext(URL url) throws IOException { + private URL getAndParseAndReturnNext(URL url) throws IOException, URISyntaxException { JSONArray jsonArray = getJsonArrayFromURL(url), children; JSONObject json, data; URL nextURL = null; @@ -103,7 +118,19 @@ private URL getAndParseAndReturnNext(URL url) throws IOException { } children = data.getJSONArray("children"); for (int j = 0; j < children.length(); j++) { - parseJsonChild(children.getJSONObject(j)); + try { + parseJsonChild(children.getJSONObject(j)); + + if (children.getJSONObject(j).getString("kind").equals("t3") && + children.getJSONObject(j).getJSONObject("data").getBoolean("is_self") + ) { + URL selfPostURL = new URI(children.getJSONObject(j).getJSONObject("data").getString("url")).toURL(); + System.out.println(selfPostURL.toExternalForm()); + saveText(getJsonArrayFromURL(getJsonURL(selfPostURL))); + } + } catch (Exception e) { + LOGGER.debug("at index " + i + ", for this data: " + data.toString() + e); + } } if (data.has("after") && !data.isNull("after")) { String 
nextURLString = Utils.stripURLParameter(url.toExternalForm(), "after"); @@ -113,7 +140,7 @@ private URL getAndParseAndReturnNext(URL url) throws IOException { else { nextURLString = nextURLString.concat("?after=" + data.getString("after")); } - nextURL = new URL(nextURLString); + nextURL = new URI(nextURLString).toURL(); } } @@ -225,8 +252,123 @@ private void handleBody(String body, String id, String title) { } } + private void saveText(JSONArray jsonArray) throws JSONException { + Path saveFileAs; + + JSONObject selfPost = jsonArray.getJSONObject(0).getJSONObject("data") + .getJSONArray("children").getJSONObject(0).getJSONObject("data"); + JSONArray comments = jsonArray.getJSONObject(1).getJSONObject("data") + .getJSONArray("children"); + + if (selfPost.getString("selftext").equals("")) { return; } + + final String title = selfPost.getString("title"); + final String id = selfPost.getString("id"); + final String author = selfPost.getString("author"); + final String creationDate = new Date((long) selfPost.getInt("created") * 1000).toString(); + final String subreddit = selfPost.getString("subreddit"); + final String selfText = selfPost.getString("selftext_html"); + final String permalink = selfPost.getString("url"); + + String html = TagCreator.html( + head( + title(title), + style(rawHtml(HTML_STYLING)) + ), + body( + div( + h1(title), + a(subreddit).withHref("https://www.reddit.com/r/" + subreddit), + a("Original").withHref(permalink), + br() + ).withClass("thing"), + div( + div( + span( + a(author).withHref("https://www.reddit.com/u/" + author) + ).withClass("author op") + ).withClass("thing oppost") + .withText(creationDate) + .with(rawHtml(Jsoup.parse(selfText).text())) + ).withClass("flex") + ).with(getComments(comments, author)), + script(rawHtml(HTML_SCRIPT)) + ).renderFormatted(); + + try { + saveFileAs = Utils.getPath(workingDir + + "/" + + id + "_" + Utils.filesystemSafe(title) + + ".html"); + OutputStream out = Files.newOutputStream(saveFileAs); + 
out.write(html.getBytes()); + out.close(); + } catch (IOException e) { + LOGGER.error("[!] Error creating save file path for description '" + url + "':", e); + return; + } + + LOGGER.debug("Downloading " + url + "'s self post to " + saveFileAs); + super.retrievingSource(permalink); + if (!Files.exists(saveFileAs.getParent())) { + LOGGER.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent())); + try { + Files.createDirectory(saveFileAs.getParent()); + } catch (IOException e) { + e.printStackTrace(); + } + } + } + + private ContainerTag getComments(JSONArray comments, String author) { + ContainerTag commentsDiv = div().withId("comments"); + + for (int i = 0; i < comments.length(); i++) { + JSONObject data = comments.getJSONObject(i).getJSONObject("data"); + + try { + ContainerTag commentDiv = + div( + span(data.getString("author")).withClasses("author", iff(data.getString("author").equals(author), "op")), + a(new Date((long) data.getInt("created") * 1000).toString()).withHref("#" + data.getString("name")) + ).withClass("thing comment").withId(data.getString("name")) + .with(rawHtml(Jsoup.parse(data.getString("body_html")).text())); + getNestedComments(data, commentDiv, author); + commentsDiv.with(commentDiv); + } catch (Exception e) { + LOGGER.debug("at index " + i + ", for this data: " + data.toString() + e); + } + } + return commentsDiv; + } + + private ContainerTag getNestedComments(JSONObject data, ContainerTag parentDiv, String author) { + if (data.has("replies") && data.get("replies") instanceof JSONObject) { + JSONArray commentChildren = data.getJSONObject("replies").getJSONObject("data").getJSONArray("children"); + for (int i = 0; i < commentChildren.length(); i++) { + JSONObject nestedComment = commentChildren + .getJSONObject(i).getJSONObject("data"); + + String nestedCommentAuthor = nestedComment.optString("author"); + if (!nestedCommentAuthor.isBlank()) { + ContainerTag childDiv = + div( + div( + 
span(nestedCommentAuthor).withClasses("author", iff(nestedCommentAuthor.equals(author), "op")), + a(new Date((long) nestedComment.getInt("created") * 1000).toString()).withHref("#" + nestedComment.getString("name")) + ).withClass("comment").withId(nestedComment.getString("name")) + .with(rawHtml(Jsoup.parse(nestedComment.getString("body_html")).text())) + ).withClass("child"); + + parentDiv.with(getNestedComments(nestedComment, childDiv, author)); + } + } + } + return parentDiv; + } + private URL parseRedditVideoMPD(String vidURL) { - org.jsoup.nodes.Document doc = null; + org.jsoup.nodes.Document doc; try { doc = Http.url(vidURL + "/DASHPlaylist.mpd").ignoreContentType().get(); int largestHeight = 0; @@ -242,8 +384,8 @@ private URL parseRedditVideoMPD(String vidURL) { baseURL = doc.select("MPD > Period > AdaptationSet > Representation[height=" + height + "]").select("BaseURL").text(); } } - return new URL(vidURL + "/" + baseURL); - } catch (IOException e) { + return new URI(vidURL + "/" + baseURL).toURL(); + } catch (IOException | URISyntaxException e) { e.printStackTrace(); } return null; @@ -253,8 +395,8 @@ private URL parseRedditVideoMPD(String vidURL) { private void handleURL(String theUrl, String id, String title) { URL originalURL; try { - originalURL = new URL(theUrl); - } catch (MalformedURLException e) { + originalURL = new URI(theUrl).toURL(); + } catch (MalformedURLException | URISyntaxException e) { return; } String subdirectory = ""; @@ -274,21 +416,21 @@ private void handleURL(String theUrl, String id, String title) { Matcher m = p.matcher(url); if (m.matches()) { // It's from reddituploads. Assume .jpg extension. 
- String savePath = this.workingDir + File.separator; - savePath += id + "-" + m.group(1) + title + ".jpg"; - addURLToDownload(urls.get(0), new File(savePath)); + String savePath = this.workingDir + "/"; + savePath += id + "-" + m.group(1) + Utils.filesystemSafe(title) + ".jpg"; + addURLToDownload(urls.get(0), Utils.getPath(savePath)); } if (url.contains("v.redd.it")) { - String savePath = this.workingDir + File.separator; - savePath += id + "-" + url.split("/")[3] + title + ".mp4"; + String savePath = this.workingDir + "/"; + savePath += id + "-" + url.split("/")[3] + Utils.filesystemSafe(title) + ".mp4"; URL urlToDownload = parseRedditVideoMPD(urls.get(0).toExternalForm()); if (urlToDownload != null) { LOGGER.info("url: " + urlToDownload + " file: " + savePath); - addURLToDownload(urlToDownload, new File(savePath)); + addURLToDownload(urlToDownload, Utils.getPath(savePath)); } } else { - addURLToDownload(urls.get(0), id + title, "", theUrl, null); + addURLToDownload(urls.get(0), Utils.filesystemSafe(id + title), "", theUrl, null); } } else if (urls.size() > 1) { for (int i = 0; i < urls.size(); i++) { @@ -307,7 +449,6 @@ private void handleGallery(JSONArray data, JSONObject metadata, String id, Strin if (Utils.getConfigBoolean("reddit.use_sub_dirs", true)) { if (Utils.getConfigBoolean("album_titles.save", true)) { subdirectory = title; - title = "-" + title + "-"; } } for (int i = 0; i < data.length(); i++) { @@ -320,12 +461,12 @@ private void handleGallery(JSONArray data, JSONObject metadata, String id, Strin try { URL mediaURL; if (!media.getJSONObject("s").isNull("gif")) { - mediaURL = new URL(media.getJSONObject("s").getString("gif").replaceAll("&", "&")); + mediaURL = new URI(media.getJSONObject("s").getString("gif").replaceAll("&", "&")).toURL(); } else { - mediaURL = new URL(media.getJSONObject("s").getString("u").replaceAll("&", "&")); + mediaURL = new URI(media.getJSONObject("s").getString("u").replaceAll("&", "&")).toURL(); } addURLToDownload(mediaURL, 
prefix, subdirectory); - } catch (MalformedURLException | JSONException e) { + } catch (MalformedURLException | JSONException | URISyntaxException e) { LOGGER.error("[!] Unable to parse gallery JSON:\ngallery_data:\n" + data +"\nmedia_metadata:\n" + metadata); } } @@ -369,4 +510,7 @@ public String getGID(URL url) throws MalformedURLException { throw new MalformedURLException("Only accepts user pages, subreddits, post, or gallery can't understand " + url); } + private static final String HTML_STYLING = " .author { font-weight: bold; } .op { color: blue; } .comment { border: 0px; margin: 0 0 25px; padding-left: 5px; } .child { margin: 2px 0 0 20px; border-left: 2px dashed #AAF; } .collapsed { background: darkgrey; margin-bottom: 0; } .collapsed > div { display: none; } .md { max-width: 840px; padding-right: 1em; } h1 { margin: 0; } body { position: relative; background-color: #eeeeec; color: #00000a; font-weight: 400; font-style: normal; font-variant: normal; font-family: Helvetica,Arial,sans-serif; line-height: 1.4 } blockquote { margin: 5px 5px 5px 15px; padding: 1px 1px 1px 15px; max-width: 60em; border: 1px solid #ccc; border-width: 0 0 0 1px; } pre { white-space: pre-wrap; } img, video { max-width: 60vw; max-height: 90vh; object-fit: contain; } .thing { overflow: hidden; margin: 0 5px 3px 40px; border: 1px solid #e0e0e0; background-color: #fcfcfb; } :target > .md { border: 5px solid blue; } .post { margin-bottom: 20px; margin-top: 20px; } .gold { background: goldenrod; } .silver { background: silver; } .platinum { background: aqua; } .deleted { background: #faa; } .md.deleted { background: inherit; border: 5px solid #faa; } .oppost { background-color: #EEF; } blockquote > p { margin: 0; } #related { max-height: 20em; overflow-y: scroll; background-color: #F4FFF4; } #related h3 { position: sticky; top: 0; background-color: white; } .flex { display: flex; flex-flow: wrap; flex-direction: row-reverse; justify-content: flex-end; } "; + private static final String 
HTML_SCRIPT = "document.addEventListener('mousedown', function(e) { var t = e.target; if (t.className == 'author') { t = t.parentElement; } if (t.classList.contains('comment')) { t.classList.toggle('collapsed'); e.preventDefault(); e.stopPropagation(); return false; } });"; + } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/RedgifsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/RedgifsRipper.java index 17105ee45..e82db4b28 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/RedgifsRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/RedgifsRipper.java @@ -1,35 +1,57 @@ package com.rarchives.ripme.ripper.rippers; -import com.rarchives.ripme.ripper.AbstractHTMLRipper; import com.rarchives.ripme.utils.Http; -import org.json.JSONArray; + import org.json.JSONObject; -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; +import java.net.URLDecoder; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; -public class RedgifsRipper extends AbstractHTMLRipper { +import org.apache.http.client.utils.URIBuilder; + +import com.rarchives.ripme.ripper.AbstractJSONRipper; + +public class RedgifsRipper extends AbstractJSONRipper { private static final String HOST = "redgifs.com"; private static final String HOST_2 = "gifdeliverynetwork.com"; - String username = ""; - String cursor = ""; - String count = "100"; + private static final String GIFS_DETAIL_ENDPOINT = "https://api.redgifs.com/v2/gifs/%s"; + private static final String USERS_SEARCH_ENDPOINT = "https://api.redgifs.com/v2/users/%s/search"; + private static final String 
GALLERY_ENDPOINT = "https://api.redgifs.com/v2/gallery/%s"; + private static final String SEARCH_ENDPOINT = "https://api.redgifs.com/v2/search/%s"; + private static final String TAGS_ENDPOINT = "https://api.redgifs.com/v2/gifs/search"; + private static final String TEMPORARY_AUTH_ENDPOINT = "https://api.redgifs.com/v2/auth/temporary"; + private static final Pattern PROFILE_PATTERN = Pattern.compile("^https?://[a-zA-Z0-9.]*redgifs\\.com/users/([a-zA-Z0-9_.-]+).*$"); + private static final Pattern SEARCH_PATTERN = Pattern.compile("^https?:\\/\\/[a-zA-Z0-9.]*redgifs\\.com\\/search(?:\\/[a-zA-Z]+)?\\?.*?query=([a-zA-Z0-9-_+%]+).*$"); + private static final Pattern TAGS_PATTERN = Pattern.compile("^https?:\\/\\/[a-zA-Z0-9.]*redgifs\\.com\\/gifs\\/([a-zA-Z0-9_.,-]+).*$"); + private static final Pattern SINGLETON_PATTERN = Pattern.compile("^https?://[a-zA-Z0-9.]*redgifs\\.com/watch/([a-zA-Z0-9_-]+).*$"); + + /** + * Keep a single auth token for the complete lifecycle of the app. + * This should prevent fetching of multiple tokens. 
+ */ + private static String authToken = ""; - String searchText = ""; - int searchCount = 150; - int searchStart = 0; + String username = ""; + int count = 40; + int currentPage = 1; + int maxPages = 1; - public RedgifsRipper(URL url) throws IOException { - super(new URL(url.toExternalForm().replace("thumbs.", ""))); + public RedgifsRipper(URL url) throws IOException, URISyntaxException { + super(new URI(url.toExternalForm().replace("thumbs.", "")).toURL()); } @Override @@ -46,41 +68,57 @@ public boolean canRip(URL url) { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { String sUrl = url.toExternalForm(); sUrl = sUrl.replace("/gifs/detail", ""); sUrl = sUrl.replace("/amp", ""); sUrl = sUrl.replace("gifdeliverynetwork.com", "redgifs.com/watch"); - return new URL(sUrl); + return new URI(sUrl).toURL(); } public Matcher isProfile() { - Pattern p = Pattern.compile("^https?://[wm.]*redgifs\\.com/users/([a-zA-Z0-9_-]+).*$"); - return p.matcher(url.toExternalForm()); + return PROFILE_PATTERN.matcher(url.toExternalForm()); } public Matcher isSearch() { - Pattern p = Pattern.compile("^https?://[wm.]*redgifs\\.com/gifs/browse/([a-zA-Z0-9_-]+).*$"); - return p.matcher(url.toExternalForm()); + return SEARCH_PATTERN.matcher(url.toExternalForm()); + } + + public Matcher isTags() { + return TAGS_PATTERN.matcher(url.toExternalForm()); } public Matcher isSingleton() { - Pattern p = Pattern.compile("^https?://[wm.]*redgifs\\.com/watch/([a-zA-Z0-9_-]+).*$"); - return p.matcher(url.toExternalForm()); + return SINGLETON_PATTERN.matcher(url.toExternalForm()); } @Override - public Document getFirstPage() throws IOException { - if (!isProfile().matches() && !isSearch().matches()) { - return Http.url(url).get(); - } else if (isSearch().matches()) { - searchText = getGID(url).replace("-", " "); - return Http.url( - new URL("https://napi.redgifs.com/v1/gfycats/search?search_text=" 
+ searchText + "&count=" + searchCount + "&start=" + searchStart*searchCount)).ignoreContentType().get(); - } else { - username = getGID(url); - return Http.url(new URL("https://napi.redgifs.com/v1/users/" + username + "/gfycats?count=" + count)) - .ignoreContentType().get(); + public JSONObject getFirstPage() throws IOException { + try { + if (authToken == null || authToken.isBlank()) { + fetchAuthToken(); + } + + if (isSingleton().matches()) { + maxPages = 1; + String gifDetailsURL = String.format(GIFS_DETAIL_ENDPOINT, getGID(url)); + return Http.url(gifDetailsURL).header("Authorization", "Bearer " + authToken).getJSON(); + } else if (isSearch().matches() || isTags().matches()) { + var json = Http.url(getSearchOrTagsURL()).header("Authorization", "Bearer " + authToken).getJSON(); + maxPages = json.getInt("pages"); + return json; + } else { + username = getGID(url); + var uri = new URIBuilder(String.format(USERS_SEARCH_ENDPOINT, username)); + uri.addParameter("order", "new"); + uri.addParameter("count", Integer.toString(count)); + uri.addParameter("page", Integer.toString(currentPage)); + var json = Http.url(uri.build().toURL()).header("Authorization", "Bearer " + authToken).getJSON(); + maxPages = json.getInt("pages"); + return json; + } + } catch (URISyntaxException e) { + throw new IOException("Failed to build first page url", e); } } @@ -91,14 +129,35 @@ public void downloadURL(URL url, int index) { @Override public String getGID(URL url) throws MalformedURLException { - Matcher m = isProfile(); if (m.matches()) { return m.group(1); } m = isSearch(); if (m.matches()) { - return m.group(1); + var sText = m.group(1); + if (sText == null || sText.isBlank()) { + throw new MalformedURLException(String.format("Expected redgifs.com/search?query=searchtext\n Got %s", url)); + } + sText = URLDecoder.decode(sText, StandardCharsets.UTF_8); + sText = sText.replaceAll("[^A-Za-z0-9_-]", "-"); + return sText; + } + m = isTags(); + if (m.matches()) { + var sText = m.group(1); 
+ if (sText == null || sText.isBlank()) { + throw new MalformedURLException(String.format("Expected redgifs.com/gifs/searchtags\n Got %s", url)); + } + sText = URLDecoder.decode(sText, StandardCharsets.UTF_8); + var list = Arrays.asList(sText.split(",")); + if (list.size() > 1) { + LOGGER.warn("Url with multiple tags found. \nThey will be sorted alphabetically for folder name."); + } + Collections.sort(list); + var gid = list.stream().reduce("", (acc, val) -> acc.concat("_" + val)); + gid = gid.replaceAll("[^A-Za-z0-9_-]", "-"); + return gid; } m = isSingleton(); if (m.matches()) { @@ -106,96 +165,206 @@ public String getGID(URL url) throws MalformedURLException { } throw new MalformedURLException( "Expected redgifs.com format: " - + "redgifs.com/id or " - + "thumbs.redgifs.com/id.gif" + + "redgifs.com/watch/id or " + + "redgifs.com/users/id or " + + "redgifs.com/gifs/id or " + + "redgifs.com/search?query=text" + " Got: " + url); } - private String stripHTMLTags(String t) { - t = t.replaceAll("\n" + - " \n" + - " ", ""); - t = t.replaceAll("\n" + - "", ""); - t = t.replaceAll("\n", ""); - t = t.replaceAll("=\"\"", ""); - return t; - } - @Override - public Document getNextPage(Document doc) throws IOException { - if (isSearch().matches()) { - Document d = Http.url( - new URL("https://napi.redgifs.com/v1/gfycats/search?search_text=" + searchText - + "&count=" + searchCount + "&start=" + searchCount*++searchStart)) - .ignoreContentType().get(); - return (hasURLs(d).isEmpty()) ? 
null : d; + public JSONObject getNextPage(JSONObject doc) throws IOException, URISyntaxException { + if (currentPage == maxPages || isSingleton().matches()) { + return null; + } + currentPage++; + if (isSearch().matches() || isTags().matches()) { + var json = Http.url(getSearchOrTagsURL()).header("Authorization", "Bearer " + authToken).getJSON(); + // Handle rare maxPages change during a rip + maxPages = json.getInt("pages"); + return json; + } else if (isProfile().matches()) { + var uri = new URIBuilder(String.format(USERS_SEARCH_ENDPOINT, getGID(url))); + uri.addParameter("order", "new"); + uri.addParameter("count", Integer.toString(count)); + uri.addParameter("page", Integer.toString(currentPage)); + var json = Http.url(uri.build().toURL()).header("Authorization", "Bearer " + authToken).getJSON(); + // Handle rare maxPages change during a rip + maxPages = json.getInt("pages"); + return json; } else { - if (cursor.equals("")) { - return null; - } else { - Document d = Http.url(new URL("https://napi.redgifs.com/v1/users/" + username + "/gfycats?count=" + count + "&cursor=" + cursor)).ignoreContentType().get(); - return (hasURLs(d).isEmpty()) ? 
null : d; - } + return null; } } @Override - public List getURLsFromPage(Document doc) { + public List getURLsFromJSON(JSONObject json) { List result = new ArrayList<>(); - if (isProfile().matches() || isSearch().matches()) { - result = hasURLs(doc); - } else { - Elements videos = doc.select("script"); - for (Element el : videos) { - String json = el.html(); - if (json.startsWith("{")) { - JSONObject page = new JSONObject(json); - result.add(page.getJSONObject("video").getString("contentUrl")); + if (isProfile().matches() || isSearch().matches() || isTags().matches()) { + var gifs = json.getJSONArray("gifs"); + for (var gif : gifs) { + if (((JSONObject)gif).isNull("gallery")) { + var hdURL = ((JSONObject)gif).getJSONObject("urls").getString("hd"); + result.add(hdURL); + } else { + var galleryID = ((JSONObject)gif).getString("gallery"); + var gifID = ((JSONObject)gif).getString("id"); + result.addAll(getURLsForGallery(galleryID, gifID)); } } + } else { + var gif = json.getJSONObject("gif"); + if (gif.isNull("gallery")) { + String hdURL = gif.getJSONObject("urls").getString("hd"); + result.add(hdURL); + } else { + var galleryID = gif.getString("gallery"); + var gifID = gif.getString("id"); + result.addAll(getURLsForGallery(galleryID, gifID)); + } } return result; } - /** - * Helper method for retrieving URLs. 
- * @param doc Document of the URL page to look through - * @return List of URLs to download + + /** + * Get all images for a gif url with multiple images + * @param galleryID gallery id + * @param gifID gif id with multiple images for logging + * @return List */ - public List hasURLs(Document doc) { - List result = new ArrayList<>(); - JSONObject page = new JSONObject(stripHTMLTags(doc.html())); - JSONArray content = page.getJSONArray("gfycats"); - for (int i = 0; i < content.length(); i++) { - result.add(content.getJSONObject(i).getString("mp4Url")); + private static List getURLsForGallery(String galleryID, String gifID) { + List list = new ArrayList<>(); + if (galleryID == null || galleryID.isBlank()) { + return list; } - cursor = page.getString("cursor"); - return result; + try { + var json = Http.url(String.format(GALLERY_ENDPOINT, galleryID)).header("Authorization", "Bearer " + authToken).getJSON(); + for (var gif : json.getJSONArray("gifs")) { + var hdURL = ((JSONObject)gif).getJSONObject("urls").getString("hd"); + list.add(hdURL); + } + } catch (IOException e) { + LOGGER.error(String.format("Error fetching gallery %s for gif %s", galleryID, gifID), e); + } + return list; } - /** - * Helper method for retrieving video URLs. - * @param url URL to gfycat page + * Static helper method for retrieving video URLs for usage in RipUtils. 
+ * Most of the code is lifted from getFirstPage and getURLsFromJSON + * @param url URL to redgif page * @return URL to video * @throws IOException */ - public static String getVideoURL(URL url) throws IOException { + public static String getVideoURL(URL url) throws IOException, URISyntaxException { LOGGER.info("Retrieving " + url.toExternalForm()); + var m = SINGLETON_PATTERN.matcher(url.toExternalForm()); + if (!m.matches()){ + throw new IOException(String.format("Cannot fetch redgif url %s", url.toExternalForm())); + } + if (authToken == null || authToken.isBlank()){ + fetchAuthToken(); + } + var gid = m.group(1).split("-")[0]; + var gifDetailsURL = String.format(GIFS_DETAIL_ENDPOINT, gid); + var json = Http.url(gifDetailsURL).header("Authorization", "Bearer " + authToken).getJSON(); + var gif = json.getJSONObject("gif"); + if (!gif.isNull("gallery")){ + // TODO check how to handle a image gallery + throw new IOException(String.format("Multiple images found for url %s", url)); + } + return gif.getJSONObject("urls").getString("hd"); + } + - //Sanitize the URL first - url = new URL(url.toExternalForm().replace("/gifs/detail", "")); + /** + * Fetch a temorary auth token for the rip + * @throws IOException + */ + private static void fetchAuthToken() throws IOException{ + var json = Http.url(TEMPORARY_AUTH_ENDPOINT).getJSON(); + var token = json.getString("token"); + authToken = token; + LOGGER.info("Incase of redgif 401 errors, please restart the app to refresh the auth token"); + } + + /** + * Map browser url query params to search or tags endpoint query params and return the complete url. + * + * Search text for search url comes from the query params, whereas search text for tags url comes from the path. + * + * Tab type for search url comes from the path whereas, tab type for tags url comes from query params. 
+ * @return Search or tags endpoint url + */ + private URL getSearchOrTagsURL() throws IOException, URISyntaxException { + URIBuilder uri; + Map endpointQueryParams = new HashMap<>(); + var browserURLQueryParams = new URIBuilder(url.toString()).getQueryParams(); + for (var qp : browserURLQueryParams) { + var name = qp.getName(); + var value = qp.getValue(); + switch (name) { + case "query": + endpointQueryParams.put("query", URLDecoder.decode(value, StandardCharsets.UTF_8)); + break; + case "tab": + switch (value) { + case "gifs" -> endpointQueryParams.put("type", "g"); + case "images" -> endpointQueryParams.put("type", "i"); + default -> LOGGER.warn(String.format("Unsupported tab for tags url %s", value)); + } + break; + case "verified": + if (value != null && value.equals("1")) { + if (isTags().matches()) { + endpointQueryParams.put("verified", "y"); + } else { + endpointQueryParams.put("verified", "yes"); + } + } + break; + case "order": + endpointQueryParams.put("order", value); + break; + case "viewMode": + break; + default: + LOGGER.warn(String.format("Unexpected query param %s for search url. 
Skipping.", name)); + } + } - Document doc = Http.url(url).get(); - Elements videos = doc.select("script"); - for (Element el : videos) { - String json = el.html(); - if (json.startsWith("{")) { - JSONObject page = new JSONObject(json); - return page.getJSONObject("video").getString("contentUrl"); + // Build the search or tags url and add missing query params if any + if (isTags().matches()) { + var subpaths = url.getPath().split("/"); + if (subpaths.length != 0) { + endpointQueryParams.put("search_text", subpaths[subpaths.length-1]); + } else { + throw new IOException("Failed to get search tags for url"); + } + // Check if it is the main tags page with all gifs, images, creator etc + if (!endpointQueryParams.containsKey("type")) { + LOGGER.warn("No tab selected, defaulting to gifs"); + endpointQueryParams.put("type", "g"); } + uri = new URIBuilder(TAGS_ENDPOINT); + } else { + var tabType = "gifs"; + var subpaths = url.getPath().split("/"); + if (subpaths.length != 0) { + switch (subpaths[subpaths.length-1]) { + case "gifs" -> tabType = "gifs"; + case "images" -> tabType = "images"; + case "search" -> LOGGER.warn("No tab selected, defaulting to gifs"); + default -> LOGGER.warn(String.format("Unsupported search tab %s, defaulting to gifs", subpaths[subpaths.length-1])); + } + } + uri = new URIBuilder(String.format(SEARCH_ENDPOINT, tabType)); } - throw new IOException(); - } + endpointQueryParams.put("page", Integer.toString(currentPage)); + endpointQueryParams.put("count", Integer.toString(count)); + endpointQueryParams.forEach((k, v) -> uri.addParameter(k, v)); + + return uri.build().toURL(); + } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/Rule34Ripper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/Rule34Ripper.java index 681738fa0..c7245739e 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/Rule34Ripper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/Rule34Ripper.java @@ -2,6 +2,8 @@ import java.io.IOException; 
import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -51,13 +53,13 @@ public String getGID(URL url) throws MalformedURLException { "rule34.xxx/index.php?page=post&s=list&tags=TAG - got " + url + " instead"); } - public URL getAPIUrl() throws MalformedURLException { - URL urlToReturn = new URL("https://rule34.xxx/index.php?page=dapi&s=post&q=index&limit=100&tags=" + getGID(url)); + public URL getAPIUrl() throws MalformedURLException, URISyntaxException { + URL urlToReturn = new URI("https://rule34.xxx/index.php?page=dapi&s=post&q=index&limit=100&tags=" + getGID(url)).toURL(); return urlToReturn; } @Override - public Document getFirstPage() throws IOException { + public Document getFirstPage() throws IOException, URISyntaxException { apiUrl = getAPIUrl().toExternalForm(); // "url" is an instance field of the superclass return Http.url(getAPIUrl()).get(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/RulePornRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/RulePornRipper.java index c9c487a77..be33c945f 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/RulePornRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/RulePornRipper.java @@ -40,11 +40,6 @@ public String getGID(URL url) throws MalformedURLException { "Expected ruleporn.com URL format: " + "ruleporn.com/NAME - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ScrolllerRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ScrolllerRipper.java index 7e0c1c46e..2df6ab2c4 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ScrolllerRipper.java +++ 
b/src/main/java/com/rarchives/ripme/ripper/rippers/ScrolllerRipper.java @@ -1,293 +1,293 @@ -package com.rarchives.ripme.ripper.rippers; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.OutputStreamWriter; -import java.net.*; -import java.nio.charset.Charset; -import java.util.ArrayList; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import org.java_websocket.client.WebSocketClient; - -import org.apache.http.NameValuePair; -import org.apache.http.client.utils.URLEncodedUtils; -import org.java_websocket.handshake.ServerHandshake; -import org.json.JSONArray; -import org.json.JSONException; -import org.json.JSONObject; - -import com.rarchives.ripme.ripper.AbstractJSONRipper; - -public class ScrolllerRipper extends AbstractJSONRipper { - - public ScrolllerRipper(URL url) throws IOException { - super(url); - } - - @Override - public String getHost() { - return "scrolller"; - } - @Override - public String getDomain() { - return "scrolller.com"; - } - - @Override - public String getGID(URL url) throws MalformedURLException { - // Typical URL is: https://scrolller.com/r/subreddit - // Parameters like "filter" and "sort" can be passed (ex: https://scrolller.com/r/subreddit?filter=xxx&sort=yyyy) - Pattern p = Pattern.compile("^https?://scrolller\\.com/r/([a-zA-Z0-9]+).*?$"); - Matcher m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(1); - } - throw new MalformedURLException("Expected scrolller.com URL format: " + - "scrolller.com/r/subreddit OR scroller.com/r/subreddit?filter= - got " + url + "instead"); - } - - @Override - public void downloadURL(URL url, int index) { - addURLToDownload(url, getPrefix(index)); - } - - - private JSONObject prepareQuery(String iterator, String gid, String sortByString) throws IOException, URISyntaxException { - - String QUERY_NOSORT = "query SubredditQuery( $url: String! 
$filter: SubredditPostFilter $iterator: String ) { getSubreddit(url: $url) { children( limit: 50 iterator: $iterator filter: $filter ) { iterator items { __typename url title subredditTitle subredditUrl redditPath isNsfw albumUrl isFavorite mediaSources { url width height isOptimized } } } } }"; - String QUERY_SORT = "subscription SubredditSubscription( $url: String! $sortBy: SubredditSortBy $timespan: SubredditTimespan $iterator: String $limit: Int $filter: SubredditPostFilter ) { fetchSubreddit( url: $url sortBy: $sortBy timespan: $timespan iterator: $iterator limit: $limit filter: $filter ) { __typename ... on Subreddit { __typename url title secondaryTitle description createdAt isNsfw subscribers isComplete itemCount videoCount pictureCount albumCount isFollowing } ... on SubredditPost { __typename url title subredditTitle subredditUrl redditPath isNsfw albumUrl isFavorite mediaSources { url width height isOptimized } } ... on Iterator { iterator } ... on Error { message } } }"; - - String filterString = convertFilterString(getParameter(this.url,"filter")); - - JSONObject variablesObject = new JSONObject().put("url", String.format("/r/%s", gid)).put("sortBy", sortByString.toUpperCase()); - JSONObject finalQueryObject = new JSONObject().put("variables", variablesObject).put("query", sortByString.equals("") ? QUERY_NOSORT : QUERY_SORT); - - if (iterator != null) { - // Iterator is not present on the first page - variablesObject.put("iterator", iterator); - } - if (!filterString.equals("NOFILTER")) { - variablesObject.put("filter", filterString); - } - - return sortByString.equals("") ? getPosts(finalQueryObject) : getPostsSorted(finalQueryObject); - - } - - - public String convertFilterString(String filterParameter) { - // Converts the ?filter= parameter of the URL to one that can be used in the GraphQL query - // I could basically remove the last "s" and call toUpperCase instead of this switch statement but this looks easier to read. 
- switch (filterParameter.toLowerCase()) { - case "pictures": - return "PICTURE"; - case "videos": - return "VIDEO"; - case "albums": - return "ALBUM"; - case "": - return "NOFILTER"; - default: - LOGGER.error(String.format("Invalid filter %s using no filter",filterParameter)); - return ""; - } - } - - public String getParameter(URL url, String parameter) throws MalformedURLException { - // Gets passed parameters from the URL - String toReplace = String.format("https://scrolller.com/r/%s?",getGID(url)); - List args= URLEncodedUtils.parse(url.toExternalForm(), Charset.defaultCharset()); - for (NameValuePair arg:args) { - // First parameter contains part of the url so we have to remove it - // Ex: for the url https://scrolller.com/r/CatsStandingUp?filter=xxxx&sort=yyyy - // 1) arg.getName() => https://scrolller.com/r/CatsStandingUp?filter - // 2) arg.getName() => sort - - if (arg.getName().replace(toReplace,"").toLowerCase().equals((parameter))) { - return arg.getValue(); - } - } - return ""; - } - - private JSONObject getPosts(JSONObject data) { - // The actual GraphQL query call - - try { - String url = "https://api.scrolller.com/api/v2/graphql"; - - URL obj = new URL(url); - HttpURLConnection conn = (HttpURLConnection) obj.openConnection(); - conn.setReadTimeout(5000); - conn.addRequestProperty("Accept-Language", "en-US,en;q=0.8"); - conn.addRequestProperty("User-Agent", "Mozilla"); - conn.addRequestProperty("Referer", "scrolller.com"); - - conn.setDoOutput(true); - - OutputStreamWriter w = new OutputStreamWriter(conn.getOutputStream(), "UTF-8"); - - w.write(data.toString()); - w.close(); - - BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream())); - String inputLine; - StringBuffer jsonString = new StringBuffer(); - - while ((inputLine = in.readLine()) != null) { - jsonString.append(inputLine); - } - - in.close(); - conn.disconnect(); - - return new JSONObject(jsonString.toString()); - - } catch (Exception e) { - e.printStackTrace(); 
- } - - return new JSONObject("{}"); - } - - private JSONObject getPostsSorted(JSONObject data) throws MalformedURLException { - - // The actual GraphQL query call (if sort parameter is present) - try { - - ArrayList postsJsonStrings = new ArrayList<>(); - - WebSocketClient wsc = new WebSocketClient(new URI("wss://api.scrolller.com/api/v2/graphql")) { - @Override - public void onOpen(ServerHandshake serverHandshake) { - // As soon as the WebSocket connects send our query - this.send(data.toString()); - } - - @Override - public void onMessage(String s) { - postsJsonStrings.add(s); - if (new JSONObject(s).getJSONObject("data").getJSONObject("fetchSubreddit").has("iterator")) { - this.close(); - } - } - - @Override - public void onClose(int i, String s, boolean b) { - } - - @Override - public void onError(Exception e) { - LOGGER.error(String.format("WebSocket error, server reported %s", e.getMessage())); - } - }; - wsc.connect(); - - while (!wsc.isClosed()) { - // Posts list is not over until the connection closes. - } - - JSONObject finalObject = new JSONObject(); - JSONArray posts = new JSONArray(); - - // Iterator is the last object in the post list, let's duplicate it in his own object for clarity. - finalObject.put("iterator", new JSONObject(postsJsonStrings.get(postsJsonStrings.size()-1))); - - for (String postString : postsJsonStrings) { - posts.put(new JSONObject(postString)); - } - finalObject.put("posts", posts); - - if (finalObject.getJSONArray("posts").length() == 1 && !finalObject.getJSONArray("posts").getJSONObject(0).getJSONObject("data").getJSONObject("fetchSubreddit").has("mediaSources")) { - // Only iterator, no posts. - return null; - } - - return finalObject; - - - } catch (URISyntaxException ue) { - // Nothing to catch, it's an hardcoded URI. 
- } - - return null; - } - - - @Override - protected List getURLsFromJSON(JSONObject json) throws JSONException { - - boolean sortRequested = json.has("posts"); - - int bestArea = 0; - String bestUrl = ""; - List list = new ArrayList<>(); - - JSONArray itemsList = sortRequested ? json.getJSONArray("posts") : json.getJSONObject("data").getJSONObject("getSubreddit").getJSONObject("children").getJSONArray("items"); - - for (Object item : itemsList) { - - if (sortRequested && !((JSONObject) item).getJSONObject("data").getJSONObject("fetchSubreddit").has("mediaSources")) { - continue; - } - - JSONArray sourcesTMP = sortRequested ? ((JSONObject) item).getJSONObject("data").getJSONObject("fetchSubreddit").getJSONArray("mediaSources") : ((JSONObject) item).getJSONArray("mediaSources"); - for (Object sourceTMP : sourcesTMP) - { - int widthTMP = ((JSONObject) sourceTMP).getInt("width"); - int heightTMP = ((JSONObject) sourceTMP).getInt("height"); - int areaTMP = widthTMP * heightTMP; - - if (areaTMP > bestArea) { - bestArea = widthTMP; - bestUrl = ((JSONObject) sourceTMP).getString("url"); - } - } - list.add(bestUrl); - bestUrl = ""; - bestArea = 0; - } - - return list; - } - - @Override - protected JSONObject getFirstPage() throws IOException { - try { - return prepareQuery(null, this.getGID(url), getParameter(url,"sort")); - } catch (URISyntaxException e) { - LOGGER.error(String.format("Error obtaining first page: %s", e.getMessage())); - return null; - } - } - - @Override - public JSONObject getNextPage(JSONObject source) throws IOException { - // Every call the the API contains an "iterator" string that we need to pass to the API to get the next page - // Checking if iterator is null is not working for some reason, hence why the weird "iterator.toString().equals("null")" - - Object iterator = null; - if (source.has("iterator")) { - // Sort requested, custom JSON. 
- iterator = source.getJSONObject("iterator").getJSONObject("data").getJSONObject("fetchSubreddit").get("iterator"); - } else { - iterator = source.getJSONObject("data").getJSONObject("getSubreddit").getJSONObject("children").get("iterator"); - } - - if (!iterator.toString().equals("null")) { - // Need to change page. - try { - return prepareQuery(iterator.toString(), this.getGID(url), getParameter(url,"sort")); - } catch (URISyntaxException e) { - LOGGER.error(String.format("Error changing page: %s", e.getMessage())); - return null; - } - } else { - return null; - } - } +package com.rarchives.ripme.ripper.rippers; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; +import java.net.*; +import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.java_websocket.client.WebSocketClient; + +import org.apache.http.NameValuePair; +import org.apache.http.client.utils.URLEncodedUtils; +import org.java_websocket.handshake.ServerHandshake; +import org.json.JSONArray; +import org.json.JSONException; +import org.json.JSONObject; + +import com.rarchives.ripme.ripper.AbstractJSONRipper; + +public class ScrolllerRipper extends AbstractJSONRipper { + + public ScrolllerRipper(URL url) throws IOException { + super(url); + } + + @Override + public String getHost() { + return "scrolller"; + } + @Override + public String getDomain() { + return "scrolller.com"; + } + + @Override + public String getGID(URL url) throws MalformedURLException { + // Typical URL is: https://scrolller.com/r/subreddit + // Parameters like "filter" and "sort" can be passed (ex: https://scrolller.com/r/subreddit?filter=xxx&sort=yyyy) + Pattern p = Pattern.compile("^https?://scrolller\\.com/r/([a-zA-Z0-9]+).*?$"); + Matcher m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return m.group(1); + } + throw new 
MalformedURLException("Expected scrolller.com URL format: " + + "scrolller.com/r/subreddit OR scroller.com/r/subreddit?filter= - got " + url + "instead"); + } + + @Override + public void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index)); + } + + + private JSONObject prepareQuery(String iterator, String gid, String sortByString) throws IOException, URISyntaxException { + + String QUERY_NOSORT = "query SubredditQuery( $url: String! $filter: SubredditPostFilter $iterator: String ) { getSubreddit(url: $url) { children( limit: 50 iterator: $iterator filter: $filter ) { iterator items { __typename url title subredditTitle subredditUrl redditPath isNsfw albumUrl isFavorite mediaSources { url width height isOptimized } } } } }"; + String QUERY_SORT = "subscription SubredditSubscription( $url: String! $sortBy: SubredditSortBy $timespan: SubredditTimespan $iterator: String $limit: Int $filter: SubredditPostFilter ) { fetchSubreddit( url: $url sortBy: $sortBy timespan: $timespan iterator: $iterator limit: $limit filter: $filter ) { __typename ... on Subreddit { __typename url title secondaryTitle description createdAt isNsfw subscribers isComplete itemCount videoCount pictureCount albumCount isFollowing } ... on SubredditPost { __typename url title subredditTitle subredditUrl redditPath isNsfw albumUrl isFavorite mediaSources { url width height isOptimized } } ... on Iterator { iterator } ... on Error { message } } }"; + + String filterString = convertFilterString(getParameter(this.url,"filter")); + + JSONObject variablesObject = new JSONObject().put("url", String.format("/r/%s", gid)).put("sortBy", sortByString.toUpperCase()); + JSONObject finalQueryObject = new JSONObject().put("variables", variablesObject).put("query", sortByString.equals("") ? 
QUERY_NOSORT : QUERY_SORT); + + if (iterator != null) { + // Iterator is not present on the first page + variablesObject.put("iterator", iterator); + } + if (!filterString.equals("NOFILTER")) { + variablesObject.put("filter", filterString); + } + + return sortByString.equals("") ? getPosts(finalQueryObject) : getPostsSorted(finalQueryObject); + + } + + + public String convertFilterString(String filterParameter) { + // Converts the ?filter= parameter of the URL to one that can be used in the GraphQL query + // I could basically remove the last "s" and call toUpperCase instead of this switch statement but this looks easier to read. + switch (filterParameter.toLowerCase()) { + case "pictures": + return "PICTURE"; + case "videos": + return "VIDEO"; + case "albums": + return "ALBUM"; + case "": + return "NOFILTER"; + default: + LOGGER.error(String.format("Invalid filter %s using no filter",filterParameter)); + return ""; + } + } + + public String getParameter(URL url, String parameter) throws MalformedURLException { + // Gets passed parameters from the URL + String toReplace = String.format("https://scrolller.com/r/%s?",getGID(url)); + List args= URLEncodedUtils.parse(url.toExternalForm(), Charset.defaultCharset()); + for (NameValuePair arg:args) { + // First parameter contains part of the url so we have to remove it + // Ex: for the url https://scrolller.com/r/CatsStandingUp?filter=xxxx&sort=yyyy + // 1) arg.getName() => https://scrolller.com/r/CatsStandingUp?filter + // 2) arg.getName() => sort + + if (arg.getName().replace(toReplace,"").toLowerCase().equals((parameter))) { + return arg.getValue(); + } + } + return ""; + } + + private JSONObject getPosts(JSONObject data) { + // The actual GraphQL query call + + try { + String url = "https://api.scrolller.com/api/v2/graphql"; + + URL obj = new URI(url).toURL(); + HttpURLConnection conn = (HttpURLConnection) obj.openConnection(); + conn.setReadTimeout(5000); + conn.addRequestProperty("Accept-Language", 
"en-US,en;q=0.8"); + conn.addRequestProperty("User-Agent", "Mozilla"); + conn.addRequestProperty("Referer", "scrolller.com"); + + conn.setDoOutput(true); + + OutputStreamWriter w = new OutputStreamWriter(conn.getOutputStream(), "UTF-8"); + + w.write(data.toString()); + w.close(); + + BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream())); + String inputLine; + StringBuffer jsonString = new StringBuffer(); + + while ((inputLine = in.readLine()) != null) { + jsonString.append(inputLine); + } + + in.close(); + conn.disconnect(); + + return new JSONObject(jsonString.toString()); + + } catch (Exception e) { + e.printStackTrace(); + } + + return new JSONObject("{}"); + } + + private JSONObject getPostsSorted(JSONObject data) throws MalformedURLException { + + // The actual GraphQL query call (if sort parameter is present) + try { + + ArrayList postsJsonStrings = new ArrayList<>(); + + WebSocketClient wsc = new WebSocketClient(new URI("wss://api.scrolller.com/api/v2/graphql")) { + @Override + public void onOpen(ServerHandshake serverHandshake) { + // As soon as the WebSocket connects send our query + this.send(data.toString()); + } + + @Override + public void onMessage(String s) { + postsJsonStrings.add(s); + if (new JSONObject(s).getJSONObject("data").getJSONObject("fetchSubreddit").has("iterator")) { + this.close(); + } + } + + @Override + public void onClose(int i, String s, boolean b) { + } + + @Override + public void onError(Exception e) { + LOGGER.error(String.format("WebSocket error, server reported %s", e.getMessage())); + } + }; + wsc.connect(); + + while (!wsc.isClosed()) { + // Posts list is not over until the connection closes. + } + + JSONObject finalObject = new JSONObject(); + JSONArray posts = new JSONArray(); + + // Iterator is the last object in the post list, let's duplicate it in his own object for clarity. 
+ finalObject.put("iterator", new JSONObject(postsJsonStrings.get(postsJsonStrings.size()-1))); + + for (String postString : postsJsonStrings) { + posts.put(new JSONObject(postString)); + } + finalObject.put("posts", posts); + + if (finalObject.getJSONArray("posts").length() == 1 && !finalObject.getJSONArray("posts").getJSONObject(0).getJSONObject("data").getJSONObject("fetchSubreddit").has("mediaSources")) { + // Only iterator, no posts. + return null; + } + + return finalObject; + + + } catch (URISyntaxException ue) { + // Nothing to catch, it's a hardcoded URI. + } + + return null; + } + + + @Override + protected List getURLsFromJSON(JSONObject json) throws JSONException { + + boolean sortRequested = json.has("posts"); + + int bestArea = 0; + String bestUrl = ""; + List list = new ArrayList<>(); + + JSONArray itemsList = sortRequested ? json.getJSONArray("posts") : json.getJSONObject("data").getJSONObject("getSubreddit").getJSONObject("children").getJSONArray("items"); + + for (Object item : itemsList) { + + if (sortRequested && !((JSONObject) item).getJSONObject("data").getJSONObject("fetchSubreddit").has("mediaSources")) { + continue; + } + + JSONArray sourcesTMP = sortRequested ?
((JSONObject) item).getJSONObject("data").getJSONObject("fetchSubreddit").getJSONArray("mediaSources") : ((JSONObject) item).getJSONArray("mediaSources"); + for (Object sourceTMP : sourcesTMP) + { + int widthTMP = ((JSONObject) sourceTMP).getInt("width"); + int heightTMP = ((JSONObject) sourceTMP).getInt("height"); + int areaTMP = widthTMP * heightTMP; + + if (areaTMP > bestArea) { + bestArea = widthTMP; + bestUrl = ((JSONObject) sourceTMP).getString("url"); + } + } + list.add(bestUrl); + bestUrl = ""; + bestArea = 0; + } + + return list; + } + + @Override + protected JSONObject getFirstPage() throws IOException { + try { + return prepareQuery(null, this.getGID(url), getParameter(url,"sort")); + } catch (URISyntaxException e) { + LOGGER.error(String.format("Error obtaining first page: %s", e.getMessage())); + return null; + } + } + + @Override + public JSONObject getNextPage(JSONObject source) throws IOException { + // Every call to the API contains an "iterator" string that we need to pass to the API to get the next page + // Checking if iterator is null is not working for some reason, hence why the weird "iterator.toString().equals("null")" + + Object iterator = null; + if (source.has("iterator")) { + // Sort requested, custom JSON. + iterator = source.getJSONObject("iterator").getJSONObject("data").getJSONObject("fetchSubreddit").get("iterator"); + } else { + iterator = source.getJSONObject("data").getJSONObject("getSubreddit").getJSONObject("children").get("iterator"); + } + + if (!iterator.toString().equals("null")) { + // Need to change page.
+ try { + return prepareQuery(iterator.toString(), this.getGID(url), getParameter(url,"sort")); + } catch (URISyntaxException e) { + LOGGER.error(String.format("Error changing page: %s", e.getMessage())); + return null; + } + } else { + return null; + } + } } \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ShesFreakyRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ShesFreakyRipper.java index 73dad1b12..b96e2f6b4 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ShesFreakyRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ShesFreakyRipper.java @@ -12,7 +12,6 @@ import org.jsoup.nodes.Element; import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; public class ShesFreakyRipper extends AbstractHTMLRipper { @@ -41,11 +40,6 @@ public String getGID(URL url) throws MalformedURLException { + "shesfreaky.com/gallery/... - got " + url + "instead"); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List imageURLs = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java index d6a0f9cb0..f3a216f45 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/SinfestRipper.java @@ -41,12 +41,6 @@ public String getGID(URL url) throws MalformedURLException { "sinfest.net/view.php?date=XXXX-XX-XX/ - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { Element elem = doc.select("td.style5 > a > img").last(); diff --git 
a/src/main/java/com/rarchives/ripme/ripper/rippers/SmuttyRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/SmuttyRipper.java index b61f2fef7..ad00e5c8a 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/SmuttyRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/SmuttyRipper.java @@ -89,11 +89,6 @@ public Document getNextPage(Document doc) throws IOException { } @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - public void downloadURL(URL url, int index) { addURLToDownload(url, getPrefix(index)); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/SoundgasmRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/SoundgasmRipper.java new file mode 100644 index 000000000..ab9ebfa9a --- /dev/null +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/SoundgasmRipper.java @@ -0,0 +1,69 @@ +package com.rarchives.ripme.ripper.rippers; + +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; +import org.jsoup.select.Elements; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class SoundgasmRipper extends AbstractHTMLRipper { + + private static final String HOST = "soundgasm.net"; + + public SoundgasmRipper(URL url) throws IOException, URISyntaxException { + super(new URI(url.toExternalForm()).toURL()); + } + + @Override + protected String getDomain() { return "soundgasm.net"; } + + @Override + public String getHost() { return "soundgasm"; } + + @Override + public String getGID(URL url) throws MalformedURLException { + Pattern p = Pattern.compile("^/u/([a-zA-Z0-9_-]+)/([a-zA-Z0-9_-]+).*$"); + Matcher m = 
p.matcher(url.getFile()); + if (m.find()) { + return m.group(m.groupCount()); + } + throw new MalformedURLException( + "Expected soundgasm.net format: " + + "soundgasm.net/u/username/id or " + + " Got: " + url); + } + + @Override + public Document getFirstPage() throws IOException, URISyntaxException { + return super.getFirstPage(); + } + + @Override + public List getURLsFromPage(Document page) { + List res = new ArrayList<>(); + + Elements script = page.select("script"); + Pattern p = Pattern.compile("m4a\\:\\s\"(https?:.*)\\\""); + + for (Element e: script) { + Matcher m = p.matcher(e.data()); + if (m.find()) { res.add(m.group(1)); } + } + return res; + } + + @Override + public void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index)); + } + +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/SpankbangRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/SpankbangRipper.java index bca5ef666..9ea1a130a 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/SpankbangRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/SpankbangRipper.java @@ -28,11 +28,6 @@ public String getDomain() { return "spankbang.com"; } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/StaRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/StaRipper.java index b331bbce0..ac7414dd0 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/StaRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/StaRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.HashMap; @@ -47,12 +49,6 @@ public String getGID(URL url) throws MalformedURLException { 
"sta.sh/ALBUMID - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList<>(); @@ -61,10 +57,10 @@ public List getURLsFromPage(Document doc) { Document thumbPage = null; if (checkURL(thumbPageURL)) { try { - Connection.Response resp = Http.url(new URL(thumbPageURL)).response(); + Connection.Response resp = Http.url(new URI(thumbPageURL).toURL()).response(); cookies.putAll(resp.cookies()); thumbPage = resp.parse(); - } catch (MalformedURLException e) { + } catch (MalformedURLException | URISyntaxException e) { LOGGER.info(thumbPageURL + " is a malformed URL"); } catch (IOException e) { LOGGER.info(e.getMessage()); @@ -81,9 +77,9 @@ public List getURLsFromPage(Document doc) { private boolean checkURL(String url) { try { - new URL(url); + new URI(url).toURL(); return true; - } catch (MalformedURLException e) { + } catch (MalformedURLException | URISyntaxException e) { return false; } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TapasticRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TapasticRipper.java index 369ce741e..d514c1e62 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/TapasticRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TapasticRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -46,11 +48,6 @@ public String getHost() { return "tapas"; } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document page) { List urls = new ArrayList<>(); @@ -87,12 +84,12 @@ public void downloadURL(URL url, int index) { 
prefix.append(String.format("-%0" + imgLog + "dof%0" + imgLog + "d-", i + 1, images.size())); prefix.append(episode.filename.replace(" ", "-")); prefix.append("-"); - addURLToDownload(new URL(link), prefix.toString()); + addURLToDownload(new URI(link).toURL(), prefix.toString()); if (isThisATest()) { break; } } - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { LOGGER.error("[!] Exception while downloading " + url, e); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TeenplanetRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TeenplanetRipper.java index 9791ab907..25edb5f79 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/TeenplanetRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TeenplanetRipper.java @@ -34,11 +34,6 @@ public String getHost() { return HOST; } - @Override - protected Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override protected List getURLsFromPage(Document page) { List imageURLs = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ThechiveRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ThechiveRipper.java index 3c9d751d8..8105fe73f 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ThechiveRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ThechiveRipper.java @@ -70,12 +70,6 @@ public String getGID(URL url) throws MalformedURLException { + "thechive.com/YEAR/MONTH/DAY/POSTTITLE/ OR i.thechive.com/username, got " + url + " instead."); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result; diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TheyiffgalleryRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TheyiffgalleryRipper.java index ac3e363cd..3f616faa7 100644 --- 
a/src/main/java/com/rarchives/ripme/ripper/rippers/TheyiffgalleryRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TheyiffgalleryRipper.java @@ -41,12 +41,6 @@ public String getGID(URL url) throws MalformedURLException { "theyiffgallery.com/index?/category/#### - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public Document getNextPage(Document doc) throws IOException { String nextPage = doc.select("span.navPrevNext > a").attr("href"); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java index 846c4795a..49baa384a 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TsuminoRipper.java @@ -1,9 +1,11 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; +import java.io.UnsupportedEncodingException; import java.net.MalformedURLException; import java.net.URL; import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -110,11 +112,11 @@ public Document getFirstPage() throws IOException { } @Override - public List getURLsFromPage(Document doc) { + public List getURLsFromPage(Document doc) throws UnsupportedEncodingException { JSONArray imageIds = getPageUrls(); List result = new ArrayList<>(); for (int i = 0; i < imageIds.length(); i++) { - result.add("http://www.tsumino.com/Image/Object?name=" + URLEncoder.encode(imageIds.getString(i))); + result.add("http://www.tsumino.com/Image/Object?name=" + URLEncoder.encode(imageIds.getString(i), StandardCharsets.UTF_8.name())); } return result; @@ -127,6 +129,6 @@ public void downloadURL(URL url, int index) { There is no way to tell if an image returned from tsumino.com is a png to 
jpg. The content-type header is always "image/jpeg" even when the image is a png. The file ext is not included in the url. */ - addURLToDownload(url, getPrefix(index), "", null, null, null, null, true); + addURLToDownload(url, "", null, null, getPrefix(index), null, null, true); } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java index 0c561d773..6d91361bc 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TumblrRipper.java @@ -1,9 +1,7 @@ package com.rarchives.ripme.ripper.rippers; import java.io.IOException; -import java.net.HttpURLConnection; -import java.net.MalformedURLException; -import java.net.URL; +import java.net.*; import java.util.Arrays; import java.util.List; import java.util.Random; @@ -100,11 +98,11 @@ public boolean canRip(URL url) { * @throws MalformedURLException */ @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { String u = url.toExternalForm(); // Convert .tumblr.com/path to /path if needed if (StringUtils.countMatches(u, ".") > 2) { - url = new URL(u.replace(".tumblr.com", "")); + url = new URI(u.replace(".tumblr.com", "")).toURL(); if (isTumblrURL(url)) { LOGGER.info("Detected tumblr site: " + url); } @@ -263,7 +261,7 @@ private boolean handleJSON(JSONObject json) { fileLocation = photo.getJSONObject("original_size").getString("url").replaceAll("http:", "https:"); qualM = qualP.matcher(fileLocation); fileLocation = qualM.replaceFirst("_1280.$1"); - fileURL = new URL(fileLocation); + fileURL = new URI(fileLocation).toURL(); m = p.matcher(fileURL.toString()); if (m.matches()) { @@ -278,7 +276,7 @@ private boolean handleJSON(JSONObject json) { } } else if (post.has("video_url")) { try { - fileURL = new URL(post.getString("video_url").replaceAll("http:", 
"https:")); + fileURL = new URI(post.getString("video_url").replaceAll("http:", "https:")).toURL(); downloadURL(fileURL, date); } catch (Exception e) { LOGGER.error("[!] Error while parsing video in " + post, e); @@ -293,8 +291,8 @@ private boolean handleJSON(JSONObject json) { // If the image is any smaller, it will still get the largest available size qualM = qualP.matcher(imgSrc); imgSrc = qualM.replaceFirst("_1280.$1"); - downloadURL(new URL(imgSrc), date); - } catch (MalformedURLException e) { + downloadURL(new URI(imgSrc).toURL(), date); + } catch (MalformedURLException | URISyntaxException e) { LOGGER.error("[!] Error while getting embedded image at " + post, e); return true; } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/TwitterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/TwitterRipper.java index 1a1bf1abb..2ce658345 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/TwitterRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/TwitterRipper.java @@ -121,7 +121,7 @@ private String getApiURL(Long maxID) { case ACCOUNT: req.append("https://api.twitter.com/1.1/statuses/user_timeline.json") .append("?screen_name=" + this.accountName).append("&include_entities=true") - .append("&exclude_replies=true").append("&trim_user=true").append("&count=" + MAX_ITEMS_REQUEST) + .append("&exclude_replies=false").append("&trim_user=true").append("&count=" + MAX_ITEMS_REQUEST) .append("&tweet_mode=extended"); break; case SEARCH:// Only get tweets from last week diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/VidbleRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/VidbleRipper.java index 9a962f3a9..baf5e2128 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/VidbleRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/VidbleRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import 
java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -45,11 +47,6 @@ public String getGID(URL url) throws MalformedURLException { + " Got: " + url); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { return getURLsFromPageStatic(doc); @@ -75,11 +72,11 @@ public void downloadURL(URL url, int index) { addURLToDownload(url, getPrefix(index)); } - public static List getURLsFromPage(URL url) throws IOException { + public static List getURLsFromPage(URL url) throws IOException, URISyntaxException { List urls = new ArrayList<>(); Document doc = Http.url(url).get(); for (String stringURL : getURLsFromPageStatic(doc)) { - urls.add(new URL(stringURL)); + urls.add(new URI(stringURL).toURL()); } return urls; } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java index 27015a061..72f652496 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ViewcomicRipper.java @@ -2,6 +2,7 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -31,10 +32,10 @@ public String getDomain() { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title as GID - String titleText = getFirstPage().select("title").first().text(); + String titleText = getCachedFirstPage().select("title").first().text(); String title = titleText.replace("Viewcomic reading comics online for free", ""); title = title.replace("_", ""); title = title.replace("|", ""); @@ -60,12 +61,6 @@ public String getGID(URL url) 
throws MalformedURLException { "view-comic.com/COMIC_NAME - got " + url + " instead"); } - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document doc) { List result = new ArrayList(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/VkRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/VkRipper.java index b364a5ae2..c6394bb87 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/VkRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/VkRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.*; import java.util.regex.Matcher; @@ -137,13 +139,13 @@ public URL sanitizeURL(URL url) throws MalformedURLException { } @Override - public void rip() throws IOException { + public void rip() throws IOException, URISyntaxException { if (this.url.toExternalForm().contains("/videos")) { RIP_TYPE = RipType.VIDEO; JSONObject json = getFirstPage(); List URLs = getURLsFromJSON(json); for (int index = 0; index < URLs.size(); index ++) { - downloadURL(new URL(URLs.get(index)), index); + downloadURL(new URI(URLs.get(index)).toURL(), index); } waitForThreads(); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/VscoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/VscoRipper.java index d0a36cdcb..a4fc08ccc 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/VscoRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/VscoRipper.java @@ -1,230 +1,223 @@ -package com.rarchives.ripme.ripper.rippers; - -import com.rarchives.ripme.ripper.AbstractHTMLRipper; -import com.rarchives.ripme.utils.Http; -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.*; -import 
java.util.regex.Matcher; -import java.util.regex.Pattern; -import org.json.JSONObject; -import org.jsoup.Jsoup; -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; - -/** - * For ripping VSCO pictures. - */ -public class VscoRipper extends AbstractHTMLRipper { - - int pageNumber = 1; - JSONObject profileJSON; - - - private static final String DOMAIN = "vsco.co", - HOST = "vsco"; - - public VscoRipper(URL url) throws IOException{ - super(url); - } - - /** - * Checks to see if VscoRipper can Rip specified url. - * @param url - * @return True if can rip. - * False if cannot rip. - */ - @Override - public boolean canRip(URL url) { - if (!url.getHost().endsWith(DOMAIN)) { - return false; - } - // Ignores personalized things (e.g. login, feed) and store page - // Allows links to user profiles and links to images. - //@TODO: Add support for journals and collections. - String u = url.toExternalForm(); - return !u.contains("/store/") || - !u.contains("/feed/") || - !u.contains("/login/") || - !u.contains("/journal/") || - !u.contains("/collection/")|| - !u.contains("/images/") || - u.contains("/media/"); - - } - - @Override - public URL sanitizeURL(URL url) throws MalformedURLException { - //no sanitization needed. - return url; - } - - /** - *

<p>Gets the direct URL of full-sized image through the <meta> tag.</p>

- * When expanding future functionality (e.g. support from journals), put everything into this method. - * @param page - * @return - */ - @Override - public List getURLsFromPage(Document page){ - List toRip = new ArrayList<>(); - //If user wanted to rip single image - if (url.toString().contains("/media/")){ - try { - toRip.add(vscoImageToURL(url.toExternalForm())); - } catch (IOException ex) { - LOGGER.debug("Failed to convert " + url.toString() + " to external form."); - } - - } else { - String username = getUserName(); - String userTkn = getUserTkn(username); - String siteID = getSiteID(userTkn, username); - while (true) { - profileJSON = getProfileJSON(userTkn, username, Integer.toString(pageNumber), siteID); - for (int i = 0; i < profileJSON.getJSONArray("media").length(); i++) { - toRip.add("https://" + profileJSON.getJSONArray("media").getJSONObject(i).getString("responsive_url")); - } - if (pageNumber * 1000 > profileJSON.getInt("total")) { - return toRip; - } - pageNumber++; - } - - - } - - return toRip; - } - - private String getUserTkn(String username) { - String userinfoPage = "https://vsco.co/content/Static/userinfo"; - String referer = "https://vsco.co/" + username + "/gallery"; - Map cookies = new HashMap<>(); - cookies.put("vs_anonymous_id", UUID.randomUUID().toString()); - try { - Element doc = Http.url(userinfoPage).cookies(cookies).referrer(referer).ignoreContentType().get().body(); - String json = doc.text().replaceAll("define\\(", ""); - json = json.replaceAll("\\)", ""); - return new JSONObject(json).getString("tkn"); - } catch (IOException e) { - LOGGER.error("Could not get user tkn"); - return null; - } - } - - private String getUserName() { - Pattern p = Pattern.compile("^https?://vsco.co/([a-zA-Z0-9-]+)(/gallery)?(/)?"); - Matcher m = p.matcher(url.toExternalForm()); - - if (m.matches()) { - String user = m.group(1); - return user; - } - return null; - } - - private JSONObject getProfileJSON(String tkn, String username, String page, String 
siteId) { - String size = "1000"; - String purl = "https://vsco.co/ajxp/" + tkn + "/2.0/medias?site_id=" + siteId + "&page=" + page + "&size=" + size; - Map cookies = new HashMap<>(); - cookies.put("vs", tkn); - try { - JSONObject j = Http.url(purl).cookies(cookies).getJSON(); - return j; - } catch (IOException e) { - LOGGER.error("Could not profile images"); - return null; - } - } - - private String getSiteID(String tkn, String username) { - Map cookies = new HashMap<>(); - cookies.put("vs", tkn); - try { - JSONObject j = Http.url("https://vsco.co/ajxp/" + tkn + "/2.0/sites?subdomain=" + username).cookies(cookies).getJSON(); - return Integer.toString(j.getJSONArray("sites").getJSONObject(0).getInt("id")); - } catch (IOException e) { - LOGGER.error("Could not get site id"); - return null; - } - } - - private String vscoImageToURL(String url) throws IOException{ - Document page = Jsoup.connect(url).userAgent(USER_AGENT) - .get(); - //create Elements filled only with Elements with the "meta" tag. - Elements metaTags = page.getElementsByTag("meta"); - String result = ""; - - for(Element metaTag : metaTags){ - //find URL inside meta-tag with property of "og:image" - if (metaTag.attr("property").equals("og:image")){ - String givenURL = metaTag.attr("content"); - givenURL = givenURL.replaceAll("\\?h=[0-9]+", "");//replace the "?h=xxx" tag at the end of the URL (where each x is a number) - - result = givenURL; - LOGGER.debug("Found image URL: " + givenURL); - break;//immediately stop after getting URL (there should only be 1 image to be downloaded) - } - } - - //Means website changed, things need to be fixed. 
- if (result.isEmpty()){ - LOGGER.error("Could not find image URL at: " + url); - } - - return result; - - } - - @Override - public String getHost() { - return HOST; - } - - @Override - public String getGID(URL url) throws MalformedURLException { - - //Single Image - Pattern p = Pattern.compile("^https?://vsco\\.co/([a-zA-Z0-9-]+)/media/([a-zA-Z0-9]+)"); - Matcher m = p.matcher(url.toExternalForm()); - - if (m.matches()){ - // Return the text contained between () in the regex - String user = m.group(1); - String imageNum = m.group(2).substring(0, 5);//first 5 characters should be enough to make each rip unique - return user + "/" + imageNum; - } - - //Member profile (Usernames should all be different, so this should work. - p = Pattern.compile("^https?://vsco.co/([a-zA-Z0-9-]+)(/gallery)?(/)?"); - m = p.matcher(url.toExternalForm()); - - if (m.matches()){ - String user = m.group(1); - return user; - } - - throw new MalformedURLException("Expected a URL to a single image or to a member profile, got " + url + " instead"); - - } - - @Override - public String getDomain() { - return DOMAIN; - } - - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - - @Override - public void downloadURL(URL url, int index) { - addURLToDownload(url, getPrefix(index)); - } - -} +package com.rarchives.ripme.ripper.rippers; + +import com.rarchives.ripme.ripper.AbstractHTMLRipper; +import com.rarchives.ripme.utils.Http; +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.util.*; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.json.JSONObject; +import org.jsoup.Jsoup; +import org.jsoup.nodes.Document; +import org.jsoup.Connection.Response; +import org.jsoup.nodes.Element; +import org.jsoup.select.Elements; + +/** + * For ripping VSCO pictures. 
+ */ +public class VscoRipper extends AbstractHTMLRipper { + + int pageNumber = 1; + JSONObject profileJSON; + + + private static final String DOMAIN = "vsco.co", + HOST = "vsco"; + + public VscoRipper(URL url) throws IOException{ + super(url); + } + + /** + * Checks to see if VscoRipper can Rip specified url. + * @param url + * @return True if can rip. + * False if cannot rip. + */ + @Override + public boolean canRip(URL url) { + if (!url.getHost().endsWith(DOMAIN)) { + return false; + } + // Ignores personalized things (e.g. login, feed) and store page + // Allows links to user profiles and links to images. + //@TODO: Add support for journals and collections. + String u = url.toExternalForm(); + return !u.contains("/store/") || + !u.contains("/feed/") || + !u.contains("/login/") || + !u.contains("/journal/") || + !u.contains("/collection/")|| + !u.contains("/images/") || + u.contains("/media/"); + + } + + @Override + public URL sanitizeURL(URL url) throws MalformedURLException { + //no sanitization needed. + return url; + } + + /** + *

<p>Gets the direct URL of full-sized image through the <meta> tag.</p>

+ * When expanding future functionality (e.g. support from journals), put everything into this method. + * @param page + * @return + */ + @Override + public List getURLsFromPage(Document page){ + List toRip = new ArrayList<>(); + //If user wanted to rip single image + if (url.toString().contains("/media/")){ + try { + toRip.add(vscoImageToURL(url.toExternalForm())); + } catch (IOException ex) { + LOGGER.debug("Failed to convert " + url.toString() + " to external form."); + } + + } else { + String username = getUserName(); + String userTkn = getUserTkn(username); + String siteID = getSiteID(userTkn, username); + while (true) { + profileJSON = getProfileJSON(userTkn, username, Integer.toString(pageNumber), siteID); + for (int i = 0; i < profileJSON.getJSONArray("media").length(); i++) { + toRip.add("https://" + profileJSON.getJSONArray("media").getJSONObject(i).getString("responsive_url")); + } + if (pageNumber * 1000 > profileJSON.getInt("total")) { + return toRip; + } + pageNumber++; + } + + + } + + return toRip; + } + + private String getUserTkn(String username) { + String userTokenPage = "https://vsco.co/content/Static"; + Map responseCookies = new HashMap<>(); + try { + Response resp = Http.url(userTokenPage).ignoreContentType().response(); + responseCookies = resp.cookies(); + return responseCookies.get("vs"); + } catch (IOException e) { + LOGGER.error("Could not get user tkn"); + return null; + } + } + + private String getUserName() { + Pattern p = Pattern.compile("^https?://vsco.co/([a-zA-Z0-9-]+)(/gallery)?(/)?"); + Matcher m = p.matcher(url.toExternalForm()); + + if (m.matches()) { + String user = m.group(1); + return user; + } + return null; + } + + private JSONObject getProfileJSON(String tkn, String username, String page, String siteId) { + String size = "1000"; + String purl = "https://vsco.co/ajxp/" + tkn + "/2.0/medias?site_id=" + siteId + "&page=" + page + "&size=" + size; + Map cookies = new HashMap<>(); + cookies.put("vs", tkn); + try { + 
JSONObject j = Http.url(purl).cookies(cookies).getJSON(); + return j; + } catch (IOException e) { + LOGGER.error("Could not profile images"); + return null; + } + } + + private String getSiteID(String tkn, String username) { + Map cookies = new HashMap<>(); + cookies.put("vs", tkn); + try { + JSONObject j = Http.url("https://vsco.co/ajxp/" + tkn + "/2.0/sites?subdomain=" + username).cookies(cookies).getJSON(); + return Integer.toString(j.getJSONArray("sites").getJSONObject(0).getInt("id")); + } catch (IOException e) { + LOGGER.error("Could not get site id"); + return null; + } + } + + private String vscoImageToURL(String url) throws IOException{ + Document page = Jsoup.connect(url).userAgent(USER_AGENT) + .get(); + //create Elements filled only with Elements with the "meta" tag. + Elements metaTags = page.getElementsByTag("meta"); + String result = ""; + + for(Element metaTag : metaTags){ + //find URL inside meta-tag with property of "og:image" + if (metaTag.attr("property").equals("og:image")){ + String givenURL = metaTag.attr("content"); + givenURL = givenURL.replaceAll("\\?h=[0-9]+", "");//replace the "?h=xxx" tag at the end of the URL (where each x is a number) + + result = givenURL; + LOGGER.debug("Found image URL: " + givenURL); + break;//immediately stop after getting URL (there should only be 1 image to be downloaded) + } + } + + //Means website changed, things need to be fixed. 
+ if (result.isEmpty()){ + LOGGER.error("Could not find image URL at: " + url); + } + + return result; + + } + + @Override + public String getHost() { + return HOST; + } + + @Override + public String getGID(URL url) throws MalformedURLException { + + //Single Image + Pattern p = Pattern.compile("^https?://vsco\\.co/([a-zA-Z0-9-]+)/media/([a-zA-Z0-9]+)"); + Matcher m = p.matcher(url.toExternalForm()); + + if (m.matches()){ + // Return the text contained between () in the regex + String user = m.group(1); + String imageNum = m.group(2).substring(0, 5);//first 5 characters should be enough to make each rip unique + return user + "/" + imageNum; + } + + //Member profile (Usernames should all be different, so this should work. + p = Pattern.compile("^https?://vsco.co/([a-zA-Z0-9-]+)(/gallery)?(/)?"); + m = p.matcher(url.toExternalForm()); + + if (m.matches()){ + String user = m.group(1); + return user; + } + + throw new MalformedURLException("Expected a URL to a single image or to a member profile, got " + url + " instead"); + + } + + @Override + public String getDomain() { + return DOMAIN; + } + + @Override + public void downloadURL(URL url, int index) { + addURLToDownload(url, getPrefix(index)); + } + +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/WebtoonsRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/WebtoonsRipper.java index d82f4aff4..0da345b7e 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/WebtoonsRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/WebtoonsRipper.java @@ -2,6 +2,7 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -43,7 +44,7 @@ public boolean canRip(URL url) { @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { Pattern pat = 
Pattern.compile("https?://www.webtoons.com/[a-zA-Z-_]+/[a-zA-Z_-]+/([a-zA-Z0-9_-]*)/[a-zA-Z0-9_-]+/\\S*"); Matcher mat = pat.matcher(url.toExternalForm()); if (mat.matches()) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/WordpressComicRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/WordpressComicRipper.java index 0589f29d2..6c6962721 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/WordpressComicRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/WordpressComicRipper.java @@ -2,6 +2,7 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.Arrays; @@ -220,7 +221,7 @@ public List getAlbumsToQueue(Document doc) { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { Pattern totempole666Pat = Pattern.compile("(?:https?://)?(?:www\\.)?totempole666.com/comic/([a-zA-Z0-9_-]*)/?$"); Matcher totempole666Mat = totempole666Pat.matcher(url.toExternalForm()); if (totempole666Mat.matches()) { @@ -421,10 +422,4 @@ public void downloadURL(URL url, int index) { } - - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/XcartxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/XcartxRipper.java index 64829a0b6..0b616726c 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/XcartxRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/XcartxRipper.java @@ -44,11 +44,6 @@ public String getGID(URL url) throws MalformedURLException { } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } - @Override public List getURLsFromPage(Document page) { List imageURLs = 
new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java index 7ade1e558..2cea95a70 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/XhamsterRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -43,36 +45,36 @@ public String getDomain() { } @Override - public URL sanitizeURL(URL url) throws MalformedURLException { + public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException { if (isVideoUrl(url)) { return url; } String URLToReturn = url.toExternalForm(); URLToReturn = URLToReturn.replaceAll("https?://\\w?\\w?\\.?xhamster([^<]*)\\.", "https://m.xhamster$1."); - URL san_url = new URL(URLToReturn); + URL san_url = new URI(URLToReturn).toURL(); LOGGER.info("sanitized URL is " + san_url.toExternalForm()); return san_url; } @Override public String getGID(URL url) throws MalformedURLException { - Pattern p = Pattern.compile("^https?://([\\w\\w]*\\.)?xhamster([^<]*)\\.(com|one|desi)/photos/gallery/.*?(\\d+)$"); + Pattern p = Pattern.compile("^https?://([\\w\\w]*\\.)?xhamster([^<]*)\\.(com|desi)/photos/gallery/.*?(\\d+)$"); Matcher m = p.matcher(url.toExternalForm()); if (m.matches()) { return m.group(4); } - p = Pattern.compile("^https?://[\\w\\w.]*xhamster([^<]*)\\.(com|one|desi)/users/([a-zA-Z0-9_-]+)/(photos|videos)(/\\d+)?"); + p = Pattern.compile("^https?://[\\w\\w.]*xhamster([^<]*)\\.(com|desi)/users/([a-zA-Z0-9_-]+)/(photos|videos)(/\\d+)?"); m = p.matcher(url.toExternalForm()); if (m.matches()) { return "user_" + m.group(1); } - p = Pattern.compile("^https?://.*xhamster([^<]*)\\.(com|one|desi)/(movies|videos)/(.*$)"); + p = 
Pattern.compile("^https?://.*xhamster([^<]*)\\.(com|desi)/(movies|videos)/(.*$)"); m = p.matcher(url.toExternalForm()); if (m.matches()) { return m.group(4); } - throw new MalformedURLException( + throw new MalformedURLException( "Expected xhamster.com gallery formats: " + "xhamster.com/photos/gallery/xxxxx-#####" + " Got: " + url); @@ -84,6 +86,9 @@ public List getAlbumsToQueue(Document doc) { LOGGER.info("getting albums"); for (Element elem : doc.select("div.item-container > a.item")) { urlsToAddToQueue.add(elem.attr("href")); + if (isStopped() || isThisATest()) { + break; + } } LOGGER.info(doc.html()); return urlsToAddToQueue; @@ -96,33 +101,26 @@ public boolean hasQueueSupport() { @Override public boolean pageContainsAlbums(URL url) { - Pattern p = Pattern.compile("^https?://[\\w\\w.]*xhamster([^<]*)\\.(com|one|desi)/users/([a-zA-Z0-9_-]+)/(photos|videos)(/\\d+)?"); + Pattern p = Pattern.compile("^https?://[\\w\\w.]*xhamster([^<]*)\\.(com|desi)/users/([a-zA-Z0-9_-]+)/(photos|videos)(/\\d+)?"); Matcher m = p.matcher(url.toExternalForm()); LOGGER.info("Checking if page has albums"); LOGGER.info(m.matches()); return m.matches(); } - - @Override - public Document getFirstPage() throws IOException { - // "url" is an instance field of the superclass - return Http.url(url).get(); - } - @Override public boolean canRip(URL url) { - Pattern p = Pattern.compile("^https?://([\\w\\w]*\\.)?xhamster([^<]*)\\.(com|one|desi)/photos/gallery/.*?(\\d+)$"); + Pattern p = Pattern.compile("^https?://([\\w\\w]*\\.)?xhamster([^<]*)\\.(com|desi)/photos/gallery/.*?(\\d+)$"); Matcher m = p.matcher(url.toExternalForm()); if (m.matches()) { return true; } - p = Pattern.compile("^https?://[\\w\\w.]*xhamster([^<]*)\\.(com|one|desi)/users/([a-zA-Z0-9_-]+)/(photos|videos)(/\\d+)?"); + p = Pattern.compile("^https?://[\\w\\w.]*xhamster([^<]*)\\.(com|desi)/users/([a-zA-Z0-9_-]+)/(photos|videos)(/\\d+)?"); m = p.matcher(url.toExternalForm()); if (m.matches()) { return true; } - p = 
Pattern.compile("^https?://.*xhamster([^<]*)\\.(com|one|desi)/(movies|videos)/(.*$)"); + p = Pattern.compile("^https?://.*xhamster([^<]*)\\.(com|desi)/(movies|videos)/(.*$)"); m = p.matcher(url.toExternalForm()); if (m.matches()) { return true; @@ -131,15 +129,15 @@ public boolean canRip(URL url) { } private boolean isVideoUrl(URL url) { - Pattern p = Pattern.compile("^https?://.*xhamster([^<]*)\\.(com|one|desi)/(movies|videos)/(.*$)"); + Pattern p = Pattern.compile("^https?://.*xhamster([^<]*)\\.(com|desi)/(movies|videos)/(.*$)"); Matcher m = p.matcher(url.toExternalForm()); return m.matches(); } @Override public Document getNextPage(Document doc) throws IOException { - if (doc.select("a[rel=next]").first() != null) { - String nextPageUrl = doc.select("a[rel=next]").first().attr("href"); + if (doc.select("a.prev-next-list-link").first() != null) { + String nextPageUrl = doc.select("a.prev-next-list-link--next").first().attr("href"); if (nextPageUrl.startsWith("http")) { nextPageUrl = nextPageUrl.replaceAll("https?://\\w?\\w?\\.?xhamster([^<]*)\\.", "https://m.xhamster$1."); return Http.url(nextPageUrl).get(); @@ -149,29 +147,56 @@ public Document getNextPage(Document doc) throws IOException { } + @Override + public Document getFirstPage() throws IOException, URISyntaxException { + return super.getFirstPage(); + } + @Override public List getURLsFromPage(Document doc) { LOGGER.debug("Checking for urls"); List result = new ArrayList<>(); if (!isVideoUrl(url)) { - for (Element page : doc.select("div.picture_view > div.pictures_block > div.items > div.item-container > a.item")) { - // Make sure we don't waste time running the loop if the ripper has been stopped - if (isStopped()) { - break; - } - String pageWithImageUrl = page.attr("href"); - try { - // This works around some redirect fuckery xhamster likes to do where visiting m.xhamster.com sends to - // the page chamster.com but displays the mobile site from m.xhamster.com - pageWithImageUrl = 
pageWithImageUrl.replaceAll("://xhamster([^<]*)\\.", "://m.xhamster$1."); - String image = Http.url(new URL(pageWithImageUrl)).get().select("a > img#photoCurr").attr("src"); - downloadFile(image); - } catch (IOException e) { - LOGGER.error("Was unable to load page " + pageWithImageUrl); + if (!doc.select("div.picture_view > div.pictures_block > div.items > div.item-container > a.item").isEmpty()) { + // Old HTML structure is still present at some places + for (Element page : doc.select(".clearfix > div > a.slided")) { + // Make sure we don't waste time running the loop if the ripper has been stopped + if (isStopped()) { + break; + } + String pageWithImageUrl = page.attr("href"); + try { + // This works around some redirect fuckery xhamster likes to do where visiting m.xhamster.com sends to + // the page chamster.com but displays the mobile site from m.xhamster.com + pageWithImageUrl = pageWithImageUrl.replaceAll("://xhamster([^<]*)\\.", "://m.xhamster$1."); + String image = Http.url(new URI(pageWithImageUrl).toURL()).get().select("a > img#photoCurr").attr("src"); + result.add(image); + downloadFile(image); + } catch (IOException | URISyntaxException e) { + LOGGER.error("Was unable to load page " + pageWithImageUrl); } - } + if (isStopped() || isThisATest()) { + break; + } + } + } else { + // New HTML structure + for (Element page : doc.select("div#photo-slider > div#photo_slider > a")) { + // Make sure we don't waste time running the loop if the ripper has been stopped + if (isStopped()) { + break; + } + String image = page.attr("href"); + // This works around some redirect fuckery xhamster likes to do where visiting m.xhamster.com sends to + // the page chamster.com but displays the mobile site from m.xhamster.com + image = image.replaceAll("://xhamster([^<]*)\\.", "://m.xhamster$1."); + result.add(image); + downloadFile(image); + } + } } else { String imgUrl = doc.select("div.player-container > a").attr("href"); + result.add(imgUrl); downloadFile(imgUrl); } 
return result; @@ -184,18 +209,18 @@ public void downloadURL(URL url, int index) { private void downloadFile(String url) { try { - addURLToDownload(new URL(url), getPrefix(index)); + addURLToDownload(new URI(url).toURL(), getPrefix(index)); index = index + 1; - } catch (MalformedURLException e) { + } catch (MalformedURLException | URISyntaxException e) { LOGGER.error("The url \"" + url + "\" is malformed"); } } - + @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title and username as GID - Document doc = getFirstPage(); + Document doc = getCachedFirstPage(); Element user = doc.select("a.author").first(); String username = user.text(); String path = url.getPath(); @@ -209,4 +234,4 @@ public String getAlbumTitle(URL url) throws MalformedURLException { } return super.getAlbumTitle(url); } -} +} \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/XlecxRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/XlecxRipper.java index 0aaacfc4b..2e95c04a9 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/XlecxRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/XlecxRipper.java @@ -1,36 +1,36 @@ -package com.rarchives.ripme.ripper.rippers; - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -public class XlecxRipper extends XcartxRipper { - - private Pattern p = Pattern.compile("^https?://xlecx.org/([a-zA-Z0-9_\\-]+).html"); - - public XlecxRipper(URL url) throws IOException { - super(url); - } - - @Override - public String getHost() { - return "xlecx"; - } - - @Override - public String getDomain() { - return "xlecx.org"; - } - - @Override - public String getGID(URL url) throws MalformedURLException { - Matcher m = p.matcher(url.toExternalForm()); - 
if (m.matches()) { - return m.group(1); - } - throw new MalformedURLException("Expected URL format: http://xlecx.org/comic, got: " + url); - - } -} +package com.rarchives.ripme.ripper.rippers; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class XlecxRipper extends XcartxRipper { + + private Pattern p = Pattern.compile("^https?://xlecx.org/([a-zA-Z0-9_\\-]+).html"); + + public XlecxRipper(URL url) throws IOException { + super(url); + } + + @Override + public String getHost() { + return "xlecx"; + } + + @Override + public String getDomain() { + return "xlecx.org"; + } + + @Override + public String getGID(URL url) throws MalformedURLException { + Matcher m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return m.group(1); + } + throw new MalformedURLException("Expected URL format: http://xlecx.org/comic, got: " + url); + + } +} diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/XvideosRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/XvideosRipper.java index 0fdef8682..ea19d484b 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/XvideosRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/XvideosRipper.java @@ -2,6 +2,7 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -24,11 +25,6 @@ public XvideosRipper(URL url) throws IOException { super(url); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(this.url).get(); - } - @Override public String getHost() { return HOST; @@ -109,7 +105,7 @@ public void downloadURL(URL url, int index) { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { Pattern p = 
Pattern.compile("^https?://[wm.]*xvideos\\.com/profiles/([a-zA-Z0-9_-]+)/photos/(\\d+)/([a-zA-Z0-9_-]+)$"); Matcher m = p.matcher(url.toExternalForm()); if (m.matches()) { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/YoupornRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/YoupornRipper.java index e99ffef5e..b3e5f4f0f 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/YoupornRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/YoupornRipper.java @@ -40,11 +40,6 @@ public boolean canRip(URL url) { return m.matches(); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(this.url).get(); - } - @Override public List getURLsFromPage(Document doc) { List results = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/YuvutuRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/YuvutuRipper.java index 97365aa80..1fe6513f6 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/YuvutuRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/YuvutuRipper.java @@ -50,10 +50,6 @@ public String getGID(URL url) throws MalformedURLException { "yuvutu.com/modules.php?name=YuGallery&action=view&set_id=albumid - got " + url + "instead"); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - } @Override public List getURLsFromPage(Document doc) { List imageURLs = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/ZizkiRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/ZizkiRipper.java index 35733325e..043d1835d 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/ZizkiRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/ZizkiRipper.java @@ -2,6 +2,7 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.HashMap; @@ -19,7 
+20,6 @@ public class ZizkiRipper extends AbstractHTMLRipper { - private Document albumDoc = null; private Map cookies = new HashMap<>(); public ZizkiRipper(URL url) throws IOException { @@ -46,13 +46,13 @@ public String getGID(URL url) throws MalformedURLException { } @Override - public String getAlbumTitle(URL url) throws MalformedURLException { + public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException { try { // Attempt to use album title as GID - Element titleElement = getFirstPage().select("h1.title").first(); + Element titleElement = getCachedFirstPage().select("h1.title").first(); String title = titleElement.text(); - Element authorSpan = getFirstPage().select("span[class=creator]").first(); + Element authorSpan = getCachedFirstPage().select("span[class=creator]").first(); String author = authorSpan.select("a").first().text(); LOGGER.debug("Author: " + author); return getHost() + "_" + author + "_" + title.trim(); @@ -65,12 +65,9 @@ public String getAlbumTitle(URL url) throws MalformedURLException { @Override public Document getFirstPage() throws IOException { - if (albumDoc == null) { - Response resp = Http.url(url).response(); - cookies.putAll(resp.cookies()); - albumDoc = resp.parse(); - } - return albumDoc; + Response resp = Http.url(url).response(); + cookies.putAll(resp.cookies()); + return resp.parse(); } @Override @@ -87,14 +84,12 @@ public List getURLsFromPage(Document page) { if (thumb.hasAttr("typeof")) { img_type = thumb.attr("typeof"); if (img_type.equals("foaf:Image")) { - LOGGER.debug("Found image with " + img_type); if (thumb.parent() != null && - thumb.parent().parent() != null && - thumb.parent().parent().attr("class") != null && - thumb.parent().parent().attr("class").equals("aimage-center") + thumb.parent().attr("class") != null && + thumb.parent().attr("class").contains("colorbox") ) { - src = thumb.attr("src"); + src = thumb.parent().attr("href"); LOGGER.debug("Found url with " + src); if 
(!src.contains("zizki.com")) { } else { diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/tamindirmp3.java b/src/main/java/com/rarchives/ripme/ripper/rippers/tamindirmp3.java index 2c82d849c..d10e12053 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/tamindirmp3.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/tamindirmp3.java @@ -40,12 +40,6 @@ public String getGID(URL url) throws MalformedURLException { "tamindir.com/files/albumid - got " + url + "instead"); } - @Override - public Document getFirstPage() throws IOException { - return Http.url(url).get(); - - } - @Override public List getURLsFromPage(Document doc) { List music = new ArrayList<>(); diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/CliphunterRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/CliphunterRipper.java index 16526945a..a9c39a9c9 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/CliphunterRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/CliphunterRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -54,7 +56,7 @@ public String getGID(URL url) throws MalformedURLException { } @Override - public void rip() throws IOException { + public void rip() throws IOException, URISyntaxException { LOGGER.info("Retrieving " + this.url); String html = Http.url(url).get().html(); String jsonString = html.substring(html.indexOf("var flashVars = {d: '") + 21); @@ -71,7 +73,7 @@ public void rip() throws IOException { vidURL += c; } } - addURLToDownload(new URL(vidURL), HOST + "_" + getGID(this.url)); + addURLToDownload(new URI(vidURL).toURL(), HOST + "_" + getGID(this.url)); waitForThreads(); } } \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/PornhubRipper.java 
b/src/main/java/com/rarchives/ripme/ripper/rippers/video/PornhubRipper.java index 678435afc..5a3dcebbe 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/PornhubRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/PornhubRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.HashMap; @@ -56,7 +58,7 @@ public String getGID(URL url) throws MalformedURLException { } @Override - public void rip() throws IOException { + public void rip() throws IOException, URISyntaxException { String vidUrl = ""; LOGGER.info(" Retrieving " + this.url.toExternalForm()); Document doc = Http.url(this.url).get(); @@ -146,7 +148,7 @@ public void rip() throws IOException { if (vidUrl.equals("")) { throw new IOException("Unable to find encrypted video URL at " + this.url); } - addURLToDownload(new URL(vidUrl), HOST + "_" + bestQuality + "p_" + getGID(this.url)); + addURLToDownload(new URI(vidUrl).toURL(), HOST + "_" + bestQuality + "p_" + getGID(this.url)); waitForThreads(); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/StickyXXXRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/StickyXXXRipper.java index 7c951b23a..8708e5523 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/StickyXXXRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/StickyXXXRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -32,11 +34,6 @@ public boolean canRip(URL url) { return m.matches(); } - @Override - public URL sanitizeURL(URL url) throws MalformedURLException { - return url; - } - @Override public String getGID(URL url) throws MalformedURLException { 
Pattern p = Pattern.compile("^https?://.*stickyxxx\\.com(/)(.*)/$"); @@ -52,7 +49,7 @@ public String getGID(URL url) throws MalformedURLException { } @Override - public void rip() throws IOException { + public void rip() throws IOException, URISyntaxException { LOGGER.info("Retrieving " + this.url); Document doc = Http.url(url).get(); Elements videos = doc.select(".wp-video > video > source"); @@ -60,7 +57,7 @@ public void rip() throws IOException { throw new IOException("Could not find Embed code at " + url); } String vidUrl = videos.attr("src"); - addURLToDownload(new URL(vidUrl), HOST + "_" + getGID(this.url)); + addURLToDownload(new URI(vidUrl).toURL(), HOST + "_" + getGID(this.url)); waitForThreads(); } } \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/TwitchVideoRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/TwitchVideoRipper.java index d977708a7..bd4ee5563 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/TwitchVideoRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/TwitchVideoRipper.java @@ -1,80 +1,77 @@ -package com.rarchives.ripme.ripper.rippers.video; - -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import org.jsoup.nodes.Document; -import org.jsoup.nodes.Element; -import org.jsoup.select.Elements; - - -import com.rarchives.ripme.ripper.VideoRipper; -import com.rarchives.ripme.utils.Http; - -public class TwitchVideoRipper extends VideoRipper { - - private static final String HOST = "twitch"; - - public TwitchVideoRipper(URL url) throws IOException { - super(url); - } - - @Override - public String getHost() { - return HOST; - } - - @Override - public boolean canRip(URL url) { - Pattern p = Pattern.compile("^https://clips\\.twitch\\.tv/.*$"); - Matcher m = p.matcher(url.toExternalForm()); - return m.matches(); - } - - @Override - public 
URL sanitizeURL(URL url) throws MalformedURLException { - return url; - } - - @Override - public String getGID(URL url) throws MalformedURLException { - Pattern p = Pattern.compile("^https://clips\\.twitch\\.tv/(.*)$"); - Matcher m = p.matcher(url.toExternalForm()); - if (m.matches()) { - return m.group(m.groupCount()); - } - - throw new MalformedURLException( - "Expected Twitch.tv format:" - + "https://clips.twitch.tv/####" - + " Got: " + url); - } - - @Override - public void rip() throws IOException { - LOGGER.info("Retrieving " + this.url); - Document doc = Http.url(url).get(); - - //Get user friendly filename from page title - String title = doc.title(); - - Elements script = doc.select("script"); - if (script.isEmpty()) { - throw new IOException("Could not find script code at " + url); - } - //Regex assumes highest quality source is listed first - Pattern p = Pattern.compile("\"source\":\"(.*?)\""); - - for (Element element : script) { - Matcher m = p.matcher(element.data()); - if (m.find()){ - String vidUrl = m.group(1); - addURLToDownload(new URL(vidUrl), HOST + "_" + title); - } - } - waitForThreads(); - } +package com.rarchives.ripme.ripper.rippers.video; + +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.jsoup.nodes.Document; +import org.jsoup.nodes.Element; +import org.jsoup.select.Elements; + + +import com.rarchives.ripme.ripper.VideoRipper; +import com.rarchives.ripme.utils.Http; + +public class TwitchVideoRipper extends VideoRipper { + + private static final String HOST = "twitch"; + + public TwitchVideoRipper(URL url) throws IOException { + super(url); + } + + @Override + public String getHost() { + return HOST; + } + + @Override + public boolean canRip(URL url) { + Pattern p = Pattern.compile("^https://clips\\.twitch\\.tv/.*$"); + Matcher m = 
p.matcher(url.toExternalForm()); + return m.matches(); + } + + @Override + public String getGID(URL url) throws MalformedURLException { + Pattern p = Pattern.compile("^https://clips\\.twitch\\.tv/(.*)$"); + Matcher m = p.matcher(url.toExternalForm()); + if (m.matches()) { + return m.group(m.groupCount()); + } + + throw new MalformedURLException( + "Expected Twitch.tv format:" + + "https://clips.twitch.tv/####" + + " Got: " + url); + } + + @Override + public void rip() throws IOException, URISyntaxException { + LOGGER.info("Retrieving " + this.url); + Document doc = Http.url(url).get(); + + //Get user friendly filename from page title + String title = doc.title(); + + Elements script = doc.select("script"); + if (script.isEmpty()) { + throw new IOException("Could not find script code at " + url); + } + //Regex assumes highest quality source is listed first + Pattern p = Pattern.compile("\"source\":\"(.*?)\""); + + for (Element element : script) { + Matcher m = p.matcher(element.data()); + if (m.find()){ + String vidUrl = m.group(1); + addURLToDownload(new URI(vidUrl).toURL(), HOST + "_" + title); + } + } + waitForThreads(); + } } \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/ViddmeRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/ViddmeRipper.java index 078b32a5e..279e1d3a8 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/ViddmeRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/ViddmeRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -52,7 +54,7 @@ public String getGID(URL url) throws MalformedURLException { } @Override - public void rip() throws IOException { + public void rip() throws IOException, URISyntaxException { LOGGER.info(" Retrieving " + 
this.url.toExternalForm()); Document doc = Http.url(this.url).get(); Elements videos = doc.select("meta[name=twitter:player:stream]"); @@ -61,7 +63,7 @@ public void rip() throws IOException { } String vidUrl = videos.first().attr("content"); vidUrl = vidUrl.replaceAll("&", "&"); - addURLToDownload(new URL(vidUrl), HOST + "_" + getGID(this.url)); + addURLToDownload(new URI(vidUrl).toURL(), HOST + "_" + getGID(this.url)); waitForThreads(); } } \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/VidearnRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/VidearnRipper.java index 052b2cbe1..3fbb6375c 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/VidearnRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/VidearnRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.List; import java.util.regex.Matcher; @@ -33,11 +35,6 @@ public boolean canRip(URL url) { return m.matches(); } - @Override - public URL sanitizeURL(URL url) throws MalformedURLException { - return url; - } - @Override public String getGID(URL url) throws MalformedURLException { Pattern p = Pattern.compile("^https?://[wm.]*videarn\\.com/[a-zA-Z0-9\\-]+/([0-9]+).*$"); @@ -53,15 +50,15 @@ public String getGID(URL url) throws MalformedURLException { } @Override - public void rip() throws IOException { + public void rip() throws IOException, URISyntaxException { LOGGER.info("Retrieving " + this.url); Document doc = Http.url(url).get(); List mp4s = Utils.between(doc.html(), "file:\"", "\""); if (mp4s.isEmpty()) { throw new IOException("Could not find files at " + url); } - String vidUrl = mp4s.get(0); - addURLToDownload(new URL(vidUrl), HOST + "_" + getGID(this.url)); + String vidUrl = mp4s.getFirst(); + addURLToDownload(new URI(vidUrl).toURL(), HOST + "_" + getGID(this.url)); 
waitForThreads(); } } \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/VkRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/VkRipper.java index 705287271..84206abbc 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/VkRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/VkRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -51,10 +53,10 @@ public String getGID(URL url) throws MalformedURLException { } @Override - public void rip() throws IOException { + public void rip() throws IOException, URISyntaxException { LOGGER.info(" Retrieving " + this.url); String videoURL = getVideoURLAtPage(this.url.toExternalForm()); - addURLToDownload(new URL(videoURL), HOST + "_" + getGID(this.url)); + addURLToDownload(new URI(videoURL).toURL(), HOST + "_" + getGID(this.url)); waitForThreads(); } diff --git a/src/main/java/com/rarchives/ripme/ripper/rippers/video/YuvutuRipper.java b/src/main/java/com/rarchives/ripme/ripper/rippers/video/YuvutuRipper.java index 2891efb5b..3fb55b6f0 100644 --- a/src/main/java/com/rarchives/ripme/ripper/rippers/video/YuvutuRipper.java +++ b/src/main/java/com/rarchives/ripme/ripper/rippers/video/YuvutuRipper.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -34,11 +36,6 @@ public boolean canRip(URL url) { return m.matches(); } - @Override - public URL sanitizeURL(URL url) throws MalformedURLException { - return url; - } - @Override public String getGID(URL url) throws MalformedURLException { Pattern p = Pattern.compile("^http://www\\.yuvutu\\.com/video/[0-9]+/(.*)$"); @@ -54,7 +51,7 @@ 
public String getGID(URL url) throws MalformedURLException { } @Override - public void rip() throws IOException { + public void rip() throws IOException, URISyntaxException { LOGGER.info("Retrieving " + this.url); Document doc = Http.url(url).get(); Element iframe = doc.select("iframe").first(); @@ -74,7 +71,7 @@ public void rip() throws IOException { Matcher m = p.matcher(element.data()); if (m.find()){ String vidUrl = m.group(1); - addURLToDownload(new URL(vidUrl), HOST + "_" + getGID(this.url)); + addURLToDownload(new URI(vidUrl).toURL(), HOST + "_" + getGID(this.url)); } } waitForThreads(); diff --git a/src/main/java/com/rarchives/ripme/ui/ClipboardUtils.java b/src/main/java/com/rarchives/ripme/ui/ClipboardUtils.java index 24c46cd49..55b68d652 100644 --- a/src/main/java/com/rarchives/ripme/ui/ClipboardUtils.java +++ b/src/main/java/com/rarchives/ripme/ui/ClipboardUtils.java @@ -1,8 +1,8 @@ package com.rarchives.ripme.ui; -import java.awt.HeadlessException; import java.awt.Toolkit; import java.awt.datatransfer.DataFlavor; +import java.awt.datatransfer.Transferable; import java.awt.datatransfer.UnsupportedFlavorException; import java.io.IOException; import java.util.HashSet; @@ -30,16 +30,13 @@ public static boolean getClipboardAutoRip() { } public static String getClipboardString() { - try { - return (String) Toolkit - .getDefaultToolkit() - .getSystemClipboard() - .getData(DataFlavor.stringFlavor); - } catch (IllegalStateException e) { - e.printStackTrace(); - logger.error("Caught and recovered from IllegalStateException: " + e.getMessage()); - } catch (HeadlessException | IOException | UnsupportedFlavorException e) { - e.printStackTrace(); + Transferable contents = Toolkit.getDefaultToolkit().getSystemClipboard().getContents(null); + if (contents.isDataFlavorSupported(DataFlavor.stringFlavor)) { + try { + return (String) contents.getTransferData(DataFlavor.stringFlavor); + } catch (UnsupportedFlavorException | IOException e) { + logger.debug("ignore this one" 
+ e.getMessage()); + } } return null; } @@ -47,7 +44,7 @@ public static String getClipboardString() { class AutoripThread extends Thread { volatile boolean isRunning = false; - private Set rippedURLs = new HashSet<>(); + private final Set rippedURLs = new HashSet<>(); public void run() { isRunning = true; diff --git a/src/main/java/com/rarchives/ripme/ui/ContextMenuMouseListener.java b/src/main/java/com/rarchives/ripme/ui/ContextMenuMouseListener.java index 943484112..dac3d0a43 100644 --- a/src/main/java/com/rarchives/ripme/ui/ContextMenuMouseListener.java +++ b/src/main/java/com/rarchives/ripme/ui/ContextMenuMouseListener.java @@ -1,15 +1,16 @@ package com.rarchives.ripme.ui; +import com.rarchives.ripme.uiUtils.ContextActionProtections; + import java.awt.Toolkit; +import java.awt.datatransfer.Clipboard; import java.awt.datatransfer.DataFlavor; -import java.awt.event.ActionEvent; -import java.awt.event.InputEvent; -import java.awt.event.MouseAdapter; -import java.awt.event.MouseEvent; - -import javax.swing.AbstractAction; -import javax.swing.Action; -import javax.swing.JPopupMenu; +import java.awt.datatransfer.Transferable; +import java.awt.datatransfer.UnsupportedFlavorException; +import java.awt.event.*; +import java.io.IOException; + +import javax.swing.*; import javax.swing.text.JTextComponent; /** @@ -20,27 +21,72 @@ public class ContextMenuMouseListener extends MouseAdapter { private JPopupMenu popup = new JPopupMenu(); + public String getDebugSavedString() { + return debugSavedString; + } + + private String debugSavedString; + + public Action getCutAction() { + return cutAction; + } + private Action cutAction; private Action copyAction; private Action pasteAction; + + public Action getCopyAction() { + return copyAction; + } + + public Action getPasteAction() { + return pasteAction; + } + + public Action getUndoAction() { + return undoAction; + } + + public Action getSelectAllAction() { + return selectAllAction; + } + private Action undoAction; private Action 
selectAllAction; + public JTextComponent getTextComponent() { + return textComponent; + } + private JTextComponent textComponent; + + public String getSavedString() { + return savedString; + } + private String savedString = ""; private Actions lastActionSelected; private enum Actions { UNDO, CUT, COPY, PASTE, SELECT_ALL } + @SuppressWarnings("serial") - public ContextMenuMouseListener() { + public ContextMenuMouseListener(JTextField ripTextfield) { + this.textComponent = ripTextfield; + + //Add protection for cntl+v + + generate_popup(); + } + + private void generate_popup() { undoAction = new AbstractAction("Undo") { @Override public void actionPerformed(ActionEvent ae) { textComponent.setText(""); textComponent.replaceSelection(savedString); - + debugSavedString = textComponent.getText(); lastActionSelected = Actions.UNDO; } }; @@ -54,6 +100,7 @@ public void actionPerformed(ActionEvent ae) { public void actionPerformed(ActionEvent ae) { lastActionSelected = Actions.CUT; savedString = textComponent.getText(); + debugSavedString = savedString; textComponent.cut(); } }; @@ -65,6 +112,7 @@ public void actionPerformed(ActionEvent ae) { @Override public void actionPerformed(ActionEvent ae) { lastActionSelected = Actions.COPY; + debugSavedString = textComponent.getText(); textComponent.copy(); } }; @@ -77,7 +125,8 @@ public void actionPerformed(ActionEvent ae) { public void actionPerformed(ActionEvent ae) { lastActionSelected = Actions.PASTE; savedString = textComponent.getText(); - textComponent.paste(); + debugSavedString = savedString; + ContextActionProtections.pasteFromClipboard(textComponent); } }; @@ -89,6 +138,7 @@ public void actionPerformed(ActionEvent ae) { @Override public void actionPerformed(ActionEvent ae) { lastActionSelected = Actions.SELECT_ALL; + debugSavedString = textComponent.getText(); textComponent.selectAll(); } }; @@ -96,9 +146,30 @@ public void actionPerformed(ActionEvent ae) { popup.add(selectAllAction); } + + @Override + public void 
mousePressed(MouseEvent e) { + showPopup(e); + } + + @Override + public void mouseReleased(MouseEvent e) { + showPopup(e); + } + + private void showPopup(MouseEvent e) { + if (e.isPopupTrigger()) { + if(this.popup == null) { + popup = new JPopupMenu(); + generate_popup(); + } + popup.show(e.getComponent(), e.getX(), e.getY()); + } + } + @Override public void mouseClicked(MouseEvent e) { - if (e.getModifiers() == InputEvent.BUTTON3_MASK) { + if (e.getModifiersEx() == InputEvent.BUTTON3_DOWN_MASK) { if (!(e.getSource() instanceof JTextComponent)) { return; } diff --git a/src/main/java/com/rarchives/ripme/ui/History.java b/src/main/java/com/rarchives/ripme/ui/History.java index f3f9451f8..190eeeb8e 100644 --- a/src/main/java/com/rarchives/ripme/ui/History.java +++ b/src/main/java/com/rarchives/ripme/ui/History.java @@ -100,7 +100,7 @@ private void fromJSON(JSONArray jsonArray) { public void fromFile(String filename) throws IOException { try (InputStream is = new FileInputStream(filename)) { - String jsonString = IOUtils.toString(is); + String jsonString = IOUtils.toString(is, "UTF-8"); JSONArray jsonArray = new JSONArray(jsonString); fromJSON(jsonArray); } catch (JSONException e) { @@ -134,7 +134,7 @@ public boolean isEmpty() { public void toFile(String filename) throws IOException { try (OutputStream os = new FileOutputStream(filename)) { - IOUtils.write(toJSON().toString(2), os); + IOUtils.write(toJSON().toString(2), os, "UTF-8"); } } } diff --git a/src/main/java/com/rarchives/ripme/ui/HistoryMenuMouseListener.java b/src/main/java/com/rarchives/ripme/ui/HistoryMenuMouseListener.java index 9044531f7..8a69477cc 100644 --- a/src/main/java/com/rarchives/ripme/ui/HistoryMenuMouseListener.java +++ b/src/main/java/com/rarchives/ripme/ui/HistoryMenuMouseListener.java @@ -62,8 +62,17 @@ public void actionPerformed(ActionEvent ae) { } @Override - public void mouseClicked(MouseEvent e) { - if (e.getModifiers() == InputEvent.BUTTON3_MASK) { + public void mousePressed(MouseEvent 
e) { + checkPopupTrigger(e); + } + + @Override + public void mouseReleased(MouseEvent e) { + checkPopupTrigger(e); + } + + private void checkPopupTrigger(MouseEvent e) { + if (e.getModifiersEx() == InputEvent.BUTTON3_DOWN_MASK) { if (!(e.getSource() instanceof JTable)) { return; } diff --git a/src/main/java/com/rarchives/ripme/ui/MainWindow.java b/src/main/java/com/rarchives/ripme/ui/MainWindow.java index 48e8d8365..8b547d7dc 100644 --- a/src/main/java/com/rarchives/ripme/ui/MainWindow.java +++ b/src/main/java/com/rarchives/ripme/ui/MainWindow.java @@ -1,75 +1,56 @@ package com.rarchives.ripme.ui; +import com.rarchives.ripme.ripper.AbstractRipper; +import com.rarchives.ripme.uiUtils.ContextActionProtections; +import com.rarchives.ripme.utils.RipUtils; +import com.rarchives.ripme.utils.Utils; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.config.Configuration; +import org.apache.logging.log4j.core.config.LoggerConfig; + +import javax.imageio.ImageIO; +import javax.swing.*; +import javax.swing.border.EmptyBorder; +import javax.swing.event.DocumentEvent; +import javax.swing.event.DocumentListener; +import javax.swing.event.ListDataEvent; +import javax.swing.event.ListDataListener; +import javax.swing.table.AbstractTableModel; +import javax.swing.text.*; import java.awt.*; import java.awt.TrayIcon.MessageType; -import java.awt.event.ActionEvent; -import java.awt.event.ActionListener; -import java.awt.event.MouseAdapter; -import java.awt.event.MouseEvent; -import java.awt.event.WindowAdapter; -import java.awt.event.WindowEvent; -import java.io.*; +import java.awt.event.*; +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.IOException; +import java.io.InputStreamReader; import java.net.MalformedURLException; import java.net.URI; +import 
java.net.URISyntaxException; import java.net.URL; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.*; +import java.util.Collections; +import java.util.Date; import java.util.List; - -import javax.imageio.ImageIO; -import javax.swing.DefaultListModel; -import javax.swing.ImageIcon; -import javax.swing.JButton; -import javax.swing.JCheckBox; -import javax.swing.JComboBox; -import javax.swing.JFileChooser; -import javax.swing.JFrame; -import javax.swing.JLabel; -import javax.swing.JList; -import javax.swing.JOptionPane; -import javax.swing.JPanel; -import javax.swing.JProgressBar; -import javax.swing.JScrollPane; -import javax.swing.JTable; -import javax.swing.JTextField; -import javax.swing.JTextPane; -import javax.swing.ListSelectionModel; -import javax.swing.SwingUtilities; -import javax.swing.UIManager; -import javax.swing.border.EmptyBorder; -import javax.swing.event.DocumentEvent; -import javax.swing.event.DocumentListener; -import javax.swing.event.ListDataEvent; -import javax.swing.event.ListDataListener; -import javax.swing.table.AbstractTableModel; -import javax.swing.text.BadLocationException; -import javax.swing.text.SimpleAttributeSet; -import javax.swing.text.StyleConstants; -import javax.swing.text.StyledDocument; - -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.FileAppender; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; - -import com.rarchives.ripme.ripper.AbstractRipper; -import com.rarchives.ripme.utils.RipUtils; -import com.rarchives.ripme.utils.Utils; - -import javax.swing.UnsupportedLookAndFeelException; +import java.util.stream.Stream; /** * Everything UI-related starts and ends here. 
*/ public final class MainWindow implements Runnable, RipStatusHandler { - private static final Logger LOGGER = Logger.getLogger(MainWindow.class); + private static final Logger LOGGER = LogManager.getLogger(MainWindow.class); private boolean isRipping = false; // Flag to indicate if we're ripping something private static JFrame mainFrame; + private static JTextField ripTextfield; private static JButton ripButton, stopButton; @@ -98,7 +79,6 @@ public final class MainWindow implements Runnable, RipStatusHandler { public static JButton optionQueue; private static JPanel queuePanel; private static DefaultListModel queueListModel; - private static QueueMenuMouseListener queueMenuMouseListener; // Configuration private static JButton optionConfiguration; @@ -111,6 +91,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { private static JLabel configSaveDirLabel; private static JButton configSaveDirButton; private static JTextField configRetriesText; + private JTextField configRetrySleepText; private static JCheckBox configAutoupdateCheckbox; private static JComboBox configLogLevelCombobox; private static JCheckBox configURLHistoryCheckbox; @@ -128,6 +109,7 @@ public final class MainWindow implements Runnable, RipStatusHandler { private static JLabel configThreadsLabel; private static JLabel configTimeoutLabel; private static JLabel configRetriesLabel; + private static JLabel configRetrySleepLabel; // This doesn't really belong here but I have no idea where else to put it private static JButton configUrlFileChooserButton; @@ -144,7 +126,7 @@ private void updateQueue(DefaultListModel model) { model = queueListModel; if (model.size() > 0) { - Utils.setConfigList("queue", (Enumeration) model.elements()); + Utils.setConfigList("queue", model.elements()); Utils.saveConfig(); } @@ -175,7 +157,7 @@ public static void addUrlToQueue(String url) { queueListModel.addElement(url); } - public MainWindow() { + public MainWindow() throws IOException { mainFrame = 
new JFrame("RipMe v" + UpdateUtils.getThisJarVersion()); mainFrame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); mainFrame.setLayout(new GridBagLayout()); @@ -271,6 +253,7 @@ private void createUI(Container pane) { try { setupTrayIcon(); } catch (Exception e) { + LOGGER.warn(e.getMessage()); } EmptyBorder emptyBorder = new EmptyBorder(5, 5, 5, 5); @@ -292,7 +275,47 @@ private void createUI(Container pane) { } ripTextfield = new JTextField("", 20); - ripTextfield.addMouseListener(new ContextMenuMouseListener()); + ripTextfield.addMouseListener(new ContextMenuMouseListener(ripTextfield)); + + //Add keyboard protection of cntl + v for pasting. + ripTextfield.addKeyListener(new KeyAdapter() { + @Override + public void keyTyped(KeyEvent e) { + if (e.getKeyChar() == 22) { // ASCII code for Ctrl+V + ContextActionProtections.pasteFromClipboard(ripTextfield); + } + } + }); + + /* + Alternatively, just set this, and use + ((AbstractDocument) ripTextfield.getDocument()).setDocumentFilter(new LengthLimitDocumentFilter(256)); + private static class LengthLimitDocumentFilter extends DocumentFilter { + private final int maxLength; + + public LengthLimitDocumentFilter(int maxLength) { + this.maxLength = maxLength; + } + + @Override + public void insertString(FilterBypass fb, int offset, String string, AttributeSet attr) throws BadLocationException { + // if ((fb.getDocument().getLength() + string.length()) <= maxLength) { + super.insertString(fb, offset, string.substring(0, maxLength), attr); + // } + } + + @Override + public void replace(FilterBypass fb, int offset, int length, String text, AttributeSet attrs) throws BadLocationException { + int currentLength = fb.getDocument().getLength(); + int newLength = currentLength - length + text.length(); + + // if (newLength <= maxLength) { + super.replace(fb, offset, length, text.substring(0, maxLength), attrs); + // } + } + } + */ + ImageIcon ripIcon = new ImageIcon(mainIcon); ripButton = new JButton("Rip", ripIcon); stopButton = 
new JButton("Stop"); @@ -360,6 +383,7 @@ private void createUI(Container pane) { icon = ImageIO.read(getClass().getClassLoader().getResource("gear.png")); optionConfiguration.setIcon(new ImageIcon(icon)); } catch (Exception e) { + LOGGER.warn(e.getMessage()); } gbc.gridx = 0; optionsPanel.add(optionLog, gbc); @@ -480,9 +504,10 @@ public void setValueAt(Object value, int row, int col) { queuePanel.setBorder(emptyBorder); queuePanel.setVisible(false); queuePanel.setPreferredSize(new Dimension(300, 250)); - queueListModel = new DefaultListModel(); + queueListModel = new DefaultListModel<>(); JList queueList = new JList(queueListModel); queueList.setSelectionMode(ListSelectionModel.MULTIPLE_INTERVAL_SELECTION); + QueueMenuMouseListener queueMenuMouseListener; queueList.addMouseListener( queueMenuMouseListener = new QueueMenuMouseListener(d -> updateQueue(queueListModel))); JScrollPane queueListScroll = new JScrollPane(queueList, JScrollPane.VERTICAL_SCROLLBAR_AS_NEEDED, @@ -510,12 +535,15 @@ public void setValueAt(Object value, int row, int col) { configUpdateButton = new JButton(Utils.getLocalizedString("check.for.updates")); configUpdateLabel = new JLabel( Utils.getLocalizedString("current.version") + ": " + UpdateUtils.getThisJarVersion(), JLabel.RIGHT); - configThreadsLabel = new JLabel(Utils.getLocalizedString("max.download.threads") + ":", JLabel.RIGHT); + configThreadsLabel = new JLabel(Utils.getLocalizedString("max.download.threads"), JLabel.RIGHT); configTimeoutLabel = new JLabel(Utils.getLocalizedString("timeout.mill"), JLabel.RIGHT); configRetriesLabel = new JLabel(Utils.getLocalizedString("retry.download.count"), JLabel.RIGHT); - configThreadsText = new JTextField(Integer.toString(Utils.getConfigInteger("threads.size", 3))); - configTimeoutText = new JTextField(Integer.toString(Utils.getConfigInteger("download.timeout", 60000))); - configRetriesText = new JTextField(Integer.toString(Utils.getConfigInteger("download.retries", 3))); + configRetrySleepLabel = 
new JLabel(Utils.getLocalizedString("retry.sleep.mill"), JLabel.RIGHT); + configThreadsText = configField("threads.size", 3); + configTimeoutText = configField("download.timeout", 60000); + configRetriesText = configField("download.retries", 3); + configRetrySleepText = configField("download.retry.sleep", 5000); + configOverwriteCheckbox = addNewCheckbox(Utils.getLocalizedString("overwrite.existing.files"), "file.overwrite", false); configAutoupdateCheckbox = addNewCheckbox(Utils.getLocalizedString("auto.update"), "auto.update", true); @@ -552,24 +580,27 @@ public void setValueAt(Object value, int row, int col) { configSaveDirLabel.setForeground(Color.BLUE); configSaveDirLabel.setCursor(new Cursor(Cursor.HAND_CURSOR)); } catch (Exception e) { + LOGGER.error(e); } configSaveDirLabel.setToolTipText(configSaveDirLabel.getText()); configSaveDirLabel.setHorizontalAlignment(JLabel.RIGHT); configSaveDirButton = new JButton(Utils.getLocalizedString("select.save.dir") + "..."); - addItemToConfigGridBagConstraints(gbc, 0, configUpdateLabel, configUpdateButton); - addItemToConfigGridBagConstraints(gbc, 1, configAutoupdateCheckbox, configLogLevelCombobox); - addItemToConfigGridBagConstraints(gbc, 2, configThreadsLabel, configThreadsText); - addItemToConfigGridBagConstraints(gbc, 3, configTimeoutLabel, configTimeoutText); - addItemToConfigGridBagConstraints(gbc, 4, configRetriesLabel, configRetriesText); - addItemToConfigGridBagConstraints(gbc, 5, configOverwriteCheckbox, configSaveOrderCheckbox); - addItemToConfigGridBagConstraints(gbc, 6, configPlaySound, configSaveLogs); - addItemToConfigGridBagConstraints(gbc, 7, configShowPopup, configSaveURLsOnly); - addItemToConfigGridBagConstraints(gbc, 8, configClipboardAutorip, configSaveAlbumTitles); - addItemToConfigGridBagConstraints(gbc, 9, configSaveDescriptions, configPreferMp4); - addItemToConfigGridBagConstraints(gbc, 10, configWindowPosition, configURLHistoryCheckbox); - addItemToConfigGridBagConstraints(gbc, 11, 
configSelectLangComboBox, configUrlFileChooserButton); - addItemToConfigGridBagConstraints(gbc, 12, configSaveDirLabel, configSaveDirButton); + var idx = 0; + addItemToConfigGridBagConstraints(gbc, idx++, configUpdateLabel, configUpdateButton); + addItemToConfigGridBagConstraints(gbc, idx++, configAutoupdateCheckbox, configLogLevelCombobox); + addItemToConfigGridBagConstraints(gbc, idx++, configThreadsLabel, configThreadsText); + addItemToConfigGridBagConstraints(gbc, idx++, configTimeoutLabel, configTimeoutText); + addItemToConfigGridBagConstraints(gbc, idx++, configRetriesLabel, configRetriesText); + addItemToConfigGridBagConstraints(gbc, idx++, configRetrySleepLabel, configRetrySleepText); + addItemToConfigGridBagConstraints(gbc, idx++, configOverwriteCheckbox, configSaveOrderCheckbox); + addItemToConfigGridBagConstraints(gbc, idx++, configPlaySound, configSaveLogs); + addItemToConfigGridBagConstraints(gbc, idx++, configShowPopup, configSaveURLsOnly); + addItemToConfigGridBagConstraints(gbc, idx++, configClipboardAutorip, configSaveAlbumTitles); + addItemToConfigGridBagConstraints(gbc, idx++, configSaveDescriptions, configPreferMp4); + addItemToConfigGridBagConstraints(gbc, idx++, configWindowPosition, configURLHistoryCheckbox); + addItemToConfigGridBagConstraints(gbc, idx++, configSelectLangComboBox, configUrlFileChooserButton); + addItemToConfigGridBagConstraints(gbc, idx++, configSaveDirLabel, configSaveDirButton); emptyPanel = new JPanel(); emptyPanel.setPreferredSize(new Dimension(0, 0)); @@ -600,6 +631,40 @@ public void setValueAt(Object value, int row, int col) { gbc.fill = GridBagConstraints.HORIZONTAL; } + private JTextField configField(String key, int defaultValue) { + final var field = new JTextField(Integer.toString(Utils.getConfigInteger(key, defaultValue))); + field.getDocument().addDocumentListener(new DocumentListener() { + + @Override + public void insertUpdate(DocumentEvent e) { + checkAndUpdate(); + } + + @Override + public void 
removeUpdate(DocumentEvent e) { + checkAndUpdate(); + } + + @Override + public void changedUpdate(DocumentEvent e) { + checkAndUpdate(); + } + + private void checkAndUpdate() { + final var txt = field.getText(); + try { + final var newValue = Integer.parseInt(txt); + if (newValue>0) { + Utils.setConfigInteger(key, newValue); + } + } catch (final Exception e) { + LOGGER.warn(e.getMessage()); + } + } + }); + return field; + } + private void addItemToConfigGridBagConstraints(GridBagConstraints gbc, int gbcYValue, JLabel thing1ToAdd, JButton thing2ToAdd) { gbc.gridy = gbcYValue; @@ -701,13 +766,13 @@ public void changedUpdate(DocumentEvent e) { private void update() { try { String urlText = ripTextfield.getText().trim(); - if (urlText.equals("")) { + if (urlText.isEmpty()) { return; } if (!urlText.startsWith("http")) { urlText = "http://" + urlText; } - URL url = new URL(urlText); + URL url = new URI(urlText).toURL(); AbstractRipper ripper = AbstractRipper.getRipper(url); statusWithColor(ripper.getHost() + " album detected", Color.GREEN); } catch (Exception e) { @@ -724,8 +789,8 @@ private void update() { statusProgress.setVisible(false); pack(); statusProgress.setValue(0); - status(Utils.getLocalizedString("ripping.interrupted")); - appendLog("Ripper interrupted", Color.RED); + status(Utils.getLocalizedString("download.interrupted")); + appendLog("Download interrupted", Color.RED); } }); optionLog.addActionListener(event -> { @@ -801,6 +866,7 @@ private void update() { try { historyTableModel.fireTableDataChanged(); } catch (Exception e) { + LOGGER.warn(e.getMessage()); } saveHistory(); }); @@ -817,14 +883,12 @@ private void update() { checkChoise.add(noButton); JFrame.setDefaultLookAndFeelDecorated(true); JFrame frame = new JFrame("Are you sure?"); - frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); + frame.setDefaultCloseOperation(JFrame.DISPOSE_ON_CLOSE); frame.add(checkChoise); frame.setSize(405, 70); frame.setVisible(true); frame.setLocationRelativeTo(null); 
- noButton.addActionListener(e -> { - frame.setVisible(false); - }); + noButton.addActionListener(e -> frame.setVisible(false)); yesButton.addActionListener(ed -> { frame.setVisible(false); Utils.clearURLHistory(); @@ -832,6 +896,7 @@ private void update() { try { historyTableModel.fireTableDataChanged(); } catch (Exception e) { + LOGGER.warn(e.getMessage()); } saveHistory(); }); @@ -841,6 +906,7 @@ private void update() { try { historyTableModel.fireTableDataChanged(); } catch (Exception e) { + LOGGER.warn(e.getMessage()); } saveHistory(); } @@ -871,54 +937,56 @@ private void update() { t.start(); }); configLogLevelCombobox.addActionListener(arg0 -> { - String level = ((JComboBox) arg0.getSource()).getSelectedItem().toString(); + String level = ((JComboBox) arg0.getSource()).getSelectedItem().toString(); setLogLevel(level); }); configSelectLangComboBox.addActionListener(arg0 -> { - String level = ((JComboBox) arg0.getSource()).getSelectedItem().toString(); + String level = ((JComboBox) arg0.getSource()).getSelectedItem().toString(); Utils.setLanguage(level); changeLocale(); }); configSaveDirLabel.addMouseListener(new MouseAdapter() { @Override public void mouseClicked(MouseEvent e) { - File file = new File(Utils.getWorkingDirectory().toString()); - Desktop desktop = Desktop.getDesktop(); + Path file; try { - desktop.open(file); - } catch (Exception e1) { + file = Utils.getWorkingDirectory(); + Desktop desktop = Desktop.getDesktop(); + desktop.open(file.toFile()); + } catch (IOException ex) { + LOGGER.warn(ex.getMessage()); } } }); configSaveDirButton.addActionListener(arg0 -> { UIManager.put("FileChooser.useSystemExtensionHiding", false); - JFileChooser jfc = new JFileChooser(Utils.getWorkingDirectory()); + JFileChooser jfc = new JFileChooser(Utils.getWorkingDirectory().toString()); + LOGGER.debug("select save directory, current is:" + Utils.getWorkingDirectory()); jfc.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY); int returnVal = jfc.showDialog(null, 
"select directory"); if (returnVal != JFileChooser.APPROVE_OPTION) { return; } - File chosenFile = jfc.getSelectedFile(); - String chosenPath = null; + Path chosenPath; try { - chosenPath = chosenFile.getCanonicalPath(); + chosenPath = jfc.getSelectedFile().toPath(); } catch (Exception e) { LOGGER.error("Error while getting selected path: ", e); return; } configSaveDirLabel.setText(Utils.shortenPath(chosenPath)); - Utils.setConfigString("rips.directory", chosenPath); + Utils.setConfigString("rips.directory", chosenPath.toString()); }); configUrlFileChooserButton.addActionListener(arg0 -> { UIManager.put("FileChooser.useSystemExtensionHiding", false); - JFileChooser jfc = new JFileChooser(Utils.getWorkingDirectory()); + JFileChooser jfc = new JFileChooser(Utils.getWorkingDirectory().toAbsolutePath().toString()); jfc.setFileSelectionMode(JFileChooser.FILES_ONLY); int returnVal = jfc.showDialog(null, "Open"); if (returnVal != JFileChooser.APPROVE_OPTION) { return; } File chosenFile = jfc.getSelectedFile(); - String chosenPath = null; + String chosenPath; try { chosenPath = chosenFile.getCanonicalPath(); } catch (Exception e) { @@ -977,6 +1045,7 @@ public void intervalRemoved(ListDataEvent arg0) { } private void setLogLevel(String level) { + // default level is error, set in case something else is given. 
Level newLevel = Level.ERROR; level = level.substring(level.lastIndexOf(' ') + 1); switch (level) { @@ -988,21 +1057,12 @@ private void setLogLevel(String level) { break; case "Warn": newLevel = Level.WARN; - break; - case "Error": - newLevel = Level.ERROR; - break; - } - Logger.getRootLogger().setLevel(newLevel); - LOGGER.setLevel(newLevel); - ConsoleAppender ca = (ConsoleAppender) Logger.getRootLogger().getAppender("stdout"); - if (ca != null) { - ca.setThreshold(newLevel); - } - FileAppender fa = (FileAppender) Logger.getRootLogger().getAppender("FILE"); - if (fa != null) { - fa.setThreshold(newLevel); } + LoggerContext ctx = (LoggerContext) LogManager.getContext(false); + Configuration config = ctx.getConfiguration(); + LoggerConfig loggerConfig = config.getLoggerConfig(LogManager.ROOT_LOGGER_NAME); + loggerConfig.setLevel(newLevel); + ctx.updateLoggers(); // This causes all Loggers to refetch information from their LoggerConfig. } private void setupTrayIcon() { @@ -1050,6 +1110,7 @@ public void windowIconified(WindowEvent e) { } about.append(""); } catch (Exception e) { + LOGGER.warn(e.getMessage()); } about.append("
And download videos from video sites:"); try { @@ -1066,6 +1127,7 @@ public void windowIconified(WindowEvent e) { } about.append(""); } catch (Exception e) { + LOGGER.warn(e.getMessage()); } about.append("Do you want to visit the project homepage on Github?"); @@ -1114,7 +1176,7 @@ public void mouseClicked(MouseEvent e) { } catch (IOException | AWTException e) { // TODO implement proper stack trace handling this is really just intented as a // placeholder until you implement proper error handling - e.printStackTrace(); + LOGGER.warn(e.getMessage()); } } @@ -1145,6 +1207,7 @@ private void appendLog(final String text, final Color color) { sd.insertString(sd.getLength(), text + "\n", sas); } } catch (BadLocationException e) { + LOGGER.warn(e.getMessage()); } logText.setCaretPosition(sd.getLength()); @@ -1161,8 +1224,8 @@ public void displayAndLogError(String line, Color color) { LOGGER.error(line); } - private void loadHistory() { - File historyFile = new File(Utils.getConfigDir() + File.separator + "history.json"); + private void loadHistory() throws IOException { + File historyFile = new File(Utils.getConfigDir() + "/history.json"); HISTORY.clear(); if (historyFile.exists()) { try { @@ -1181,25 +1244,24 @@ private void loadHistory() { if (HISTORY.toList().isEmpty()) { // Loaded from config, still no entries. 
// Guess rip history based on rip folder - String[] dirs = Utils.getWorkingDirectory() - .list((dir, file) -> new File(dir.getAbsolutePath() + File.separator + file).isDirectory()); - if (dirs != null) { - for (String dir : dirs) { - String url = RipUtils.urlFromDirectoryName(dir); - if (url != null) { - // We found one, add it to history - HistoryEntry entry = new HistoryEntry(); - entry.url = url; - HISTORY.add(entry); - } + Stream stream = Files.list(Utils.getWorkingDirectory()) + .filter(Files::isDirectory); + + stream.forEach(dir -> { + String url = RipUtils.urlFromDirectoryName(dir.toString()); + if (url != null) { + // We found one, add it to history + HistoryEntry entry = new HistoryEntry(); + entry.url = url; + HISTORY.add(entry); } - } + }); } } } private void saveHistory() { - Path historyFile = Paths.get(Utils.getConfigDir() + File.separator + "history.json"); + Path historyFile = Paths.get(Utils.getConfigDir() + "/history.json"); try { if (!Files.exists(historyFile)) { Files.createDirectories(historyFile.getParent()); @@ -1216,7 +1278,7 @@ private void saveHistory() { private void ripNextAlbum() { isRipping = true; // Save current state of queue to configuration. - Utils.setConfigList("queue", (Enumeration) queueListModel.elements()); + Utils.setConfigList("queue", queueListModel.elements()); if (queueListModel.isEmpty()) { // End of queue @@ -1252,10 +1314,10 @@ private Thread ripAlbum(String urlString) { if (!urlString.startsWith("http")) { urlString = "http://" + urlString; } - URL url = null; + URL url; try { - url = new URL(urlString); - } catch (MalformedURLException e) { + url = new URI(urlString).toURL(); + } catch (MalformedURLException | URISyntaxException e) { LOGGER.error("[!] 
Could not generate URL for '" + urlString + "'", e); error("Given URL is not valid, expecting http://website.com/page/..."); return null; @@ -1312,7 +1374,7 @@ private boolean canRip(String urlString) { if (!urlText.startsWith("http")) { urlText = "http://" + urlText; } - URL url = new URL(urlText); + URL url = new URI(urlText).toURL(); // Ripper is needed here to throw/not throw an Exception AbstractRipper ripper = AbstractRipper.getRipper(url); return true; @@ -1382,34 +1444,39 @@ private synchronized void handleEvent(StatusEvent evt) { switch (msg.getStatus()) { case LOADING_RESOURCE: case DOWNLOAD_STARTED: - if (LOGGER.isEnabledFor(Level.INFO)) { + if (LOGGER.isEnabled(Level.INFO)) { appendLog("Downloading " + msg.getObject(), Color.BLACK); } break; case DOWNLOAD_COMPLETE: - if (LOGGER.isEnabledFor(Level.INFO)) { + if (LOGGER.isEnabled(Level.INFO)) { appendLog("Downloaded " + msg.getObject(), Color.GREEN); } break; case DOWNLOAD_COMPLETE_HISTORY: - if (LOGGER.isEnabledFor(Level.INFO)) { + if (LOGGER.isEnabled(Level.INFO)) { appendLog("" + msg.getObject(), Color.GREEN); } break; case DOWNLOAD_ERRORED: - if (LOGGER.isEnabledFor(Level.ERROR)) { + if (LOGGER.isEnabled(Level.ERROR)) { appendLog((String) msg.getObject(), Color.RED); } break; case DOWNLOAD_WARN: - if (LOGGER.isEnabledFor(Level.WARN)) { + if (LOGGER.isEnabled(Level.WARN)) { appendLog((String) msg.getObject(), Color.ORANGE); } break; + case DOWNLOAD_SKIP: + if (LOGGER.isEnabled(Level.INFO)) { + appendLog((String) msg.getObject(), Color.YELLOW); + } + break; case RIP_ERRORED: - if (LOGGER.isEnabledFor(Level.ERROR)) { + if (LOGGER.isEnabled(Level.ERROR)) { appendLog((String) msg.getObject(), Color.RED); } stopButton.setEnabled(false); @@ -1435,7 +1502,8 @@ private synchronized void handleEvent(StatusEvent evt) { entry.count = rsc.count; try { entry.title = ripper.getAlbumTitle(ripper.getURL()); - } catch (MalformedURLException e) { + } catch (MalformedURLException | URISyntaxException e) { + 
LOGGER.warn(e.getMessage()); } HISTORY.add(entry); historyTableModel.fireTableDataChanged(); @@ -1448,7 +1516,7 @@ private synchronized void handleEvent(StatusEvent evt) { statusProgress.setValue(0); statusProgress.setVisible(false); openButton.setVisible(true); - File f = rsc.dir; + Path f = rsc.dir; String prettyFile = Utils.shortenPath(f); openButton.setText(Utils.getLocalizedString("open") + prettyFile); mainFrame.setTitle("RipMe v" + UpdateUtils.getThisJarVersion()); @@ -1456,6 +1524,7 @@ private synchronized void handleEvent(StatusEvent evt) { Image folderIcon = ImageIO.read(getClass().getClassLoader().getResource("folder.png")); openButton.setIcon(new ImageIcon(folderIcon)); } catch (Exception e) { + LOGGER.warn(e.getMessage()); } /* * content key %path% the path to the album folder %url% is the album url @@ -1464,12 +1533,13 @@ private synchronized void handleEvent(StatusEvent evt) { */ if (Utils.getConfigBoolean("enable.finish.command", false)) { try { - String commandToRun = Utils.getConfigString("finish.command", "ls"); - commandToRun = commandToRun.replaceAll("%url%", url); - commandToRun = commandToRun.replaceAll("%path%", f.getAbsolutePath()); + String cmdStr = Utils.getConfigString("finish.command", "ls"); + cmdStr = cmdStr.replaceAll("%url%", url); + cmdStr = cmdStr.replaceAll("%path%", f.toAbsolutePath().toString()); + // java dropped the exec string executor, as the string is only split very trivial. + // do the same at the moment, and split, to get rid of java-21 deprecation warning. 
+ String[] commandToRun = cmdStr.split(" "); LOGGER.info("RUnning command " + commandToRun); - // code from: - // https://stackoverflow.com/questions/5711084/java-runtime-getruntime-getting-output-from-executing-a-command-line-program Process proc = Runtime.getRuntime().exec(commandToRun); BufferedReader stdInput = new BufferedReader(new InputStreamReader(proc.getInputStream())); @@ -1492,7 +1562,7 @@ private synchronized void handleEvent(StatusEvent evt) { LOGGER.error(e.getStackTrace()); } } - appendLog("Rip complete, saved to " + f.getAbsolutePath(), Color.GREEN); + appendLog("Rip complete, saved to " + f, Color.GREEN); openButton.setActionCommand(f.toString()); openButton.addActionListener(event -> { try { @@ -1511,7 +1581,7 @@ private synchronized void handleEvent(StatusEvent evt) { // Update total bytes break; case NO_ALBUM_OR_USER: - if (LOGGER.isEnabledFor(Level.ERROR)) { + if (LOGGER.isEnabled(Level.ERROR)) { appendLog((String) msg.getObject(), Color.RED); } stopButton.setEnabled(false); @@ -1534,14 +1604,6 @@ public static void ripAlbumStatic(String url) { ripButton.doClick(); } - public static void enableWindowPositioning() { - Utils.setConfigBoolean("window.position", true); - } - - public static void disableWindowPositioning() { - Utils.setConfigBoolean("window.position", false); - } - private static boolean hasWindowPositionBug() { String osName = System.getProperty("os.name"); // Java on Windows has a bug where if we try to manually set the position of the diff --git a/src/main/java/com/rarchives/ripme/ui/QueueMenuMouseListener.java b/src/main/java/com/rarchives/ripme/ui/QueueMenuMouseListener.java index 19911ee26..0be4b46f8 100644 --- a/src/main/java/com/rarchives/ripme/ui/QueueMenuMouseListener.java +++ b/src/main/java/com/rarchives/ripme/ui/QueueMenuMouseListener.java @@ -58,10 +58,19 @@ public void actionPerformed(ActionEvent ae) { updateQueue.accept(queueListModel); } - @SuppressWarnings("unchecked") @Override - public void 
mouseClicked(MouseEvent e) { - if (e.getModifiers() == InputEvent.BUTTON3_MASK) { + public void mousePressed(MouseEvent e) { + checkPopupTrigger(e); + } + + @Override + public void mouseReleased(MouseEvent e) { + checkPopupTrigger(e); + } + + @SuppressWarnings("unchecked") + private void checkPopupTrigger(MouseEvent e) { + if (e.getModifiersEx() == InputEvent.BUTTON3_DOWN_MASK) { if (!(e.getSource() instanceof JList)) { return; } diff --git a/src/main/java/com/rarchives/ripme/ui/RipStatusComplete.java b/src/main/java/com/rarchives/ripme/ui/RipStatusComplete.java index 720aa9a78..2b8058bb2 100644 --- a/src/main/java/com/rarchives/ripme/ui/RipStatusComplete.java +++ b/src/main/java/com/rarchives/ripme/ui/RipStatusComplete.java @@ -1,29 +1,22 @@ package com.rarchives.ripme.ui; -import java.io.File; -import java.io.IOException; +import java.nio.file.Path; public class RipStatusComplete { - File dir = null; + Path dir = null; int count = 0; - public RipStatusComplete(File dir) { + public RipStatusComplete(Path dir) { this.dir = dir; this.count = 1; } - public RipStatusComplete(File dir, int count) { + public RipStatusComplete(Path dir, int count) { this.dir = dir; this.count = count; } public String getDir() { - String result; - try { - result = this.dir.getCanonicalPath(); - } catch (IOException e) { - result = this.dir.toString(); - } - return result; + return this.dir.toString(); } } diff --git a/src/main/java/com/rarchives/ripme/ui/RipStatusMessage.java b/src/main/java/com/rarchives/ripme/ui/RipStatusMessage.java index 207968d9c..f589e9dbb 100644 --- a/src/main/java/com/rarchives/ripme/ui/RipStatusMessage.java +++ b/src/main/java/com/rarchives/ripme/ui/RipStatusMessage.java @@ -13,6 +13,7 @@ public enum STATUS { DOWNLOAD_COMPLETE_HISTORY("Download Complete History"), RIP_COMPLETE("Rip Complete"), DOWNLOAD_WARN("Download problem"), + DOWNLOAD_SKIP("Download Skipped"), TOTAL_BYTES("Total bytes"), COMPLETED_BYTES("Completed bytes"), RIP_ERRORED("Rip Errored"), diff 
--git a/src/main/java/com/rarchives/ripme/ui/UpdateUtils.java b/src/main/java/com/rarchives/ripme/ui/UpdateUtils.java index a255496bf..128eabbaf 100644 --- a/src/main/java/com/rarchives/ripme/ui/UpdateUtils.java +++ b/src/main/java/com/rarchives/ripme/ui/UpdateUtils.java @@ -1,48 +1,60 @@ package com.rarchives.ripme.ui; -import java.awt.Dimension; -import java.io.*; -import java.net.URISyntaxException; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; - -import javax.swing.JEditorPane; -import javax.swing.JLabel; -import javax.swing.JOptionPane; -import javax.swing.JScrollPane; - -import org.apache.log4j.Logger; +import com.rarchives.ripme.utils.Utils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.json.JSONArray; import org.json.JSONObject; import org.jsoup.Connection.Response; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; -import com.rarchives.ripme.utils.Utils; +import javax.swing.*; +import java.awt.*; +import java.io.BufferedWriter; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.URISyntaxException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; + +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; public class UpdateUtils { - private static final Logger logger = Logger.getLogger(UpdateUtils.class); - private static final String DEFAULT_VERSION = "1.7.95"; - private static final String REPO_NAME = "ripmeapp/ripme"; - private static final String updateJsonURL = "https://raw.githubusercontent.com/" + REPO_NAME + "/master/ripme.json"; - private static String mainFileName; + private static final Logger logger = LogManager.getLogger(UpdateUtils.class); + // do not update the default version without adjusting the unit test. 
the real version comes from METAINF.MF + private static final String DEFAULT_VERSION = "1.7.94-10-b6345398"; + private static final String REPO_NAME = "ripmeapp2/ripme"; + private static final String updateJsonURL = "https://raw.githubusercontent.com/" + REPO_NAME + "/main/ripme.json"; + private static final Path newFile = Paths.get("ripme.jar.new"); + private static Path mainFile; + private static JSONObject ripmeJson; static { try { - mainFileName = new File(UpdateUtils.class.getProtectionDomain().getCodeSource().getLocation().toURI()).getAbsolutePath(); + mainFile = Paths.get(UpdateUtils.class.getProtectionDomain().getCodeSource().getLocation().toURI()); } catch (URISyntaxException | IllegalArgumentException e) { - mainFileName = "ripme.jar"; + mainFile = Paths.get("ripme.jar"); logger.error("Unable to get path of jar"); e.printStackTrace(); } } - private static final String updateFileName = "ripme.jar.update"; - private static JSONObject ripmeJson; - private static String getUpdateJarURL(String latestVersion) { - return "https://github.com/" + REPO_NAME + "/releases/download/" + latestVersion + "/ripme.jar"; + // this works with a tag created in github, and thus download URLs like: + // https://github.com/ripmeapp2/ripme/releases/download/2.0.4/ripme-2.0.4-12-487e38cc.jar + return "https://github.com/" + + REPO_NAME + + "/releases/download/" + + latestVersion.substring(0, latestVersion.indexOf("-")) + + "/ripme-" + + latestVersion + ".jar"; } public static String getThisJarVersion() { @@ -70,7 +82,7 @@ private static String getChangeList(JSONObject rj) { public static void updateProgramCLI() { logger.info("Checking for update..."); - Document doc = null; + Document doc; try { logger.debug("Retrieving " + UpdateUtils.updateJsonURL); doc = Jsoup.connect(UpdateUtils.updateJsonURL).timeout(10 * 1000).ignoreContentType(true).get(); @@ -93,25 +105,22 @@ public static void updateProgramCLI() { String latestVersion = ripmeJson.getString("latestVersion"); if 
(UpdateUtils.isNewerVersion(latestVersion)) { logger.info("Found newer version: " + latestVersion); - logger.info("Downloading new version..."); - logger.info("New version found, downloading..."); + logger.info("Downloading" +getUpdateJarURL(latestVersion) + " ..."); try { UpdateUtils.downloadJarAndLaunch(getUpdateJarURL(latestVersion), false); } catch (IOException e) { logger.error("Error while updating: ", e); } } else { - logger.debug("This version (" + UpdateUtils.getThisJarVersion() - + ") is the same or newer than the website's version (" + latestVersion + ")"); - logger.info("v" + UpdateUtils.getThisJarVersion() + " is the latest version"); - logger.debug("Running latest version: " + UpdateUtils.getThisJarVersion()); + logger.info("Running version (" + UpdateUtils.getThisJarVersion() + + ") is not older than release (" + latestVersion + ")"); } } public static void updateProgramGUI(JLabel configUpdateLabel) { configUpdateLabel.setText("Checking for update..."); - Document doc = null; + Document doc; try { logger.debug("Retrieving " + UpdateUtils.updateJsonURL); doc = Jsoup.connect(UpdateUtils.updateJsonURL).timeout(10 * 1000).ignoreContentType(true).get(); @@ -147,7 +156,7 @@ public static void updateProgramGUI(JLabel configUpdateLabel) { return; } configUpdateLabel.setText("Downloading new version..."); - logger.info("New version found, downloading..."); + logger.info("New version found, downloading " + getUpdateJarURL(latestVersion)); try { UpdateUtils.downloadJarAndLaunch(getUpdateJarURL(latestVersion), true); } catch (IOException e) { @@ -157,15 +166,14 @@ public static void updateProgramGUI(JLabel configUpdateLabel) { logger.error("Error while updating: ", e); } } else { - logger.debug("This version (" + UpdateUtils.getThisJarVersion() - + ") is the same or newer than the website's version (" + latestVersion + ")"); + logger.info("Running version (" + UpdateUtils.getThisJarVersion() + + ") is not older than release (" + latestVersion + ")"); 
configUpdateLabel.setText("v" + UpdateUtils.getThisJarVersion() + " is the latest version"); - logger.debug("Running latest version: " + UpdateUtils.getThisJarVersion()); } } - private static boolean isNewerVersion(String latestVersion) { + static boolean isNewerVersion(String latestVersion) { // If we're testing the update utils we want the program to always try to update if (Utils.getConfigBoolean("testing.always_try_to_update", false)) { logger.info("isNewerVersion is returning true because the key \"testing.always_try_to_update\" is true"); @@ -180,7 +188,7 @@ private static boolean isNewerVersion(String latestVersion) { for (int i = 0; i < oldVersions.length; i++) { if (newVersions[i] > oldVersions[i]) { - logger.debug("oldVersion " + getThisJarVersion() + " < latestVersion" + latestVersion); + logger.debug("oldVersion " + getThisJarVersion() + " < latestVersion " + latestVersion); return true; } else if (newVersions[i] < oldVersions[i]) { logger.debug("oldVersion " + getThisJarVersion() + " > latestVersion " + latestVersion); @@ -194,26 +202,34 @@ private static boolean isNewerVersion(String latestVersion) { } private static int[] versionStringToInt(String version) { - String strippedVersion = version.split("-")[0]; - String[] strVersions = strippedVersion.split("\\."); - int[] intVersions = new int[strVersions.length]; - for (int i = 0; i < strVersions.length; i++) { - intVersions[i] = Integer.parseInt(strVersions[i]); + // a version string looks like 1.7.94, 1.7.94-10-something + // 10 is the number of commits since the 1.7.94 tag, so newer + // the int array returned then contains e.g. 
1.7.94.0 or 1.7.94.10 + String[] strVersions = version.split("[.-]"); + // not consider more than 4 components of version, loop only the real number + // of components or maximum 4 components of the version string + int[] intVersions = new int[4]; + for (int i = 0; i < Math.min(4, strVersions.length); i++) { + // if it is an integer, set it, otherwise leave default 0 + if (strVersions[i].matches("\\d+")) { + intVersions[i] = Integer.parseInt(strVersions[i]); + } } return intVersions; } // Code take from https://stackoverflow.com/a/30925550 - public static String createSha256(File file) { + public static String createSha256(Path file) { try { MessageDigest digest = MessageDigest.getInstance("SHA-256"); - InputStream fis = new FileInputStream(file); - int n = 0; - byte[] buffer = new byte[8192]; - while (n != -1) { - n = fis.read(buffer); - if (n > 0) { - digest.update(buffer, 0, n); + try (InputStream fis = Files.newInputStream(file)) { + int n = 0; + byte[] buffer = new byte[8192]; + while (n != -1) { + n = fis.read(buffer); + if (n > 0) { + digest.update(buffer, 0, n); + } } } byte[] hash = digest.digest(); @@ -225,11 +241,9 @@ public static String createSha256(File file) { // As patch.py writes the hash in lowercase this must return the has in // lowercase return sb.toString().toLowerCase(); - } catch (NoSuchAlgorithmException e) { - logger.error("Got error getting file hash " + e.getMessage()); } catch (FileNotFoundException e) { - logger.error("Could not find file: " + file.getName()); - } catch (IOException e) { + logger.error("Could not find file: " + file); + } catch (NoSuchAlgorithmException | IOException e) { logger.error("Got error getting file hash " + e.getMessage()); } return null; @@ -241,13 +255,13 @@ private static void downloadJarAndLaunch(String updateJarURL, Boolean shouldLaun .timeout(Utils.getConfigInteger("download.timeout", 60 * 1000)).maxBodySize(1024 * 1024 * 100) .execute(); - try (FileOutputStream out = new 
FileOutputStream(updateFileName)) { + try (OutputStream out = Files.newOutputStream(newFile)) { out.write(response.bodyAsBytes()); } // Only check the hash if the user hasn't disabled hash checking if (Utils.getConfigBoolean("security.check_update_hash", true)) { - String updateHash = createSha256(new File(updateFileName)); - logger.info("Download of new version complete; saved to " + updateFileName); + String updateHash = createSha256(newFile); + logger.info("Download of new version complete; saved to " + newFile); logger.info("Checking hash of update"); if (!ripmeJson.getString("currentHash").equals(updateHash)) { @@ -262,19 +276,17 @@ private static void downloadJarAndLaunch(String updateJarURL, Boolean shouldLaun if (System.getProperty("os.name").toLowerCase().contains("win")) { // Windows - final String batchFile = "update_ripme.bat"; - final String batchPath = new File(batchFile).getAbsolutePath(); - String script = "@echo off\r\n" + "timeout 1\r\n" - + "copy \"" + updateFileName + "\" \"" + mainFileName + "\"\r\n" - + "del \"" + updateFileName + "\"\r\n"; - - if (shouldLaunch) - script += "\"" + mainFileName + "\"\r\n"; - script += "del \"" + batchPath + "\"\r\n"; - - final String[] batchExec = new String[] { batchPath }; + final Path batchFile = Paths.get("update_ripme.bat"); + String script = "@echo off\r\n" + "timeout 1\r\n" + + "copy \"" + newFile + "\" \"" + mainFile + "\"\r\n" + + "del \"" + newFile + "\"\r\n"; + + if (shouldLaunch) + script += "\"" + mainFile + "\"\r\n"; + script += "del \"" + batchFile + "\"\r\n"; + // Create updater script - try (BufferedWriter bw = new BufferedWriter(new FileWriter(batchFile))) { + try (BufferedWriter bw = Files.newBufferedWriter(batchFile)) { bw.write(script); bw.flush(); } @@ -284,7 +296,7 @@ private static void downloadJarAndLaunch(String updateJarURL, Boolean shouldLaun Runtime.getRuntime().addShutdownHook(new Thread(() -> { try { logger.info("Executing: " + batchFile); - Runtime.getRuntime().exec(batchExec); + 
Runtime.getRuntime().exec(String.valueOf(batchFile)); } catch (IOException e) { // TODO implement proper stack trace handling this is really just intented as a // placeholder until you implement proper error handling @@ -298,16 +310,14 @@ private static void downloadJarAndLaunch(String updateJarURL, Boolean shouldLaun // Modifying file and launching it: *nix distributions don't have any issues // with modifying/deleting files // while they are being run - File mainFile = new File(mainFileName); - String mainFilePath = mainFile.getAbsolutePath(); - mainFile.delete(); - new File(updateFileName).renameTo(new File(mainFilePath)); + Files.move(newFile, mainFile, REPLACE_EXISTING); if (shouldLaunch) { // No need to do it during shutdown: the file used will indeed be the new one - Runtime.getRuntime().exec("java -jar " + mainFileName); + logger.info("Executing: " + mainFile); + Runtime.getRuntime().exec(new String[]{"java", "-jar", mainFile.toString()}); } logger.info("Update installed, newer version should be executed upon relaunch"); System.exit(0); } } -} +} \ No newline at end of file diff --git a/src/main/java/com/rarchives/ripme/uiUtils/ContextActionProtections.java b/src/main/java/com/rarchives/ripme/uiUtils/ContextActionProtections.java new file mode 100644 index 000000000..9237fea90 --- /dev/null +++ b/src/main/java/com/rarchives/ripme/uiUtils/ContextActionProtections.java @@ -0,0 +1,31 @@ +package com.rarchives.ripme.uiUtils; + +import javax.swing.*; +import javax.swing.text.JTextComponent; +import java.awt.*; +import java.awt.datatransfer.Clipboard; +import java.awt.datatransfer.DataFlavor; +import java.awt.datatransfer.Transferable; +import java.awt.datatransfer.UnsupportedFlavorException; +import java.io.IOException; + +public class ContextActionProtections { + public static void pasteFromClipboard(JTextComponent textComponent) { + Clipboard clipboard = Toolkit.getDefaultToolkit().getSystemClipboard(); + Transferable transferable = clipboard.getContents(new 
Object()); + + try { + String clipboardContent = (String) transferable.getTransferData(DataFlavor.stringFlavor); + + // TODO check if commenting this causes regression + // Limit the pasted content to 96 characters + // if (clipboardContent.length() > 96) { + // clipboardContent = clipboardContent.substring(0, 96); + // } + // Set the text in the JTextField + textComponent.setText(clipboardContent); + } catch (UnsupportedFlavorException | IOException unable_to_modify_text_on_paste) { + unable_to_modify_text_on_paste.printStackTrace(); + } + } +} diff --git a/src/main/java/com/rarchives/ripme/utils/Http.java b/src/main/java/com/rarchives/ripme/utils/Http.java index d39406e75..374f32e74 100644 --- a/src/main/java/com/rarchives/ripme/utils/Http.java +++ b/src/main/java/com/rarchives/ripme/utils/Http.java @@ -1,38 +1,39 @@ package com.rarchives.ripme.utils; -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.HashMap; -import java.util.Map; - +import com.rarchives.ripme.ripper.AbstractRipper; import org.apache.commons.lang.ArrayUtils; -import org.apache.commons.lang.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.json.JSONArray; import org.json.JSONObject; import org.jsoup.Connection; import org.jsoup.Connection.Method; import org.jsoup.Connection.Response; -import org.jsoup.helper.StringUtil; -import org.jsoup.Jsoup; import org.jsoup.HttpStatusException; +import org.jsoup.Jsoup; import org.jsoup.nodes.Document; -import com.rarchives.ripme.ripper.AbstractRipper; +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.HashMap; +import java.util.Map; /** * Wrapper around the Jsoup connection methods. - * + *

* Benefit is retry logic. */ public class Http { private static final int TIMEOUT = Utils.getConfigInteger("page.timeout", 5 * 1000); - private static final Logger logger = Logger.getLogger(Http.class); + private static final Logger logger = LogManager.getLogger(Http.class); private int retries; - private String url; + private int retrySleep = 0; + private final String url; private Connection connection; // Constructors @@ -40,6 +41,7 @@ public Http(String url) { this.url = url; defaultSettings(); } + private Http(URL url) { this.url = url.toExternalForm(); defaultSettings(); @@ -48,12 +50,14 @@ private Http(URL url) { public static Http url(String url) { return new Http(url); } + public static Http url(URL url) { return new Http(url); } private void defaultSettings() { - this.retries = Utils.getConfigInteger("download.retries", 1); + this.retries = Utils.getConfigInteger("download.retries", 3); + this.retrySleep = Utils.getConfigInteger("download.retry.sleep", 5000); connection = Jsoup.connect(this.url); connection.userAgent(AbstractRipper.USER_AGENT); connection.method(Method.GET); @@ -69,9 +73,9 @@ private void defaultSettings() { private Map cookiesForURL(String u) { Map cookiesParsed = new HashMap<>(); - String cookieDomain = ""; + String cookieDomain = ""; try { - URL parsed = new URL(u); + URL parsed = new URI(u).toURL(); String cookieStr = ""; String[] parts = parsed.getHost().split("\\."); @@ -85,7 +89,7 @@ private Map cookiesForURL(String u) { logger.info("Trying to load cookies from config for " + domain); cookieStr = Utils.getConfigString("cookies." 
+ domain, ""); if (!cookieStr.equals("")) { - cookieDomain = domain; + cookieDomain = domain; // we found something, start parsing break; } @@ -95,7 +99,7 @@ private Map cookiesForURL(String u) { if (!cookieStr.equals("")) { cookiesParsed = RipUtils.getCookiesFromString(cookieStr.trim()); } - } catch (MalformedURLException e) { + } catch (MalformedURLException | URISyntaxException e) { logger.warn("Parsing url " + u + " while getting cookies", e); } @@ -111,42 +115,52 @@ public Http timeout(int timeout) { connection.timeout(timeout); return this; } + public Http ignoreContentType() { connection.ignoreContentType(true); return this; } - public Http referrer(String ref) { + + public Http referrer(String ref) { connection.referrer(ref); return this; } + public Http referrer(URL ref) { return referrer(ref.toExternalForm()); } - public Http userAgent(String ua) { + + public Http userAgent(String ua) { connection.userAgent(ua); return this; } + public Http retries(int tries) { this.retries = tries; return this; } + public Http header(String name, String value) { - connection.header(name, value); + connection.header(name, value); return this; } - public Http cookies(Map cookies) { + + public Http cookies(Map cookies) { connection.cookies(cookies); return this; } - public Http data(Map data) { + + public Http data(Map data) { connection.data(data); return this; } + public Http data(String name, String value) { - Map data = new HashMap<>(); + Map data = new HashMap<>(); data.put(name, value); return data(data); } + public Http method(Method method) { connection.method(method); return this; @@ -156,6 +170,7 @@ public Http method(Method method) { public Connection connection() { return connection; } + public Document get() throws IOException { connection.method(Method.GET); return response().parse(); @@ -179,7 +194,7 @@ public JSONArray getJSONArray() throws IOException { } public Response response() throws IOException { - Response response = null; + Response response; 
IOException lastException = null; int retries = this.retries; while (--retries >= 0) { @@ -189,19 +204,27 @@ public Response response() throws IOException { } catch (IOException e) { // Warn users about possibly fixable permission error if (e instanceof org.jsoup.HttpStatusException) { - HttpStatusException ex = (HttpStatusException)e; - + HttpStatusException ex = (HttpStatusException) e; + // These status codes might indicate missing cookies // 401 Unauthorized // 403 Forbidden - int status = ex.getStatusCode(); + int status = ex.getStatusCode(); if (status == 401 || status == 403) { - throw new IOException("Failed to load " + url + ": Status Code " + Integer.toString(status) + ". You might be able to circumvent this error by setting cookies for this domain" , e); + throw new IOException("Failed to load " + url + ": Status Code " + status + ". You might be able to circumvent this error by setting cookies for this domain", e); + } + if (status == 404) { + throw new IOException("File not found " + url + ": Status Code " + status + ". ", e); } } - logger.warn("Error while loading " + url, e); + if (retrySleep > 0 && retries >= 0) { + logger.warn("Error while loading " + url + " waiting "+ retrySleep + " ms before retrying.", e); + Utils.sleep(retrySleep); + } else { + logger.warn("Error while loading " + url, e); + } lastException = e; } } diff --git a/src/main/java/com/rarchives/ripme/utils/Proxy.java b/src/main/java/com/rarchives/ripme/utils/Proxy.java index be3c3b7e8..0275bd5c7 100644 --- a/src/main/java/com/rarchives/ripme/utils/Proxy.java +++ b/src/main/java/com/rarchives/ripme/utils/Proxy.java @@ -1,99 +1,99 @@ -package com.rarchives.ripme.utils; - -import java.net.Authenticator; -import java.net.PasswordAuthentication; -import java.util.Map; -import java.util.HashMap; - -/** - * Proxy/Socks setter - */ -public class Proxy { - private Proxy() { - } - - /** - * Parse the proxy server settings from string, using the format - * [user:password]@host[:port]. 
- * - * @param fullproxy the string to parse - * @return HashMap containing proxy server, port, user and password - */ - private static Map parseServer(String fullproxy) { - Map proxy = new HashMap(); - - if (fullproxy.lastIndexOf("@") != -1) { - int sservli = fullproxy.lastIndexOf("@"); - String userpw = fullproxy.substring(0, sservli); - String[] usersplit = userpw.split(":"); - proxy.put("user", usersplit[0]); - proxy.put("password", usersplit[1]); - fullproxy = fullproxy.substring(sservli + 1); - } - String[] servsplit = fullproxy.split(":"); - if (servsplit.length == 2) { - proxy.put("port", servsplit[1]); - } - proxy.put("server", servsplit[0]); - return proxy; - } - - /** - * Set a HTTP Proxy. - * WARNING: Authenticated HTTP Proxy won't work from jdk1.8.111 unless - * passing the flag -Djdk.http.auth.tunneling.disabledSchemes="" to java - * see https://stackoverflow.com/q/41505219 - * - * @param fullproxy the proxy, using format [user:password]@host[:port] - */ - public static void setHTTPProxy(String fullproxy) { - Map proxyServer = parseServer(fullproxy); - - if (proxyServer.get("user") != null && proxyServer.get("password") != null) { - Authenticator.setDefault(new Authenticator(){ - protected PasswordAuthentication getPasswordAuthentication(){ - PasswordAuthentication p = new PasswordAuthentication(proxyServer.get("user"), proxyServer.get("password").toCharArray()); - return p; - } - }); - System.setProperty("http.proxyUser", proxyServer.get("user")); - System.setProperty("http.proxyPassword", proxyServer.get("password")); - System.setProperty("https.proxyUser", proxyServer.get("user")); - System.setProperty("https.proxyPassword", proxyServer.get("password")); - } - - if (proxyServer.get("port") != null) { - System.setProperty("http.proxyPort", proxyServer.get("port")); - System.setProperty("https.proxyPort", proxyServer.get("port")); - } - - System.setProperty("http.proxyHost", proxyServer.get("server")); - System.setProperty("https.proxyHost", 
proxyServer.get("server")); - } - - /** - * Set a Socks Proxy Server (globally). - * - * @param fullsocks the socks server, using format [user:password]@host[:port] - */ - public static void setSocks(String fullsocks) { - - Map socksServer = parseServer(fullsocks); - if (socksServer.get("user") != null && socksServer.get("password") != null) { - Authenticator.setDefault(new Authenticator(){ - protected PasswordAuthentication getPasswordAuthentication(){ - PasswordAuthentication p = new PasswordAuthentication(socksServer.get("user"), socksServer.get("password").toCharArray()); - return p; - } - }); - System.setProperty("java.net.socks.username", socksServer.get("user")); - System.setProperty("java.net.socks.password", socksServer.get("password")); - } - if (socksServer.get("port") != null) { - System.setProperty("socksProxyPort", socksServer.get("port")); - } - - System.setProperty("socksProxyHost", socksServer.get("server")); - } - -} +package com.rarchives.ripme.utils; + +import java.net.Authenticator; +import java.net.PasswordAuthentication; +import java.util.Map; +import java.util.HashMap; + +/** + * Proxy/Socks setter + */ +public class Proxy { + private Proxy() { + } + + /** + * Parse the proxy server settings from string, using the format + * [user:password]@host[:port]. 
+ * + * @param fullproxy the string to parse + * @return HashMap containing proxy server, port, user and password + */ + private static Map parseServer(String fullproxy) { + Map proxy = new HashMap(); + + if (fullproxy.lastIndexOf("@") != -1) { + int sservli = fullproxy.lastIndexOf("@"); + String userpw = fullproxy.substring(0, sservli); + String[] usersplit = userpw.split(":"); + proxy.put("user", usersplit[0]); + proxy.put("password", usersplit[1]); + fullproxy = fullproxy.substring(sservli + 1); + } + String[] servsplit = fullproxy.split(":"); + if (servsplit.length == 2) { + proxy.put("port", servsplit[1]); + } + proxy.put("server", servsplit[0]); + return proxy; + } + + /** + * Set a HTTP Proxy. + * WARNING: Authenticated HTTP Proxy won't work from jdk1.8.111 unless + * passing the flag -Djdk.http.auth.tunneling.disabledSchemes="" to java + * see https://stackoverflow.com/q/41505219 + * + * @param fullproxy the proxy, using format [user:password]@host[:port] + */ + public static void setHTTPProxy(String fullproxy) { + Map proxyServer = parseServer(fullproxy); + + if (proxyServer.get("user") != null && proxyServer.get("password") != null) { + Authenticator.setDefault(new Authenticator(){ + protected PasswordAuthentication getPasswordAuthentication(){ + PasswordAuthentication p = new PasswordAuthentication(proxyServer.get("user"), proxyServer.get("password").toCharArray()); + return p; + } + }); + System.setProperty("http.proxyUser", proxyServer.get("user")); + System.setProperty("http.proxyPassword", proxyServer.get("password")); + System.setProperty("https.proxyUser", proxyServer.get("user")); + System.setProperty("https.proxyPassword", proxyServer.get("password")); + } + + if (proxyServer.get("port") != null) { + System.setProperty("http.proxyPort", proxyServer.get("port")); + System.setProperty("https.proxyPort", proxyServer.get("port")); + } + + System.setProperty("http.proxyHost", proxyServer.get("server")); + System.setProperty("https.proxyHost", 
proxyServer.get("server")); + } + + /** + * Set a Socks Proxy Server (globally). + * + * @param fullsocks the socks server, using format [user:password]@host[:port] + */ + public static void setSocks(String fullsocks) { + + Map socksServer = parseServer(fullsocks); + if (socksServer.get("user") != null && socksServer.get("password") != null) { + Authenticator.setDefault(new Authenticator(){ + protected PasswordAuthentication getPasswordAuthentication(){ + PasswordAuthentication p = new PasswordAuthentication(socksServer.get("user"), socksServer.get("password").toCharArray()); + return p; + } + }); + System.setProperty("java.net.socks.username", socksServer.get("user")); + System.setProperty("java.net.socks.password", socksServer.get("password")); + } + if (socksServer.get("port") != null) { + System.setProperty("socksProxyPort", socksServer.get("port")); + } + + System.setProperty("socksProxyHost", socksServer.get("server")); + } + +} diff --git a/src/main/java/com/rarchives/ripme/utils/RipUtils.java b/src/main/java/com/rarchives/ripme/utils/RipUtils.java index 3fcb71c2e..15e4128f7 100644 --- a/src/main/java/com/rarchives/ripme/utils/RipUtils.java +++ b/src/main/java/com/rarchives/ripme/utils/RipUtils.java @@ -2,6 +2,8 @@ import java.io.IOException; import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.*; import java.util.regex.Matcher; @@ -13,16 +15,17 @@ import com.rarchives.ripme.ripper.rippers.ImgurRipper; import com.rarchives.ripme.ripper.rippers.RedgifsRipper; import com.rarchives.ripme.ripper.rippers.VidbleRipper; -import com.rarchives.ripme.ripper.rippers.GfycatRipper; +import com.rarchives.ripme.ripper.rippers.SoundgasmRipper; import org.apache.commons.lang.math.NumberUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.jsoup.Jsoup; import org.jsoup.nodes.Document; import 
org.jsoup.nodes.Element; public class RipUtils { - private static final Logger logger = Logger.getLogger(RipUtils.class); + private static final Logger logger = LogManager.getLogger(RipUtils.class); public static List getFilesFromURL(URL url) { List result = new ArrayList<>(); @@ -38,52 +41,28 @@ public static List getFilesFromURL(URL url) { logger.debug("Got imgur image: " + imgurImage.url); result.add(imgurImage.url); } - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { logger.error("[!] Exception while loading album " + url, e); } return result; - } - else if (url.getHost().endsWith("imgur.com") && url.toExternalForm().contains(",")) { - // Imgur image series. - try { - logger.debug("Fetching imgur series at " + url); - ImgurRipper.ImgurAlbum imgurAlbum = ImgurRipper.getImgurSeries(url); - for (ImgurRipper.ImgurImage imgurImage : imgurAlbum.images) { - logger.debug("Got imgur image: " + imgurImage.url); - result.add(imgurImage.url); - } - } catch (IOException e) { - logger.error("[!] 
Exception while loading album " + url, e); - } - } else if (url.getHost().endsWith("i.imgur.com") && url.toExternalForm().contains("gifv")) { + } + else if (url.getHost().endsWith("i.imgur.com") && url.toExternalForm().contains("gifv")) { // links to imgur gifvs try { - result.add(new URL(url.toExternalForm().replaceAll(".gifv", ".mp4"))); - } catch (IOException e) { + result.add(new URI(url.toExternalForm().replaceAll(".gifv", ".mp4")).toURL()); + } catch (IOException | URISyntaxException e) { logger.info("Couldn't get gifv from " + url); } return result; } - else if (url.getHost().endsWith("gfycat.com")) { - try { - logger.debug("Fetching gfycat page " + url); - String videoURL = GfycatRipper.getVideoURL(url); - logger.debug("Got gfycat URL: " + videoURL); - result.add(new URL(videoURL)); - } catch (IOException e) { - // Do nothing - logger.warn("Exception while retrieving gfycat page:", e); - } - return result; - } else if (url.getHost().endsWith("redgifs.com") || url.getHost().endsWith("gifdeliverynetwork.com")) { try { logger.debug("Fetching redgifs page " + url); String videoURL = RedgifsRipper.getVideoURL(url); logger.debug("Got redgifs URL: " + videoURL); - result.add(new URL(videoURL)); - } catch (IOException e) { + result.add(new URI(videoURL).toURL()); + } catch (IOException | URISyntaxException e) { // Do nothing logger.warn("Exception while retrieving redgifs page:", e); } @@ -93,7 +72,7 @@ else if (url.toExternalForm().contains("vidble.com/album/") || url.toExternalFor try { logger.info("Getting vidble album " + url); result.addAll(VidbleRipper.getURLsFromPage(url)); - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { // Do nothing logger.warn("Exception while retrieving vidble page:", e); } @@ -103,7 +82,7 @@ else if (url.toExternalForm().contains("eroshare.com")) { try { logger.info("Getting eroshare album " + url); result.addAll(EroShareRipper.getURLs(url)); - } catch (IOException e) { + } catch (IOException | 
URISyntaxException e) { // Do nothing logger.warn("Exception while retrieving eroshare page:", e); } @@ -119,14 +98,28 @@ else if (url.toExternalForm().contains("erome.com")) { EromeRipper r = new EromeRipper(url); Document tempDoc = r.getFirstPage(); for (String u : r.getURLsFromPage(tempDoc)) { - result.add(new URL(u)); + result.add(new URI(u).toURL()); } - } catch (IOException e) { + } catch (IOException | URISyntaxException e) { // Do nothing logger.warn("Exception while retrieving eroshare page:", e); } return result; } + else if (url.toExternalForm().contains("soundgasm.net")) { + try { + logger.info("Getting soundgasm page " + url); + SoundgasmRipper r = new SoundgasmRipper(url); + Document tempDoc = r.getFirstPage(); + for (String u : r.getURLsFromPage(tempDoc)) { + result.add(new URI(u).toURL()); + } + } catch (IOException | URISyntaxException e) { + // Do nothing + logger.warn("Exception while retrieving soundgasm page:", e); + } + return result; + } Pattern p = Pattern.compile("https?://i.reddituploads.com/([a-zA-Z0-9]+)\\?.*"); Matcher m = p.matcher(url.toExternalForm()); @@ -134,8 +127,8 @@ else if (url.toExternalForm().contains("erome.com")) { logger.info("URL: " + url.toExternalForm()); String u = url.toExternalForm().replaceAll("&", "&"); try { - result.add(new URL(u)); - } catch (MalformedURLException e) { + result.add(new URI(u).toURL()); + } catch (MalformedURLException | URISyntaxException e) { } return result; } @@ -145,11 +138,11 @@ else if (url.toExternalForm().contains("erome.com")) { m = p.matcher(url.toExternalForm()); if (m.matches()) { try { - URL singleURL = new URL(m.group(1)); + URL singleURL = new URI(m.group(1)).toURL(); logger.debug("Found single URL: " + singleURL); result.add(singleURL); return result; - } catch (MalformedURLException e) { + } catch (MalformedURLException | URISyntaxException e) { logger.error("[!] 
Not a valid URL: '" + url + "'", e); } } @@ -163,19 +156,19 @@ else if (url.toExternalForm().contains("erome.com")) { .get(); for (Element el : doc.select("meta")) { if (el.attr("property").equals("og:video")) { - result.add(new URL(el.attr("content"))); + result.add(new URI(el.attr("content")).toURL()); return result; } else if (el.attr("name").equals("twitter:image:src")) { - result.add(new URL(el.attr("content"))); + result.add(new URI(el.attr("content")).toURL()); return result; } else if (el.attr("name").equals("twitter:image")) { - result.add(new URL(el.attr("content"))); + result.add(new URI(el.attr("content")).toURL()); return result; } } - } catch (IOException ex) { + } catch (IOException | URISyntaxException ex) { logger.error("[!] Error", ex); } diff --git a/src/main/java/com/rarchives/ripme/utils/Utils.java b/src/main/java/com/rarchives/ripme/utils/Utils.java index a009c7a1e..88eb0c5ec 100644 --- a/src/main/java/com/rarchives/ripme/utils/Utils.java +++ b/src/main/java/com/rarchives/ripme/utils/Utils.java @@ -1,17 +1,32 @@ package com.rarchives.ripme.utils; +import com.rarchives.ripme.ripper.AbstractRipper; +import org.apache.commons.configuration.ConfigurationException; +import org.apache.commons.configuration.PropertiesConfiguration; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.appender.RollingFileAppender; +import org.apache.logging.log4j.core.appender.rolling.DefaultRolloverStrategy; +import org.apache.logging.log4j.core.appender.rolling.SizeBasedTriggeringPolicy; +import org.apache.logging.log4j.core.appender.rolling.TriggeringPolicy; +import org.apache.logging.log4j.core.config.Configuration; +import org.apache.logging.log4j.core.config.LoggerConfig; + +import javax.sound.sampled.AudioSystem; +import javax.sound.sampled.Clip; +import javax.sound.sampled.Line; +import javax.sound.sampled.LineEvent; import java.io.File; 
import java.io.FileNotFoundException; -import java.io.FilenameFilter; import java.io.IOException; -import java.io.InputStream; -import java.io.UnsupportedEncodingException; import java.lang.reflect.Constructor; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; import java.net.URLDecoder; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.nio.file.FileSystem; import java.nio.file.FileSystems; import java.nio.file.Files; @@ -22,58 +37,42 @@ import java.util.Collections; import java.util.Enumeration; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.MissingResourceException; +import java.util.Objects; import java.util.ResourceBundle; import java.util.jar.JarEntry; import java.util.jar.JarFile; import java.util.regex.Matcher; import java.util.regex.Pattern; -import java.util.stream.Stream; - -import javax.sound.sampled.AudioSystem; -import javax.sound.sampled.Clip; -import javax.sound.sampled.Line; -import javax.sound.sampled.LineEvent; - -import com.rarchives.ripme.ripper.AbstractRipper; - -import org.apache.commons.configuration.ConfigurationException; -import org.apache.commons.configuration.PropertiesConfiguration; -import org.apache.log4j.LogManager; -import org.apache.log4j.Logger; -import org.apache.log4j.PropertyConfigurator; /** * Common utility functions used in various places throughout the project. 
*/ public class Utils { - private static final Pattern pattern = Pattern.compile("LabelsBundle_(?[A-Za-z_]+).properties"); - private static final String DEFAULT_LANG = "en_US"; private static final String RIP_DIRECTORY = "rips"; private static final String CONFIG_FILE = "rip.properties"; private static final String OS = System.getProperty("os.name").toLowerCase(); - private static final Logger LOGGER = Logger.getLogger(Utils.class); + private static final Logger LOGGER = LogManager.getLogger(Utils.class); private static final int SHORTENED_PATH_LENGTH = 12; private static PropertiesConfiguration config; - private static HashMap> cookieCache; - private static HashMap magicHash = new HashMap<>(); + private static final HashMap> cookieCache; + private static final HashMap magicHash = new HashMap<>(); - private static ResourceBundle resourceBundle = null; + private static ResourceBundle resourceBundle; static { cookieCache = new HashMap<>(); try { String configPath = getConfigFilePath(); - File file = new File(configPath); + Path file = Paths.get(configPath); - if (!file.exists()) { + if (!Files.exists(file)) { // Use default bundled with .jar configPath = CONFIG_FILE; } @@ -81,7 +80,7 @@ public class Utils { config = new PropertiesConfiguration(configPath); LOGGER.info("Loaded " + config.getPath()); - if (file.exists()) { + if (Files.exists(file)) { // Config was loaded from file if (!config.containsKey("twitter.auth") || !config.containsKey("twitter.max_requests") || !config.containsKey("tumblr.auth") || !config.containsKey("error.skip404") @@ -91,7 +90,7 @@ public class Utils { // Need to reload the default config // See https://github.com/4pr0n/ripme/issues/158 LOGGER.warn("Config does not contain key fields, deleting old config"); - file.delete(); + Files.delete(file); config = new PropertiesConfiguration(CONFIG_FILE); LOGGER.info("Loaded " + config.getPath()); } @@ -108,21 +107,21 @@ public class Utils { * * @return Root directory to save rips to. 
*/ - public static File getWorkingDirectory() { - String currentDir = ""; - try { - currentDir = getJarDirectory().getCanonicalPath() + File.separator + RIP_DIRECTORY + File.separator; - } catch (IOException e) { - LOGGER.error("Error while finding working dir: ", e); - } + public static Path getWorkingDirectory() { + String currentDir = getJarDirectory() + File.separator + RIP_DIRECTORY + File.separator; if (config != null) { currentDir = getConfigString("rips.directory", currentDir); } - File workingDir = new File(currentDir); - if (!workingDir.exists()) { - workingDir.mkdirs(); + Path workingDir = Paths.get(currentDir); + if (!Files.exists(workingDir)) { + try { + Files.createDirectory(workingDir); + } catch (IOException e) { + LOGGER.error("WorkingDir " + workingDir + " not exists, and could not be created. Set to user.home, continue."); + workingDir = Paths.get(System.getProperty("user.home")); + } } return workingDir; } @@ -239,13 +238,13 @@ private static String getMacOSConfigDir() { + File.separator + "ripme"; } - private static File getJarDirectory() { - File jarDirectory = Utils.class.getResource("/rip.properties").toString().contains("jar:") - ? new File(System.getProperty("java.class.path")).getParentFile() - : new File(System.getProperty("user.dir")); + private static Path getJarDirectory() { + Path jarDirectory = Objects.requireNonNull(Utils.class.getResource("/rip.properties")).toString().contains("jar:") + ? Paths.get(System.getProperty("java.class.path")).getParent() + : Paths.get(System.getProperty("user.dir")); if (jarDirectory == null) - jarDirectory = new File("."); + jarDirectory = Paths.get("."); return jarDirectory; } @@ -254,16 +253,8 @@ private static File getJarDirectory() { * Determines if the app is running in a portable mode. i.e. 
on a USB stick */ private static boolean portableMode() { - try { - File file = new File(getJarDirectory().getCanonicalPath() + File.separator + CONFIG_FILE); - if (file.exists() && !file.isDirectory()) { - return true; - } - } catch (IOException e) { - return false; - } - - return false; + Path file = getJarDirectory().resolve(CONFIG_FILE); + return Files.exists(file) && !Files.isDirectory(file); } /** @@ -272,7 +263,7 @@ private static boolean portableMode() { public static String getConfigDir() { if (portableMode()) { try { - return getJarDirectory().getCanonicalPath(); + return getJarDirectory().toAbsolutePath().toString(); } catch (Exception e) { return "."; } @@ -286,7 +277,7 @@ public static String getConfigDir() { return getUnixConfigDir(); try { - return getJarDirectory().getCanonicalPath(); + return getJarDirectory().toAbsolutePath().toString(); } catch (Exception e) { return "."; } @@ -296,8 +287,12 @@ public static String getConfigDir() { * Delete the url history file */ public static void clearURLHistory() { - File file = new File(getURLHistoryFile()); - file.delete(); + Path file = Paths.get(getURLHistoryFile()); + try { + Files.delete(file); + } catch (IOException e) { + e.printStackTrace(); + } } /** @@ -324,16 +319,13 @@ private static String getConfigFilePath() { * @param saveAs The File path * @return saveAs in relation to the CWD */ - public static String removeCWD(File saveAs) { - String prettySaveAs = saveAs.toString(); + public static String removeCWD(Path saveAs) { try { - prettySaveAs = saveAs.getCanonicalPath(); - String cwd = new File(".").getCanonicalPath() + File.separator; - prettySaveAs = prettySaveAs.replace(cwd, "." 
+ File.separator); - } catch (Exception e) { - LOGGER.error("Exception: ", e); + return Paths.get(".").toAbsolutePath().relativize(saveAs).toString(); + } + catch (IllegalArgumentException e) { + return saveAs.toString(); } - return prettySaveAs; } /** @@ -359,7 +351,7 @@ public static String stripURLParameter(String url, String parameter) { if (wasFirstParam) { c = "?"; } - url = url.substring(0, paramIndex) + c + url.substring(nextParam + 1, url.length()); + url = url.substring(0, paramIndex) + c + url.substring(nextParam + 1); } else { url = url.substring(0, paramIndex); } @@ -368,16 +360,6 @@ public static String stripURLParameter(String url, String parameter) { return url; } - /** - * Removes the current working directory from a given filename - * - * @param file Path to the file - * @return 'file' without the leading current working directory - */ - public static String removeCWD(String file) { - return removeCWD(new File(file)); - } - /** * Get a list of all Classes within a package. Works with file system projects * and jar files! Borrowed from StackOverflow, but I don't have a link :[ @@ -410,6 +392,7 @@ public static List> getClassesForPackage(String pkgname) { if (directory != null && directory.exists()) { // Get the list of the files contained in the package String[] files = directory.list(); + assert files != null; for (String file : files) { if (file.endsWith(".class") && !file.contains("$")) { String className = pkgname + '.' 
+ file.substring(0, file.length() - 6); @@ -424,7 +407,7 @@ public static List> getClassesForPackage(String pkgname) { // Load from JAR try { String jarPath = fullPath.replaceFirst("[.]jar[!].*", ".jar").replaceFirst("file:", ""); - jarPath = URLDecoder.decode(jarPath, "UTF-8"); + jarPath = URLDecoder.decode(jarPath, StandardCharsets.UTF_8); JarFile jarFile = new JarFile(jarPath); Enumeration entries = jarFile.entries(); while (entries.hasMoreElements()) { @@ -458,21 +441,23 @@ public static List> getClassesForPackage(String pkgname) { * @return The simplified path to the file. */ public static String shortenPath(String path) { - return shortenPath(new File(path)); + return shortenPath(path); } /** * Shortens the path to a file * - * @param file File object that you want the shortened path of. + * @param path File object that you want the shortened path of. * @return The simplified path to the file. */ - public static String shortenPath(File file) { - String path = removeCWD(file); - if (path.length() < SHORTENED_PATH_LENGTH * 2) { - return path; + public static String shortenPath(Path path) { + Path prettyPath = path.normalize(); + if (prettyPath.toString().length() < SHORTENED_PATH_LENGTH * 2) { + return prettyPath.toString(); } - return path.substring(0, SHORTENED_PATH_LENGTH) + "..." + path.substring(path.length() - SHORTENED_PATH_LENGTH); + return prettyPath.toString().substring(0, SHORTENED_PATH_LENGTH) + + "..." + + prettyPath.toString().substring(prettyPath.toString().length() - SHORTENED_PATH_LENGTH); } /** @@ -486,8 +471,15 @@ public static String filesystemSanitized(String text) { return text; } + /** + * Removes any potentially unsafe characters from a string and truncates it on a maximum length of 100 characters. + * Characters considered safe are alpha numerical characters as well as minus, dot, comma, underscore and whitespace. 
+ * + * @param text The potentially unsafe text + * @return a filesystem safe string + */ public static String filesystemSafe(String text) { - text = text.replaceAll("[^a-zA-Z0-9.-]", "_").replaceAll("__", "_").replaceAll("_+$", ""); + text = text.replaceAll("[^a-zA-Z0-9-.,_ ]", "").trim(); if (text.length() > 100) { text = text.substring(0, 99); } @@ -500,7 +492,7 @@ public static String filesystemSafe(String text) { * @param path - original path entered to be ripped * @return path of existing folder or the original path if not present */ - public static String getOriginalDirectory(String path) { + public static String getOriginalDirectory(String path) throws IOException { int index; if (isUnix() || isMacOS()) { @@ -510,13 +502,15 @@ public static String getOriginalDirectory(String path) { return path; } - String original = path; // needs to be checked if lowercase exists - String lastPart = original.substring(index + 1).toLowerCase(); // setting lowercase to check if it exists + String lastPart = path.substring(index + 1).toLowerCase(); // setting lowercase to check if it exists // Get a List of all Directories and check its lowercase // if file exists return it File file = new File(path.substring(0, index)); - ArrayList names = new ArrayList<>(Arrays.asList(file.list())); + if (!(file.isDirectory() && file.canWrite() && file.canExecute())) { + throw new IOException("Original directory \"" + file + "\" is no directory or not writeable."); + } + ArrayList names = new ArrayList<>(Arrays.asList(Objects.requireNonNull(file.list()))); for (String name : names) { if (name.toLowerCase().equals(lastPart)) { @@ -525,7 +519,8 @@ public static String getOriginalDirectory(String path) { } } - return original; + // otherwise return original path + return path; } /** @@ -536,7 +531,7 @@ public static String getOriginalDirectory(String path) { */ public static String bytesToHumanReadable(int bytes) { float fbytes = (float) bytes; - String[] mags = new String[] { "", "K", "M", 
"G", "T" }; + String[] mags = new String[]{"", "K", "M", "G", "T"}; int magIndex = 0; while (fbytes >= 1024) { fbytes /= 1024; @@ -598,20 +593,32 @@ public static void playSound(String filename) { * Configures root logger, either for FILE output or just console. */ public static void configureLogger() { - LogManager.shutdown(); - String logFile = getConfigBoolean("log.save", false) ? "log4j.file.properties" : "log4j.properties"; - try (InputStream stream = Utils.class.getClassLoader().getResourceAsStream(logFile)) { - if (stream == null) { - PropertyConfigurator.configure("src/main/resources/" + logFile); - } else { - PropertyConfigurator.configure(stream); + LoggerContext ctx = (LoggerContext) LogManager.getContext(false); + Configuration config = ctx.getConfiguration(); + LoggerConfig loggerConfig = config.getLoggerConfig(LogManager.ROOT_LOGGER_NAME); + + // write to ripme.log file if checked in GUI + boolean logSave = getConfigBoolean("log.save", false); + if (logSave) { + LOGGER.debug("add rolling appender ripmelog"); + TriggeringPolicy tp = SizeBasedTriggeringPolicy.createPolicy("20M"); + DefaultRolloverStrategy rs = DefaultRolloverStrategy.newBuilder().withMax("2").build(); + RollingFileAppender rolling = RollingFileAppender.newBuilder() + .setName("ripmelog") + .withFileName("ripme.log") + .withFilePattern("%d{yyyy-MM-dd HH:mm:ss} %p %m%n") + .withPolicy(tp) + .withStrategy(rs) + .build(); + loggerConfig.addAppender(rolling, null, null); + } else { + LOGGER.debug("remove rolling appender ripmelog"); + if (config.getAppender("ripmelog") != null) { + config.getAppender("ripmelog").stop(); } - - LOGGER.info("Loaded " + logFile); - } catch (IOException e) { - LOGGER.error(e.getMessage(), e); + loggerConfig.removeAppender("ripmelog"); } - + ctx.updateLoggers(); // This causes all Loggers to refetch information from their LoggerConfig. 
} /** @@ -655,18 +662,13 @@ public static Map parseUrlQuery(String query) { String[] parts = query.split("&"); int pos; - try { - for (String part : parts) { - if ((pos = part.indexOf('=')) >= 0) { - res.put(URLDecoder.decode(part.substring(0, pos), "UTF-8"), - URLDecoder.decode(part.substring(pos + 1), "UTF-8")); - } else { - res.put(URLDecoder.decode(part, "UTF-8"), ""); - } + for (String part : parts) { + if ((pos = part.indexOf('=')) >= 0) { + res.put(URLDecoder.decode(part.substring(0, pos), StandardCharsets.UTF_8), + URLDecoder.decode(part.substring(pos + 1), StandardCharsets.UTF_8)); + } else { + res.put(URLDecoder.decode(part, StandardCharsets.UTF_8), ""); } - } catch (UnsupportedEncodingException e) { - // Shouldn't happen since UTF-8 is required to be supported - throw new RuntimeException(e); } return res; @@ -687,20 +689,15 @@ public static String parseUrlQuery(String query, String key) { String[] parts = query.split("&"); int pos; - try { - for (String part : parts) { - if ((pos = part.indexOf('=')) >= 0) { - if (URLDecoder.decode(part.substring(0, pos), "UTF-8").equals(key)) { - return URLDecoder.decode(part.substring(pos + 1), "UTF-8"); - } - - } else if (URLDecoder.decode(part, "UTF-8").equals(key)) { - return ""; + for (String part : parts) { + if ((pos = part.indexOf('=')) >= 0) { + if (URLDecoder.decode(part.substring(0, pos), StandardCharsets.UTF_8).equals(key)) { + return URLDecoder.decode(part.substring(pos + 1), StandardCharsets.UTF_8); } + + } else if (URLDecoder.decode(part, StandardCharsets.UTF_8).equals(key)) { + return ""; } - } catch (UnsupportedEncodingException e) { - // Shouldn't happen since UTF-8 is required to be supported - throw new RuntimeException(e); } return null; @@ -731,20 +728,19 @@ public static Map getCookies(String host) { * of the UI. * * @return Returns the default resource bundle using the language specified in - * the config file. + * the config file. 
*/ public static ResourceBundle getResourceBundle(String langSelect) { if (langSelect == null) { if (!getConfigString("lang", "").equals("")) { - String[] langCode = getConfigString("lang", "").split("_"); LOGGER.info("Setting locale to " + getConfigString("lang", "")); - return ResourceBundle.getBundle("LabelsBundle", new Locale(langCode[0], langCode[1]), + return ResourceBundle.getBundle("LabelsBundle", Locale.forLanguageTag(getConfigString("lang", "")), new UTF8Control()); } } else { - String[] langCode = langSelect.split("_"); - LOGGER.info("Setting locale to " + langSelect); - return ResourceBundle.getBundle("LabelsBundle", new Locale(langCode[0], langCode[1]), new UTF8Control()); + String[] langCode = langSelect.split("-"); + LOGGER.info("set locale, langcoe: {}, selected langauge: {}, locale: {}", langCode, langSelect, Locale.forLanguageTag(langSelect)); + return ResourceBundle.getBundle("LabelsBundle", Locale.forLanguageTag(langSelect), new UTF8Control()); } try { LOGGER.info("Setting locale to default"); @@ -757,6 +753,7 @@ public static ResourceBundle getResourceBundle(String langSelect) { public static void setLanguage(String langSelect) { resourceBundle = getResourceBundle(langSelect); + LOGGER.info("Selected resource bundle locale: {}, from {}", resourceBundle.getLocale().toString(), langSelect); } public static String getSelectedLanguage() { @@ -765,13 +762,15 @@ public static String getSelectedLanguage() { // All the langs ripme has been translated into public static String[] getSupportedLanguages() { + final Pattern pattern = Pattern.compile("LabelsBundle_(?[A-Za-z_]+).properties"); + final String DEFAULT_LANG = "en-US"; ArrayList filesList = new ArrayList<>(); try { - URI uri = Utils.class.getResource("/rip.properties").toURI(); + URI uri = Objects.requireNonNull(Utils.class.getResource("/rip.properties")).toURI(); Path myPath; if (uri.getScheme().equals("jar")) { - FileSystem fileSystem = FileSystems.newFileSystem(uri, Collections.emptyMap()); + 
FileSystem fileSystem = FileSystems.newFileSystem(uri, Collections.emptyMap()); myPath = fileSystem.getPath("/"); } else { myPath = Paths.get(uri).getParent(); @@ -784,19 +783,19 @@ public static String[] getSupportedLanguages() { for (int i = 0; i < filesList.size(); i++) { Matcher matcher = pattern.matcher(filesList.get(i).toString()); if (matcher.find()) - langs[i] = matcher.group("lang"); + langs[i] = matcher.group("lang").replace("_", "-"); } return langs; } catch (Exception e) { e.printStackTrace(); // On error return default language - return new String[] { DEFAULT_LANG }; + return new String[]{DEFAULT_LANG}; } } public static String getLocalizedString(String key) { - LOGGER.debug(String.format("Getting key %s in %s value %s", key, getSelectedLanguage(), + LOGGER.debug(String.format("Key %s in %s is: %s", key, getSelectedLanguage(), resourceBundle.getString(key))); return resourceBundle.getString(key); } @@ -809,11 +808,10 @@ public static String getLocalizedString(String key) { * @param bytesCompleted How many bytes have been downloaded * @param bytesTotal The total size of the file that is being * downloaded - * @return Returns the formatted status text for rippers using the byte progress - * bar + * @return Returns the formatted status text for rippers using the byte progresbar */ public static String getByteStatusText(int completionPercentage, int bytesCompleted, int bytesTotal) { - return String.valueOf(completionPercentage) + "% - " + Utils.bytesToHumanReadable(bytesCompleted) + " / " + return completionPercentage + "% - " + Utils.bytesToHumanReadable(bytesCompleted) + " / " + Utils.bytesToHumanReadable(bytesTotal); } @@ -830,46 +828,29 @@ public static String getEXTFromMagic(byte[] magic) { } private static void initialiseMagicHashMap() { - magicHash.put(ByteBuffer.wrap(new byte[] { -1, -40, -1, -37, 0, 0, 0, 0 }), "jpeg"); - magicHash.put(ByteBuffer.wrap(new byte[] { -119, 80, 78, 71, 13, 0, 0, 0 }), "png"); + magicHash.put(ByteBuffer.wrap(new 
byte[]{-1, -40, -1, -37, 0, 0, 0, 0}), "jpeg"); + magicHash.put(ByteBuffer.wrap(new byte[]{-119, 80, 78, 71, 13, 0, 0, 0}), "png"); } // Checks if a file exists ignoring it's extension. - // Code from: https://stackoverflow.com/a/17698068 - public static boolean fuzzyExists(File folder, String fileName) { - if (!folder.exists()) { - return false; - } - File[] listOfFiles = folder.listFiles(); - if (listOfFiles == null) { - return false; - } + public static boolean fuzzyExists(Path folder, String filename) { + return Files.exists(folder.resolve(filename)); + } - for (File file : listOfFiles) { - if (file.isFile()) { - String[] filename = file.getName().split("\\.(?=[^\\.]+$)"); // split filename from it's extension - if (filename[0].equalsIgnoreCase(fileName)) { - return true; - } - } - } - return false; + public static Path getPath(String pathToSanitize) { + return Paths.get(sanitizeSaveAs(pathToSanitize)); } public static String sanitizeSaveAs(String fileNameToSan) { - return fileNameToSan.replaceAll("[\\\\/:*?\"<>|]", "_"); + return fileNameToSan.replaceAll("[\\\\:*?\"<>|]", "_"); } - public static File shortenSaveAsWindows(String ripsDirPath, String fileName) throws FileNotFoundException { - // int ripDirLength = ripsDirPath.length(); - // int maxFileNameLength = 260 - ripDirLength; - // LOGGER.info(maxFileNameLength); + public static Path shortenSaveAsWindows(String ripsDirPath, String fileName) throws FileNotFoundException { LOGGER.error("The filename " + fileName + " is to long to be saved on this file system."); LOGGER.info("Shortening filename"); String fullPath = ripsDirPath + File.separator + fileName; // How long the path without the file name is int pathLength = ripsDirPath.length(); - int fileNameLength = fileName.length(); if (pathLength == 260) { // We've reached the max length, there's nothing more we can do throw new FileNotFoundException("File path is too long for this OS"); @@ -879,11 +860,17 @@ public static File shortenSaveAsWindows(String 
ripsDirPath, String fileName) thr // file extension String fileExt = saveAsSplit[saveAsSplit.length - 1]; // The max limit for paths on Windows is 260 chars - LOGGER.info(fullPath.substring(0, 259 - pathLength - fileExt.length() + 1) + "." + fileExt); fullPath = fullPath.substring(0, 259 - pathLength - fileExt.length() + 1) + "." + fileExt; LOGGER.info(fullPath); LOGGER.info(fullPath.length()); - return new File(fullPath); + return Paths.get(fullPath); } + public static void sleep(long time) { + try { + Thread.sleep(time); + } catch (final InterruptedException e1) { + e1.printStackTrace(); + } + } } diff --git a/src/main/resources/LabelsBundle.properties b/src/main/resources/LabelsBundle.properties index 575f4f8e6..983086c29 100644 --- a/src/main/resources/LabelsBundle.properties +++ b/src/main/resources/LabelsBundle.properties @@ -12,7 +12,8 @@ check.for.updates = Check for updates auto.update = Auto-update? max.download.threads = Maximum download threads: timeout.mill = Timeout (in milliseconds): -retry.download.count = Retry download count +retry.download.count = Retry download count: +retry.sleep.mill = Wait between retries (in milliseconds): overwrite.existing.files = Overwrite existing files? sound.when.rip.completes = Sound when rip completes preserve.order = Preserve order diff --git a/src/main/resources/LabelsBundle_de_DE.properties b/src/main/resources/LabelsBundle_de_DE.properties index 61461abaf..b13a0bb9d 100644 --- a/src/main/resources/LabelsBundle_de_DE.properties +++ b/src/main/resources/LabelsBundle_de_DE.properties @@ -10,9 +10,9 @@ Configuration = Konfiguration current.version = Aktuelle Version check.for.updates = Suche nach Aktualisierungen auto.update = Automatisch Aktualisieren? 
-max.download.threads = Maximum download threads -timeout.mill = Timeout (in milliseconds): -retry.download.count = Anzahl der Downloadversuche +max.download.threads = Maximale Download-Threads: +timeout.mill = Timeout (in Milliseconds): +retry.download.count = Anzahl der Downloadversuche: overwrite.existing.files = Überschreibe bereits existierende Dateien? sound.when.rip.completes = Ton abspielen bei fertigem Download preserve.order = Reihenfolge beibehalten @@ -24,7 +24,7 @@ autorip.from.clipboard = Automatisch Downloaden von der Zwischenablage save.descriptions = Speichere Beschreibungen prefer.mp4.over.gif = Bevorzuge MP4 über GIF restore.window.position = Wieder herstellen der Fensterposition -remember.url.history = Erinnere URL Verlauf +remember.url.history = Speichere URL Verlauf loading.history.from = Lade Verlauf von # Misc UI keys @@ -32,11 +32,11 @@ loading.history.from = Lade Verlauf von loading.history.from.configuration = Lade Verlauf aus Konfiguration interrupted.while.waiting.to.rip.next.album = Unterbrochen während Download des nächsten Albums inactive = Inaktiv -re-rip.checked = Re-rip Überprüft +re-rip.checked = Re-rip Ausgewählte remove = Entfernen clear = Leeren -download.url.list = Download url list -select.save.dir = Select Save Directory +download.url.list = Download URL Liste +select.save.dir = Wähle Zielverzeichnis # Keys for the logs generated by DownloadFileThread @@ -54,4 +54,4 @@ http.status.exception = HTTP status exception exception.while.downloading.file = Exception while downloading file failed.to.download = Failed to download skipping = Skipping -file.already.exists = file already exists \ No newline at end of file +file.already.exists = file already exists diff --git a/src/main/resources/LabelsBundle_el_GR.properties b/src/main/resources/LabelsBundle_el_GR.properties index 573e76e68..14656e877 100644 --- a/src/main/resources/LabelsBundle_el_GR.properties +++ b/src/main/resources/LabelsBundle_el_GR.properties @@ -12,7 +12,7 @@ 
check.for.updates = Έλεγχος για ενημερώσεις auto.update = Αυτόματη ενημέρωση? max.download.threads = Μέγιστος αριθμός παράλληλων συνδέσεων: timeout.mill = Λήξη (σε χιλιοστά του δευτερολέπτου): -retry.download.count = Αριθμός επανάληψεων μεταφόρτωσης +retry.download.count = Αριθμός επανάληψεων μεταφόρτωσης: overwrite.existing.files = Να αντικατασταθούν τα υπάρχοντα αρχεία? sound.when.rip.completes = Ήχος όταν το rip ολοκληρωθεί preserve.order = Διατήρηση σειράς diff --git a/src/main/resources/LabelsBundle_es_ES.properties b/src/main/resources/LabelsBundle_es_ES.properties index c178ec79c..fea84e5d5 100644 --- a/src/main/resources/LabelsBundle_es_ES.properties +++ b/src/main/resources/LabelsBundle_es_ES.properties @@ -4,59 +4,73 @@ created = creado modified = modificado queue = Cola Configuration = Configuracion +open = Abrir # Keys for the Configuration menu - current.version = Version Actual check.for.updates = Buscar actualizaciones -auto.update = Auto-actualizar? -max.download.threads = Maximos procesos de descarga -timeout.mill = Timeout (in milliseconds): -retry.download.count = Numero de reintentos de descarga +auto.update = Actualizar automáticamente? +max.download.threads = Número de descargas simultáneas: +timeout.mill = Tiempo máximo de espera (milisegundos): +retry.download.count = Número de reintentos de descarga: +retry.sleep.mill = Espera entre reintentos (milisegundos): overwrite.existing.files = Sobreescribir archivos existentes? 
-sound.when.rip.completes = Sonar cuando el Rip termina -preserve.order = Mantener orden +sound.when.rip.completes = Notificar cuando el rip termina +preserve.order = Mantener el orden save.logs = Guardar logs -notification.when.rip.starts = Notificar cuando el Rip comienza +notification.when.rip.starts = Notificar cuando el rip comienza save.urls.only = Guardar solamente URLs -save.album.titles = Guardar titulos de albunes -autorip.from.clipboard = Autorip desde Portapapeles +save.album.titles = Guardar títulos de álbumes +autorip.from.clipboard = Autorip desde el portapapeles save.descriptions = Guardar descripciones prefer.mp4.over.gif = Preferir MP4 sobre GIF -restore.window.position = Restaurar posicion de ventana +restore.window.position = Restaurar posicion de la ventana remember.url.history = Recordar historia URL loading.history.from = Cargando historia desde # Queue keys queue.remove.all = Eliminar todos los elementos -queue.validation = ¿Esta seguro que desea eliminar todos los elementos de la lista? +queue.validation = ¿Está seguro que desea eliminar todos los elementos de la lista? queue.remove.selected = Eliminar elementos seleccionados -# Misc UI keys +# History +re-rip.checked = Re-rip Marcados +remove = Remover +clear = Limpiar +history.check.all = Marcar Todos +history.check.none = Desmarcar Todos +history.check.selected = Marcar Seleccionados +history.uncheck.selected = Desmarcar Seleccionados +history.load.failed.warning = RipMe falló al cargar la historia de historyFile.getAbsolutePath() \n\nError: %s\n\nSi cierras RipMe los contenidos de este archivo se sobreescribirán,\nhaz un backup antes de cerrar RipMe! +history.load.none = El historial está vacío. Ripea algunos álbumes primero +history.load.none.checked = Ninguna entrada del historial fue 'marcada'. Selecciona una entrada clickeando la casilla a la derecha de la URL o haz click derecho sobre una URL para marcar/desmarcar todas las entradas. 
+ +# TrayIcon +tray.show = Mostrar +tray.hide = Esconder +tray.autorip = Autorip desde el portapapeles +tray.exit = Salida -loading.history.from.configuration = Cargando historia desde la configuracion -interrupted.while.waiting.to.rip.next.album = Interrumpido esperando el Rip del proximo album +# Misc UI keys +loading.history.from.configuration = Cargando historia desde la configuración +interrupted.while.waiting.to.rip.next.album = Interrumpido esperando el rip del próximo álbum inactive = Inactivo -re-rip.checked = Re-rip marcado -remove = Quitar -clear = Limpiar -download.url.list = Download url list -select.save.dir = Select Save Directory +download.url.list = Lista de URLs a descargar +select.save.dir = Seleccione el directorio de guardado # Keys for the logs generated by DownloadFileThread - -nonretriable.status.code = Non-retriable status code -retriable.status.code = Retriable status code -server.doesnt.support.resuming.downloads = Server doesn't support resuming downloads +nonretriable.status.code = Código de estado no recuperable +retriable.status.code = Código de estado recuperable +server.doesnt.support.resuming.downloads = El servidor no soporta resumir las descargas # A "magic number" can also be called a file signature -was.unable.to.get.content.type.using.magic.number = Was unable to get content type using magic number -magic.number.was = Magic number was -deleting.existing.file = Deleting existing file -request.properties = Request properties -download.interrupted = Download interrupted -exceeded.maximum.retries = Exceeded maximum retries -http.status.exception = HTTP status exception -exception.while.downloading.file = Exception while downloading file -failed.to.download = Failed to download -skipping = Skipping -file.already.exists = file already exists \ No newline at end of file +was.unable.to.get.content.type.using.magic.number = Imposible obtener el tipo de contenido utilizando el número mágico +magic.number.was = El número mágico era 
+deleting.existing.file = Eliminando el archivo existente +request.properties = Propiedades del pedido +download.interrupted = Descarga interrumpida +exceeded.maximum.retries = Máximo número de reintentos excedido +http.status.exception = Error de estado HTTP +exception.while.downloading.file = Error al descargar archivo +failed.to.download = Descarga fallida +skipping = Saltando +file.already.exists = el fichero ya existe \ No newline at end of file diff --git a/src/main/resources/LabelsBundle_fi_FI.properties b/src/main/resources/LabelsBundle_fi_FI.properties index 6edd4e45f..c823cf05d 100644 --- a/src/main/resources/LabelsBundle_fi_FI.properties +++ b/src/main/resources/LabelsBundle_fi_FI.properties @@ -10,9 +10,9 @@ Configuration = Asetukset current.version = Nykyinen versio check.for.updates = Tarkista päivitykset auto.update = Automaattipäivitys? -max.download.threads = Yhtäaikaiset lataukset +max.download.threads = Yhtäaikaiset lataukset: timeout.mill = Aikakatkaisu (millisekunneissa): -retry.download.count = Latauksen uudelleenyritykset +retry.download.count = Latauksen uudelleenyritykset: overwrite.existing.files = Korvaa nykyiset tiedostot? sound.when.rip.completes = Valmistumisääni preserve.order = Pidä järjestys diff --git a/src/main/resources/LabelsBundle_porrisavvo_FI.properties b/src/main/resources/LabelsBundle_fi_FI_porrisavo.properties similarity index 95% rename from src/main/resources/LabelsBundle_porrisavvo_FI.properties rename to src/main/resources/LabelsBundle_fi_FI_porrisavo.properties index a2ba056e4..653709ab8 100644 --- a/src/main/resources/LabelsBundle_porrisavvo_FI.properties +++ b/src/main/resources/LabelsBundle_fi_FI_porrisavo.properties @@ -10,9 +10,9 @@ Configuration = Assetuksse current.version = Nykyne versijjo check.for.updates = Tarkist update auto.update = Automaatpäivvitys? 
-max.download.threads = Yht'aikasse ripi +max.download.threads = Yht'aikasse ripi: timeout.mill = Timeout (millisekois): -retry.download.count = Ripi retry count +retry.download.count = Ripi retry count: overwrite.existing.files = Korvvaa nykysse filu? sound.when.rip.completes = Valmistummis'ään preserve.order = Pir järestys diff --git a/src/main/resources/LabelsBundle_fr_CH.properties b/src/main/resources/LabelsBundle_fr_CH.properties index b489e3e3a..1b035dac5 100644 --- a/src/main/resources/LabelsBundle_fr_CH.properties +++ b/src/main/resources/LabelsBundle_fr_CH.properties @@ -10,9 +10,9 @@ Configuration = Configuration current.version = Version actuelle check.for.updates = Vérifier mises à jour auto.update = Mises à jour automatiques? -max.download.threads = Nombre de téléchargements parallèles maximum +max.download.threads = Nombre de téléchargements parallèles maximum: timeout.mill = Délai d'expiration (en millisecondes): -retry.download.count = Nombre d'essais téléchargement +retry.download.count = Nombre d'essais téléchargement: overwrite.existing.files = Remplacer fichiers existants ? sound.when.rip.completes = Son lorsque le rip est terminé preserve.order = Conserver l'ordre diff --git a/src/main/resources/LabelsBundle_in_ID.properties b/src/main/resources/LabelsBundle_in_ID.properties index b5e773d5b..778e72abf 100644 --- a/src/main/resources/LabelsBundle_in_ID.properties +++ b/src/main/resources/LabelsBundle_in_ID.properties @@ -10,9 +10,9 @@ Configuration = Pengaturan current.version = Versi saat ini check.for.updates = Periksa update auto.update = Update otomatis? -max.download.threads = Thread unduh maksimal +max.download.threads = Thread unduh maksimal: timeout.mill = Batas waktu (dalam milidetik): -retry.download.count = Jumlah percobaan unduh +retry.download.count = Jumlah percobaan unduh: overwrite.existing.files = Timpa file yang ada? 
sound.when.rip.completes = Hidupkan suara saat rip selesai preserve.order = Pertahankan urutan diff --git a/src/main/resources/LabelsBundle_it_IT.properties b/src/main/resources/LabelsBundle_it_IT.properties index de00612b7..192d777a2 100644 --- a/src/main/resources/LabelsBundle_it_IT.properties +++ b/src/main/resources/LabelsBundle_it_IT.properties @@ -12,7 +12,7 @@ check.for.updates = Controlla aggiornamenti auto.update = Aggiornamento automatico? max.download.threads = Thread di download massimi: timeout.mill = Timeout (in millisecondi): -retry.download.count = Tentativi di download +retry.download.count = Tentativi di download: overwrite.existing.files = Sovrascrivi file esistenti? sound.when.rip.completes = Suono al completamento del rip preserve.order = Preserva ordine diff --git a/src/main/resources/LabelsBundle_kr_KR.properties b/src/main/resources/LabelsBundle_kr_KR.properties index e01100552..984da15b4 100644 --- a/src/main/resources/LabelsBundle_kr_KR.properties +++ b/src/main/resources/LabelsBundle_kr_KR.properties @@ -10,9 +10,9 @@ Configuration = \uAD6C\uC131 current.version = \uD604\uC7AC \uBC84\uC804 check.for.updates = \uC5C5\uB370\uC774\uD2B8 \uD655\uC778 auto.update = \uC790\uB3D9 \uC5C5\uB370\uC774\uD2B8 -max.download.threads = \uCD5C\uB300 \uB2E4\uC6B4\uB85C\uB4DC \uC4F0\uB808\uB4DC \uC218 +max.download.threads = \uCD5C\uB300 \uB2E4\uC6B4\uB85C\uB4DC \uC4F0\uB808\uB4DC \uC218: timeout.mill = \uC2DC\uAC04 \uC81C\uD55C (\uBC00\uB9AC\uCD08): -retry.download.count = \uB2E4\uC6B4\uB85C\uB4DC \uC7AC\uC2DC\uB3C4 \uD68C\uC218 +retry.download.count = \uB2E4\uC6B4\uB85C\uB4DC \uC7AC\uC2DC\uB3C4 \uD68C\uC218: overwrite.existing.files = \uC911\uBCF5\uD30C\uC77C \uB36E\uC5B4\uC4F0\uAE30 sound.when.rip.completes = \uC644\uB8CC\uC2DC \uC54C\uB9BC preserve.order = \uBA85\uB839 \uAE30\uC5B5\uD558\uAE30 diff --git a/src/main/resources/LabelsBundle_nl_NL.properties b/src/main/resources/LabelsBundle_nl_NL.properties index e1d9d61c3..6cec1f73f 100644 --- 
a/src/main/resources/LabelsBundle_nl_NL.properties +++ b/src/main/resources/LabelsBundle_nl_NL.properties @@ -10,9 +10,9 @@ Configuration = Configuratie current.version = Huidige versie check.for.updates = Controleer op updates auto.update = Auto-update? -max.download.threads = Maximale downloadthreads +max.download.threads = Maximale downloadthreads: timeout.mill = Timeout (in milliseconden): -retry.download.count = Aantal keren opnieuw proberen te downloaden +retry.download.count = Aantal keren opnieuw proberen te downloaden: overwrite.existing.files = Bestaande bestanden overschrijven? sound.when.rip.completes = Geluid wanneer rip klaar is preserve.order = Volgorde behouden diff --git a/src/main/resources/LabelsBundle_pl_PL.properties b/src/main/resources/LabelsBundle_pl_PL.properties index 4ba4590ea..a3bbbb380 100644 --- a/src/main/resources/LabelsBundle_pl_PL.properties +++ b/src/main/resources/LabelsBundle_pl_PL.properties @@ -1,59 +1,59 @@ -Log = Logi -History = Historia -created = Stworzono -modified = Zmodyfikowano -queue = Kolejka -Configuration = Konfiguracja - -# Keys for the Configuration menu - -current.version = Obecna Wersja -check.for.updates = Sprawdź dostępność aktualizacji -auto.update = Auto Aktualizacja? -max.download.threads = Maksymalna Ilośc Pobieranych Plików: -timeout.mill = Opóźnienie (w milisekundach): -retry.download.count = Liczba ponownych pobrań -overwrite.existing.files = Nadpisać istniejące pliki? -sound.when.rip.completes = Dźwięk po zakończeniu -preserve.order = Zachować porządek -save.logs = Zapisz Logi -notification.when.rip.starts = Powiadomienie przy uruchomieniu pobierania -save.urls.only = Zapisz tylko linki -save.album.titles = Zapisz nazwy albumów -autorip.from.clipboard = Auto pobieranie ze schowka -save.descriptions = Zapisz opis -prefer.mp4.over.gif = Preferuj MP4 od GIF -restore.window.position = Przywróć pozycję okna -remember.url.history = Zapamiętaj historię linków -loading.history.from = Załaduj historię z... 
- -# Misc UI keys - -loading.history.from.configuration = Załaduj historię z ustawień -interrupted.while.waiting.to.rip.next.album = Przerwany podczas oczekiwania na zgrywanie następnego albumu -inactive = Nieaktywny -re-rip.checked = Sprawdź pobrane ripy -remove = Usuń -clear = Wyczyść -download.url.list = Pobierz listę linków -select.save.dir = Wybierz ścieżkę zapisu - -# Keys for the logs generated by DownloadFileThread - -nonretriable.status.code = Nieodwracalny kod statusu -retriable.status.code = Odzyskiwanie kodu statusu -server.doesnt.support.resuming.downloads = Serwer nie obsługuje wznowienia pobierania - -# A "magic number" can also be called a file signature - -was.unable.to.get.content.type.using.magic.number = Nie udało się uzyskać typu zawartości za pomocą magicznej liczby -magic.number.was = Magiczną liczbą była -deleting.existing.file = Usuwanie istniejących plików -request.properties = Poproś o uprawnienia -download.interrupted = Pobieranie przerwane -exceeded.maximum.retries = Spodziewana ilośc powtórzeń -http.status.exception = Wyjątek statusu http -exception.while.downloading.file = Wystąpił problem podczas pobierania pliku -failed.to.download = Nie można pobrać pliku -skipping = Pomijanie +Log = Logi +History = Historia +created = Stworzono +modified = Zmodyfikowano +queue = Kolejka +Configuration = Konfiguracja + +# Keys for the Configuration menu + +current.version = Obecna Wersja +check.for.updates = Sprawdź dostępność aktualizacji +auto.update = Auto Aktualizacja? +max.download.threads = Maksymalna Ilośc Pobieranych Plików: +timeout.mill = Opóźnienie (w milisekundach): +retry.download.count = Liczba ponownych pobrań: +overwrite.existing.files = Nadpisać istniejące pliki? 
+sound.when.rip.completes = Dźwięk po zakończeniu +preserve.order = Zachować porządek +save.logs = Zapisz Logi +notification.when.rip.starts = Powiadomienie przy uruchomieniu pobierania +save.urls.only = Zapisz tylko linki +save.album.titles = Zapisz nazwy albumów +autorip.from.clipboard = Auto pobieranie ze schowka +save.descriptions = Zapisz opis +prefer.mp4.over.gif = Preferuj MP4 od GIF +restore.window.position = Przywróć pozycję okna +remember.url.history = Zapamiętaj historię linków +loading.history.from = Załaduj historię z... + +# Misc UI keys + +loading.history.from.configuration = Załaduj historię z ustawień +interrupted.while.waiting.to.rip.next.album = Przerwany podczas oczekiwania na zgrywanie następnego albumu +inactive = Nieaktywny +re-rip.checked = Sprawdź pobrane ripy +remove = Usuń +clear = Wyczyść +download.url.list = Pobierz listę linków +select.save.dir = Wybierz ścieżkę zapisu + +# Keys for the logs generated by DownloadFileThread + +nonretriable.status.code = Nieodwracalny kod statusu +retriable.status.code = Odzyskiwanie kodu statusu +server.doesnt.support.resuming.downloads = Serwer nie obsługuje wznowienia pobierania + +# A "magic number" can also be called a file signature + +was.unable.to.get.content.type.using.magic.number = Nie udało się uzyskać typu zawartości za pomocą magicznej liczby +magic.number.was = Magiczną liczbą była +deleting.existing.file = Usuwanie istniejących plików +request.properties = Poproś o uprawnienia +download.interrupted = Pobieranie przerwane +exceeded.maximum.retries = Spodziewana ilośc powtórzeń +http.status.exception = Wyjątek statusu http +exception.while.downloading.file = Wystąpił problem podczas pobierania pliku +failed.to.download = Nie można pobrać pliku +skipping = Pomijanie file.already.exists = Plik już istnieje \ No newline at end of file diff --git a/src/main/resources/LabelsBundle_pt_BR.properties b/src/main/resources/LabelsBundle_pt_BR.properties index 882092353..91c262b7f 100644 --- 
a/src/main/resources/LabelsBundle_pt_BR.properties +++ b/src/main/resources/LabelsBundle_pt_BR.properties @@ -12,7 +12,7 @@ check.for.updates = Verificar atualizações auto.update = Atualização automática? max.download.threads = Número máximo de conexões: timeout.mill = Tempo limite (em milissegundos): -retry.download.count = Número de tentativas +retry.download.count = Número de tentativas: overwrite.existing.files = Sobrescrever arquivos existentes? sound.when.rip.completes = Som quando terminar o rip preserve.order = Preservar ordem diff --git a/src/main/resources/LabelsBundle_pt_PT.properties b/src/main/resources/LabelsBundle_pt_PT.properties index a0058524e..500049ce9 100644 --- a/src/main/resources/LabelsBundle_pt_PT.properties +++ b/src/main/resources/LabelsBundle_pt_PT.properties @@ -10,9 +10,9 @@ open = Abrir current.version = Versão atual check.for.updates = Verificar atualizações auto.update = Atualização automática? -max.download.threads = Número máximo de processos de transferência +max.download.threads = Número máximo de processos de transferência: timeout.mill = Tempo de espera (em milissegundos): -retry.download.count = Número de novas tentativas de transferência +retry.download.count = Número de novas tentativas de transferência: overwrite.existing.files = Sobrescrever ficheiros existentes? sound.when.rip.completes = Notificar quando o rip é concluído preserve.order = Manter a ordem diff --git a/src/main/resources/LabelsBundle_ru_RU.properties b/src/main/resources/LabelsBundle_ru_RU.properties index a3100df85..f354d15a7 100644 --- a/src/main/resources/LabelsBundle_ru_RU.properties +++ b/src/main/resources/LabelsBundle_ru_RU.properties @@ -12,7 +12,7 @@ check.for.updates = Проверить обновления auto.update = Автообновление max.download.threads = Максимальное число потоков: timeout.mill = Задержка (в миллисекундах): -retry.download.count = Число повторов +retry.download.count = Число повторов: overwrite.existing.files = Перезаписать существующие файлы? 
sound.when.rip.completes = Звук при завершении preserve.order = Сохранять порядок diff --git a/src/LabelsBundle_zh_CN.properties b/src/main/resources/LabelsBundle_zh_CN.properties similarity index 96% rename from src/LabelsBundle_zh_CN.properties rename to src/main/resources/LabelsBundle_zh_CN.properties index 994efcf8b..7cf6d7810 100644 --- a/src/LabelsBundle_zh_CN.properties +++ b/src/main/resources/LabelsBundle_zh_CN.properties @@ -1,75 +1,75 @@ -Log = 日志 -History = 历史 -created = 创建时间 -modified = 修改时间 -queue = 队列 -Configuration = 配置 -open = 打开 - -# Keys for the Configuration menu -current.version = 当前版本 -check.for.updates = 检查更新 -auto.update = 自动更新? -max.download.threads = 最大下载线程数: -timeout.mill = 超时(毫秒): -retry.download.count = 重试下载次数 -overwrite.existing.files = 覆盖现有文件? -sound.when.rip.completes = 抓取完成时播放声音 -preserve.order = 保持顺序 -save.logs = 保存日志 -notification.when.rip.starts = 通知抓取开始 -save.urls.only = 仅保存 URL -save.album.titles = 保存专辑标题 -autorip.from.clipboard = 监视剪贴板上的 URL -save.descriptions = 保存描述 -prefer.mp4.over.gif = 首选 MP4 而非 GIF -restore.window.position = 恢复窗口位置 -remember.url.history = 记住 URL 历史 -loading.history.from = 加载历史从 - -# Queue keys -queue.remove.all = 移除全部 -queue.validation = 您确定要移除队列内的全部项目? -queue.remove.selected = 移除所选项目 - -# History -re-rip.checked = 重新抓取选中的项目 -remove = 移除 -clear = 清除 -history.check.all = 选中全部 -history.check.none = 取消选中全部 -history.check.selected = 选中所选项目 -history.uncheck.selected = 取消选中所选项目 -history.load.failed.warning = RipMe 加载位于 historyFile.getAbsolutePath() 的历史文件失败\n\n错误:%s\n\n关闭 RipMe 会自动覆盖此文件的内容,\n请在关闭 RipMe 前备份它! 
-history.load.none = 无可重新抓取的历史条目。请先抓取一些专辑 -history.load.none.checked = 未 '选中' 任何历史条目,请通过选中所需 URL 前面的复选框或URL 的右键菜单以选中所需条目 - -# TrayIcon -tray.show = 显示 -tray.hide = 隐藏 -tray.autorip = 监视剪贴板上的 URL -tray.exit = 退出 - -# Misc UI keys -loading.history.from.configuration = 从配置加载历史 -interrupted.while.waiting.to.rip.next.album = 等候抓取下一专辑期间发生中断 -inactive = 非活动 -download.url.list = 下载 URL 列表 -select.save.dir = 选择保存目录 - -# Keys for the logs generated by DownloadFileThread -nonretriable.status.code = 非可重试状态代码 -retriable.status.code = 可重试状态代码 -server.doesnt.support.resuming.downloads = 服务器不支持继续下载(续传) -# A "magic number" can also be called a file signature -was.unable.to.get.content.type.using.magic.number = 不能使用幻数获取内容类型 -magic.number.was = 幻数为 -deleting.existing.file = 删除现有文件 -request.properties = 请求属性 -download.interrupted = 下载中断 -exceeded.maximum.retries = 超过最大重试次数 -http.status.exception = HTTP 状态意外 -exception.while.downloading.file = 下载文件时发生意外 -failed.to.download = 下载失败 -skipping = 跳过 +Log = 日志 +History = 历史 +created = 创建时间 +modified = 修改时间 +queue = 队列 +Configuration = 配置 +open = 打开 + +# Keys for the Configuration menu +current.version = 当前版本 +check.for.updates = 检查更新 +auto.update = 自动更新? +max.download.threads = 最大下载线程数: +timeout.mill = 超时(毫秒): +retry.download.count = 重试下载次数: +overwrite.existing.files = 覆盖现有文件? +sound.when.rip.completes = 抓取完成时播放声音 +preserve.order = 保持顺序 +save.logs = 保存日志 +notification.when.rip.starts = 通知抓取开始 +save.urls.only = 仅保存 URL +save.album.titles = 保存专辑标题 +autorip.from.clipboard = 监视剪贴板上的 URL +save.descriptions = 保存描述 +prefer.mp4.over.gif = 首选 MP4 而非 GIF +restore.window.position = 恢复窗口位置 +remember.url.history = 记住 URL 历史 +loading.history.from = 加载历史从 + +# Queue keys +queue.remove.all = 移除全部 +queue.validation = 您确定要移除队列内的全部项目? 
+queue.remove.selected = 移除所选项目 + +# History +re-rip.checked = 重新抓取选中的项目 +remove = 移除 +clear = 清除 +history.check.all = 选中全部 +history.check.none = 取消选中全部 +history.check.selected = 选中所选项目 +history.uncheck.selected = 取消选中所选项目 +history.load.failed.warning = RipMe 加载位于 historyFile.getAbsolutePath() 的历史文件失败\n\n错误:%s\n\n关闭 RipMe 会自动覆盖此文件的内容,\n请在关闭 RipMe 前备份它! +history.load.none = 无可重新抓取的历史条目。请先抓取一些专辑 +history.load.none.checked = 未 '选中' 任何历史条目,请通过选中所需 URL 前面的复选框或URL 的右键菜单以选中所需条目 + +# TrayIcon +tray.show = 显示 +tray.hide = 隐藏 +tray.autorip = 监视剪贴板上的 URL +tray.exit = 退出 + +# Misc UI keys +loading.history.from.configuration = 从配置加载历史 +interrupted.while.waiting.to.rip.next.album = 等候抓取下一专辑期间发生中断 +inactive = 非活动 +download.url.list = 下载 URL 列表 +select.save.dir = 选择保存目录 + +# Keys for the logs generated by DownloadFileThread +nonretriable.status.code = 非可重试状态代码 +retriable.status.code = 可重试状态代码 +server.doesnt.support.resuming.downloads = 服务器不支持继续下载(续传) +# A "magic number" can also be called a file signature +was.unable.to.get.content.type.using.magic.number = 不能使用幻数获取内容类型 +magic.number.was = 幻数为 +deleting.existing.file = 删除现有文件 +request.properties = 请求属性 +download.interrupted = 下载中断 +exceeded.maximum.retries = 超过最大重试次数 +http.status.exception = HTTP 状态意外 +exception.while.downloading.file = 下载文件时发生意外 +failed.to.download = 下载失败 +skipping = 跳过 file.already.exists = 文件已存在 \ No newline at end of file diff --git a/src/main/resources/log4j.properties b/src/main/resources/log4j.properties deleted file mode 100644 index 409dd3035..000000000 --- a/src/main/resources/log4j.properties +++ /dev/null @@ -1,10 +0,0 @@ - -# define the console appender -log4j.appender.stdout = org.apache.log4j.ConsoleAppender -log4j.appender.stdout.Target = System.out -log4j.appender.stdout.Threshold = info -log4j.appender.stdout.layout = org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern = %m%n - -# now map our console appender as a root logger, means all log messages will go to this 
appender -log4j.rootLogger = debug, stdout \ No newline at end of file diff --git a/src/main/resources/log4j2-example.xml b/src/main/resources/log4j2-example.xml new file mode 100644 index 000000000..dbc0888cd --- /dev/null +++ b/src/main/resources/log4j2-example.xml @@ -0,0 +1,30 @@ + + + + # Console appender + + # Pattern of log message for console appender + + + + # Rolling appender + + + %d{yyyy-MM-dd HH:mm:ss} %p %m%n + + + + + + + + + + + + + + + diff --git a/src/main/resources/rip.properties b/src/main/resources/rip.properties index cac0c1f15..484cacac4 100644 --- a/src/main/resources/rip.properties +++ b/src/main/resources/rip.properties @@ -6,7 +6,7 @@ threads.size = 5 file.overwrite = false # Number of retries on failed downloads -download.retries = 1 +download.retries = 3 # File download timeout (in milliseconds) download.timeout = 60000 @@ -17,6 +17,9 @@ page.timeout = 5000 # Maximum size of downloaded files in bytes (required) download.max_size = 104857600 +# Any URLs ending with one of these comma-separated values will be skipped +#download.ignore_extensions = mp4,gif,m4v,webm,html + # Don't retry on 404 errors error.skip404 = true diff --git a/src/test/java/com/rarchives/ripme/tst/AbstractRipperTest.java b/src/test/java/com/rarchives/ripme/tst/AbstractRipperTest.java index a388151c1..7eb3df432 100644 --- a/src/test/java/com/rarchives/ripme/tst/AbstractRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/AbstractRipperTest.java @@ -4,7 +4,8 @@ import org.junit.jupiter.api.Test; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -12,20 +13,20 @@ public class AbstractRipperTest { @Test - public void testGetFileName() throws IOException { - String fileName = AbstractRipper.getFileName(new URL("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D"), "test", "test"); + public void testGetFileName() throws 
IOException, URISyntaxException { + String fileName = AbstractRipper.getFileName(new URI("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D").toURL(),null, "test", "test"); assertEquals("test.test", fileName); - fileName = AbstractRipper.getFileName(new URL("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D"), "test", null); + fileName = AbstractRipper.getFileName(new URI("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D").toURL(), null,"test", null); assertEquals("test", fileName); - fileName = AbstractRipper.getFileName(new URL("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D"), null, null); + fileName = AbstractRipper.getFileName(new URI("http://www.tsumino.com/Image/Object?name=U1EieteEGwm6N1dGszqCpA%3D%3D").toURL(), null,null, null); assertEquals("Object", fileName); - fileName = AbstractRipper.getFileName(new URL("http://www.test.com/file.png"), null, null); + fileName = AbstractRipper.getFileName(new URI("http://www.test.com/file.png").toURL(), null,null, null); assertEquals("file.png", fileName); - fileName = AbstractRipper.getFileName(new URL("http://www.test.com/file."), null, null); + fileName = AbstractRipper.getFileName(new URI("http://www.test.com/file.").toURL(), null,null, null); assertEquals("file.", fileName); } diff --git a/src/test/java/com/rarchives/ripme/tst/UtilsTest.java b/src/test/java/com/rarchives/ripme/tst/UtilsTest.java index d87eca556..c43fa76af 100644 --- a/src/test/java/com/rarchives/ripme/tst/UtilsTest.java +++ b/src/test/java/com/rarchives/ripme/tst/UtilsTest.java @@ -1,16 +1,27 @@ package com.rarchives.ripme.tst; -import java.io.File; import java.io.FileNotFoundException; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.Arrays; import com.rarchives.ripme.utils.Utils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.Assertions; import 
org.junit.jupiter.api.Test; public class UtilsTest { + private final Logger LOGGER = LogManager.getLogger(UtilsTest.class); + + @Test + public void testConfigureLogger() { + Utils.configureLogger(); + LOGGER.warn("this is a warning messaage."); + } + public void testGetEXTFromMagic() { Assertions.assertEquals("jpeg", Utils.getEXTFromMagic(new byte[] { -1, -40, -1, -37, 0, 0, 0, 0 })); @@ -50,8 +61,8 @@ public void testBetween() { public void testShortenFileNameWindows() throws FileNotFoundException { String filename = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff.png"; // Test filename shortening for windows - File f = Utils.shortenSaveAsWindows("D:/rips/test/reddit/deep", filename); - Assertions.assertEquals(new File( + Path f = Utils.shortenSaveAsWindows("D:/rips/test/reddit/deep", filename); + Assertions.assertEquals(Paths.get( "D:/rips/test/reddit/deep/ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff.png"), f); } diff --git a/src/test/java/com/rarchives/ripme/tst/proxyTest.java b/src/test/java/com/rarchives/ripme/tst/proxyTest.java index 721408535..0576b8e20 100644 --- a/src/test/java/com/rarchives/ripme/tst/proxyTest.java +++ b/src/test/java/com/rarchives/ripme/tst/proxyTest.java @@ -1,55 +1,57 @@ -package com.rarchives.ripme.tst; - -import java.io.IOException; -import java.net.URL; -import com.rarchives.ripme.utils.Proxy; -import com.rarchives.ripme.utils.Utils; -import com.rarchives.ripme.utils.Http; -import org.junit.jupiter.api.Test; - -import static org.junit.jupiter.api.Assertions.assertFalse; - -public class proxyTest { - - - // This test will only run on machines where the user has added a 
entry for proxy.socks - @Test - public void testSocksProxy() throws IOException { - // Unset proxy before testing - System.setProperty("http.proxyHost", ""); - System.setProperty("https.proxyHost", ""); - System.setProperty("socksProxyHost", ""); - URL url = new URL("https://icanhazip.com"); - String proxyConfig = Utils.getConfigString("proxy.socks", ""); - if (!proxyConfig.equals("")) { - String ip1 = Http.url(url).ignoreContentType().get().text(); - Proxy.setSocks(Utils.getConfigString("proxy.socks", "")); - String ip2 = Http.url(url).ignoreContentType().get().text(); - assertFalse(ip1.equals(ip2)); - } else { - System.out.println("Skipping testSocksProxy"); - assert(true); - } - } - - // This test will only run on machines where the user has added a entry for proxy.http - @Test - public void testHTTPProxy() throws IOException { - // Unset proxy before testing - System.setProperty("http.proxyHost", ""); - System.setProperty("https.proxyHost", ""); - System.setProperty("socksProxyHost", ""); - URL url = new URL("https://icanhazip.com"); - String proxyConfig = Utils.getConfigString("proxy.http", ""); - if (!proxyConfig.equals("")) { - String ip1 = Http.url(url).ignoreContentType().get().text(); - Proxy.setHTTPProxy(Utils.getConfigString("proxy.http", "")); - String ip2 = Http.url(url).ignoreContentType().get().text(); - assertFalse(ip1.equals(ip2)); - } else { - System.out.println("Skipping testHTTPProxy"); - assert(true); - } - } - -} +package com.rarchives.ripme.tst; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import com.rarchives.ripme.utils.Proxy; +import com.rarchives.ripme.utils.Utils; +import com.rarchives.ripme.utils.Http; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertFalse; + +public class proxyTest { + + + // This test will only run on machines where the user has added a entry for proxy.socks + @Test + public void testSocksProxy() throws 
IOException, URISyntaxException { + // Unset proxy before testing + System.setProperty("http.proxyHost", ""); + System.setProperty("https.proxyHost", ""); + System.setProperty("socksProxyHost", ""); + URL url = new URI("https://icanhazip.com").toURL(); + String proxyConfig = Utils.getConfigString("proxy.socks", ""); + if (!proxyConfig.equals("")) { + String ip1 = Http.url(url).ignoreContentType().get().text(); + Proxy.setSocks(Utils.getConfigString("proxy.socks", "")); + String ip2 = Http.url(url).ignoreContentType().get().text(); + assertFalse(ip1.equals(ip2)); + } else { + System.out.println("Skipping testSocksProxy"); + assert(true); + } + } + + // This test will only run on machines where the user has added a entry for proxy.http + @Test + public void testHTTPProxy() throws IOException, URISyntaxException { + // Unset proxy before testing + System.setProperty("http.proxyHost", ""); + System.setProperty("https.proxyHost", ""); + System.setProperty("socksProxyHost", ""); + URL url = new URI("https://icanhazip.com").toURL(); + String proxyConfig = Utils.getConfigString("proxy.http", ""); + if (!proxyConfig.equals("")) { + String ip1 = Http.url(url).ignoreContentType().get().text(); + Proxy.setHTTPProxy(Utils.getConfigString("proxy.http", "")); + String ip2 = Http.url(url).ignoreContentType().get().text(); + assertFalse(ip1.equals(ip2)); + } else { + System.out.println("Skipping testHTTPProxy"); + assert(true); + } + } + +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AerisdiesRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AerisdiesRipperTest.java index c4c2a7a85..4c0bd8330 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AerisdiesRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AerisdiesRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import 
com.rarchives.ripme.ripper.rippers.AerisdiesRipper; @@ -12,27 +14,29 @@ public class AerisdiesRipperTest extends RippersTest { @Test @Tag("flaky") - public void testAlbum() throws IOException { - AerisdiesRipper ripper = new AerisdiesRipper(new URL("http://www.aerisdies.com/html/lb/alb_1097_1.html")); + public void testAlbum() throws IOException, URISyntaxException { + AerisdiesRipper ripper = new AerisdiesRipper(new URI("http://www.aerisdies.com/html/lb/alb_1097_1.html").toURL()); testRipper(ripper); } @Test @Tag("flaky") - public void testSubAlbum() throws IOException { - AerisdiesRipper ripper = new AerisdiesRipper(new URL("http://www.aerisdies.com/html/lb/alb_3692_1.html")); + public void testSubAlbum() throws IOException, URISyntaxException { + AerisdiesRipper ripper = new AerisdiesRipper(new URI("http://www.aerisdies.com/html/lb/alb_3692_1.html").toURL()); testRipper(ripper); } @Test - public void testDjAlbum() throws IOException { - AerisdiesRipper ripper = new AerisdiesRipper(new URL("http://www.aerisdies.com/html/lb/douj_5230_1.html")); + @Tag("flaky") + public void testDjAlbum() throws IOException, URISyntaxException { + AerisdiesRipper ripper = new AerisdiesRipper(new URI("http://www.aerisdies.com/html/lb/douj_5230_1.html").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("http://www.aerisdies.com/html/lb/douj_5230_1.html"); + @Tag("flaky") + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("http://www.aerisdies.com/html/lb/douj_5230_1.html").toURL(); AerisdiesRipper ripper = new AerisdiesRipper(url); Assertions.assertEquals("5230", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AllporncomicRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AllporncomicRipperTest.java index f84660722..20b79c0be 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AllporncomicRipperTest.java +++ 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/AllporncomicRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import com.rarchives.ripme.ripper.rippers.AllporncomicRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; public class AllporncomicRipperTest extends RippersTest { @Test - public void testAlbum() throws IOException { - AllporncomicRipper ripper = new AllporncomicRipper(new URL("https://allporncomic.com/porncomic/dnd-pvp-dungeons-dragons-fred-perry/1-dnd-pvp")); + @Tag("flaky") + public void testAlbum() throws IOException, URISyntaxException { + AllporncomicRipper ripper = new AllporncomicRipper(new URI("https://allporncomic.com/porncomic/dnd-pvp-dungeons-dragons-fred-perry/1-dnd-pvp").toURL()); testRipper(ripper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtAlleyRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtAlleyRipperTest.java index 693ce6191..63b9d69b8 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtAlleyRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtAlleyRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.ArtAlleyRipper; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; public class ArtAlleyRipperTest extends RippersTest { @Test - public void testRip() throws IOException { - ArtAlleyRipper ripper = new ArtAlleyRipper(new URL("https://artalley.social/@curator/media")); + @Disabled("website switched off") + public void testRip() throws IOException, URISyntaxException { + ArtAlleyRipper ripper = new ArtAlleyRipper(new URI("https://artalley.social/@curator/media").toURL()); 
testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtStationRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtStationRipperTest.java index e29a32eda..6450cad1c 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtStationRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtStationRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -14,11 +16,11 @@ public class ArtStationRipperTest extends RippersTest { @Test @Tag("flaky") - public void testArtStationProjects() throws IOException { + public void testArtStationProjects() throws IOException, URISyntaxException { List contentURLs = new ArrayList<>(); - contentURLs.add(new URL("https://www.artstation.com/artwork/the-dwarf-mortar")); - contentURLs.add(new URL("https://www.artstation.com/artwork/K36GR")); - contentURLs.add(new URL("http://artstation.com/artwork/5JJQw")); + contentURLs.add(new URI("https://www.artstation.com/artwork/the-dwarf-mortar").toURL()); + contentURLs.add(new URI("https://www.artstation.com/artwork/K36GR").toURL()); + contentURLs.add(new URI("http://artstation.com/artwork/5JJQw").toURL()); for (URL url : contentURLs) { ArtStationRipper ripper = new ArtStationRipper(url); testRipper(ripper); @@ -27,11 +29,11 @@ public void testArtStationProjects() throws IOException { @Test @Tag("flaky") - public void testArtStationUserProfiles() throws IOException { + public void testArtStationUserProfiles() throws IOException, URISyntaxException { List contentURLs = new ArrayList<>(); - contentURLs.add(new URL("https://www.artstation.com/heitoramatsu")); - contentURLs.add(new URL("https://artstation.com/kuvshinov_ilya")); - contentURLs.add(new URL("http://artstation.com/givemeapiggy")); + contentURLs.add(new 
URI("https://www.artstation.com/heitoramatsu").toURL()); + contentURLs.add(new URI("https://artstation.com/kuvshinov_ilya").toURL()); + contentURLs.add(new URI("http://artstation.com/givemeapiggy").toURL()); for (URL url : contentURLs) { ArtStationRipper ripper = new ArtStationRipper(url); testRipper(ripper); diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtstnRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtstnRipperTest.java index 7ce919dab..ee8621c29 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtstnRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ArtstnRipperTest.java @@ -1,24 +1,28 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import java.io.IOException; -import java.net.URL; - -import com.rarchives.ripme.ripper.rippers.ArtstnRipper; - -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; - -public class ArtstnRipperTest extends RippersTest { - @Test - public void testSingleProject() throws IOException { - URL url = new URL("https://artstn.co/p/JlE15Z"); - testRipper(new ArtstnRipper(url)); - } - - @Test - @Disabled("Failed with cloudflare protection") - public void testUserPortfolio() throws IOException { - URL url = new URL("https://artstn.co/m/rv37"); - testRipper(new ArtstnRipper(url)); - } -} +package com.rarchives.ripme.tst.ripper.rippers; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; + +import com.rarchives.ripme.ripper.rippers.ArtstnRipper; + +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +public class ArtstnRipperTest extends RippersTest { + @Test + @Tag("flaky") + public void testSingleProject() throws IOException, URISyntaxException { + URL url = new URI("https://artstn.co/p/JlE15Z").toURL(); + testRipper(new ArtstnRipper(url)); + } + + @Test + @Disabled("Failed with cloudflare protection") + 
public void testUserPortfolio() throws IOException, URISyntaxException { + URL url = new URI("https://artstn.co/m/rv37").toURL(); + testRipper(new ArtstnRipper(url)); + } +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BaraagRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BaraagRipperTest.java index 57105a9af..7b987b74f 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BaraagRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BaraagRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.BaraagRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class BaraagRipperTest extends RippersTest { @Test - public void testRip() throws IOException { - BaraagRipper ripper = new BaraagRipper(new URL("https://baraag.net/@darkshadow777/media")); + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + BaraagRipper ripper = new BaraagRipper(new URI("https://baraag.net/@darkshadow777/media").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BatoRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BatoRipperTest.java index 3ceb2ac79..6849f0e17 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BatoRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BatoRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.BatoRipper; @@ -13,22 +15,22 @@ public class BatoRipperTest extends RippersTest { @Test @Disabled("cloudlare? 
gets unavailable in test but works in browser") - public void testRip() throws IOException { - BatoRipper ripper = new BatoRipper(new URL("https://bato.to/chapter/1207152")); + public void testRip() throws IOException, URISyntaxException { + BatoRipper ripper = new BatoRipper(new URI("https://bato.to/chapter/1207152").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("https://bato.to/chapter/1207152"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://bato.to/chapter/1207152").toURL(); BatoRipper ripper = new BatoRipper(url); Assertions.assertEquals("1207152", ripper.getGID(url)); } @Test @Disabled("cloudlare? gets unavailable in test but works in browser") - public void testGetAlbumTitle() throws IOException { - URL url = new URL("https://bato.to/chapter/1207152"); + public void testGetAlbumTitle() throws IOException, URISyntaxException { + URL url = new URI("https://bato.to/chapter/1207152").toURL(); BatoRipper ripper = new BatoRipper(url); Assertions.assertEquals("bato_1207152_I_Messed_Up_by_Teaching_at_a_Black_Gyaru_School!_Ch.2", ripper.getAlbumTitle(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BcfakesRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BcfakesRipperTest.java index 8c31ffd4a..3140c0564 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BcfakesRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BcfakesRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.BcfakesRipper; @@ -11,8 +12,8 @@ public class BcfakesRipperTest extends RippersTest { @Test @Disabled("21/06/2018 This test was disbaled as the site has experienced notable downtime") - public void testRip() throws IOException { - 
BcfakesRipper ripper = new BcfakesRipper(new URL("http://www.bcfakes.com/celebritylist/olivia-wilde/")); + public void testRip() throws IOException, URISyntaxException { + BcfakesRipper ripper = new BcfakesRipper(new URI("http://www.bcfakes.com/celebritylist/olivia-wilde/").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BlackbrickroadofozRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BlackbrickroadofozRipperTest.java index 0bf11d58d..c28cc52db 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BlackbrickroadofozRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BlackbrickroadofozRipperTest.java @@ -6,14 +6,15 @@ import org.junit.jupiter.api.Test; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; public class BlackbrickroadofozRipperTest extends RippersTest { @Test @Disabled("Commented out on 02/04/2019 because the serve has been down for a while") - public void testRip() throws IOException { + public void testRip() throws IOException, URISyntaxException { BlackbrickroadofozRipper ripper = new BlackbrickroadofozRipper( - new URL("http://www.blackbrickroadofoz.com/comic/beginning")); + new URI("http://www.blackbrickroadofoz.com/comic/beginning").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BooruRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BooruRipperTest.java index f7918aad9..89efef4e9 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BooruRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/BooruRipperTest.java @@ -1,20 +1,24 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; import 
com.rarchives.ripme.ripper.rippers.BooruRipper; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class BooruRipperTest extends RippersTest { @Test - public void testRip() throws IOException { + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { List passURLs = new ArrayList<>(); - passURLs.add(new URL("https://xbooru.com/index.php?page=post&s=list&tags=furry")); - passURLs.add(new URL("https://gelbooru.com/index.php?page=post&s=list&tags=animal_ears")); + passURLs.add(new URI("https://xbooru.com/index.php?page=post&s=list&tags=furry").toURL()); + passURLs.add(new URI("https://gelbooru.com/index.php?page=post&s=list&tags=animal_ears").toURL()); for (URL url : passURLs) { BooruRipper ripper = new BooruRipper(url); @@ -23,9 +27,9 @@ public void testRip() throws IOException { } @Test - public void testGetGID() throws IOException { - URL xbooruUrl = new URL("https://xbooru.com/index.php?page=post&s=list&tags=furry"); - URL gelbooruUrl = new URL("https://gelbooru.com/index.php?page=post&s=list&tags=animal_ears"); + public void testGetGID() throws IOException, URISyntaxException { + URL xbooruUrl = new URI("https://xbooru.com/index.php?page=post&s=list&tags=furry").toURL(); + URL gelbooruUrl = new URI("https://gelbooru.com/index.php?page=post&s=list&tags=animal_ears").toURL(); BooruRipper xbooruRipper = new BooruRipper(xbooruUrl); BooruRipper gelbooruRipper = new BooruRipper(gelbooruUrl); @@ -35,9 +39,9 @@ public void testGetGID() throws IOException { } @Test - public void testGetDomain() throws IOException { - URL xbooruUrl = new URL("https://xbooru.com/index.php?page=post&s=list&tags=furry"); - URL gelbooruUrl = new URL("https://gelbooru.com/index.php?page=post&s=list&tags=animal_ears"); + public void testGetDomain() throws IOException, URISyntaxException { + URL xbooruUrl = new URI("https://xbooru.com/index.php?page=post&s=list&tags=furry").toURL(); + URL gelbooruUrl = new 
URI("https://gelbooru.com/index.php?page=post&s=list&tags=animal_ears").toURL(); BooruRipper xbooruRipper = new BooruRipper(xbooruUrl); BooruRipper gelbooruRipper = new BooruRipper(gelbooruUrl); @@ -47,9 +51,9 @@ public void testGetDomain() throws IOException { } @Test - public void testGetHost() throws IOException { - URL xbooruUrl = new URL("https://xbooru.com/index.php?page=post&s=list&tags=furry"); - URL gelbooruUrl = new URL("https://gelbooru.com/index.php?page=post&s=list&tags=animal_ears"); + public void testGetHost() throws IOException, URISyntaxException { + URL xbooruUrl = new URI("https://xbooru.com/index.php?page=post&s=list&tags=furry").toURL(); + URL gelbooruUrl = new URI("https://gelbooru.com/index.php?page=post&s=list&tags=animal_ears").toURL(); BooruRipper xbooruRipper = new BooruRipper(xbooruUrl); BooruRipper gelbooruRipper = new BooruRipper(gelbooruUrl); diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CfakeRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CfakeRipperTest.java index 3e6dad946..95f7ec2ee 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CfakeRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CfakeRipperTest.java @@ -1,13 +1,14 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.CfakeRipper; public class CfakeRipperTest extends RippersTest { - public void testRip() throws IOException { - CfakeRipper ripper = new CfakeRipper(new URL("http://cfake.com/picture/Zooey_Deschanel/1264")); + public void testRip() throws IOException, URISyntaxException { + CfakeRipper ripper = new CfakeRipper(new URI("http://cfake.com/picture/Zooey_Deschanel/1264").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ChanRipperTest.java 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ChanRipperTest.java index e7b285fcb..ed023d474 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ChanRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ChanRipperTest.java @@ -1,44 +1,67 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import com.rarchives.ripme.ripper.rippers.ChanRipper; import com.rarchives.ripme.ripper.rippers.ripperhelpers.ChanSite; import com.rarchives.ripme.utils.Http; import org.jsoup.nodes.Document; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class ChanRipperTest extends RippersTest { @Test - public void testChanURLPasses() throws IOException { + @Tag("flaky") + public void testChanURLPasses() throws IOException, URISyntaxException { List passURLs = new ArrayList<>(); // URLs that should work - passURLs.add(new URL("http://desuchan.net/v/res/7034.html")); - passURLs.add(new URL("https://boards.4chan.org/hr/thread/3015701")); - passURLs.add(new URL("https://boards.420chan.org/420/res/232066.php")); - passURLs.add(new URL("http://7chan.org/gif/res/25873.html")); - passURLs.add(new URL("https://rbt.asia/g/thread/70643087/")); //must work with TLDs with len of 4 + passURLs.add(new URI("http://desuchan.net/v/res/7034.html").toURL()); + passURLs.add(new URI("https://boards.4chan.org/hr/thread/3015701").toURL()); + passURLs.add(new URI("https://boards.420chan.org/420/res/232066.php").toURL()); + passURLs.add(new URI("http://7chan.org/gif/res/25873.html").toURL()); + passURLs.add(new 
URI("https://rbt.asia/g/thread/70643087/").toURL()); //must work with TLDs with len of 4 for (URL url : passURLs) { ChanRipper ripper = new ChanRipper(url); - ripper.setup(); + // Use CompletableFuture to run setup() asynchronously + CompletableFuture setupFuture = CompletableFuture.runAsync(() -> { + try { + ripper.setup(); + } catch (IOException | URISyntaxException e) { + throw new RuntimeException(e); + } + }); + + try { + // Wait for up to 5 seconds for setup() to complete + setupFuture.get(5, TimeUnit.SECONDS); + } catch (InterruptedException | ExecutionException | + TimeoutException e) { + e.printStackTrace(); // Handle exceptions as needed + } assert (ripper.canRip(url)); Assertions.assertNotNull(ripper.getWorkingDir(), "Ripper for " + url + " did not have a valid working directory."); deleteDir(ripper.getWorkingDir()); } } @Test - public void testChanStringParsing() throws IOException { + public void testChanStringParsing() throws IOException, URISyntaxException { List site1 = Arrays.asList("site1.com"); List site1Cdns = Arrays.asList("cnd1.site1.com", "cdn2.site2.biz"); List site2 = Arrays.asList("site2.co.uk"); List site2Cdns = Arrays.asList("cdn.site2.co.uk"); - ChanRipper ripper = new ChanRipper(new URL("http://desuchan.net/v/res/7034.html")); + ChanRipper ripper = new ChanRipper(new URI("http://desuchan.net/v/res/7034.html").toURL()); List chansFromConfig = ripper .getChansFromConfig("site1.com[cnd1.site1.com|cdn2.site2.biz],site2.co.uk[cdn.site2.co.uk]"); Assertions.assertEquals(chansFromConfig.get(0).getDomains(), site1); @@ -48,23 +71,19 @@ public void testChanStringParsing() throws IOException { Assertions.assertEquals(chansFromConfig.get(1).getCdns(), site2Cdns); } @Test - public void testChanRipper() throws IOException { + public void testChanRipper() throws IOException, URISyntaxException { List contentURLs = new ArrayList<>(); - contentURLs.add(new URL(getRandomThreadDesuarchive())); + contentURLs.add(getRandomThreadDesuarchive()); for (URL 
url : contentURLs) { ChanRipper ripper = new ChanRipper(url); testChanRipper(ripper); } } - /** - * - * @return String returns a url to a active desuarchive.org tread as a string - */ - public String getRandomThreadDesuarchive() { + public URL getRandomThreadDesuarchive() throws URISyntaxException { try { - Document doc = Http.url(new URL("https://desuarchive.org/wsg/")).get(); - return doc.select("div.post_data > a").first().attr("href"); + Document doc = Http.url(new URI("https://desuarchive.org/wsg/").toURL()).get(); + return new URI(doc.select("div.post_data > a").first().attr("href")).toURL(); } catch (IOException e) { e.printStackTrace(); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CheveretoRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CheveretoRipperTest.java index 420fcb001..6d893527d 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CheveretoRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CheveretoRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.CheveretoRipper; import org.junit.jupiter.api.Tag; @@ -9,14 +10,15 @@ public class CheveretoRipperTest extends RippersTest { @Test - public void testTagFox() throws IOException { - CheveretoRipper ripper = new CheveretoRipper(new URL("http://tag-fox.com/album/Thjb")); + @Tag("flaky") + public void testTagFox() throws IOException, URISyntaxException { + CheveretoRipper ripper = new CheveretoRipper(new URI("http://tag-fox.com/album/Thjb").toURL()); testRipper(ripper); } @Test @Tag("flaky") - public void testSubdirAlbum() throws IOException { - CheveretoRipper ripper = new CheveretoRipper(new URL("https://kenzato.uk/booru/album/TnEc")); + public void testSubdirAlbum() throws IOException, URISyntaxException { + CheveretoRipper ripper = new 
CheveretoRipper(new URI("https://kenzato.uk/booru/album/TnEc").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ComicextraRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ComicextraRipperTest.java index 0769e2951..e01ae6e00 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ComicextraRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ComicextraRipperTest.java @@ -1,24 +1,28 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import java.io.IOException; -import java.net.URL; -import com.rarchives.ripme.ripper.rippers.ComicextraRipper; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; - -public class ComicextraRipperTest extends RippersTest { - @Test - public void testComicUrl() throws IOException { - URL url = new URL("https://www.comicextra.com/comic/karma-police"); - ComicextraRipper ripper = new ComicextraRipper(url); - testRipper(ripper); - } - @Test - @Disabled("no images found error, broken ripper?") - public void testChapterUrl() throws IOException { - URL url = new URL("https://www.comicextra.com/v-for-vendetta/chapter-1"); - ComicextraRipper ripper = new ComicextraRipper(url); - testRipper(ripper); - } - -} +package com.rarchives.ripme.tst.ripper.rippers; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import com.rarchives.ripme.ripper.rippers.ComicextraRipper; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +public class ComicextraRipperTest extends RippersTest { + @Test + @Tag("flaky") + public void testComicUrl() throws IOException, URISyntaxException { + URL url = new URI("https://www.comicextra.com/comic/karma-police").toURL(); + ComicextraRipper ripper = new ComicextraRipper(url); + testRipper(ripper); + } + @Test + @Disabled("no images found error, broken ripper?") + public void 
testChapterUrl() throws IOException, URISyntaxException { + URL url = new URI("https://www.comicextra.com/v-for-vendetta/chapter-1").toURL(); + ComicextraRipper ripper = new ComicextraRipper(url); + testRipper(ripper); + } + +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CoomerPartyRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CoomerPartyRipperTest.java new file mode 100644 index 000000000..c35822a81 --- /dev/null +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CoomerPartyRipperTest.java @@ -0,0 +1,40 @@ +package com.rarchives.ripme.tst.ripper.rippers; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.rarchives.ripme.ripper.rippers.CoomerPartyRipper; + +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; + +public class CoomerPartyRipperTest extends RippersTest { + @Test + public void testRip() throws IOException, URISyntaxException { + URL url = new URI("https://coomer.su/onlyfans/user/soogsx").toURL(); + CoomerPartyRipper ripper = new CoomerPartyRipper(url); + testRipper(ripper); + } + + @Test + public void testUrlParsing() throws IOException, URISyntaxException { + String expectedGid = "onlyfans_soogsx"; + String[] urls = new String[]{ + "https://coomer.su/onlyfans/user/soogsx", // normal url + "http://coomer.su/onlyfans/user/soogsx", // http, not https + "https://coomer.su/onlyfans/user/soogsx/", // with slash at the end + "https://coomer.su/onlyfans/user/soogsx?whatever=abc", // with url params + "https://coomer.party/onlyfans/user/soogsx", // alternate domain + }; + for (String stringUrl : urls) { + URL url = new URI(stringUrl).toURL(); + CoomerPartyRipper ripper = new CoomerPartyRipper(url); + assertTrue(ripper.canRip(url)); + assertEquals(expectedGid, ripper.getGID(url)); + } + } +} \ No newline at end of file diff --git 
a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CyberdropRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CyberdropRipperTest.java index 4d0776285..14fcef072 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CyberdropRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/CyberdropRipperTest.java @@ -1,51 +1,55 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import com.rarchives.ripme.ripper.rippers.CyberdropRipper; -import com.rarchives.ripme.utils.Http; -import org.jsoup.nodes.Document; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -import java.io.IOException; -import java.net.URL; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -public class CyberdropRipperTest extends RippersTest { - @Test - public void testScrolllerGID() throws IOException { - Map testURLs = new HashMap<>(); - - testURLs.put(new URL("https://cyberdrop.me/a/n4umdBjw"), "n4umdBjw"); - testURLs.put(new URL("https://cyberdrop.me/a/iLtp4BjW"), "iLtp4BjW"); - for (URL url : testURLs.keySet()) { - CyberdropRipper ripper = new CyberdropRipper(url); - ripper.setup(); - Assertions.assertEquals(testURLs.get(url), ripper.getGID(ripper.getURL())); - deleteDir(ripper.getWorkingDir()); - } - } - - @Test - public void testCyberdropNumberOfFiles() throws IOException { - List testURLs = new ArrayList(); - - testURLs.add(new URL("https://cyberdrop.me/a/n4umdBjw")); - testURLs.add(new URL("https://cyberdrop.me/a/iLtp4BjW")); - for (URL url : testURLs) { - Assertions.assertTrue(willDownloadAllFiles(url)); - } - } - - public boolean willDownloadAllFiles(URL url) throws IOException { - Document doc = Http.url(url).get(); - long numberOfLinks = doc.getElementsByClass("image").stream().count(); - int numberOfFiles = Integer.parseInt(doc.getElementById("totalFilesAmount").text()); - return numberOfLinks == numberOfFiles; - } - - - +package 
com.rarchives.ripme.tst.ripper.rippers; + +import com.rarchives.ripme.ripper.rippers.CyberdropRipper; +import com.rarchives.ripme.utils.Http; +import org.jsoup.nodes.Document; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class CyberdropRipperTest extends RippersTest { + @Test + public void testScrolllerGID() throws IOException, URISyntaxException { + Map testURLs = new HashMap<>(); + + testURLs.put(new URI("https://cyberdrop.me/a/n4umdBjw").toURL(), "n4umdBjw"); + testURLs.put(new URI("https://cyberdrop.me/a/iLtp4BjW").toURL(), "iLtp4BjW"); + for (URL url : testURLs.keySet()) { + CyberdropRipper ripper = new CyberdropRipper(url); + ripper.setup(); + Assertions.assertEquals(testURLs.get(url), ripper.getGID(ripper.getURL())); + deleteDir(ripper.getWorkingDir()); + } + } + + @Test + @Tag("flaky") + public void testCyberdropNumberOfFiles() throws IOException, URISyntaxException { + List testURLs = new ArrayList(); + + testURLs.add(new URI("https://cyberdrop.me/a/n4umdBjw").toURL()); + testURLs.add(new URI("https://cyberdrop.me/a/iLtp4BjW").toURL()); + for (URL url : testURLs) { + Assertions.assertTrue(willDownloadAllFiles(url)); + } + } + + public boolean willDownloadAllFiles(URL url) throws IOException { + Document doc = Http.url(url).get(); + long numberOfLinks = doc.getElementsByClass("image").stream().count(); + int numberOfFiles = Integer.parseInt(doc.getElementById("totalFilesAmount").text()); + return numberOfLinks == numberOfFiles; + } + + + } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DanbooruRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DanbooruRipperTest.java index 575864a58..dd6e71639 100644 --- 
a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DanbooruRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DanbooruRipperTest.java @@ -2,19 +2,23 @@ import com.rarchives.ripme.ripper.rippers.DanbooruRipper; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; public class DanbooruRipperTest extends RippersTest { @Test - public void testRip() throws IOException { + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { List passURLs = new ArrayList<>(); - passURLs.add(new URL("https://danbooru.donmai.us/posts?tags=brown_necktie")); - passURLs.add(new URL("https://danbooru.donmai.us/posts?page=1&tags=pink_sweater_vest")); + passURLs.add(new URI("https://danbooru.donmai.us/posts?tags=brown_necktie").toURL()); + passURLs.add(new URI("https://danbooru.donmai.us/posts?page=1&tags=pink_sweater_vest").toURL()); for (URL url : passURLs) { DanbooruRipper danbooruRipper = new DanbooruRipper(url); @@ -23,9 +27,9 @@ public void testRip() throws IOException { } @Test - public void testGetGID() throws IOException { - URL danBooruUrl = new URL("https://danbooru.donmai.us/posts?tags=brown_necktie"); - URL danBooruUrl2 = new URL("https://danbooru.donmai.us/posts?page=1&tags=pink_sweater_vest"); + public void testGetGID() throws IOException, URISyntaxException { + URL danBooruUrl = new URI("https://danbooru.donmai.us/posts?tags=brown_necktie").toURL(); + URL danBooruUrl2 = new URI("https://danbooru.donmai.us/posts?page=1&tags=pink_sweater_vest").toURL(); DanbooruRipper danbooruRipper = new DanbooruRipper(danBooruUrl); DanbooruRipper danbooruRipper2 = new DanbooruRipper(danBooruUrl2); @@ -35,8 +39,8 @@ public void testGetGID() throws IOException { } @Test - public void testGetHost() throws IOException { - URL 
danBooruUrl = new URL("https://danbooru.donmai.us/posts?tags=brown_necktie"); + public void testGetHost() throws IOException, URISyntaxException { + URL danBooruUrl = new URI("https://danbooru.donmai.us/posts?tags=brown_necktie").toURL(); DanbooruRipper danbooruRipper = new DanbooruRipper(danBooruUrl); diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DeviantartRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DeviantartRipperTest.java index 33003e5de..23dbe6790 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DeviantartRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DeviantartRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -15,30 +17,31 @@ public class DeviantartRipperTest extends RippersTest { @Test @Disabled("Broken ripper") - public void testDeviantartAlbum() throws IOException { - DeviantartRipper ripper = new DeviantartRipper(new URL("https://www.deviantart.com/airgee/gallery/")); + public void testDeviantartAlbum() throws IOException, URISyntaxException { + DeviantartRipper ripper = new DeviantartRipper(new URI("https://www.deviantart.com/airgee/gallery/").toURL()); testRipper(ripper); } @Test @Disabled("Broken ripper") - public void testDeviantartNSFWAlbum() throws IOException { + public void testDeviantartNSFWAlbum() throws IOException, URISyntaxException { // NSFW gallery - DeviantartRipper ripper = new DeviantartRipper(new URL("https://www.deviantart.com/faterkcx/gallery/")); + DeviantartRipper ripper = new DeviantartRipper(new URI("https://www.deviantart.com/faterkcx/gallery/").toURL()); testRipper(ripper); } @Test @Disabled("Broken ripper") - public void testGetGID() throws IOException { - URL url = new URL("https://www.deviantart.com/airgee/gallery/"); + public void testGetGID() 
throws IOException, URISyntaxException { + URL url = new URI("https://www.deviantart.com/airgee/gallery/").toURL(); DeviantartRipper ripper = new DeviantartRipper(url); Assertions.assertEquals("airgee", ripper.getGID(url)); } @Test - public void testGetGalleryIDAndUsername() throws IOException { - URL url = new URL("https://www.deviantart.com/airgee/gallery/"); + @Disabled("Broken ripper") + public void testGetGalleryIDAndUsername() throws IOException, URISyntaxException { + URL url = new URI("https://www.deviantart.com/airgee/gallery/").toURL(); DeviantartRipper ripper = new DeviantartRipper(url); Document doc = Http.url(url).get(); // Had to comment because of refactoring/style change @@ -48,11 +51,11 @@ public void testGetGalleryIDAndUsername() throws IOException { @Test @Disabled("Broken ripper") - public void testSanitizeURL() throws IOException { + public void testSanitizeURL() throws IOException, URISyntaxException { List urls = new ArrayList(); - urls.add(new URL("https://www.deviantart.com/airgee/")); - urls.add(new URL("https://www.deviantart.com/airgee")); - urls.add(new URL("https://www.deviantart.com/airgee/gallery/")); + urls.add(new URI("https://www.deviantart.com/airgee/").toURL()); + urls.add(new URI("https://www.deviantart.com/airgee").toURL()); + urls.add(new URI("https://www.deviantart.com/airgee/gallery/").toURL()); for (URL url : urls) { DeviantartRipper ripper = new DeviantartRipper(url); diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DribbbleRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DribbbleRipperTest.java index bd4321fcc..4a6bf37ba 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DribbbleRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DribbbleRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import 
com.rarchives.ripme.ripper.rippers.DribbbleRipper; import org.junit.jupiter.api.Disabled; @@ -10,8 +11,8 @@ public class DribbbleRipperTest extends RippersTest { @Test @Disabled("test or ripper broken") - public void testDribbbleRip() throws IOException { - DribbbleRipper ripper = new DribbbleRipper(new URL("https://dribbble.com/typogriff")); + public void testDribbbleRip() throws IOException, URISyntaxException { + DribbbleRipper ripper = new DribbbleRipper(new URI("https://dribbble.com/typogriff").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DuckmoviesRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DuckmoviesRipperTest.java deleted file mode 100644 index e4b17cb15..000000000 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DuckmoviesRipperTest.java +++ /dev/null @@ -1,20 +0,0 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import com.rarchives.ripme.ripper.rippers.DuckmoviesRipper; - -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; - -import java.io.IOException; -import java.net.URL; - -public class DuckmoviesRipperTest extends RippersTest { - @Test - @Disabled("Broken ripper") - public void testRip() throws IOException { - DuckmoviesRipper ripper = new DuckmoviesRipper( - new URL("https://palapaja.com/spyfam-stepbro-gives-in-to-stepsis-asian-persuasion/")); - testRipper(ripper); - } - -} \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DynastyscansRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DynastyscansRipperTest.java index 4c8d6416e..a2855e981 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DynastyscansRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/DynastyscansRipperTest.java @@ -1,22 +1,25 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import 
java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.DynastyscansRipper; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class DynastyscansRipperTest extends RippersTest { @Test - public void testRip() throws IOException { - DynastyscansRipper ripper = new DynastyscansRipper(new URL("https://dynasty-scans.com/chapters/under_one_roof_ch01")); + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + DynastyscansRipper ripper = new DynastyscansRipper(new URI("https://dynasty-scans.com/chapters/under_one_roof_ch01").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - DynastyscansRipper ripper = new DynastyscansRipper(new URL("https://dynasty-scans.com/chapters/under_one_roof_ch01")); - Assertions.assertEquals("under_one_roof_ch01", ripper.getGID(new URL("https://dynasty-scans.com/chapters/under_one_roof_ch01"))); + public void testGetGID() throws IOException, URISyntaxException { + DynastyscansRipper ripper = new DynastyscansRipper(new URI("https://dynasty-scans.com/chapters/under_one_roof_ch01").toURL()); + Assertions.assertEquals("under_one_roof_ch01", ripper.getGID(new URI("https://dynasty-scans.com/chapters/under_one_roof_ch01").toURL())); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/E621RipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/E621RipperTest.java index 632494233..4859ade0a 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/E621RipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/E621RipperTest.java @@ -1,25 +1,29 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.E621Ripper; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import 
org.junit.jupiter.api.Test; public class E621RipperTest extends RippersTest { - public void testRip() throws IOException { - E621Ripper ripper = new E621Ripper(new URL("https://e621.net/posts?tags=beach")); + public void testRip() throws IOException, URISyntaxException { + E621Ripper ripper = new E621Ripper(new URI("https://e621.net/posts?tags=beach").toURL()); testRipper(ripper); } @Test - public void testFlashOrWebm() throws IOException { - E621Ripper ripper = new E621Ripper(new URL("https://e621.net/posts?page=4&tags=gif+rating%3As+3d")); + @Tag("flaky") + public void testFlashOrWebm() throws IOException, URISyntaxException { + E621Ripper ripper = new E621Ripper(new URI("https://e621.net/posts?page=4&tags=gif+rating%3As+3d").toURL()); testRipper(ripper); } @Test - public void testGetNextPage() throws IOException { - E621Ripper nextPageRipper = new E621Ripper(new URL("https://e621.net/posts?tags=cosmicminerals")); + @Tag("flaky") + public void testGetNextPage() throws IOException, URISyntaxException { + E621Ripper nextPageRipper = new E621Ripper(new URI("https://e621.net/posts?tags=cosmicminerals").toURL()); try { nextPageRipper.getNextPage(nextPageRipper.getFirstPage()); assert (true); @@ -27,7 +31,7 @@ public void testGetNextPage() throws IOException { throw e; } - E621Ripper noNextPageRipper = new E621Ripper(new URL("https://e621.net/post/index/1/cosmicminerals")); + E621Ripper noNextPageRipper = new E621Ripper(new URI("https://e621.net/post/index/1/cosmicminerals").toURL()); try { noNextPageRipper.getNextPage(noNextPageRipper.getFirstPage()); } catch (IOException e) { @@ -35,18 +39,21 @@ public void testGetNextPage() throws IOException { } } @Test - public void testOldRip() throws IOException { - E621Ripper ripper = new E621Ripper(new URL("https://e621.net/post/index/1/beach")); + @Tag("flaky") + public void testOldRip() throws IOException, URISyntaxException { + E621Ripper ripper = new E621Ripper(new URI("https://e621.net/post/index/1/beach").toURL()); 
testRipper(ripper); } @Test - public void testOldFlashOrWebm() throws IOException { - E621Ripper ripper = new E621Ripper(new URL("https://e621.net/post/index/1/gif")); + @Tag("flaky") + public void testOldFlashOrWebm() throws IOException, URISyntaxException { + E621Ripper ripper = new E621Ripper(new URI("https://e621.net/post/index/1/gif").toURL()); testRipper(ripper); } @Test - public void testOldGetNextPage() throws IOException { - E621Ripper nextPageRipper = new E621Ripper(new URL("https://e621.net/post/index/1/cosmicminerals")); + @Tag("flaky") + public void testOldGetNextPage() throws IOException, URISyntaxException { + E621Ripper nextPageRipper = new E621Ripper(new URI("https://e621.net/post/index/1/cosmicminerals").toURL()); try { nextPageRipper.getNextPage(nextPageRipper.getFirstPage()); assert (true); @@ -54,7 +61,7 @@ public void testOldGetNextPage() throws IOException { throw e; } - E621Ripper noNextPageRipper = new E621Ripper(new URL("https://e621.net/post/index/1/cosmicminerals")); + E621Ripper noNextPageRipper = new E621Ripper(new URI("https://e621.net/post/index/1/cosmicminerals").toURL()); try { noNextPageRipper.getNextPage(noNextPageRipper.getFirstPage()); } catch (IOException e) { diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EhentaiRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EhentaiRipperTest.java index 021e892fc..31fee74ed 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EhentaiRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EhentaiRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.List; @@ -11,15 +13,15 @@ public class EhentaiRipperTest extends RippersTest { @Test - public void testEHentaiAlbum() throws IOException { - EHentaiRipper ripper = new EHentaiRipper(new 
URL("https://e-hentai.org/g/1144492/e823bdf9a5/")); + public void testEHentaiAlbum() throws IOException, URISyntaxException { + EHentaiRipper ripper = new EHentaiRipper(new URI("https://e-hentai.org/g/1144492/e823bdf9a5/").toURL()); testRipper(ripper); } // Test the tag black listing @Test - public void testTagBlackList() throws IOException { - URL url = new URL("https://e-hentai.org/g/1228503/1a2f455f96/"); + public void testTagBlackList() throws IOException, URISyntaxException { + URL url = new URI("https://e-hentai.org/g/1228503/1a2f455f96/").toURL(); EHentaiRipper ripper = new EHentaiRipper(url); List tagsOnPage = ripper.getTags(ripper.getFirstPage()); // Test multiple blacklisted tags diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EightmusesRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EightmusesRipperTest.java index e3e9bcb7a..2a016b66a 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EightmusesRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EightmusesRipperTest.java @@ -1,33 +1,36 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.EightmusesRipper; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class EightmusesRipperTest extends RippersTest { @Test - public void testEightmusesAlbum() throws IOException { + @Tag("flaky") + public void testEightmusesAlbum() throws IOException, URISyntaxException { // A simple image album - EightmusesRipper ripper = new EightmusesRipper(new URL("https://www.8muses.com/comix/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore")); + EightmusesRipper ripper = new EightmusesRipper(new URI("https://www.8muses.com/comix/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore").toURL()); 
testRipper(ripper); // Test the new url format - ripper = new EightmusesRipper(new URL("https://www.8muses.com/comics/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore")); + ripper = new EightmusesRipper(new URI("https://www.8muses.com/comics/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore").toURL()); testRipper(ripper); // Test pages with subalbums - ripper = new EightmusesRipper(new URL("https://www.8muses.com/comix/album/Blacknwhitecomics_com-Comix/BlacknWhiteComics/The-Mayor")); + ripper = new EightmusesRipper(new URI("https://www.8muses.com/comix/album/Blacknwhitecomics_com-Comix/BlacknWhiteComics/The-Mayor").toURL()); testRipper(ripper); } @Test - public void testGID() throws IOException { - EightmusesRipper ripper = new EightmusesRipper(new URL("https://www.8muses.com/comix/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore")); - Assertions.assertEquals("Affect3D-Comics", ripper.getGID(new URL("https://www.8muses.com/comics/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore"))); + public void testGID() throws IOException, URISyntaxException { + EightmusesRipper ripper = new EightmusesRipper(new URI("https://www.8muses.com/comix/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore").toURL()); + Assertions.assertEquals("Affect3D-Comics", ripper.getGID(new URI("https://www.8muses.com/comics/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore").toURL())); } @Test - public void testGetSubdir() throws IOException { - EightmusesRipper ripper = new EightmusesRipper(new URL("https://www.8muses.com/comix/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore")); + public void testGetSubdir() throws IOException, URISyntaxException { + EightmusesRipper ripper = new EightmusesRipper(new URI("https://www.8muses.com/comix/album/Affect3D-Comics/TheDude3DX/Lust-Unleashed-The-Urge-To-Explore").toURL()); Assertions.assertEquals("After-Party-Issue-1", 
ripper.getSubdir("After Party - Issue 1")); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EroShareRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EroShareRipperTest.java index 780460ceb..98d6be8f7 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EroShareRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EroShareRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.EroShareRipper; import com.rarchives.ripme.ripper.rippers.RedditRipper; @@ -14,46 +15,46 @@ public class EroShareRipperTest extends RippersTest { // single image posts @Test @Disabled("https://github.com/RipMeApp/ripme/issues/306 : EroShareRipper broken (even for eroshae links)") - public void testImageEroshareFromRedditRip() throws IOException { - RedditRipper ripper = new RedditRipper(new URL( - "https://www.reddit.com/r/BestOfEroshare/comments/5z7foo/good_morning_who_likes_abstract_asian_artwork_f/")); + public void testImageEroshareFromRedditRip() throws IOException, URISyntaxException { + RedditRipper ripper = new RedditRipper(new URI( + "https://www.reddit.com/r/BestOfEroshare/comments/5z7foo/good_morning_who_likes_abstract_asian_artwork_f/").toURL()); testRipper(ripper); } @Test @Disabled("https://github.com/RipMeApp/ripme/issues/306 : EroShareRipper broken (even for eroshae links)") - public void testImageEroshareRip() throws IOException { - EroShareRipper ripper = new EroShareRipper(new URL("https://eroshare.com/i/5j2qln3f")); + public void testImageEroshareRip() throws IOException, URISyntaxException { + EroShareRipper ripper = new EroShareRipper(new URI("https://eroshare.com/i/5j2qln3f").toURL()); testRipper(ripper); } @Test @Disabled("https://github.com/RipMeApp/ripme/issues/306 : EroShareRipper broken 
(even for eroshae links)") - public void testImageEroshaeRip() throws IOException { - EroShareRipper ripper = new EroShareRipper(new URL("https://eroshae.com/i/5j2qln3f")); + public void testImageEroshaeRip() throws IOException, URISyntaxException { + EroShareRipper ripper = new EroShareRipper(new URI("https://eroshae.com/i/5j2qln3f").toURL()); testRipper(ripper); } // video album post @Test @Disabled("https://github.com/RipMeApp/ripme/issues/306 : EroShareRipper broken (even for eroshae links)") - public void testVideoAlbumFromRedditRip() throws IOException { - EroShareRipper ripper = new EroShareRipper(new URL( - "https://www.reddit.com/r/BestOfEroshare/comments/5vyfnw/asian_mf_heard_i_should_post_here_date_night_her/")); + public void testVideoAlbumFromRedditRip() throws IOException, URISyntaxException { + EroShareRipper ripper = new EroShareRipper(new URI( + "https://www.reddit.com/r/BestOfEroshare/comments/5vyfnw/asian_mf_heard_i_should_post_here_date_night_her/").toURL()); testRipper(ripper); } @Test @Disabled("https://github.com/RipMeApp/ripme/issues/306 : EroShareRipper broken (even for eroshae links)") - public void testVideoAlbumEroshareRip() throws IOException { - EroShareRipper ripper = new EroShareRipper(new URL("https://eroshare.com/wqnl6f00")); + public void testVideoAlbumEroshareRip() throws IOException, URISyntaxException { + EroShareRipper ripper = new EroShareRipper(new URI("https://eroshare.com/wqnl6f00").toURL()); testRipper(ripper); } @Test @Disabled("https://github.com/RipMeApp/ripme/issues/306 : EroShareRipper broken (even for eroshae links)") - public void testVideoAlbumEroshaeRip() throws IOException { - EroShareRipper ripper = new EroShareRipper(new URL("https://eroshae.com/wqnl6f00")); + public void testVideoAlbumEroshaeRip() throws IOException, URISyntaxException { + EroShareRipper ripper = new EroShareRipper(new URI("https://eroshae.com/wqnl6f00").toURL()); testRipper(ripper); } } diff --git 
a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ErofusRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ErofusRipperTest.java index 6acd6e08f..a06f0e70e 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ErofusRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ErofusRipperTest.java @@ -1,22 +1,26 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.ErofusRipper; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class ErofusRipperTest extends RippersTest { @Test - public void testRip() throws IOException { - ErofusRipper ripper = new ErofusRipper(new URL("https://www.erofus.com/comics/be-story-club-comics/a-kiss/issue-1")); + @Tag("flaky") // if url does not exist, erofusripper test ends in out of memory + public void testRip() throws IOException, URISyntaxException { + ErofusRipper ripper = new ErofusRipper(new URI("https://www.erofus.com/comics/be-story-club-comics/a-kiss/issue-1").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - ErofusRipper ripper = new ErofusRipper(new URL("https://www.erofus.com/comics/be-story-club-comics/a-kiss/issue-1")); - Assertions.assertEquals("be-story-club-comics", ripper.getGID(new URL("https://www.erofus.com/comics/be-story-club-comics/a-kiss/issue-1"))); + @Tag("flaky") + public void testGetGID() throws IOException, URISyntaxException { + ErofusRipper ripper = new ErofusRipper(new URI("https://www.erofus.com/comics/be-story-club-comics/a-kiss/issue-1").toURL()); + Assertions.assertEquals("be-story-club-comics", ripper.getGID(new URI("https://www.erofus.com/comics/be-story-club-comics/a-kiss/issue-1").toURL())); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EromeRipperTest.java 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EromeRipperTest.java index b18762e95..18ddf4bbd 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EromeRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/EromeRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.MalformedURLException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.EromeRipper; @@ -10,39 +11,39 @@ public class EromeRipperTest extends RippersTest { @Test - public void testGetGIDProfilePage() throws IOException { - URL url = new URL("https://www.erome.com/Jay-Jenna"); + public void testGetGIDProfilePage() throws IOException, URISyntaxException { + URL url = new URI("https://www.erome.com/Jay-Jenna").toURL(); EromeRipper ripper = new EromeRipper(url); Assertions.assertEquals("Jay-Jenna", ripper.getGID(url)); } @Test - public void testGetGIDAlbum() throws IOException { - URL url = new URL("https://www.erome.com/a/KbDAM1XT"); + public void testGetGIDAlbum() throws IOException, URISyntaxException { + URL url = new URI("https://www.erome.com/a/KbDAM1XT").toURL(); EromeRipper ripper = new EromeRipper(url); Assertions.assertEquals("KbDAM1XT", ripper.getGID(url)); } @Test - public void testGetAlbumsToQueue() throws IOException { - URL url = new URL("https://www.erome.com/Jay-Jenna"); + public void testGetAlbumsToQueue() throws IOException, URISyntaxException { + URL url = new URI("https://www.erome.com/Jay-Jenna").toURL(); EromeRipper ripper = new EromeRipper(url); assert (2 >= ripper.getAlbumsToQueue(ripper.getFirstPage()).size()); } @Test - public void testPageContainsAlbums() throws IOException { - URL url = new URL("https://www.erome.com/Jay-Jenna"); + public void testPageContainsAlbums() throws IOException, URISyntaxException { + URL url = new URI("https://www.erome.com/Jay-Jenna").toURL(); EromeRipper ripper = new 
EromeRipper(url); assert (ripper.pageContainsAlbums(url)); - assert (!ripper.pageContainsAlbums(new URL("https://www.erome.com/a/KbDAM1XT"))); + assert (!ripper.pageContainsAlbums(new URI("https://www.erome.com/a/KbDAM1XT").toURL())); } - public void testRip() throws IOException { - URL url = new URL("https://www.erome.com/a/vlefBdsg"); + public void testRip() throws IOException, URISyntaxException { + URL url = new URI("https://www.erome.com/a/vlefBdsg").toURL(); EromeRipper ripper = new EromeRipper(url); testRipper(ripper); } @Test - public void testGetURLsFromPage() throws IOException { - URL url = new URL("https://www.erome.com/a/Tak8F2h6"); + public void testGetURLsFromPage() throws IOException, URISyntaxException { + URL url = new URI("https://www.erome.com/a/Tak8F2h6").toURL(); EromeRipper ripper = new EromeRipper(url); assert (35 == ripper.getURLsFromPage(ripper.getFirstPage()).size()); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ErotivRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ErotivRipperTest.java index b4afdd674..11f2b59f7 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ErotivRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ErotivRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.ErotivRipper; @@ -9,22 +11,22 @@ public class ErotivRipperTest extends RippersTest { @Test - public void testGetGID() throws IOException { - URL url = new URL("https://erotiv.io/e/1568314255"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://erotiv.io/e/1568314255").toURL(); ErotivRipper ripper = new ErotivRipper(url); assert("1568314255".equals(ripper.getGID(url))); } - public void testRip() throws IOException { - URL url = new 
URL("https://erotiv.io/e/1568314255"); + public void testRip() throws IOException, URISyntaxException { + URL url = new URI("https://erotiv.io/e/1568314255").toURL(); ErotivRipper ripper = new ErotivRipper(url); testRipper(ripper); } @Test @Disabled("test or ripper broken") - public void testGetURLsFromPage() throws IOException { - URL url = new URL("https://erotiv.io/e/1568314255"); + public void testGetURLsFromPage() throws IOException, URISyntaxException { + URL url = new URI("https://erotiv.io/e/1568314255").toURL(); ErotivRipper ripper = new ErotivRipper(url); assert(1 == ripper.getURLsFromPage(ripper.getFirstPage()).size()); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FemjoyhunterRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FemjoyhunterRipperTest.java index 5520441a0..3f295a2c7 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FemjoyhunterRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FemjoyhunterRipperTest.java @@ -1,17 +1,20 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.FemjoyhunterRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class FemjoyhunterRipperTest extends RippersTest { @Test - public void testRip() throws IOException { - FemjoyhunterRipper ripper = new FemjoyhunterRipper(new URL( - "https://www.femjoyhunter.com/alisa-i-got-nice-big-breasts-and-fine-ass-so-she-seems-to-be-a-hottest-brunette-5936/")); + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + FemjoyhunterRipper ripper = new FemjoyhunterRipper(new URI( + "https://www.femjoyhunter.com/alisa-i-got-nice-big-breasts-and-fine-ass-so-she-seems-to-be-a-hottest-brunette-5936/").toURL()); testRipper(ripper); } } \ No newline at end of file diff --git 
a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FivehundredpxRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FivehundredpxRipperTest.java index 0392b36ed..c4be94d96 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FivehundredpxRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FivehundredpxRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.FivehundredpxRipper; import org.junit.jupiter.api.Disabled; @@ -9,8 +10,8 @@ public class FivehundredpxRipperTest extends RippersTest { @Test @Disabled("Ripper is broken. See https://github.com/RipMeApp/ripme/issues/438") - public void test500pxAlbum() throws IOException { - FivehundredpxRipper ripper = new FivehundredpxRipper(new URL("https://marketplace.500px.com/alexander_hurman")); + public void test500pxAlbum() throws IOException, URISyntaxException { + FivehundredpxRipper ripper = new FivehundredpxRipper(new URI("https://marketplace.500px.com/alexander_hurman").toURL()); testRipper(ripper); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FlickrRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FlickrRipperTest.java index 02268d646..22a507be0 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FlickrRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FlickrRipperTest.java @@ -1,19 +1,21 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.FlickrRipper; import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class FlickrRipperTest extends RippersTest { @Test - 
@Disabled("https://github.com/RipMeApp/ripme/issues/243") - public void testFlickrAlbum() throws IOException { + @Tag("slow") + public void testFlickrAlbum() throws IOException, URISyntaxException { FlickrRipper ripper = new FlickrRipper( - new URL("https://www.flickr.com/photos/leavingallbehind/sets/72157621895942720/")); + new URI("https://www.flickr.com/photos/leavingallbehind/sets/72157621895942720/").toURL()); testRipper(ripper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FolioRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FolioRipperTest.java index 9384aebf4..d4a51a685 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FolioRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FolioRipperTest.java @@ -1,29 +1,30 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import java.io.IOException; -import java.net.URL; -import com.rarchives.ripme.ripper.rippers.FolioRipper; - -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; - -public class FolioRipperTest extends RippersTest { - /** - * Test for folio.ink ripper - * @throws IOException - */ - @Test - @Disabled("test or ripper broken") - public void testFolioRip() throws IOException { - FolioRipper ripper = new FolioRipper(new URL("https://folio.ink/DmBe6i")); - testRipper(ripper); - } - - @Test - public void testGetGID() throws IOException { - URL url = new URL("https://folio.ink/DmBe6i"); - FolioRipper ripper = new FolioRipper(url); - Assertions.assertEquals("DmBe6i", ripper.getGID(url)); - } -} +package com.rarchives.ripme.tst.ripper.rippers; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import com.rarchives.ripme.ripper.rippers.FolioRipper; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class FolioRipperTest 
extends RippersTest { + /** + * Test for folio.ink ripper + */ + @Test + @Disabled("test or ripper broken") + public void testFolioRip() throws IOException, URISyntaxException { + FolioRipper ripper = new FolioRipper(new URI("https://folio.ink/DmBe6i").toURL()); + testRipper(ripper); + } + + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://folio.ink/DmBe6i").toURL(); + FolioRipper ripper = new FolioRipper(url); + Assertions.assertEquals("DmBe6i", ripper.getGID(url)); + } +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FooktubeRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FooktubeRipperTest.java index 10131c194..3e873ed64 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FooktubeRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FooktubeRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.FooktubeRipper; import org.junit.jupiter.api.Disabled; @@ -10,8 +11,8 @@ public class FooktubeRipperTest extends RippersTest { @Test @Disabled("test or ripper broken") - public void testFooktubeVideo() throws IOException { - FooktubeRipper ripper = new FooktubeRipper(new URL("https://fooktube.com/video/641/in-the-cinema")); //pick any video from the front page + public void testFooktubeVideo() throws IOException, URISyntaxException { + FooktubeRipper ripper = new FooktubeRipper(new URI("https://fooktube.com/video/641/in-the-cinema").toURL()); //pick any video from the front page testRipper(ripper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FuraffinityRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FuraffinityRipperTest.java index 8e2e359ae..fd21aff87 100644 --- 
a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FuraffinityRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FuraffinityRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.FuraffinityRipper; @@ -11,26 +13,27 @@ public class FuraffinityRipperTest extends RippersTest { @Test @Tag("slow") - public void testFuraffinityAlbum() throws IOException { - FuraffinityRipper ripper = new FuraffinityRipper(new URL("https://www.furaffinity.net/gallery/spencerdragon/")); + public void testFuraffinityAlbum() throws IOException, URISyntaxException { + FuraffinityRipper ripper = new FuraffinityRipper(new URI("https://www.furaffinity.net/gallery/spencerdragon/").toURL()); testRipper(ripper); } @Test @Tag("slow") - public void testFuraffinityScrap() throws IOException { - FuraffinityRipper ripper = new FuraffinityRipper(new URL("http://www.furaffinity.net/scraps/sssonic2/")); + public void testFuraffinityScrap() throws IOException, URISyntaxException { + FuraffinityRipper ripper = new FuraffinityRipper(new URI("http://www.furaffinity.net/scraps/sssonic2/").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("https://www.furaffinity.net/gallery/mustardgas/"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://www.furaffinity.net/gallery/mustardgas/").toURL(); FuraffinityRipper ripper = new FuraffinityRipper(url); Assertions.assertEquals("mustardgas", ripper.getGID(url)); } @Test - public void testLogin() throws IOException { - URL url = new URL("https://www.furaffinity.net/gallery/mustardgas/"); + @Tag("flaky") + public void testLogin() throws IOException, URISyntaxException { + URL url = new URI("https://www.furaffinity.net/gallery/mustardgas/").toURL(); FuraffinityRipper ripper = 
new FuraffinityRipper(url); // Check if the first page contain the username of ripmes shared account boolean containsUsername = ripper.getFirstPage().html().contains("ripmethrowaway"); diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FuskatorRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FuskatorRipperTest.java index e73f35b47..25656d793 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FuskatorRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/FuskatorRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.FuskatorRipper; import org.junit.jupiter.api.Disabled; @@ -10,14 +11,14 @@ public class FuskatorRipperTest extends RippersTest { @Test @Disabled("test or ripper broken") - public void testFuskatorAlbum() throws IOException { - FuskatorRipper ripper = new FuskatorRipper(new URL("https://fuskator.com/thumbs/hqt6pPXAf9z/Shaved-Blonde-Babe-Katerina-Ambre.html")); + public void testFuskatorAlbum() throws IOException, URISyntaxException { + FuskatorRipper ripper = new FuskatorRipper(new URI("https://fuskator.com/thumbs/hqt6pPXAf9z/Shaved-Blonde-Babe-Katerina-Ambre.html").toURL()); testRipper(ripper); } @Test @Disabled("test or ripper broken") - public void testUrlsWithTiled() throws IOException { - FuskatorRipper ripper = new FuskatorRipper(new URL("https://fuskator.com/thumbs/hsrzk~UIFmJ/Blonde-Babe-Destiny-Dixon-Playing-With-Black-Dildo.html")); + public void testUrlsWithTiled() throws IOException, URISyntaxException { + FuskatorRipper ripper = new FuskatorRipper(new URI("https://fuskator.com/thumbs/hsrzk~UIFmJ/Blonde-Babe-Destiny-Dixon-Playing-With-Black-Dildo.html").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GfycatRipperTest.java 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GfycatRipperTest.java deleted file mode 100644 index 39c146734..000000000 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GfycatRipperTest.java +++ /dev/null @@ -1,56 +0,0 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import com.rarchives.ripme.ripper.rippers.GfycatRipper; -import org.junit.jupiter.api.Test; - -import java.io.IOException; -import java.net.URL; - - -public class GfycatRipperTest extends RippersTest { - - /** - * Rips correctly formatted URL directly from Gfycat - * @throws IOException - */ - @Test - public void testGfycatGoodURL() throws IOException{ - GfycatRipper ripper = new GfycatRipper(new URL("https://gfycat.com/TemptingExcellentIchthyosaurs")); - testRipper(ripper); - } - /** - * Rips badly formatted URL directly from Gfycat - * @throws IOException - */ - public void testGfycatBadURL() throws IOException { - GfycatRipper ripper = new GfycatRipper(new URL("https://gfycat.com/gifs/detail/limitedtestyamericancrow")); - testRipper(ripper); - } - - /** - * Rips a Gfycat profile - * @throws IOException - */ - public void testGfycatProfile() throws IOException { - GfycatRipper ripper = new GfycatRipper(new URL("https://gfycat.com/@golbanstorage")); - testRipper(ripper); - } - - /** - * Rips a Gfycat amp link - * @throws IOException - */ - public void testGfycatAmp() throws IOException { - GfycatRipper ripper = new GfycatRipper(new URL("https://gfycat.com/amp/TemptingExcellentIchthyosaurs")); - testRipper(ripper); - } - - /** - * Rips a Gfycat profile with special characters in username - * @throws IOException - */ - public void testGfycatSpecialChar() throws IOException { - GfycatRipper ripper = new GfycatRipper(new URL("https://gfycat.com/@rsss.kr")); - testRipper(ripper); - } -} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GfycatporntubeRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GfycatporntubeRipperTest.java index 
5b8c45581..ed000e1d1 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GfycatporntubeRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GfycatporntubeRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.GfycatporntubeRipper; @@ -11,14 +13,14 @@ public class GfycatporntubeRipperTest extends RippersTest { @Test @Tag("flaky") - public void testRip() throws IOException { - GfycatporntubeRipper ripper = new GfycatporntubeRipper(new URL("https://gfycatporntube.com/blowjob-bunny-puts-on-a-show/")); + public void testRip() throws IOException, URISyntaxException { + GfycatporntubeRipper ripper = new GfycatporntubeRipper(new URI("https://gfycatporntube.com/blowjob-bunny-puts-on-a-show/").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("https://gfycatporntube.com/blowjob-bunny-puts-on-a-show/"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://gfycatporntube.com/blowjob-bunny-puts-on-a-show/").toURL(); GfycatporntubeRipper ripper = new GfycatporntubeRipper(url); Assertions.assertEquals("blowjob-bunny-puts-on-a-show", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GirlsOfDesireRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GirlsOfDesireRipperTest.java index 07fb86161..59ba21848 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GirlsOfDesireRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/GirlsOfDesireRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.GirlsOfDesireRipper; 
+import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class GirlsOfDesireRipperTest extends RippersTest { @Test - public void testGirlsofdesireAlbum() throws IOException { - GirlsOfDesireRipper ripper = new GirlsOfDesireRipper(new URL("http://www.girlsofdesire.org/galleries/krillia/")); + @Tag("flaky") + public void testGirlsofdesireAlbum() throws IOException, URISyntaxException { + GirlsOfDesireRipper ripper = new GirlsOfDesireRipper(new URI("http://www.girlsofdesire.org/galleries/krillia/").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HbrowseRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HbrowseRipperTest.java index 35b8ffa69..291ac782b 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HbrowseRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HbrowseRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.HbrowseRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class HbrowseRipperTest extends RippersTest { @Test - public void testPahealRipper() throws IOException { - HbrowseRipper ripper = new HbrowseRipper(new URL("https://www.hbrowse.com/21013/c00001")); + @Tag("flaky") + public void testPahealRipper() throws IOException, URISyntaxException { + HbrowseRipper ripper = new HbrowseRipper(new URI("https://www.hbrowse.com/21013/c00001").toURL()); testRipper(ripper); } -} \ No newline at end of file +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Hentai2readRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Hentai2readRipperTest.java index c6e2d3def..d6cbb9d07 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Hentai2readRipperTest.java +++ 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Hentai2readRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.Hentai2readRipper; import org.junit.jupiter.api.Tag; @@ -10,8 +11,8 @@ public class Hentai2readRipperTest extends RippersTest { @Test @Tag("flaky") - public void testHentai2readAlbum() throws IOException { - Hentai2readRipper ripper = new Hentai2readRipper(new URL("https://hentai2read.com/sm_school_memorial/1/")); + public void testHentai2readAlbum() throws IOException, URISyntaxException { + Hentai2readRipper ripper = new Hentai2readRipper(new URI("https://hentai2read.com/sm_school_memorial/1/").toURL()); testRipper(ripper); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaicafeRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaicafeRipperTest.java index 555c26623..e8c39d076 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaicafeRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaicafeRipperTest.java @@ -1,24 +1,28 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.HentaiCafeRipper; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class HentaicafeRipperTest extends RippersTest { @Test @Tag("flaky") - public void testHentaiCafeAlbum() throws IOException { - HentaiCafeRipper ripper = new HentaiCafeRipper(new URL("https://hentai.cafe/kikuta-the-oni-in-the-room/")); + @Disabled("20/05/2021 This test was disabled as the site has experienced notable downtime") + public void testHentaiCafeAlbum() throws IOException, URISyntaxException { + 
HentaiCafeRipper ripper = new HentaiCafeRipper(new URI("https://hentai.cafe/kikuta-the-oni-in-the-room/").toURL()); testRipper(ripper); } // This album has a line break (
) in the url. Test it to make sure ripme can handle these invalid urls @Test @Tag("flaky") - public void testAlbumWithInvalidChars() throws IOException { - HentaiCafeRipper ripper = new HentaiCafeRipper(new URL("https://hentai.cafe/chobipero-club/")); + @Disabled("20/05/2021 This test was disabled as the site has experienced notable downtime") + public void testAlbumWithInvalidChars() throws IOException, URISyntaxException { + HentaiCafeRipper ripper = new HentaiCafeRipper(new URI("https://hentai.cafe/chobipero-club/").toURL()); testRipper(ripper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaidudeRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaidudeRipperTest.java index b8924078c..0283f9b78 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaidudeRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaidudeRipperTest.java @@ -1,15 +1,17 @@ package com.rarchives.ripme.tst.ripper.rippers; import com.rarchives.ripme.ripper.rippers.HentaidudeRipper; -import com.rarchives.ripme.utils.Utils; +import org.junit.jupiter.api.Test; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; public class HentaidudeRipperTest extends RippersTest{ - public void testRip() throws IOException { - HentaidudeRipper ripper = new HentaidudeRipper(new URL("https://hentaidude.com/girlfriends-4ever-dlc-2/")); + @Test + public void testRip() throws IOException, URISyntaxException { + HentaidudeRipper ripper = new HentaidudeRipper(new URI("https://hentaidude.com/girlfriends-4ever-dlc-2/").toURL()); testRipper(ripper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaifoundryRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaifoundryRipperTest.java index 7623c61d1..2e360a9c5 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaifoundryRipperTest.java +++ 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaifoundryRipperTest.java @@ -1,27 +1,32 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.HentaifoundryRipper; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class HentaifoundryRipperTest extends RippersTest { @Test - public void testHentaifoundryRip() throws IOException { - HentaifoundryRipper ripper = new HentaifoundryRipper(new URL("https://www.hentai-foundry.com/pictures/user/personalami")); + @Tag("flaky") + public void testHentaifoundryRip() throws IOException, URISyntaxException { + HentaifoundryRipper ripper = new HentaifoundryRipper(new URI("https://www.hentai-foundry.com/pictures/user/personalami").toURL()); testRipper(ripper); } @Test - public void testHentaifoundryGetGID() throws IOException { - HentaifoundryRipper ripper = new HentaifoundryRipper(new URL("https://www.hentai-foundry.com/stories/user/Rakked")); + @Tag("flaky") + public void testHentaifoundryGetGID() throws IOException, URISyntaxException { + HentaifoundryRipper ripper = new HentaifoundryRipper(new URI("https://www.hentai-foundry.com/stories/user/Rakked").toURL()); testRipper(ripper); - Assertions.assertEquals("Rakked", ripper.getGID(new URL("https://www.hentai-foundry.com/stories/user/Rakked"))); + Assertions.assertEquals("Rakked", ripper.getGID(new URI("https://www.hentai-foundry.com/stories/user/Rakked").toURL())); } @Test - public void testHentaifoundryPdfRip() throws IOException { - HentaifoundryRipper ripper = new HentaifoundryRipper(new URL("https://www.hentai-foundry.com/stories/user/Rakked")); + @Tag("flaky") + public void testHentaifoundryPdfRip() throws IOException, URISyntaxException { + HentaifoundryRipper ripper = new HentaifoundryRipper(new 
URI("https://www.hentai-foundry.com/stories/user/Rakked").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaifoxRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaifoxRipperTest.java index 02515956f..edda81628 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaifoxRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaifoxRipperTest.java @@ -1,13 +1,14 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.HentaifoxRipper; public class HentaifoxRipperTest extends RippersTest { - public void testRip() throws IOException { - HentaifoxRipper ripper = new HentaifoxRipper(new URL("https://hentaifox.com/gallery/38544/")); + public void testRip() throws IOException, URISyntaxException { + HentaifoxRipper ripper = new HentaifoxRipper(new URI("https://hentaifox.com/gallery/38544/").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaiimageRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaiimageRipperTest.java index 3fab101d6..7ccc00297 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaiimageRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentaiimageRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.HentaiimageRipper; import com.rarchives.ripme.utils.Utils; @@ -9,9 +10,9 @@ public class HentaiimageRipperTest extends RippersTest { @Test - public void testHentaifoundryRip() throws IOException { + public void testHentaifoundryRip() throws IOException, URISyntaxException { if 
(Utils.getConfigBoolean("test.run_flaky_tests", false)) { - HentaiimageRipper ripper = new HentaiimageRipper(new URL("https://hentai-image.com/image/afrobull-gerudo-ongoing-12/")); + HentaiimageRipper ripper = new HentaiimageRipper(new URI("https://hentai-image.com/image/afrobull-gerudo-ongoing-12/").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentainexusRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentainexusRipperTest.java index 00340eba7..3389408f3 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentainexusRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HentainexusRipperTest.java @@ -1,25 +1,30 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; import com.rarchives.ripme.ripper.rippers.HentaiNexusRipper; import org.json.JSONObject; -import org.junit.Assert; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; +import static org.junit.jupiter.api.Assertions.assertEquals; + public class HentainexusRipperTest extends RippersTest { @Test @Tag("flaky") - public void testHentaiNexusJson() throws IOException { + @Disabled("20/05/2021 This test was disabled as the site has experienced notable downtime") + public void testHentaiNexusJson() throws IOException, URISyntaxException { List testURLs = new ArrayList<>(); - testURLs.add(new URL("https://hentainexus.com/view/9202")); - testURLs.add(new URL("https://hentainexus.com/read/9202")); - testURLs.add(new URL("https://hentainexus.com/view/9202#001")); - testURLs.add(new URL("https://hentainexus.com/read/9202#001")); + testURLs.add(new URI("https://hentainexus.com/view/9202").toURL()); + testURLs.add(new URI("https://hentainexus.com/read/9202").toURL()); + testURLs.add(new 
URI("https://hentainexus.com/view/9202#001").toURL()); + testURLs.add(new URI("https://hentainexus.com/read/9202#001").toURL()); for (URL url : testURLs) { @@ -39,7 +44,7 @@ public void testHentaiNexusJson() throws IOException { testOK = false; } - Assert.assertEquals(true, testOK); + assertEquals(true, testOK); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HitomiRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HitomiRipperTest.java index 646c7f46d..5587f7733 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HitomiRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HitomiRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.HitomiRipper; @@ -12,9 +13,9 @@ public class HitomiRipperTest extends RippersTest { @Test @Disabled("Broken ripper") - public void testRip() throws IOException { - HitomiRipper ripper = new HitomiRipper(new URL("https://hitomi.la/galleries/975973.html")); + public void testRip() throws IOException, URISyntaxException { + HitomiRipper ripper = new HitomiRipper(new URI("https://hitomi.la/galleries/975973.html").toURL()); testRipper(ripper); - Assertions.assertTrue(ripper.getGID(new URL("https://hitomi.la/galleries/975973.html")).equals("975973")); + Assertions.assertTrue(ripper.getGID(new URI("https://hitomi.la/galleries/975973.html").toURL()).equals("975973")); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HqpornerRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HqpornerRipperTest.java index c6aebc835..c978f71c8 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HqpornerRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HqpornerRipperTest.java @@ -6,36 +6,38 @@ import org.junit.jupiter.api.Test; import 
java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; public class HqpornerRipperTest extends RippersTest { - public void testRip() throws IOException { + public void testRip() throws IOException, URISyntaxException { if (Utils.getConfigBoolean("test.run_flaky_tests", false)) { HqpornerRipper ripper = new HqpornerRipper( - new URL("https://hqporner.com/hdporn/84636-pool_lesson_with_a_cheating_husband.html")); + new URI("https://hqporner.com/hdporn/84636-pool_lesson_with_a_cheating_husband.html").toURL()); testRipper(ripper); } } - public void testGetGID() throws IOException { - URL poolURL = new URL("https://hqporner.com/hdporn/84636-pool_lesson_with_a_cheating_husband.html"); + public void testGetGID() throws IOException, URISyntaxException { + URL poolURL = new URI("https://hqporner.com/hdporn/84636-pool_lesson_with_a_cheating_husband.html").toURL(); HqpornerRipper ripper = new HqpornerRipper(poolURL); Assertions.assertEquals("84636-pool_lesson_with_a_cheating_husband", ripper.getGID(poolURL)); } @Test - public void testGetURLsFromPage() throws IOException { - URL actressUrl = new URL("https://hqporner.com/actress/kali-roses"); + public void testGetURLsFromPage() throws IOException, URISyntaxException { + URL actressUrl = new URI("https://hqporner.com/actress/kali-roses").toURL(); HqpornerRipper ripper = new HqpornerRipper(actressUrl); assert (ripper.getURLsFromPage(ripper.getFirstPage()).size() >= 2); } @Test - public void testGetNextPage() throws IOException { - URL multiPageUrl = new URL("https://hqporner.com/category/tattooed"); + public void testGetNextPage() throws IOException, URISyntaxException { + URL multiPageUrl = new URI("https://hqporner.com/category/tattooed").toURL(); HqpornerRipper multiPageRipper = new HqpornerRipper(multiPageUrl); assert (multiPageRipper.getNextPage(multiPageRipper.getFirstPage()) != null); - URL singlePageUrl = new URL("https://hqporner.com/actress/amy-reid"); + URL singlePageUrl = 
new URI("https://hqporner.com/actress/amy-reid").toURL(); HqpornerRipper ripper = new HqpornerRipper(singlePageUrl); try { ripper.getNextPage(ripper.getFirstPage()); @@ -44,26 +46,26 @@ public void testGetNextPage() throws IOException { } } @Test - public void testMyDaddyVideoHost() throws IOException { + public void testMyDaddyVideoHost() throws IOException, URISyntaxException { if (Utils.getConfigBoolean("test.run_flaky_tests", false)) { - URL myDaddyUrl = new URL("https://hqporner.com/hdporn/84636-pool_lesson_with_a_cheating_husband.html"); + URL myDaddyUrl = new URI("https://hqporner.com/hdporn/84636-pool_lesson_with_a_cheating_husband.html").toURL(); HqpornerRipper myDaddyRipper = new HqpornerRipper(myDaddyUrl); testRipper(myDaddyRipper); } } @Test - public void testFlyFlvVideoHost() throws IOException { + public void testFlyFlvVideoHost() throws IOException, URISyntaxException { if (Utils.getConfigBoolean("test.run_flaky_tests", false)) { - URL flyFlvUrl = new URL( - "https://hqporner.com/hdporn/69862-bangbros_-_amy_reid_taking_off_a_tight_sexy_swimsuit.html"); + URL flyFlvUrl = new URI( + "https://hqporner.com/hdporn/69862-bangbros_-_amy_reid_taking_off_a_tight_sexy_swimsuit.html").toURL(); HqpornerRipper flyFlvRipper = new HqpornerRipper(flyFlvUrl); testRipper(flyFlvRipper); } } @Test - public void testUnknownVideoHost() throws IOException { + public void testUnknownVideoHost() throws IOException, URISyntaxException { if (Utils.getConfigBoolean("test.run_flaky_tests", false)) { - URL unknownHostUrl = new URL("https://hqporner.com/hdporn/79528-Kayden_Kross_-_Serious_Masturbation.html"); // howq.cc + URL unknownHostUrl = new URI("https://hqporner.com/hdporn/79528-Kayden_Kross_-_Serious_Masturbation.html").toURL(); // howq.cc HqpornerRipper unknownHostRipper = new HqpornerRipper(unknownHostUrl); testRipper(unknownHostRipper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HypnohubRipperTest.java 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HypnohubRipperTest.java index 001b3d632..46ba5828d 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HypnohubRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/HypnohubRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.HypnohubRipper; @@ -11,21 +13,21 @@ public class HypnohubRipperTest extends RippersTest { @Test @Disabled("wants a hunman") - public void testRip() throws IOException { - URL poolURL = new URL("http://hypnohub.net/pool/show/2303"); - URL postURL = new URL("http://hypnohub.net/post/show/63464/black_hair-bracelet-collar-corruption-female_only-"); + public void testRip() throws IOException, URISyntaxException { + URL poolURL = new URI("http://hypnohub.net/pool/show/2303").toURL(); + URL postURL = new URI("http://hypnohub.net/post/show/63464/black_hair-bracelet-collar-corruption-female_only-").toURL(); HypnohubRipper ripper = new HypnohubRipper(poolURL); testRipper(ripper); ripper = new HypnohubRipper(postURL); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL poolURL = new URL("http://hypnohub.net/pool/show/2303"); + public void testGetGID() throws IOException, URISyntaxException { + URL poolURL = new URI("http://hypnohub.net/pool/show/2303").toURL(); HypnohubRipper ripper = new HypnohubRipper(poolURL); Assertions.assertEquals("2303", ripper.getGID(poolURL)); - URL postURL = new URL("http://hypnohub.net/post/show/63464/black_hair-bracelet-collar-corruption-female_only-"); + URL postURL = new URI("http://hypnohub.net/post/show/63464/black_hair-bracelet-collar-corruption-female_only-").toURL(); Assertions.assertEquals("63464_black_hair-bracelet-collar-corruption-female_only-", ripper.getGID(postURL)); } } diff --git 
a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagearnRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagearnRipperTest.java deleted file mode 100644 index 69b6d8996..000000000 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagearnRipperTest.java +++ /dev/null @@ -1,15 +0,0 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import java.io.IOException; -import java.net.URL; - -import com.rarchives.ripme.ripper.rippers.ImagearnRipper; -import org.junit.jupiter.api.Test; - -public class ImagearnRipperTest extends RippersTest { - @Test - public void testImagearnRip() throws IOException { - ImagearnRipper ripper = new ImagearnRipper(new URL("http://imagearn.com//gallery.php?id=578682")); - testRipper(ripper); - } -} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagebamRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagebamRipperTest.java index 5ecfe3f61..5f1e9786f 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagebamRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagebamRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.ImagebamRipper; import org.junit.jupiter.api.Tag; @@ -10,8 +11,8 @@ public class ImagebamRipperTest extends RippersTest { @Test @Tag("flaky") - public void testImagebamRip() throws IOException { - ImagebamRipper ripper = new ImagebamRipper(new URL("http://www.imagebam.com/gallery/488cc796sllyf7o5srds8kpaz1t4m78i")); + public void testImagebamRip() throws IOException, URISyntaxException { + ImagebamRipper ripper = new ImagebamRipper(new URI("http://www.imagebam.com/gallery/488cc796sllyf7o5srds8kpaz1t4m78i").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagefapRipperTest.java 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagefapRipperTest.java index 19061e347..2af7d4999 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagefapRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagefapRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.HashMap; import java.util.Map; @@ -13,16 +15,12 @@ public class ImagefapRipperTest extends RippersTest { @Test @Tag("flaky") - public void testImagefapAlbums() throws IOException { + public void testImagefapAlbums() throws IOException, URISyntaxException { Map testURLs = new HashMap<>(); // Album with specific title - testURLs.put(new URL("http://www.imagefap.com/pictures/4649440/Frozen-%28Elsa-and-Anna%29?view=2"), - "Frozen (Elsa and Anna)"); - - // New URL format - testURLs.put(new URL("http://www.imagefap.com/gallery.php?pgid=fffd68f659befa5535cf78f014e348f1"), - "imagefap_fffd68f659befa5535cf78f014e348f1"); + testURLs.put(new URI("https://www.imagefap.com/pictures/11365460/Cartoons").toURL(), + "Cartoons"); for (URL url : testURLs.keySet()) { ImagefapRipper ripper = new ImagefapRipper(url); @@ -31,9 +29,9 @@ public void testImagefapAlbums() throws IOException { } @Test @Tag("flaky") - public void testImagefapGetAlbumTitle() throws IOException { - URL url = new URL("https://www.imagefap.com/gallery.php?gid=7789753"); + public void testImagefapGetAlbumTitle() throws IOException, URISyntaxException { + URL url = new URI("https://www.imagefap.com/pictures/11365460/Cartoons").toURL(); ImagefapRipper ripper = new ImagefapRipper(url); - Assertions.assertEquals("imagefap_Red.Heels.Lover.In.Love_7789753", ripper.getAlbumTitle(url)); + Assertions.assertEquals("imagefap_Cartoons_11365460", ripper.getAlbumTitle(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagevenueRipperTest.java 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagevenueRipperTest.java index 43d211a72..f604d1f74 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagevenueRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImagevenueRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.ImagevenueRipper; @@ -12,15 +14,15 @@ public class ImagevenueRipperTest extends RippersTest { @Test @Disabled("See https://github.com/RipMeApp/ripme/issues/1202") - public void testImagevenueRip() throws IOException { + public void testImagevenueRip() throws IOException, URISyntaxException { ImagevenueRipper ripper = new ImagevenueRipper( - new URL("http://img120.imagevenue.com/galshow.php?gal=gallery_1373818527696_191lo")); + new URI("http://img120.imagevenue.com/galshow.php?gal=gallery_1373818527696_191lo").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("http://img120.imagevenue.com/galshow.php?gal=gallery_1373818527696_191lo"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("http://img120.imagevenue.com/galshow.php?gal=gallery_1373818527696_191lo").toURL(); ImagevenueRipper ripper = new ImagevenueRipper(url); Assertions.assertEquals("gallery_1373818527696_191lo", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgboxRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgboxRipperTest.java index 3b6bb782c..98e3dfc59 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgboxRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgboxRipperTest.java @@ -1,22 +1,26 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import 
java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.ImgboxRipper; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class ImgboxRipperTest extends RippersTest { @Test - public void testImgboxRip() throws IOException { - ImgboxRipper ripper = new ImgboxRipper(new URL("https://imgbox.com/g/FJPF7t26FD")); + @Tag("flaky") + public void testImgboxRip() throws IOException, URISyntaxException { + ImgboxRipper ripper = new ImgboxRipper(new URI("https://imgbox.com/g/FJPF7t26FD").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("https://imgbox.com/g/FJPF7t26FD"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://imgbox.com/g/FJPF7t26FD").toURL(); ImgboxRipper ripper = new ImgboxRipper(url); Assertions.assertEquals("FJPF7t26FD", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgurRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgurRipperTest.java index 37ef50eb5..86dc334b2 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgurRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgurRipperTest.java @@ -5,25 +5,28 @@ import com.rarchives.ripme.utils.RipUtils; import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; public class ImgurRipperTest extends RippersTest { @Test - public void testImgurURLFailures() throws IOException { + public void testImgurURLFailures() throws IOException, URISyntaxException { List failURLs = new ArrayList<>(); // Imgur urls that should not work - failURLs.add(new URL("http://imgur.com")); - failURLs.add(new 
URL("http://imgur.com/")); - failURLs.add(new URL("http://i.imgur.com")); - failURLs.add(new URL("http://i.imgur.com/")); - failURLs.add(new URL("http://imgur.com/image.jpg")); - failURLs.add(new URL("http://i.imgur.com/image.jpg")); + failURLs.add(new URI("http://imgur.com").toURL()); + failURLs.add(new URI("http://imgur.com/").toURL()); + failURLs.add(new URI("http://i.imgur.com").toURL()); + failURLs.add(new URI("http://i.imgur.com/").toURL()); + failURLs.add(new URI("http://imgur.com/image.jpg").toURL()); + failURLs.add(new URI("http://i.imgur.com/image.jpg").toURL()); + // Imgur seems not to support URLs with lists of images anymore. + failURLs.add(new URI("http://imgur.com/758qD43,C6iVJex,bP7flAu,J3l85Ri,1U7fhu5,MbuAUCM,JF4vOXQ").toURL()); for (URL url : failURLs) { try { new ImgurRipper(url); @@ -35,22 +38,23 @@ public void testImgurURLFailures() throws IOException { } @Test - public void testImgurAlbums() throws IOException { + public void testImgurAlbums() throws IOException, URISyntaxException { List contentURLs = new ArrayList<>(); // URLs that should return more than 1 image - //contentURLs.add(new URL("http://imgur.com/a/dS9OQ#0")); // Horizontal layout - //contentURLs.add(new URL("http://imgur.com/a/YpsW9#0")); // Grid layout - contentURLs.add(new URL("http://imgur.com/a/WxG6f/layout/vertical#0")); - contentURLs.add(new URL("http://imgur.com/a/WxG6f/layout/horizontal#0")); - contentURLs.add(new URL("http://imgur.com/a/WxG6f/layout/grid#0")); - contentURLs.add(new URL("http://imgur.com/gallery/FmP2o")); // Gallery URL - // Imgur seems not to support URLs with lists of images anymore. 
- // contentURLs.add(new - // URL("http://imgur.com/758qD43,C6iVJex,bP7flAu,J3l85Ri,1U7fhu5,MbuAUCM,JF4vOXQ")); + contentURLs.add(new URI("http://imgur.com/gallery/FmP2o").toURL()); + // URLs with /gallery path + contentURLs.add(new URI("http://imgur.com/gallery/nAl13J6").toURL()); + contentURLs.add(new URI("https://imgur.com/gallery/another-brendan-fraser-reaction-from-bedazzled-intergalactic-quality-nAl13J6").toURL()); + // URLs with /a path + contentURLs.add(new URI("http://imgur.com/a/G058j5F").toURL()); + contentURLs.add(new URI("https://imgur.com/a/thanks-batman-G058j5F").toURL()); + contentURLs.add(new URI("https://imgur.com/a/thanks-batman-G058j5F/layout/grid#0").toURL()); + contentURLs.add(new URI("https://imgur.com/a/G058j5F/layout/grid#0").toURL()); + contentURLs.add(new URI("https://imgur.com/a/G058j5F/layout/horizontal#0").toURL()); // Sometimes hangs up - // contentURLs.add(new URL("http://imgur.com/r/nsfw_oc/top/all")); - // contentURLs.add(new URL("http://imgur.com/a/bXQpH")); // Album with - // titles/descriptions + // contentURLs.add(new URI("http://imgur.com/r/nsfw_oc/top/all").toURL()); + // Album with titles/descriptions + contentURLs.add(new URI("http://imgur.com/a/bXQpH").toURL()); for (URL url : contentURLs) { ImgurRipper ripper = new ImgurRipper(url); testRipper(ripper); @@ -58,11 +62,25 @@ public void testImgurAlbums() throws IOException { } @Test - @Disabled("test or ripper broken") - public void testImgurSingleImage() throws IOException { + public void testImgurUserAccount() throws IOException, URISyntaxException { + List contentURLs = new ArrayList<>(); + // URL with albums + contentURLs.add("https://RockStarBrew.imgur.com"); + // New URL format + contentURLs.add("https://imgur.com/user/RockStarBrew/"); + // And URL with images + contentURLs.add("https://imgur.com/user/counter2strike"); + for (var url : contentURLs) { + ImgurRipper ripper = new ImgurRipper(new URI(url).toURL()); + testRipper(ripper); + } + } + + @Test + public void 
testImgurSingleImage() throws IOException, URISyntaxException { List contentURLs = new ArrayList<>(); - contentURLs.add(new URL("http://imgur.com/qbfcLyG")); // Single image URL - contentURLs.add(new URL("https://imgur.com/KexUO")); // Single image URL + contentURLs.add(new URI("http://imgur.com/qbfcLyG").toURL()); // Single image URL + contentURLs.add(new URI("https://imgur.com/KexUO").toURL()); // Single image URL for (URL url : contentURLs) { ImgurRipper ripper = new ImgurRipper(url); testRipper(ripper); @@ -70,23 +88,23 @@ public void testImgurSingleImage() throws IOException { } @Test - public void testImgurAlbumWithMoreThan20Pictures() throws IOException { - ImgurAlbum album = ImgurRipper.getImgurAlbum(new URL("http://imgur.com/a/HUMsq")); + public void testImgurAlbumWithMoreThan20Pictures() throws IOException, URISyntaxException { + ImgurAlbum album = ImgurRipper.getImgurAlbum(new URI("http://imgur.com/a/HUMsq").toURL()); Assertions.assertTrue(album.images.size() >= 20, "Failed to find 20 files from " + album.url.toExternalForm() + ", only got " + album.images.size()); } @Test - public void testImgurAlbumWithMoreThan100Pictures() throws IOException { - ImgurAlbum album = ImgurRipper.getImgurAlbum(new URL("https://imgur.com/a/HX3JSrD")); + public void testImgurAlbumWithMoreThan100Pictures() throws IOException, URISyntaxException { + ImgurAlbum album = ImgurRipper.getImgurAlbum(new URI("https://imgur.com/a/HX3JSrD").toURL()); Assertions.assertTrue(album.images.size() >= 100, "Failed to find 100 files from " + album.url.toExternalForm() + ", only got " + album.images.size()); } @Test public void testImgurVideoFromGetFilesFromURL() throws Exception { - List urls = RipUtils.getFilesFromURL(new URL("https://i.imgur.com/4TtwxRN.gifv")); - Assertions.assertEquals("https://i.imgur.com/4TtwxRN.mp4", urls.get(0).toExternalForm()); + List urls = RipUtils.getFilesFromURL(new URI("https://i.imgur.com/7qoW0Mo.gifv").toURL()); + 
Assertions.assertEquals("https://i.imgur.com/7qoW0Mo.mp4", urls.get(0).toExternalForm()); } /* @@ -95,7 +113,7 @@ public void testImgurVideoFromGetFilesFromURL() throws Exception { * "over capacity" warning on the page. // I wonder if our testing automation is * what is putting this album over capacity? // See issue #376. public void * testImgurAlbumWithMoreThan1000Pictures() throws IOException { ImgurAlbum - * album = ImgurRipper.getImgurAlbum(new URL("http://imgur.com/a/vsuh5")); + * album = ImgurRipper.getImgurAlbum(new URI("http://imgur.com/a/vsuh5").toURL()); * assertTrue("Failed to find 1000 files from " + album.url.toExternalForm() + * ", only got " + album.images.size(), album.images.size() >= 1000); } */ diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/InstagramRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/InstagramRipperTest.java index 85b3b248a..5b929fafc 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/InstagramRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/InstagramRipperTest.java @@ -3,9 +3,12 @@ import com.rarchives.ripme.ripper.rippers.InstagramRipper; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.HashMap; @@ -14,19 +17,19 @@ public class InstagramRipperTest extends RippersTest { @Test - public void testInstagramGID() throws IOException { + public void testInstagramGID() throws IOException, URISyntaxException { Map testURLs = new HashMap<>(); - testURLs.put(new URL("http://instagram.com/Test_User"), "Test_User"); - testURLs.put(new URL("http://instagram.com/_test_user_"), "_test_user_"); - testURLs.put(new URL("http://instagram.com/_test_user_/?pinned"), "_test_user__pinned"); - testURLs.put(new 
URL("http://instagram.com/stories/_test_user_/"), "_test_user__stories"); - testURLs.put(new URL("http://instagram.com/_test_user_/tagged"), "_test_user__tagged"); - testURLs.put(new URL("http://instagram.com/_test_user_/channel"), "_test_user__igtv"); - testURLs.put(new URL("http://instagram.com/explore/tags/test_your_tag"), "tag_test_your_tag"); - testURLs.put(new URL("https://www.instagram.com/p/BZ4egP7njW5/?hl=en"), "post_BZ4egP7njW5"); - testURLs.put(new URL("https://www.instagram.com/p/BZ4egP7njW5"), "post_BZ4egP7njW5"); - testURLs.put(new URL("https://www.instagram.com/p/BaNPpaHn2zU/?taken-by=hilaryduff"), "post_BaNPpaHn2zU"); - testURLs.put(new URL("https://www.instagram.com/p/BaNPpaHn2zU/"), "post_BaNPpaHn2zU"); + testURLs.put(new URI("http://instagram.com/Test_User").toURL(), "Test_User"); + testURLs.put(new URI("http://instagram.com/_test_user_").toURL(), "_test_user_"); + testURLs.put(new URI("http://instagram.com/_test_user_/?pinned").toURL(), "_test_user__pinned"); + testURLs.put(new URI("http://instagram.com/stories/_test_user_/").toURL(), "_test_user__stories"); + testURLs.put(new URI("http://instagram.com/_test_user_/tagged").toURL(), "_test_user__tagged"); + testURLs.put(new URI("http://instagram.com/_test_user_/channel").toURL(), "_test_user__igtv"); + testURLs.put(new URI("http://instagram.com/explore/tags/test_your_tag").toURL(), "tag_test_your_tag"); + testURLs.put(new URI("https://www.instagram.com/p/BZ4egP7njW5/?hl=en").toURL(), "post_BZ4egP7njW5"); + testURLs.put(new URI("https://www.instagram.com/p/BZ4egP7njW5").toURL(), "post_BZ4egP7njW5"); + testURLs.put(new URI("https://www.instagram.com/p/BaNPpaHn2zU/?taken-by=hilaryduff").toURL(), "post_BaNPpaHn2zU"); + testURLs.put(new URI("https://www.instagram.com/p/BaNPpaHn2zU/").toURL(), "post_BaNPpaHn2zU"); for (URL url : testURLs.keySet()) { InstagramRipper ripper = new InstagramRipper(url); ripper.setup(); @@ -37,15 +40,23 @@ public void testInstagramGID() throws IOException { @Test 
@Disabled("Ripper broken for single items") - public void testInstagramAlbums() throws IOException { + public void testInstagramSingle() throws IOException, URISyntaxException { List contentURLs = new ArrayList<>(); - // This unit test is a bit flaky - //contentURLs.add(new URL("https://www.instagram.com/Test_User/")); - contentURLs.add(new URL("https://www.instagram.com/p/BaNPpaHn2zU/?hl=en")); - contentURLs.add(new URL("https://www.instagram.com/p/BaNPpaHn2zU/")); + contentURLs.add(new URI("https://www.instagram.com/p/BaNPpaHn2zU/?hl=en").toURL()); + contentURLs.add(new URI("https://www.instagram.com/p/BaNPpaHn2zU/").toURL()); for (URL url : contentURLs) { InstagramRipper ripper = new InstagramRipper(url); testRipper(ripper); } } + + @Test + @Tag("flaky") + public void testInstagramAlbums() throws IOException, URISyntaxException { + // do not test, in case of rate limit 200/hr since 2021. see + // https://github.com/ripmeapp2/ripme/issues/32 + URL url = new URI("https://www.instagram.com/Test_User/").toURL(); + InstagramRipper ripper = new InstagramRipper(url); + testRipper(ripper); + } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/JagodibujaRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/JagodibujaRipperTest.java index af4314c88..3f5c199eb 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/JagodibujaRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/JagodibujaRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.JagodibujaRipper; import org.junit.jupiter.api.Disabled; @@ -10,9 +11,9 @@ public class JagodibujaRipperTest extends RippersTest { @Test @Disabled("fails on github ubuntu automated PR check 2020-07-29") - public void testJagodibujaRipper() throws IOException { + public void testJagodibujaRipper() 
throws IOException, URISyntaxException { // a photo set - JagodibujaRipper ripper = new JagodibujaRipper(new URL("http://www.jagodibuja.com/comic-in-me/")); + JagodibujaRipper ripper = new JagodibujaRipper(new URI("http://www.jagodibuja.com/comic-in-me/").toURL()); testRipper(ripper); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/KingcomixRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/KingcomixRipperTest.java index bea92e8b4..ae543c364 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/KingcomixRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/KingcomixRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.KingcomixRipper; @@ -13,14 +15,14 @@ public class KingcomixRipperTest extends RippersTest { @Test @Disabled("test or ripper broken") - public void testRip() throws IOException { - KingcomixRipper ripper = new KingcomixRipper(new URL("https://kingcomix.com/aunt-cumming-tracy-scops/")); + public void testRip() throws IOException, URISyntaxException { + KingcomixRipper ripper = new KingcomixRipper(new URI("https://kingcomix.com/aunt-cumming-tracy-scops/").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("https://kingcomix.com/aunt-cumming-tracy-scops/"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://kingcomix.com/aunt-cumming-tracy-scops/").toURL(); KingcomixRipper ripper = new KingcomixRipper(url); Assertions.assertEquals("aunt-cumming-tracy-scops", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ListalRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ListalRipperTest.java index aba41af38..4516f2c89 100644 --- 
a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ListalRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ListalRipperTest.java @@ -1,29 +1,42 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import java.io.IOException; -import java.net.URL; -import com.rarchives.ripme.ripper.rippers.ListalRipper; - -public class ListalRipperTest extends RippersTest { - - /** - * Test for list type url. - * @throws IOException - */ - public void testRipListType() throws IOException { - ListalRipper ripper = - new ListalRipper(new URL("https://www.listal.com/list/evolution-emma-stone")); - testRipper(ripper); - } - - /** - * Test for folder type url. - * @throws IOException - */ - public void testRipFolderType() throws IOException { - ListalRipper ripper = - new ListalRipper(new URL("https://www.listal.com/chet-atkins/pictures")); - testRipper(ripper); - } - -} +package com.rarchives.ripme.tst.ripper.rippers; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import com.rarchives.ripme.ripper.rippers.ListalRipper; +import org.junit.jupiter.api.Test; + +public class ListalRipperTest extends RippersTest { + + /** + * Test for folder type url. + */ + @Test + public void testPictures() throws IOException, URISyntaxException { + ListalRipper ripper = + new ListalRipper(new URI("https://www.listal.com/emma-stone_iii/pictures").toURL()); + testRipper(ripper); + } + + /** + * Test for list type url. + */ + @Test + public void testRipListType() throws IOException, URISyntaxException { + ListalRipper ripper = + new ListalRipper(new URI("https://www.listal.com/list/evolution-emma-stone").toURL()); + testRipper(ripper); + } + + /** + * Test for folder type url. 
+ */ + @Test + public void testRipFolderType() throws IOException, URISyntaxException { + ListalRipper ripper = + new ListalRipper(new URI("https://www.listal.com/chet-atkins/pictures").toURL()); + testRipper(ripper); + } + +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/LusciousRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/LusciousRipperTest.java index 7c797b4b0..bc8594d66 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/LusciousRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/LusciousRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.LusciousRipper; @@ -11,33 +13,17 @@ public class LusciousRipperTest extends RippersTest { @Test @Disabled("test or ripper broken") - public void testPahealRipper() throws IOException { + public void testLusciousRipper() throws IOException, URISyntaxException { // a photo set LusciousRipper ripper = new LusciousRipper( - new URL("https://luscious.net/albums/h-na-alice-wa-suki-desu-ka-do-you-like-alice-when_321609/")); + new URI("https://luscious.net/albums/h-na-alice-wa-suki-desu-ka-do-you-like-alice-when_321609/").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("https://luscious.net/albums/h-na-alice-wa-suki-desu-ka-do-you-like-alice-when_321609/"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://luscious.net/albums/h-na-alice-wa-suki-desu-ka-do-you-like-alice-when_321609/").toURL(); LusciousRipper ripper = new LusciousRipper(url); Assertions.assertEquals("h-na-alice-wa-suki-desu-ka-do-you-like-alice-when_321609", ripper.getGID(url)); } - - @Test - @Disabled("test or ripper broken") - public void testGetNextPage() throws IOException { - URL multiPageAlbumUrl = new 
URL("https://luscious.net/albums/women-of-color_58/"); - LusciousRipper multiPageRipper = new LusciousRipper(multiPageAlbumUrl); - assert (multiPageRipper.getNextPage(multiPageRipper.getFirstPage()) != null); - - URL singlePageAlbumUrl = new URL("https://members.luscious.net/albums/bakaneko-navidarks_332097/"); - LusciousRipper singlePageRipper = new LusciousRipper(singlePageAlbumUrl); - try { - singlePageRipper.getNextPage(singlePageRipper.getFirstPage()); - } catch (IOException e) { - Assertions.assertEquals("No next page found.", e.getMessage()); - } - } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MangadexRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MangadexRipperTest.java index 62aabf7cd..3bcec8c8f 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MangadexRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MangadexRipperTest.java @@ -2,21 +2,26 @@ import com.rarchives.ripme.ripper.rippers.MangadexRipper; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; -public class MangadexRipperTest extends RippersTest{ - public void testRip() throws IOException { - MangadexRipper ripper = new MangadexRipper(new URL("https://mangadex.org/chapter/467904/")); +public class MangadexRipperTest extends RippersTest { + @Test + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + MangadexRipper ripper = new MangadexRipper(new URI("https://mangadex.org/chapter/467904/").toURL()); testRipper(ripper); } - public class testMangaRip extends RippersTest{ - public void testRip() throws IOException { - MangadexRipper ripper = new MangadexRipper(new URL("https://mangadex.org/title/44625/this-croc-will-die-in-100-days")); - testRipper(ripper); - } + @Test + @Tag("flaky") + public void test2() throws IOException, 
URISyntaxException { + MangadexRipper ripper = new MangadexRipper(new URI("https://mangadex.org/title/44625/this-croc-will-die-in-100-days").toURL()); + testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ManganeloRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ManganeloRipperTest.java index 378181214..5095553c6 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ManganeloRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ManganeloRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.ManganeloRipper; @@ -11,14 +13,14 @@ public class ManganeloRipperTest extends RippersTest { @Test @Disabled("no images found, test or ripper broken") - public void testRip() throws IOException { - ManganeloRipper ripper = new ManganeloRipper(new URL("https://manganelo.com/manga/demonic_housekeeper")); + public void testRip() throws IOException, URISyntaxException { + ManganeloRipper ripper = new ManganeloRipper(new URI("https://manganelo.com/manga/demonic_housekeeper").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("https://manganelo.com/manga/demonic_housekeeper"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://manganelo.com/manga/demonic_housekeeper").toURL(); ManganeloRipper ripper = new ManganeloRipper(url); Assertions.assertEquals("demonic_housekeeper", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MastodonRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MastodonRipperTest.java index 4b5643007..9e50e9a51 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MastodonRipperTest.java +++ 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MastodonRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.MastodonRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class MastodonRipperTest extends RippersTest { @Test - public void testRip() throws IOException { - MastodonRipper ripper = new MastodonRipper(new URL("https://mastodon.social/@pythonhub/media")); + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + MastodonRipper ripper = new MastodonRipper(new URI("https://mastodon.social/@pythonhub/media").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MastodonXyzRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MastodonXyzRipperTest.java index 2f500d6ad..0ad1b3f1f 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MastodonXyzRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MastodonXyzRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.MastodonXyzRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class MastodonXyzRipperTest extends RippersTest { @Test - public void testRip() throws IOException { - MastodonXyzRipper ripper = new MastodonXyzRipper(new URL("https://mastodon.xyz/@artwo/media")); + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + MastodonXyzRipper ripper = new MastodonXyzRipper(new URI("https://mastodon.xyz/@artwo/media").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MeituriRipperTest.java 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MeituriRipperTest.java index 883b73e36..441fd5c61 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MeituriRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MeituriRipperTest.java @@ -1,25 +1,27 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.MeituriRipper; import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class MeituriRipperTest extends RippersTest { @Test - @Disabled("Broken ripper") - public void testMeituriRip() throws IOException { - MeituriRipper ripper = new MeituriRipper(new URL("https://www.meituri.com/a/14449/")); + @Tag("flaky") + public void testMeituriRip() throws IOException, URISyntaxException { + MeituriRipper ripper = new MeituriRipper(new URI("https://www.tujigu.com/a/14449/").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("https://www.meituri.com/a/14449/"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://www.tujigu.com/a/14449/").toURL(); MeituriRipper ripper = new MeituriRipper(url); Assertions.assertEquals("14449", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelmayhemRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelmayhemRipperTest.java index 9e81102a3..c8c10ce6a 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelmayhemRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelmayhemRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import 
com.rarchives.ripme.ripper.rippers.ModelmayhemRipper; @@ -13,16 +14,16 @@ public class ModelmayhemRipperTest extends RippersTest { @Test @Disabled("Broken ripper") - public void testModelmayhemRip() throws IOException { + public void testModelmayhemRip() throws IOException, URISyntaxException { ModelmayhemRipper ripper = new ModelmayhemRipper( - new URL("https://www.modelmayhem.com/portfolio/520206/viewall")); + new URI("https://www.modelmayhem.com/portfolio/520206/viewall").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { + public void testGetGID() throws IOException, URISyntaxException { ModelmayhemRipper ripper = new ModelmayhemRipper( - new URL("https://www.modelmayhem.com/portfolio/520206/viewall")); - Assertions.assertEquals("520206", ripper.getGID(new URL("https://www.modelmayhem.com/portfolio/520206/viewall"))); + new URI("https://www.modelmayhem.com/portfolio/520206/viewall").toURL()); + Assertions.assertEquals("520206", ripper.getGID(new URI("https://www.modelmayhem.com/portfolio/520206/viewall").toURL())); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelxRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelxRipperTest.java index 50fec37b4..38f572a8a 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelxRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ModelxRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.ModelxRipper; @@ -11,11 +13,10 @@ public class ModelxRipperTest extends RippersTest { @Test @Disabled("ModelxRipper domain has been changes. 
Commenting to avoid build failure.") - public void testModelxAlbum() throws IOException { - ModelxRipper ripper = new ModelxRipper(new URL( - "http://www.modelx.org/graphis-collection-2002-2016/ai-yuzuki-%e6%9f%9a%e6%9c%88%e3%81%82%e3%81%84-yuzuiro/")); - System.out.println(ripper.getGID(new URL( - "http://www.modelx.org/graphis-collection-2002-2016/ai-yuzuki-%e6%9f%9a%e6%9c%88%e3%81%82%e3%81%84-yuzuiro/"))); + public void testModelxAlbum() throws IOException, URISyntaxException { + URL url = new URI("http://www.modelx.org/graphis-collection-2002-2016/ai-yuzuki-%e6%9f%9a%e6%9c%88%e3%81%82%e3%81%84-yuzuiro/").toURL(); + ModelxRipper ripper = new ModelxRipper(url); + System.out.println(ripper.getGID(url)); testRipper(ripper); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MotherlessRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MotherlessRipperTest.java index 97f48a5fd..98c65f079 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MotherlessRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MotherlessRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.MotherlessRipper; @@ -11,8 +12,8 @@ public class MotherlessRipperTest extends RippersTest { @Test @Tag("flaky") - public void testMotherlessAlbumRip() throws IOException { - MotherlessRipper ripper = new MotherlessRipper(new URL("https://motherless.com/G1168D90")); + public void testMotherlessAlbumRip() throws IOException, URISyntaxException { + MotherlessRipper ripper = new MotherlessRipper(new URI("https://motherless.com/G1168D90").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaicomicsRipperTest.java 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaicomicsRipperTest.java index 462561683..52f8b0d85 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaicomicsRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaicomicsRipperTest.java @@ -1,39 +1,44 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.MyhentaicomicsRipper; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class MyhentaicomicsRipperTest extends RippersTest { @Test - public void testMyhentaicomicsAlbum() throws IOException { - MyhentaicomicsRipper ripper = new MyhentaicomicsRipper(new URL("http://myhentaicomics.com/index.php/Nienna-Lost-Tales")); + @Tag("flaky") + public void testMyhentaicomicsAlbum() throws IOException, URISyntaxException { + MyhentaicomicsRipper ripper = new MyhentaicomicsRipper(new URI("http://myhentaicomics.com/index.php/Nienna-Lost-Tales").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - URL url = new URL("http://myhentaicomics.com/index.php/Nienna-Lost-Tales"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("http://myhentaicomics.com/index.php/Nienna-Lost-Tales").toURL(); MyhentaicomicsRipper ripper = new MyhentaicomicsRipper(url); // Test a comic Assertions.assertEquals("Nienna-Lost-Tales", ripper.getGID(url)); // Test a search - Assertions.assertEquals("test", ripper.getGID(new URL("http://myhentaicomics.com/index.php/search?q=test"))); + Assertions.assertEquals("test", ripper.getGID(new URI("http://myhentaicomics.com/index.php/search?q=test").toURL())); // Test a tag - Assertions.assertEquals("2409", ripper.getGID(new URL("http://myhentaicomics.com/index.php/tag/2409/"))); + Assertions.assertEquals("2409", ripper.getGID(new 
URI("http://myhentaicomics.com/index.php/tag/2409/").toURL())); } @Test - public void testGetAlbumsToQueue() throws IOException { - URL url = new URL("https://myhentaicomics.com/index.php/tag/3167/"); + @Tag("flaky") + public void testGetAlbumsToQueue() throws IOException, URISyntaxException { + URL url = new URI("https://myhentaicomics.com/index.php/tag/3167/").toURL(); MyhentaicomicsRipper ripper = new MyhentaicomicsRipper(url); Assertions.assertEquals(15, ripper.getAlbumsToQueue(ripper.getFirstPage()).size()); } @Test - public void testPageContainsAlbums() throws IOException { - URL url = new URL("https://myhentaicomics.com/index.php/tag/3167/"); - URL url2 = new URL("https://myhentaicomics.com/index.php/search?q=test"); + public void testPageContainsAlbums() throws IOException, URISyntaxException { + URL url = new URI("https://myhentaicomics.com/index.php/tag/3167/").toURL(); + URL url2 = new URI("https://myhentaicomics.com/index.php/search?q=test").toURL(); MyhentaicomicsRipper ripper = new MyhentaicomicsRipper(url); Assertions.assertTrue(ripper.pageContainsAlbums(url)); Assertions.assertTrue(ripper.pageContainsAlbums(url2)); diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaigalleryRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaigalleryRipperTest.java index 19f299453..f7e4273ac 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaigalleryRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyhentaigalleryRipperTest.java @@ -1,23 +1,27 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.MyhentaigalleryRipper; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class MyhentaigalleryRipperTest extends RippersTest { @Test - public void 
testMyhentaigalleryAlbum() throws IOException { + @Tag("flaky") + public void testMyhentaigalleryAlbum() throws IOException, URISyntaxException { MyhentaigalleryRipper ripper = new MyhentaigalleryRipper( - new URL("https://myhentaigallery.com/gallery/thumbnails/9201")); + new URI("https://myhentaigallery.com/gallery/thumbnails/9201").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("https://myhentaigallery.com/gallery/thumbnails/9201"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://myhentaigallery.com/gallery/thumbnails/9201").toURL(); MyhentaigalleryRipper ripper = new MyhentaigalleryRipper(url); Assertions.assertEquals("9201", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyreadingmangaRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyreadingmangaRipperTest.java index 16c5de5e7..a6a5a13a5 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyreadingmangaRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/MyreadingmangaRipperTest.java @@ -1,13 +1,15 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; + import com.rarchives.ripme.ripper.rippers.MyreadingmangaRipper; public class MyreadingmangaRipperTest extends RippersTest { - public void testRip() throws IOException { - MyreadingmangaRipper ripper = new MyreadingmangaRipper(new URL("https://myreadingmanga.info/zelo-lee-brave-lover-dj-slave-market-jp/")); + public void testRip() throws IOException, URISyntaxException { + MyreadingmangaRipper ripper = new MyreadingmangaRipper(new URI("https://myreadingmanga.info/zelo-lee-brave-lover-dj-slave-market-jp/").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NatalieMuRipperTest.java 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NatalieMuRipperTest.java index 15a9d91ad..e3522e712 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NatalieMuRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NatalieMuRipperTest.java @@ -25,10 +25,10 @@ // public void testNatalieMuURLPasses() throws IOException { // List passURLs = new ArrayList<>(); // // URLs that should work -// passURLs.add(new URL("http://natalie.mu/music/news/140367")); -// passURLs.add(new URL("http://cdn2.natalie.mu/music/news/140411")); -// passURLs.add(new URL("http://cdn2.natalie.mu/music/gallery/show/news_id/140411/image_id/369655")); -// passURLs.add(new URL("http://natalie.mu/music/gallery/show/news_id/139146/image_id/365218")); +// passURLs.add(new URI("http://natalie.mu/music/news/140367").toURL()); +// passURLs.add(new URI("http://cdn2.natalie.mu/music/news/140411").toURL()); +// passURLs.add(new URI("http://cdn2.natalie.mu/music/gallery/show/news_id/140411/image_id/369655").toURL()); +// passURLs.add(new URI("http://natalie.mu/music/gallery/show/news_id/139146/image_id/365218").toURL()); // for (URL url : passURLs) { // NatalieMuRipper ripper = new NatalieMuRipper(url); // ripper.setup(); @@ -42,19 +42,19 @@ // public void testNatalieMuRipper() throws IOException { // List contentURLs = new ArrayList<>(); // // URLs that should return more than 1 image -// contentURLs.add(new URL("http://natalie.mu/music/news/140367")); -// contentURLs.add(new URL("http://cdn2.natalie.mu/music/news/140411")); -// contentURLs.add(new URL("http://cdn2.natalie.mu/music/gallery/show/news_id/140411/image_id/369655")); -// contentURLs.add(new URL("http://natalie.mu/music/gallery/show/news_id/139146/image_id/365218")); +// contentURLs.add(new URI("http://natalie.mu/music/news/140367").toURL()); +// contentURLs.add(new URI("http://cdn2.natalie.mu/music/news/140411").toURL()); +// contentURLs.add(new 
URI("http://cdn2.natalie.mu/music/gallery/show/news_id/140411/image_id/369655").toURL()); +// contentURLs.add(new URI("http://natalie.mu/music/gallery/show/news_id/139146/image_id/365218").toURL()); // // // Most *chans have volatile threads & can't be trusted for integration testing. // -// //contentURLs.add(new URL("http://boards.4chan.org/r/res/12225949")); -// //contentURLs.add(new URL("http://7chan.org/gif/res/23795.html")); -// //contentURLs.add(new URL("http://unichan2.org/b/res/518004.html")); +// //contentURLs.add(new URI("http://boards.4chan.org/r/res/12225949").toURL()); +// //contentURLs.add(new URI("http://7chan.org/gif/res/23795.html").toURL()); +// //contentURLs.add(new URI("http://unichan2.org/b/res/518004.html").toURL()); // // // xchan has an HTTPS certificaiton error... -// //contentURLs.add(new URL("http://xchan.pw/porn/res/437.html")); +// //contentURLs.add(new URI("http://xchan.pw/porn/res/437.html").toURL()); // for (URL url : contentURLs) { // NatalieMuRipper ripper = new NatalieMuRipper(url); // testRipper(ripper); diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NewgroundsRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NewgroundsRipperTest.java index 4421b267c..fcfac96d3 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NewgroundsRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NewgroundsRipperTest.java @@ -2,21 +2,25 @@ import com.rarchives.ripme.ripper.rippers.NewgroundsRipper; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; public class NewgroundsRipperTest extends RippersTest { @Test - public void testNewgroundsRip() throws IOException { - NewgroundsRipper ripper = new NewgroundsRipper(new URL("https://zone-sama.newgrounds.com/art")); + @Tag("flaky") + public void testNewgroundsRip() 
throws IOException, URISyntaxException { + NewgroundsRipper ripper = new NewgroundsRipper(new URI("https://zone-sama.newgrounds.com/art").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("https://zone-sama.newgrounds.com/art"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://zone-sama.newgrounds.com/art").toURL(); NewgroundsRipper ripper = new NewgroundsRipper(url); Assertions.assertEquals("zone-sama", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NfsfwRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NfsfwRipperTest.java index 00bba3b70..0477a13dc 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NfsfwRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NfsfwRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.NfsfwRipper; @@ -13,21 +15,21 @@ public class NfsfwRipperTest extends RippersTest { @Test @Disabled("https://github.com/RipMeApp/ripme/issues/291 -- nfsfw 'account suspended' error; disabled flaky test in CI") - public void testNfsfwRip() throws IOException { - NfsfwRipper ripper = new NfsfwRipper(new URL("http://nfsfw.com/gallery/v/Kitten/")); + public void testNfsfwRip() throws IOException, URISyntaxException { + NfsfwRipper ripper = new NfsfwRipper(new URI("http://nfsfw.com/gallery/v/Kitten/").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("http://nfsfw.com/gallery/v/Kitten/"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("http://nfsfw.com/gallery/v/Kitten/").toURL(); NfsfwRipper ripper = new NfsfwRipper(url); Assertions.assertEquals("Kitten", ripper.getGID(url)); - url = new 
URL("http://nfsfw.com/gallery/v/Kitten"); + url = new URI("http://nfsfw.com/gallery/v/Kitten").toURL(); Assertions.assertEquals("Kitten", ripper.getGID(url)); - url = new URL("http://nfsfw.com/gallery/v/Kitten/gif_001/"); + url = new URI("http://nfsfw.com/gallery/v/Kitten/gif_001/").toURL(); Assertions.assertEquals("Kitten__gif_001", ripper.getGID(url)); - url = new URL("http://nfsfw.com/gallery/v/Kitten/gif_001/"); + url = new URI("http://nfsfw.com/gallery/v/Kitten/gif_001/").toURL(); Assertions.assertEquals("Kitten__gif_001", ripper.getGID(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NhentaiRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NhentaiRipperTest.java index a18727038..1857e8658 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NhentaiRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NhentaiRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.List; @@ -11,21 +13,21 @@ import org.junit.jupiter.api.Test; public class NhentaiRipperTest extends RippersTest { - public void testRip() throws IOException { - NhentaiRipper ripper = new NhentaiRipper(new URL("https://nhentai.net/g/233295/")); + public void testRip() throws IOException, URISyntaxException { + NhentaiRipper ripper = new NhentaiRipper(new URI("https://nhentai.net/g/233295/").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - NhentaiRipper ripper = new NhentaiRipper(new URL("https://nhentai.net/g/233295/")); - Assertions.assertEquals("233295", ripper.getGID(new URL("https://nhentai.net/g/233295/"))); + public void testGetGID() throws IOException, URISyntaxException { + NhentaiRipper ripper = new NhentaiRipper(new URI("https://nhentai.net/g/233295/").toURL()); + Assertions.assertEquals("233295", ripper.getGID(new 
URI("https://nhentai.net/g/233295/").toURL())); } // Test the tag black listing @Test @Tag("flaky") - public void testTagBlackList() throws IOException { - URL url = new URL("https://nhentai.net/g/233295/"); + public void testTagBlackList() throws IOException, URISyntaxException { + URL url = new URI("https://nhentai.net/g/233295/").toURL(); NhentaiRipper ripper = new NhentaiRipper(url); List tagsOnPage = ripper.getTags(ripper.getFirstPage()); // Test multiple blacklisted tags diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NsfwXxxRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NsfwXxxRipperTest.java new file mode 100644 index 000000000..06e6d5c6a --- /dev/null +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NsfwXxxRipperTest.java @@ -0,0 +1,16 @@ +package com.rarchives.ripme.tst.ripper.rippers; + +import com.rarchives.ripme.ripper.rippers.NsfwXxxRipper; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +public class NsfwXxxRipperTest extends RippersTest { + @Test + public void testNsfwXxxUser() throws IOException, URISyntaxException { + NsfwXxxRipper ripper = new NsfwXxxRipper(new URI("https://nsfw.xxx/user/smay3991").toURL()); + testRipper(ripper); + } +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NudeGalsRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NudeGalsRipperTest.java index 38e697c2e..fb348d94b 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NudeGalsRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/NudeGalsRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.NudeGalsRipper; import org.junit.jupiter.api.Assertions; @@ -9,14 +10,14 @@ public class NudeGalsRipperTest 
extends RippersTest { @Test - public void testRip() throws IOException { - NudeGalsRipper ripper = new NudeGalsRipper(new URL("https://nude-gals.com/photoshoot.php?photoshoot_id=5541")); + public void testRip() throws IOException, URISyntaxException { + NudeGalsRipper ripper = new NudeGalsRipper(new URI("https://nude-gals.com/photoshoot.php?photoshoot_id=5541").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - NudeGalsRipper ripper = new NudeGalsRipper(new URL("https://nude-gals.com/photoshoot.php?photoshoot_id=5541")); - Assertions.assertEquals("5541", ripper.getGID( new URL("https://nude-gals.com/photoshoot.php?photoshoot_id=5541"))); + public void testGetGID() throws IOException, URISyntaxException { + NudeGalsRipper ripper = new NudeGalsRipper(new URI("https://nude-gals.com/photoshoot.php?photoshoot_id=5541").toURL()); + Assertions.assertEquals("5541", ripper.getGID( new URI("https://nude-gals.com/photoshoot.php?photoshoot_id=5541").toURL())); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/OglafRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/OglafRipperTest.java index 3e716f45d..df5eb3dd0 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/OglafRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/OglafRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.OglafRipper; @@ -9,8 +10,8 @@ public class OglafRipperTest extends RippersTest { @Test - public void testRip() throws IOException { - OglafRipper ripper = new OglafRipper(new URL("http://oglaf.com/plumes/")); + public void testRip() throws IOException, URISyntaxException { + OglafRipper ripper = new OglafRipper(new URI("http://oglaf.com/plumes/").toURL()); testRipper(ripper); } } \ No newline at end of file diff --git 
a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PahealRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PahealRipperTest.java index 844030055..d78ad5eff 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PahealRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PahealRipperTest.java @@ -1,16 +1,17 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.PahealRipper; import org.junit.jupiter.api.Test; public class PahealRipperTest extends RippersTest { @Test - public void testPahealRipper() throws IOException { + public void testPahealRipper() throws IOException, URISyntaxException { // a photo set - PahealRipper ripper = new PahealRipper(new URL("http://rule34.paheal.net/post/list/bimbo/1")); + PahealRipper ripper = new PahealRipper(new URI("http://rule34.paheal.net/post/list/bimbo/1").toURL()); testRipper(ripper); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PawooRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PawooRipperTest.java index 730a965c2..664f3fecc 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PawooRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PawooRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.PawooRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class PawooRipperTest extends RippersTest { @Test - public void testRip() throws IOException { - PawooRipper ripper = new PawooRipper(new URL("https://pawoo.net/@halki/media")); + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + 
PawooRipper ripper = new PawooRipper(new URI("https://pawoo.net/@halki/media").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PhotobucketRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PhotobucketRipperTest.java index fb133d321..8581d038d 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PhotobucketRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PhotobucketRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.PhotobucketRipper; @@ -13,9 +15,9 @@ public class PhotobucketRipperTest extends RippersTest { @Test @Disabled("https://github.com/RipMeApp/ripme/issues/229 : Disabled test (temporary) : BasicRippersTest#testPhotobucketRip (timing out)") - public void testPhotobucketRip() throws IOException { + public void testPhotobucketRip() throws IOException, URISyntaxException { PhotobucketRipper ripper = new PhotobucketRipper( - new URL("http://s844.photobucket.com/user/SpazzySpizzy/library/Album%20Covers?sort=3&page=1")); + new URI("http://s844.photobucket.com/user/SpazzySpizzy/library/Album%20Covers?sort=3&page=1").toURL()); testRipper(ripper); deleteSubdirs(ripper.getWorkingDir()); deleteDir(ripper.getWorkingDir()); @@ -23,12 +25,12 @@ public void testPhotobucketRip() throws IOException { @Test @Disabled("new test, still disabled out because of the issue above, since this test also involves network IO.") - public void testGetNextPage() throws IOException { + public void testGetNextPage() throws IOException, URISyntaxException { // this album should have more than enough sub-albums and pages // to serve as a pretty good iteration test (barring server or // network errors) String baseURL = "http://s1255.photobucket.com/user/mimajki/library/Movie%20gifs?sort=6&page=1"; - URL url = 
new URL(baseURL); + URL url = new URI(baseURL).toURL(); PhotobucketRipper ripper = new PhotobucketRipper(url); org.jsoup.nodes.Document page = ripper.getFirstPage(); // NOTE: number of pages remaining includes the subalbums @@ -47,17 +49,17 @@ public void testGetNextPage() throws IOException { } @Test - public void testGetGID() throws IOException { - URL url = new URL( - "http://s732.photobucket.com/user/doublesix66/library/Army%20Painter%20examples?sort=3&page=1"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI( + "http://s732.photobucket.com/user/doublesix66/library/Army%20Painter%20examples?sort=3&page=1").toURL(); PhotobucketRipper ripper = new PhotobucketRipper(url); Assertions.assertEquals("doublesix66", ripper.getGID(url)); - url = new URL( - "http://s732.photobucket.com/user/doublesix66/library/Army%20Painter%20examples/Painting%20examples?page=1&sort=3"); + url = new URI( + "http://s732.photobucket.com/user/doublesix66/library/Army%20Painter%20examples/Painting%20examples?page=1&sort=3").toURL(); Assertions.assertEquals("doublesix66", ripper.getGID(url)); - url = new URL("http://s844.photobucket.com/user/SpazzySpizzy/library/Album%20Covers"); + url = new URI("http://s844.photobucket.com/user/SpazzySpizzy/library/Album%20Covers").toURL(); Assertions.assertEquals("SpazzySpizzy", ripper.getGID(url)); - url = new URL("http://s844.photobucket.com/user/SpazzySpizzy/library"); + url = new URI("http://s844.photobucket.com/user/SpazzySpizzy/library").toURL(); Assertions.assertEquals("SpazzySpizzy", ripper.getGID(url)); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PichunterRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PichunterRipperTest.java index 9ba9110bd..fc79cb97b 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PichunterRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PichunterRipperTest.java @@ -1,7 
+1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.PichunterRipper; @@ -11,18 +12,18 @@ public class PichunterRipperTest extends RippersTest { @Test @Tag("flaky") - public void testPichunterModelPageRip() throws IOException { + public void testPichunterModelPageRip() throws IOException, URISyntaxException { // A non-photoset - PichunterRipper ripper = new PichunterRipper(new URL("https://www.pichunter.com/models/Madison_Ivy")); + PichunterRipper ripper = new PichunterRipper(new URI("https://www.pichunter.com/models/Madison_Ivy").toURL()); testRipper(ripper); } @Test @Tag("flaky") - public void testPichunterGalleryRip() throws IOException { + public void testPichunterGalleryRip() throws IOException, URISyntaxException { // a photo set PichunterRipper ripper = new PichunterRipper( - new URL("http://www.pichunter.com/gallery/3270642/Its_not_only_those_who")); + new URI("http://www.pichunter.com/gallery/3270642/Its_not_only_those_who").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PicstatioRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PicstatioRipperTest.java index ec4e2383d..04da17a82 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PicstatioRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PicstatioRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.PicstatioRipper; @@ -9,13 +11,14 @@ public class PicstatioRipperTest extends RippersTest { - public void testRip() throws IOException { - PicstatioRipper ripper = new PicstatioRipper(new URL("https://www.picstatio.com/aerial-view-wallpapers")); + public void testRip() throws 
IOException, URISyntaxException { + PicstatioRipper ripper = new PicstatioRipper(new URI("https://www.picstatio.com/aerial-view-wallpapers").toURL()); testRipper(ripper); } @Test - public void testGID() throws IOException { - PicstatioRipper ripper = new PicstatioRipper(new URL("https://www.picstatio.com/aerial-view-wallpapers")); - Assertions.assertEquals("aerial-view-wallpapers", ripper.getGID(new URL("https://www.picstatio.com/aerial-view-wallpapers"))); + public void testGID() throws IOException, URISyntaxException { + URL url = new URI("https://www.picstatio.com/aerial-view-wallpapers").toURL(); + PicstatioRipper ripper = new PicstatioRipper(url); + Assertions.assertEquals("aerial-view-wallpapers", ripper.getGID(url)); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PorncomixRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PorncomixRipperTest.java index 7abe6e1e6..ad9d9b83e 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PorncomixRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PorncomixRipperTest.java @@ -1,13 +1,14 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.PorncomixRipper; public class PorncomixRipperTest extends RippersTest { - public void testPorncomixAlbum() throws IOException { - PorncomixRipper ripper = new PorncomixRipper(new URL("http://www.porncomix.info/lust-unleashed-desire-to-submit/")); + public void testPorncomixAlbum() throws IOException, URISyntaxException { + PorncomixRipper ripper = new PorncomixRipper(new URI("http://www.porncomix.info/lust-unleashed-desire-to-submit/").toURL()); testRipper(ripper); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PorncomixinfoRipperTest.java 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PorncomixinfoRipperTest.java index 6a8390367..e8628955b 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PorncomixinfoRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PorncomixinfoRipperTest.java @@ -1,15 +1,18 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.PorncomixinfoRipper; -import org.junit.Test; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; public class PorncomixinfoRipperTest extends RippersTest { @Test - public void testRip() throws IOException { - PorncomixinfoRipper ripper = new PorncomixinfoRipper(new URL("https://porncomixinfo.net/chapter/alx-come-to-naught-down-in-flames-up-in-smoke-tracy-scops/alx-come-to-naught-down-in-flames-up-in-smoke-tracy-scops/")); + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + PorncomixinfoRipper ripper = new PorncomixinfoRipper(new URI("https://porncomixinfo.net/chapter/alx-come-to-naught-down-in-flames-up-in-smoke-tracy-scops/alx-come-to-naught-down-in-flames-up-in-smoke-tracy-scops/").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornhubRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornhubRipperTest.java index 22de8349f..1bc6520f5 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornhubRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornhubRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.PornhubRipper; @@ -13,26 +15,26 @@ public class PornhubRipperTest extends RippersTest { @Test - public void testPornhubRip() throws 
IOException { + public void testPornhubRip() throws IOException, URISyntaxException { if (Utils.getConfigBoolean("test.run_flaky_tests", false)) { - PornhubRipper ripper = new PornhubRipper(new URL("https://www.pornhub.com/album/15680522")); + PornhubRipper ripper = new PornhubRipper(new URI("https://www.pornhub.com/album/15680522").toURL()); testRipper(ripper); } } - public void testGetGID() throws IOException { - URL url = new URL("https://www.pornhub.com/album/15680522?page=2"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://www.pornhub.com/album/15680522?page=2").toURL(); PornhubRipper ripper = new PornhubRipper(url); Assertions.assertEquals("15680522", ripper.getGID(url)); - url = new URL("https://www.pornhub.com/album/15680522"); + url = new URI("https://www.pornhub.com/album/15680522").toURL(); Assertions.assertEquals("15680522", ripper.getGID(url)); } @Test @Tag("flaky") - public void testGetNextPage() throws IOException { + public void testGetNextPage() throws IOException, URISyntaxException { String baseURL = "https://www.pornhub.com/album/30687901"; - PornhubRipper ripper = new PornhubRipper(new URL(baseURL)); + PornhubRipper ripper = new PornhubRipper(new URI(baseURL).toURL()); Document page = Http.url(baseURL).get(); int numPagesRemaining = 1; for (int idx = 0; idx < numPagesRemaining; idx++){ diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornpicsRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornpicsRipperTest.java index 1f79b2548..4fda9aee5 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornpicsRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/PornpicsRipperTest.java @@ -1,13 +1,14 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.PornpicsRipper; public class 
PornpicsRipperTest extends RippersTest { - public void testRip() throws IOException { - PornpicsRipper ripper = new PornpicsRipper(new URL("https://www.pornpics.com/galleries/pornstar-dahlia-sky-takes-a-fat-cock-in-her-butthole-wearing-fishnet-stockings/")); + public void testRip() throws IOException, URISyntaxException { + PornpicsRipper ripper = new PornpicsRipper(new URI("https://www.pornpics.com/galleries/pornstar-dahlia-sky-takes-a-fat-cock-in-her-butthole-wearing-fishnet-stockings/").toURL()); testRipper(ripper); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RedditRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RedditRipperTest.java index f4dbe3278..db0fa5302 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RedditRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RedditRipperTest.java @@ -2,7 +2,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.RedditRipper; @@ -14,23 +15,23 @@ public class RedditRipperTest extends RippersTest { @Test @Tag("flaky") // https://github.com/RipMeApp/ripme/issues/253 - public void testRedditSubredditRip() throws IOException { - RedditRipper ripper = new RedditRipper(new URL("http://www.reddit.com/r/nsfw_oc")); + public void testRedditSubredditRip() throws IOException, URISyntaxException { + RedditRipper ripper = new RedditRipper(new URI("http://www.reddit.com/r/nsfw_oc").toURL()); testRipper(ripper); } @Test @Tag("flaky") // https://github.com/RipMeApp/ripme/issues/253 - public void testRedditSubredditTopRip() throws IOException { - RedditRipper ripper = new RedditRipper(new URL("http://www.reddit.com/r/nsfw_oc/top?t=all")); + public void testRedditSubredditTopRip() throws IOException, URISyntaxException { + RedditRipper ripper = new RedditRipper(new 
URI("http://www.reddit.com/r/nsfw_oc/top?t=all").toURL()); testRipper(ripper); } @Test @Disabled - public void testRedditPostRip() throws IOException { + public void testRedditPostRip() throws IOException, URISyntaxException { RedditRipper ripper = new RedditRipper( - new URL("http://www.reddit.com/r/UnrealGirls/comments/1ziuhl/in_class_veronique_popa/")); + new URI("http://www.reddit.com/r/UnrealGirls/comments/1ziuhl/in_class_veronique_popa/").toURL()); testRipper(ripper); } @@ -41,9 +42,25 @@ public void testRedditPostRip() throws IOException { */ @Test @Tag("flaky") - public void testRedditGfyGoodURL() throws IOException { + public void testRedditGfyGoodURL() throws IOException, URISyntaxException { RedditRipper ripper = new RedditRipper( - new URL("https://www.reddit.com/r/bottesting/comments/7msozf/good_link/")); + new URI("https://www.reddit.com/r/bottesting/comments/7msozf/good_link/").toURL()); + testRipper(ripper); + } + + @Test + @Tag("flaky") + public void testSelfPostRip() throws IOException, URISyntaxException { + RedditRipper ripper = new RedditRipper( + new URI("https://www.reddit.com/r/gonewildstories/comments/oz7d97/f_18_finally_having_a_normal_sex_life/").toURL() + ); + testRipper(ripper); + } + + @Test + @Tag("flaky") + public void testSelfPostAuthorRip() throws IOException, URISyntaxException { + RedditRipper ripper = new RedditRipper(new URI("https://www.reddit.com/user/ickybabie_").toURL()); testRipper(ripper); } @@ -54,17 +71,28 @@ public void testRedditGfyGoodURL() throws IOException { */ @Test @Tag("flaky") - public void testRedditGfyBadURL() throws IOException { + public void testRedditGfyBadURL() throws IOException, URISyntaxException { RedditRipper ripper = new RedditRipper( - new URL("https://www.reddit.com/r/bottesting/comments/7msmhi/bad_link/")); + new URI("https://www.reddit.com/r/bottesting/comments/7msmhi/bad_link/").toURL()); testRipper(ripper); } + /** + * GFYCAT TEST Tests a gfycat URL with the gifdeliverynetwork/redgifs hosted 
video + * + * @throws IOException + */ + @Test + public void testRedditGfycatRedirectURL() throws IOException, URISyntaxException { + RedditRipper ripper = new RedditRipper( + new URI("https://www.reddit.com/r/NSFW_GIF/comments/ennwsa/gorgeous_tits/").toURL()); + } + @Test @Tag("flaky") - public void testRedditGallery() throws IOException{ + public void testRedditGallery() throws IOException, URISyntaxException { RedditRipper ripper = new RedditRipper( - new URL("https://www.reddit.com/gallery/hrrh23")); + new URI("https://www.reddit.com/gallery/hrrh23").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RedgifsRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RedgifsRipperTest.java index ed71128d1..3ef0759c6 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RedgifsRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RedgifsRipperTest.java @@ -1,60 +1,66 @@ package com.rarchives.ripme.tst.ripper.rippers; +import com.rarchives.ripme.ripper.rippers.RedditRipper; import com.rarchives.ripme.ripper.rippers.RedgifsRipper; -import org.jsoup.nodes.Document; import org.junit.jupiter.api.*; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; public class RedgifsRipperTest extends RippersTest { /** * Rips correctly formatted URL directly from Redgifs - * @throws IOException */ @Test - @Disabled("test or ripper broken") - public void testRedgifsGoodURL() throws IOException{ - RedgifsRipper ripper = new RedgifsRipper(new URL("https://www.redgifs.com/watch/talkativewarpeddragon-petite")); + public void testRedgifsGoodURL() throws IOException, URISyntaxException { + RedgifsRipper ripper = new RedgifsRipper(new URI("https://www.redgifs.com/watch/ashamedselfishcoypu").toURL()); testRipper(ripper); } /** * Rips gifdeliverynetwork URL's by redirecting them to proper redgifs url - * @throws IOException */ @Test - 
@Tag("flaky") - public void testRedgifsBadRL() throws IOException{ - RedgifsRipper ripper = new RedgifsRipper(new URL("https://www.gifdeliverynetwork.com/foolishelasticchimpanzee")); + public void testRedgifsBadRL() throws IOException, URISyntaxException { + RedgifsRipper ripper = new RedgifsRipper(new URI("https://www.gifdeliverynetwork.com/consideratetrustworthypigeon").toURL()); + testRipper(ripper); + } + + /** + * Rips a Redgifs profile + */ + @Test + public void testRedgifsProfile() throws IOException, URISyntaxException { + RedgifsRipper ripper = new RedgifsRipper(new URI("https://www.redgifs.com/users/ra-kunv2").toURL()); testRipper(ripper); } /** - * Rips a Redifs profile + * Rips a Redgif search * @throws IOException */ @Test - @Tag("flaky") - public void testRedgifsProfile() throws IOException { - RedgifsRipper ripper = new RedgifsRipper(new URL("https://redgifs.com/users/margo_monty")); + public void testRedgifsSearch() throws IOException, URISyntaxException { + RedgifsRipper ripper = new RedgifsRipper(new URI("https://www.redgifs.com/search?query=take+a+shot+every+time").toURL()); testRipper(ripper); } /** - * Rips a Redifs category/search + * Rips Redgif tags * @throws IOException */ @Test - @Disabled("test or ripper broken") - public void testRedgifsSearch() throws IOException { - RedgifsRipper ripper = new RedgifsRipper(new URL("https://redgifs.com/gifs/browse/little-caprice")); - Document doc = ripper.getFirstPage(); - - doc = ripper.getNextPage(doc); - Assertions.assertTrue("https://napi.redgifs.com/v1/gfycats/search?search_text=little%20caprice&count=150&start=150".equalsIgnoreCase(doc.location())); - doc = ripper.getNextPage(doc); - Assertions.assertTrue("https://napi.redgifs.com/v1/gfycats/search?search_text=little%20caprice&count=150&start=300".equalsIgnoreCase(doc.location())); + public void testRedgifsTags() throws IOException, URISyntaxException { + RedgifsRipper ripper = new RedgifsRipper(new 
URI("https://www.redgifs.com/gifs/animation,sfw,funny?order=best&tab=gifs").toURL()); + testRipper(ripper); + } + + @Test + @Tag("flaky") + public void testRedditRedgifs() throws IOException, URISyntaxException { + RedditRipper ripper = new RedditRipper(new URI("https://www.reddit.com/r/nsfwhardcore/comments/ouz5bw/me_cumming_on_his_face/").toURL()); + testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RippersTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RippersTest.java index c09b80187..24fa8ea7f 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RippersTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RippersTest.java @@ -6,9 +6,12 @@ import com.rarchives.ripme.ripper.rippers.ChanRipper; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.config.Configuration; +import org.apache.logging.log4j.core.config.LoggerConfig; import org.junit.jupiter.api.Assertions; import com.rarchives.ripme.ripper.AbstractRipper; @@ -19,12 +22,16 @@ */ public class RippersTest { - private final Logger logger = Logger.getLogger(RippersTest.class); + private final Logger logger = LogManager.getLogger(RippersTest.class); void testRipper(AbstractRipper ripper) { try { // Turn on Debug logging - ((ConsoleAppender) Logger.getRootLogger().getAppender("stdout")).setThreshold(Level.DEBUG); + LoggerContext ctx = (LoggerContext) LogManager.getContext(false); + Configuration config = ctx.getConfiguration(); + LoggerConfig loggerConfig = config.getLoggerConfig(LogManager.ROOT_LOGGER_NAME); + loggerConfig.setLevel(Level.DEBUG); + ctx.updateLoggers(); // This causes all Loggers to refetch information from their LoggerConfig. 
// Decrease timeout Utils.setConfigInteger("page.timeout", 20 * 1000); @@ -32,6 +39,13 @@ void testRipper(AbstractRipper ripper) { ripper.setup(); ripper.markAsTest(); ripper.rip(); + if (logger.isTraceEnabled()) { + logger.trace("working dir: " + ripper.getWorkingDir()); + logger.trace("list files: " + ripper.getWorkingDir().listFiles().length); + for (int i = 0; i < ripper.getWorkingDir().listFiles().length; i++) { + logger.trace(" " + ripper.getWorkingDir().listFiles()[i]); + } + } Assertions.assertTrue(ripper.getWorkingDir().listFiles().length >= 1, "Failed to download a single file from " + ripper.getURL()); } catch (IOException e) { @@ -56,9 +70,6 @@ void testRipper(AbstractRipper ripper) { // that we found links to it void testChanRipper(ChanRipper ripper) { try { - // Turn on Debug logging - ((ConsoleAppender) Logger.getRootLogger().getAppender("stdout")).setThreshold(Level.DEBUG); - // Decrease timeout Utils.setConfigInteger("page.timeout", 20 * 1000); diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Rule34RipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Rule34RipperTest.java index 89bd0fac8..662a7eb70 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Rule34RipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/Rule34RipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.Rule34Ripper; @@ -9,14 +11,14 @@ public class Rule34RipperTest extends RippersTest { @Test - public void testShesFreakyRip() throws IOException { - Rule34Ripper ripper = new Rule34Ripper(new URL("https://rule34.xxx/index.php?page=post&s=list&tags=bimbo")); + public void testShesFreakyRip() throws IOException, URISyntaxException { + Rule34Ripper ripper = new Rule34Ripper(new 
URI("https://rule34.xxx/index.php?page=post&s=list&tags=bimbo").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("https://rule34.xxx/index.php?page=post&s=list&tags=bimbo"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://rule34.xxx/index.php?page=post&s=list&tags=bimbo").toURL(); Rule34Ripper ripper = new Rule34Ripper(url); Assertions.assertEquals("bimbo", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RulePornRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RulePornRipperTest.java index 03bbaa53f..73f79a563 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RulePornRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/RulePornRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.RulePornRipper; @@ -9,14 +11,14 @@ public class RulePornRipperTest extends RippersTest { @Test - public void testRip() throws IOException { - RulePornRipper ripper = new RulePornRipper(new URL("https://ruleporn.com/tosh/")); + public void testRip() throws IOException, URISyntaxException { + RulePornRipper ripper = new RulePornRipper(new URI("https://ruleporn.com/tosh/").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("https://ruleporn.com/tosh/"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://ruleporn.com/tosh/").toURL(); RulePornRipper ripper = new RulePornRipper(url); Assertions.assertEquals("tosh", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SankakuComplexRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SankakuComplexRipperTest.java index 
88f59e2e4..4efe9ba2e 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SankakuComplexRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SankakuComplexRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.SankakuComplexRipper; @@ -12,30 +14,30 @@ public class SankakuComplexRipperTest extends RippersTest { @Test @Disabled("https://github.com/RipMeApp/ripme/issues/257") - public void testSankakuChanRip() throws IOException { + public void testSankakuChanRip() throws IOException, URISyntaxException { SankakuComplexRipper ripper = new SankakuComplexRipper( - new URL("https://chan.sankakucomplex.com/?tags=cleavage")); + new URI("https://chan.sankakucomplex.com/?tags=cleavage").toURL()); testRipper(ripper); } @Test @Disabled("https://github.com/RipMeApp/ripme/issues/257") - public void testSankakuIdolRip() throws IOException { + public void testSankakuIdolRip() throws IOException, URISyntaxException { SankakuComplexRipper ripper = new SankakuComplexRipper( - new URL("https://idol.sankakucomplex.com/?tags=meme_%28me%21me%21me%21%29_%28cosplay%29")); + new URI("https://idol.sankakucomplex.com/?tags=meme_%28me%21me%21me%21%29_%28cosplay%29").toURL()); testRipper(ripper); } @Test - public void testgetGID() throws IOException { - URL url = new URL("https://idol.sankakucomplex.com/?tags=meme_%28me%21me%21me%21%29_%28cosplay%29"); + public void testgetGID() throws IOException, URISyntaxException { + URL url = new URI("https://idol.sankakucomplex.com/?tags=meme_%28me%21me%21me%21%29_%28cosplay%29").toURL(); SankakuComplexRipper ripper = new SankakuComplexRipper(url); Assertions.assertEquals("idol._meme_(me!me!me!)_(cosplay)", ripper.getGID(url)); } @Test - public void testgetSubDomain() throws IOException { - URL url = new 
URL("https://idol.sankakucomplex.com/?tags=meme_%28me%21me%21me%21%29_%28cosplay%29"); + public void testgetSubDomain() throws IOException, URISyntaxException { + URL url = new URI("https://idol.sankakucomplex.com/?tags=meme_%28me%21me%21me%21%29_%28cosplay%29").toURL(); SankakuComplexRipper ripper = new SankakuComplexRipper(url); Assertions.assertEquals("idol.", ripper.getSubDomain(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ScrolllerRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ScrolllerRipperTest.java index c7bf3d7d8..44bf06cfc 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ScrolllerRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ScrolllerRipperTest.java @@ -1,53 +1,55 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import com.rarchives.ripme.ripper.rippers.ScrolllerRipper; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -import java.io.IOException; -import java.net.URL; -import java.util.HashMap; -import java.util.Map; - -public class ScrolllerRipperTest extends RippersTest { - @Test - public void testScrolllerGID() throws IOException { - Map testURLs = new HashMap<>(); - - testURLs.put(new URL("https://scrolller.com/r/CatsStandingUp"), "CatsStandingUp"); - testURLs.put(new URL("https://scrolller.com/r/CatsStandingUp?filter=pictures"), "CatsStandingUp"); - testURLs.put(new URL("https://scrolller.com/r/CatsStandingUp?sort=top&filter=pictures"), "CatsStandingUp"); - testURLs.put(new URL("https://scrolller.com/r/CatsStandingUp?filter=pictures&sort=top"), "CatsStandingUp"); - for (URL url : testURLs.keySet()) { - ScrolllerRipper ripper = new ScrolllerRipper(url); - ripper.setup(); - Assertions.assertEquals(testURLs.get(url), ripper.getGID(ripper.getURL())); - deleteDir(ripper.getWorkingDir()); - } - } - - @Test - public void testScrolllerFilterRegex() throws IOException { - Map testURLs = new HashMap<>(); - - testURLs.put(new 
URL("https://scrolller.com/r/CatsStandingUp"), "NOFILTER"); - testURLs.put(new URL("https://scrolller.com/r/CatsStandingUp?filter=pictures"), "PICTURE"); - testURLs.put(new URL("https://scrolller.com/r/CatsStandingUp?filter=videos"), "VIDEO"); - testURLs.put(new URL("https://scrolller.com/r/CatsStandingUp?filter=albums"), "ALBUM"); - testURLs.put(new URL("https://scrolller.com/r/CatsStandingUp?sort=top&filter=pictures"), "PICTURE"); - testURLs.put(new URL("https://scrolller.com/r/CatsStandingUp?sort=top&filter=videos"), "VIDEO"); - testURLs.put(new URL("https://scrolller.com/r/CatsStandingUp?sort=top&filter=albums"), "ALBUM"); - testURLs.put(new URL("https://scrolller.com/r/CatsStandingUp?filter=pictures&sort=top"), "PICTURE"); - testURLs.put(new URL("https://scrolller.com/r/CatsStandingUp?filter=videos&sort=top"), "VIDEO"); - testURLs.put(new URL("https://scrolller.com/r/CatsStandingUp?filter=albums&sort=top"), "ALBUM"); - for (URL url : testURLs.keySet()) { - ScrolllerRipper ripper = new ScrolllerRipper(url); - ripper.setup(); - Assertions.assertEquals(testURLs.get(url), ripper.convertFilterString(ripper.getParameter(ripper.getURL(),"filter"))); - deleteDir(ripper.getWorkingDir()); - } - } - - - -} +package com.rarchives.ripme.tst.ripper.rippers; + +import com.rarchives.ripme.ripper.rippers.ScrolllerRipper; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.HashMap; +import java.util.Map; + +public class ScrolllerRipperTest extends RippersTest { + @Test + public void testScrolllerGID() throws IOException, URISyntaxException { + Map testURLs = new HashMap<>(); + + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp").toURL(), "CatsStandingUp"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=pictures").toURL(), "CatsStandingUp"); + testURLs.put(new 
URI("https://scrolller.com/r/CatsStandingUp?sort=top&filter=pictures").toURL(), "CatsStandingUp"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=pictures&sort=top").toURL(), "CatsStandingUp"); + for (URL url : testURLs.keySet()) { + ScrolllerRipper ripper = new ScrolllerRipper(url); + ripper.setup(); + Assertions.assertEquals(testURLs.get(url), ripper.getGID(ripper.getURL())); + deleteDir(ripper.getWorkingDir()); + } + } + + @Test + public void testScrolllerFilterRegex() throws IOException, URISyntaxException { + Map testURLs = new HashMap<>(); + + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp").toURL(), "NOFILTER"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=pictures").toURL(), "PICTURE"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=videos").toURL(), "VIDEO"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=albums").toURL(), "ALBUM"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?sort=top&filter=pictures").toURL(), "PICTURE"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?sort=top&filter=videos").toURL(), "VIDEO"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?sort=top&filter=albums").toURL(), "ALBUM"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=pictures&sort=top").toURL(), "PICTURE"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=videos&sort=top").toURL(), "VIDEO"); + testURLs.put(new URI("https://scrolller.com/r/CatsStandingUp?filter=albums&sort=top").toURL(), "ALBUM"); + for (URL url : testURLs.keySet()) { + ScrolllerRipper ripper = new ScrolllerRipper(url); + ripper.setup(); + Assertions.assertEquals(testURLs.get(url), ripper.convertFilterString(ripper.getParameter(ripper.getURL(),"filter"))); + deleteDir(ripper.getWorkingDir()); + } + } + + + +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ShesFreakyRipperTest.java 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ShesFreakyRipperTest.java index eb3769a12..f389974bc 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ShesFreakyRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ShesFreakyRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.ShesFreakyRipper; @@ -12,15 +14,15 @@ public class ShesFreakyRipperTest extends RippersTest { @Test @Disabled("https://github.com/RipMeApp/ripme/issues/254") - public void testShesFreakyRip() throws IOException { + public void testShesFreakyRip() throws IOException, URISyntaxException { ShesFreakyRipper ripper = new ShesFreakyRipper( - new URL("http://www.shesfreaky.com/gallery/nicee-snow-bunny-579NbPjUcYa.html")); + new URI("http://www.shesfreaky.com/gallery/nicee-snow-bunny-579NbPjUcYa.html").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("http://www.shesfreaky.com/gallery/nicee-snow-bunny-579NbPjUcYa.html"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("http://www.shesfreaky.com/gallery/nicee-snow-bunny-579NbPjUcYa.html").toURL(); ShesFreakyRipper ripper = new ShesFreakyRipper(url); Assertions.assertEquals("nicee-snow-bunny-579NbPjUcYa", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinfestRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinfestRipperTest.java index 2dd311f35..b7a8da536 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinfestRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SinfestRipperTest.java @@ -1,22 +1,26 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; 
import java.net.URL; import com.rarchives.ripme.ripper.rippers.SinfestRipper; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class SinfestRipperTest extends RippersTest { @Test - public void testRip() throws IOException { - SinfestRipper ripper = new SinfestRipper(new URL("http://sinfest.net/view.php?date=2000-01-17")); + @Tag("flaky") + public void testRip() throws IOException, URISyntaxException { + SinfestRipper ripper = new SinfestRipper(new URI("http://sinfest.net/view.php?date=2000-01-17").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("http://sinfest.net/view.php?date=2000-01-17"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("http://sinfest.net/view.php?date=2000-01-17").toURL(); SinfestRipper ripper = new SinfestRipper(url); Assertions.assertEquals("2000-01-17", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SmuttyRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SmuttyRipperTest.java index 4085bb563..99c3f1aaf 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SmuttyRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SmuttyRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.SmuttyRipper; @@ -11,14 +13,14 @@ public class SmuttyRipperTest extends RippersTest { @Test @Tag("flaky") - public void testRip() throws IOException { - SmuttyRipper ripper = new SmuttyRipper(new URL("https://smutty.com/user/QUIGON/")); + public void testRip() throws IOException, URISyntaxException { + SmuttyRipper ripper = new SmuttyRipper(new URI("https://smutty.com/user/QUIGON/").toURL()); testRipper(ripper); } @Test - public void 
testGetGID() throws IOException { - URL url = new URL("https://smutty.com/user/QUIGON/"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://smutty.com/user/QUIGON/").toURL(); SmuttyRipper ripper = new SmuttyRipper(url); Assertions.assertEquals("QUIGON", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SoundgasmRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SoundgasmRipperTest.java new file mode 100644 index 000000000..847540a33 --- /dev/null +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SoundgasmRipperTest.java @@ -0,0 +1,28 @@ +package com.rarchives.ripme.tst.ripper.rippers; + +import com.rarchives.ripme.ripper.rippers.RedditRipper; +import com.rarchives.ripme.ripper.rippers.SoundgasmRipper; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; + +public class SoundgasmRipperTest extends RippersTest { + + @Test + @Tag("flaky") + public void testSoundgasmURLs() throws IOException, URISyntaxException { + SoundgasmRipper ripper = new SoundgasmRipper(new URI("https://soundgasm.net/u/HTMLExamples/Making-Text-into-a-Soundgasm-Audio-Link").toURL()); + testRipper(ripper); + } + + @Test + @Tag("flaky") + public void testRedditSoundgasmURL() throws IOException, URISyntaxException { + RedditRipper ripper = new RedditRipper(new URI("https://www.reddit.com/user/Mistress_Minerva/").toURL()); + testRipper(ripper); + } +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SpankBangRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SpankBangRipperTest.java index c73a244e4..684d46893 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SpankBangRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/SpankBangRipperTest.java @@ -1,15 +1,18 @@ package 
com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.SpankbangRipper; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class SpankBangRipperTest extends RippersTest { @Test - public void testSpankBangVideo() throws IOException { - SpankbangRipper ripper = new SpankbangRipper(new URL("https://spankbang.com/2a7fh/video/mdb901")); //most popular video of all time on site; should stay up + @Tag("flaky") + public void testSpankBangVideo() throws IOException, URISyntaxException { + SpankbangRipper ripper = new SpankbangRipper(new URI("https://spankbang.com/2a7fh/video/mdb901").toURL()); //most popular video of all time on site; should stay up testRipper(ripper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/StaRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/StaRipperTest.java index 0ba05343e..83da1175f 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/StaRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/StaRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.StaRipper; @@ -12,15 +14,15 @@ public class StaRipperTest extends RippersTest { @Test @Disabled("Ripper broken, Nullpointer exception") - public void testRip() throws IOException { - StaRipper ripper = new StaRipper(new URL("https://sta.sh/01umpyuxi4js")); + public void testRip() throws IOException, URISyntaxException { + StaRipper ripper = new StaRipper(new URI("https://sta.sh/01umpyuxi4js").toURL()); testRipper(ripper); } @Test @Disabled - public void testGetGID() throws IOException { - URL url = new URL("https://sta.sh/01umpyuxi4js"); + public void testGetGID() throws IOException, 
URISyntaxException { + URL url = new URI("https://sta.sh/01umpyuxi4js").toURL(); StaRipper ripper = new StaRipper(url); Assertions.assertEquals("01umpyuxi4js", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/StickyXXXRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/StickyXXXRipperTest.java index 5c530a015..71038c945 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/StickyXXXRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/StickyXXXRipperTest.java @@ -1,18 +1,21 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.video.StickyXXXRipper; // import com.rarchives.ripme.tst.ripper.rippers.RippersTest; import com.rarchives.ripme.utils.Utils; +import org.junit.jupiter.api.Test; public class StickyXXXRipperTest extends RippersTest { - public void testStickyXXXVideo() throws IOException { + @Test + public void testStickyXXXVideo() throws IOException, URISyntaxException { // This test fails on the CI - possibly due to checking for a file before it's written - so we're skipping it if (Utils.getConfigBoolean("test.run_flaky_tests", false)) { - StickyXXXRipper ripper = new StickyXXXRipper(new URL("http://www.stickyxxx.com/a-very-intense-farewell/")); + StickyXXXRipper ripper = new StickyXXXRipper(new URI("http://www.stickyxxx.com/a-very-intense-farewell/").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TapasticRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TapasticRipperTest.java index 36a3a29ea..4528482f6 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TapasticRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TapasticRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import 
java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.TapasticRipper; @@ -11,14 +13,14 @@ public class TapasticRipperTest extends RippersTest { @Test @Disabled("ripper broken") - public void testTapasticRip() throws IOException { - TapasticRipper ripper = new TapasticRipper(new URL("https://tapas.io/series/TPIAG")); + public void testTapasticRip() throws IOException, URISyntaxException { + TapasticRipper ripper = new TapasticRipper(new URI("https://tapas.io/series/TPIAG").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("https://tapas.io/series/TPIAG"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://tapas.io/series/TPIAG").toURL(); TapasticRipper ripper = new TapasticRipper(url); Assertions.assertEquals("series_ TPIAG", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TeenplanetRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TeenplanetRipperTest.java index 6d27ca7cf..2a69bae7f 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TeenplanetRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TeenplanetRipperTest.java @@ -1,22 +1,26 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.TeenplanetRipper; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class TeenplanetRipperTest extends RippersTest { @Test - public void testTeenplanetRip() throws IOException { - TeenplanetRipper ripper = new TeenplanetRipper(new URL("http://teenplanet.org/galleries/the-perfect-side-of-me-6588.html")); + @Tag("flaky") + public void testTeenplanetRip() throws IOException, 
URISyntaxException { + TeenplanetRipper ripper = new TeenplanetRipper(new URI("http://teenplanet.org/galleries/the-perfect-side-of-me-6588.html").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("http://teenplanet.org/galleries/the-perfect-side-of-me-6588.html"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("http://teenplanet.org/galleries/the-perfect-side-of-me-6588.html").toURL(); TeenplanetRipper ripper = new TeenplanetRipper(url); Assertions.assertEquals("the-perfect-side-of-me-6588", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ThechiveRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ThechiveRipperTest.java index 1067f1ebf..e7d85d34e 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ThechiveRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ThechiveRipperTest.java @@ -26,10 +26,12 @@ import com.rarchives.ripme.ripper.rippers.ThechiveRipper; import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; /** * @@ -43,16 +45,18 @@ public class ThechiveRipperTest extends RippersTest { * @throws IOException */ @Test - public void testTheChiveRip() throws IOException { - ThechiveRipper ripper = new ThechiveRipper(new URL( - "https://thechive.com/2019/03/16/beautiful-badasses-lookin-good-in-and-out-of-uniform-35-photos/")); + @Tag("flaky") + public void testTheChiveRip() throws IOException, URISyntaxException { + ThechiveRipper ripper = new ThechiveRipper(new URI( + "https://thechive.com/2019/03/16/beautiful-badasses-lookin-good-in-and-out-of-uniform-35-photos/").toURL()); testRipper(ripper); } @Test - public void testTheChiveGif() throws IOException { + @Tag("flaky") + public void testTheChiveGif() throws 
IOException, URISyntaxException { ThechiveRipper ripper = new ThechiveRipper( - new URL("https://thechive.com/2019/03/14/dont-tease-me-just-squeeze-me-20-gifs/")); + new URI("https://thechive.com/2019/03/14/dont-tease-me-just-squeeze-me-20-gifs/").toURL()); testRipper(ripper); } @@ -60,8 +64,9 @@ public void testTheChiveGif() throws IOException { * "i.thechive.com" test. */ @Test - public void testIDotThechive() throws IOException { - ThechiveRipper ripper = new ThechiveRipper(new URL("https://i.thechive.com/witcheva")); + @Tag("flaky") + public void testIDotThechive() throws IOException, URISyntaxException { + ThechiveRipper ripper = new ThechiveRipper(new URI("https://i.thechive.com/witcheva").toURL()); testRipper(ripper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TheyiffgalleryRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TheyiffgalleryRipperTest.java index 3c9b6a4a6..17ed73989 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TheyiffgalleryRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TheyiffgalleryRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.TheyiffgalleryRipper; @@ -11,14 +13,14 @@ public class TheyiffgalleryRipperTest extends RippersTest { @Test @Tag("flaky") - public void testTheyiffgallery() throws IOException { - TheyiffgalleryRipper ripper = new TheyiffgalleryRipper(new URL("https://theyiffgallery.com/index?/category/4303")); + public void testTheyiffgallery() throws IOException, URISyntaxException { + TheyiffgalleryRipper ripper = new TheyiffgalleryRipper(new URI("https://theyiffgallery.com/index?/category/4303").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("https://theyiffgallery.com/index?/category/4303"); + public 
void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://theyiffgallery.com/index?/category/4303").toURL(); TheyiffgalleryRipper ripper = new TheyiffgalleryRipper(url); Assertions.assertEquals("4303", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TsuminoRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TsuminoRipperTest.java index 38dee4513..21818ae37 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TsuminoRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TsuminoRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import java.util.List; import com.rarchives.ripme.ripper.rippers.TsuminoRipper; @@ -15,14 +16,14 @@ public class TsuminoRipperTest extends RippersTest { @Test @Disabled("Broken ripper") - public void testTsuminoRipper() throws IOException { - TsuminoRipper ripper = new TsuminoRipper(new URL("http://www.tsumino.com/Book/Info/43528/sore-wa-kurokute-suketeita-what-s-tight-and-black-and-sheer-all-over-")); + public void testTsuminoRipper() throws IOException, URISyntaxException { + TsuminoRipper ripper = new TsuminoRipper(new URI("http://www.tsumino.com/Book/Info/43528/sore-wa-kurokute-suketeita-what-s-tight-and-black-and-sheer-all-over-").toURL()); testRipper(ripper); } @Test @Disabled("Broken ripper") - public void testTagBlackList() throws IOException { - TsuminoRipper ripper = new TsuminoRipper(new URL("http://www.tsumino.com/Book/Info/43528/sore-wa-kurokute-suketeita-what-s-tight-and-black-and-sheer-all-over-")); + public void testTagBlackList() throws IOException, URISyntaxException { + TsuminoRipper ripper = new TsuminoRipper(new URI("http://www.tsumino.com/Book/Info/43528/sore-wa-kurokute-suketeita-what-s-tight-and-black-and-sheer-all-over-").toURL()); Document doc = 
ripper.getFirstPage(); List tagsOnPage = ripper.getTags(doc); String[] tags1 = {"test", "one", "Smell"}; diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TumblrRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TumblrRipperTest.java index 07aeb28d1..e771e2095 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TumblrRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TumblrRipperTest.java @@ -2,7 +2,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.TumblrRipper; @@ -12,30 +13,30 @@ public class TumblrRipperTest extends RippersTest { @Test @Disabled - public void testTumblrFullRip() throws IOException { - TumblrRipper ripper = new TumblrRipper(new URL("http://wrouinr.tumblr.com")); + public void testTumblrFullRip() throws IOException, URISyntaxException { + TumblrRipper ripper = new TumblrRipper(new URI("http://wrouinr.tumblr.com").toURL()); testRipper(ripper); } @Test @Disabled - public void testTumblrTagRip() throws IOException { - TumblrRipper ripper = new TumblrRipper(new URL("https://these-are-my-b-sides.tumblr.com/tagged/boobs")); + public void testTumblrTagRip() throws IOException, URISyntaxException { + TumblrRipper ripper = new TumblrRipper(new URI("https://these-are-my-b-sides.tumblr.com/tagged/boobs").toURL()); testRipper(ripper); } @Test @Disabled - public void testTumblrPostRip() throws IOException { - TumblrRipper ripper = new TumblrRipper(new URL("http://sadbaffoon.tumblr.com/post/132045920789/what-a-hoe")); + public void testTumblrPostRip() throws IOException, URISyntaxException { + TumblrRipper ripper = new TumblrRipper(new URI("http://sadbaffoon.tumblr.com/post/132045920789/what-a-hoe").toURL()); testRipper(ripper); } @Test @Disabled("Commented out because the test link is 404ing") - public void testEmbeddedImage() throws 
IOException { + public void testEmbeddedImage() throws IOException, URISyntaxException { TumblrRipper ripper = new TumblrRipper( - new URL("https://these-are-my-b-sides.tumblr.com/post/178225921524/this-was-fun")); + new URI("https://these-are-my-b-sides.tumblr.com/post/178225921524/this-was-fun").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TwitterRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TwitterRipperTest.java index 8e746c9e7..de164767a 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TwitterRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TwitterRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.TwitterRipper; @@ -11,16 +12,16 @@ public class TwitterRipperTest extends RippersTest { @Test @Tag("flaky") - public void testTwitterUserRip() throws IOException { - TwitterRipper ripper = new TwitterRipper(new URL("https://twitter.com/danngamber01/media")); + public void testTwitterUserRip() throws IOException, URISyntaxException { + TwitterRipper ripper = new TwitterRipper(new URI("https://twitter.com/danngamber01/media").toURL()); testRipper(ripper); } @Test @Tag("flaky") - public void testTwitterSearchRip() throws IOException { + public void testTwitterSearchRip() throws IOException, URISyntaxException { TwitterRipper ripper = new TwitterRipper( - new URL("https://twitter.com/search?f=tweets&q=from%3Aalinalixxx%20filter%3Aimages&src=typd")); + new URI("https://twitter.com/search?f=tweets&q=from%3Aalinalixxx%20filter%3Aimages&src=typd").toURL()); testRipper(ripper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TwodgalleriesRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TwodgalleriesRipperTest.java index 
3671d5062..1df43cff0 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TwodgalleriesRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/TwodgalleriesRipperTest.java @@ -1,7 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.TwodgalleriesRipper; @@ -11,9 +12,9 @@ public class TwodgalleriesRipperTest extends RippersTest { @Test @Disabled("https://github.com/RipMeApp/ripme/issues/182") - public void testTwodgalleriesRip() throws IOException { + public void testTwodgalleriesRip() throws IOException, URISyntaxException { TwodgalleriesRipper ripper = new TwodgalleriesRipper( - new URL("http://www.2dgalleries.com/artist/regis-loisel-6477")); + new URI("http://www.2dgalleries.com/artist/regis-loisel-6477").toURL()); testRipper(ripper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VidbleRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VidbleRipperTest.java index 58decfe6c..0ce645404 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VidbleRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VidbleRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.VidbleRipper; @@ -9,16 +11,16 @@ public class VidbleRipperTest extends RippersTest { @Test - public void testVidbleRip() throws IOException { - VidbleRipper ripper = new VidbleRipper(new URL("http://www.vidble.com/album/y1oyh3zd")); + public void testVidbleRip() throws IOException, URISyntaxException { + VidbleRipper ripper = new VidbleRipper(new URI("https://vidble.com/album/cGEFr8zi").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = 
new URL("http://www.vidble.com/album/y1oyh3zd"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://vidble.com/album/cGEFr8zi").toURL(); VidbleRipper ripper = new VidbleRipper(url); - Assertions.assertEquals("y1oyh3zd", ripper.getGID(url)); + Assertions.assertEquals("cGEFr8zi", ripper.getGID(url)); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VideoRippersTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VideoRippersTest.java index fb1629468..6ac08ca4e 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VideoRippersTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VideoRippersTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -40,9 +42,9 @@ private void videoTestHelper(VideoRipper ripper) { @Test @Disabled("Test disbaled. 
See https://github.com/RipMeApp/ripme/issues/574") - public void testTwitchVideoRipper() throws IOException { + public void testTwitchVideoRipper() throws IOException, URISyntaxException { List contentURLs = new ArrayList<>(); - contentURLs.add(new URL("https://clips.twitch.tv/FaithfulIncredulousPotTBCheesePull")); + contentURLs.add(new URI("https://clips.twitch.tv/FaithfulIncredulousPotTBCheesePull").toURL()); for (URL url : contentURLs) { // TwitchVideoRipper ripper = new TwitchVideoRipper(url); // videoTestHelper(ripper); @@ -51,18 +53,18 @@ public void testTwitchVideoRipper() throws IOException { @Test @Disabled("Test disabled see https://github.com/RipMeApp/ripme/issues/1095") - public void testPornhubRipper() throws IOException { + public void testPornhubRipper() throws IOException, URISyntaxException { List contentURLs = new ArrayList<>(); - contentURLs.add(new URL("https://www.pornhub.com/view_video.php?viewkey=ph5a329fa707269")); + contentURLs.add(new URI("https://www.pornhub.com/view_video.php?viewkey=ph5a329fa707269").toURL()); for (URL url : contentURLs) { PornhubRipper ripper = new PornhubRipper(url); videoTestHelper(ripper); } } - public void testYuvutuRipper() throws IOException { + public void testYuvutuRipper() throws IOException, URISyntaxException { List contentURLs = new ArrayList<>(); - contentURLs.add(new URL("http://www.yuvutu.com/video/828499/female-reader-armpit-job/")); + contentURLs.add(new URI("http://www.yuvutu.com/video/828499/female-reader-armpit-job/").toURL()); for (URL url : contentURLs) { YuvutuRipper ripper = new YuvutuRipper(url); videoTestHelper(ripper); diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ViewcomicRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ViewcomicRipperTest.java index 063cc036f..a315648d9 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ViewcomicRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ViewcomicRipperTest.java @@ -1,7 
+1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.ViewcomicRipper; import org.junit.jupiter.api.Disabled; @@ -9,8 +10,8 @@ public class ViewcomicRipperTest extends RippersTest { @Test @Disabled("Ripper broken") - public void testViewcomicRipper() throws IOException { - ViewcomicRipper ripper = new ViewcomicRipper(new URL("https://view-comic.com/batman-no-mans-land-vol-1/")); + public void testViewcomicRipper() throws IOException, URISyntaxException { + ViewcomicRipper ripper = new ViewcomicRipper(new URI("https://view-comic.com/batman-no-mans-land-vol-1/").toURL()); testRipper(ripper); } } \ No newline at end of file diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VkRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VkRipperTest.java index b7f52e99a..7bf7badf5 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VkRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VkRipperTest.java @@ -1,11 +1,13 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.VkRipper; import org.json.JSONObject; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class VkRipperTest extends RippersTest { @@ -18,19 +20,22 @@ public class VkRipperTest extends RippersTest { // EXAMPLE: https://vk.com/album45506334_00?rev=1 (a single album - wall pictures) // EXAMPLE: https://vk.com/album45506334_101886701 (a single album - custom) @Test - public void testVkAlbumHttpRip() throws IOException { - VkRipper ripper = new VkRipper(new URL("https://vk.com/album45506334_0")); + @Tag("flaky") + public void testVkAlbumHttpRip() throws IOException, URISyntaxException { 
+ VkRipper ripper = new VkRipper(new URI("https://vk.com/album45506334_0").toURL()); testRipper(ripper); } @Test - public void testVkPhotosRip() throws IOException { - VkRipper ripper = new VkRipper(new URL("https://vk.com/photos45506334")); + @Tag("flaky") + public void testVkPhotosRip() throws IOException, URISyntaxException { + VkRipper ripper = new VkRipper(new URI("https://vk.com/photos45506334").toURL()); testRipper(ripper); } @Test - public void testFindJSONObjectContainingPhotoID() throws IOException { - VkRipper ripper = new VkRipper(new URL("http://vk.com/album45506334_0")); + @Tag("flaky") + public void testFindJSONObjectContainingPhotoID() throws IOException, URISyntaxException { + VkRipper ripper = new VkRipper(new URI("http://vk.com/album45506334_0").toURL()); String json = "{\"payload\":[0,[\"album-45984105_268691406\",18,14,[{\"id\":\"-45984105_457345201\",\"base\":\"https://sun9-37.userapi.com/\",\"tagged\":[],\"likes\":0,\"shares\":0,\"o_src\":\"https://sun9-65.userapi.com/c857520/v857520962/10e24c/DPxygc3XW5E.jpg\",\"o_\":[\"https://sun9-65.userapi.com/c857520/v857520962/10e24c/DPxygc3XW5E\",130,98],\"z_src\":\"https://sun9-41.userapi.com/c857520/v857520962/10e24a/EsDDQA36qKI.jpg\",\"z_\":[\"https://sun9-41.userapi.com/c857520/v857520962/10e24a/EsDDQA36qKI\",1280,960],\"w_src\":\"https://sun9-60.userapi.com/c857520/v857520962/10e24b/6ETsA15rAdU.jpg\",\"w_\":[\"https://sun9-60.userapi.com/c857520/v857520962/10e24b/6ETsA15rAdU\",1405,1054]}]]],\"langVersion\":\"4298\"}"; String responseJson = @@ -42,8 +47,8 @@ public void testFindJSONObjectContainingPhotoID() throws IOException { } @Test - public void testGetBestSourceUrl() throws IOException { - VkRipper ripper = new VkRipper(new URL("http://vk.com/album45506334_0")); + public void testGetBestSourceUrl() throws IOException, URISyntaxException { + VkRipper ripper = new VkRipper(new URI("http://vk.com/album45506334_0").toURL()); String json = 
"{\"id\":\"-45984105_457345201\",\"base\":\"https://sun9-37.userapi.com/\",\"commcount\":0,\"date\":\"3 Dec at 1:14 am\",\"tagged\":[],\"attached_tags\":{\"max_tags_per_object\":5},\"o_src\":\"https://sun9-65.userapi.com/c857520/v857520962/10e24c/DPxygc3XW5E.jpg\",\"o_\":[\"https://sun9-65.userapi.com/c857520/v857520962/10e24c/DPxygc3XW5E\",130,98],\"y_src\":\"https://sun9-9.userapi.com/c857520/v857520962/10e249/dUDeuY10s0A.jpg\",\"y_\":[\"https://sun9-9.userapi.com/c857520/v857520962/10e249/dUDeuY10s0A\",807,605],\"z_src\":\"https://sun9-41.userapi.com/c857520/v857520962/10e24a/EsDDQA36qKI.jpg\",\"z_\":[\"https://sun9-41.userapi.com/c857520/v857520962/10e24a/EsDDQA36qKI\",1280,960]}"; Assertions.assertEquals("https://sun9-41.userapi.com/c857520/v857520962/10e24a/EsDDQA36qKI.jpg", diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VscoRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VscoRipperTest.java index fc78ec2d0..20e144422 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VscoRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/VscoRipperTest.java @@ -1,50 +1,52 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import com.rarchives.ripme.ripper.rippers.VscoRipper; - -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -import java.io.IOException; -import java.net.URL; - -public class VscoRipperTest extends RippersTest { - - /** - * Testing single image. - * - * @throws IOException - */ - @Test - public void testSingleImageRip() throws IOException { - VscoRipper ripper = new VscoRipper(new URL("https://vsco.co/jonathangodoy/media/5d1aec76bb669a128035e98a")); - testRipper(ripper); - } - - /** - * Tests profile rip., Prevents Bug #679 from happening again. 
- * https://github.com/RipMeApp/ripme/issues/679 - * - * @throws IOException - */ - @Test - public void testHyphenatedRip() throws IOException { - VscoRipper ripper = new VscoRipper(new URL("https://vsco.co/jolly-roger/gallery")); - testRipper(ripper); - } - - /** - * Make sure it names the folder something sensible. - * - * @throws IOException - */ - @Test - public void testGetGID() throws IOException { - URL url = new URL("https://vsco.co/jolly-roger/media/590359c4ade3041f2658f407"); - - VscoRipper ripper = new VscoRipper(url); - - Assertions.assertEquals("jolly-roger/59035", ripper.getGID(url), "Failed to get GID"); - } - -} +package com.rarchives.ripme.tst.ripper.rippers; + +import com.rarchives.ripme.ripper.rippers.VscoRipper; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; + +public class VscoRipperTest extends RippersTest { + + /** + * Testing single image. + * + * @throws IOException + */ + @Test + public void testSingleImageRip() throws IOException, URISyntaxException { + VscoRipper ripper = new VscoRipper(new URI("https://vsco.co/jolly-roger/media/597ce449846079297b3f7cf3").toURL()); + testRipper(ripper); + } + + /** + * Tests profile rip., Prevents Bug #679 from happening again. + * https://github.com/RipMeApp/ripme/issues/679 + * + * @throws IOException + */ + @Test + public void testHyphenatedRip() throws IOException, URISyntaxException { + VscoRipper ripper = new VscoRipper(new URI("https://vsco.co/jolly-roger/gallery").toURL()); + testRipper(ripper); + } + + /** + * Make sure it names the folder something sensible. 
+ * + * @throws IOException + */ + @Test + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://vsco.co/jolly-roger/media/590359c4ade3041f2658f407").toURL(); + + VscoRipper ripper = new VscoRipper(url); + + Assertions.assertEquals("jolly-roger/59035", ripper.getGID(url), "Failed to get GID"); + } + +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WebtoonsRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WebtoonsRipperTest.java index 6f4ed2ebe..d05f307c7 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WebtoonsRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WebtoonsRipperTest.java @@ -1,28 +1,33 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.WebtoonsRipper; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -public class WebtoonsRipperTest extends RippersTest { +public class WebtoonsRipperTest extends RippersTest { @Test - public void testWebtoonsAlbum() throws IOException { - WebtoonsRipper ripper = new WebtoonsRipper(new URL("https://www.webtoons.com/en/super-hero/unordinary/episode-103/viewer?title_no=679&episode_no=109")); + @Tag("flaky") + public void testWebtoonsAlbum() throws IOException, URISyntaxException { + WebtoonsRipper ripper = new WebtoonsRipper(new URI("https://www.webtoons.com/en/super-hero/unordinary/episode-103/viewer?title_no=679&episode_no=109").toURL()); testRipper(ripper); } @Test - public void testWedramabtoonsType() throws IOException { - WebtoonsRipper ripper = new WebtoonsRipper(new URL("http://www.webtoons.com/en/drama/lookism/ep-145/viewer?title_no=1049&episode_no=145")); + @Tag("flaky") + public void testWedramabtoonsType() throws IOException, URISyntaxException { 
+ WebtoonsRipper ripper = new WebtoonsRipper(new URI("http://www.webtoons.com/en/drama/lookism/ep-145/viewer?title_no=1049&episode_no=145").toURL()); testRipper(ripper); } @Test @Disabled("URL format different") - public void testGetGID() throws IOException { - URL url = new URL("https://www.webtoons.com/en/super-hero/unordinary/episode-103/viewer?title_no=679&episode_no=109"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://www.webtoons.com/en/super-hero/unordinary/episode-103/viewer?title_no=679&episode_no=109").toURL(); WebtoonsRipper ripper = new WebtoonsRipper(url); Assertions.assertEquals("super-hero", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WordpressComicRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WordpressComicRipperTest.java index 6a647286e..d0649aa93 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WordpressComicRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/WordpressComicRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.WordpressComicRipper; @@ -24,48 +26,50 @@ public class WordpressComicRipperTest extends RippersTest { @Test @Tag("flaky") // https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI - public void test_totempole666() throws IOException { + public void test_totempole666() throws IOException, URISyntaxException { WordpressComicRipper ripper = new WordpressComicRipper( - new URL("http://www.totempole666.com/comic/first-time-for-everything-00-cover/")); + new URI("http://www.totempole666.com/comic/first-time-for-everything-00-cover/").toURL()); testRipper(ripper); } @Test @Tag("flaky") // https://github.com/RipMeApp/ripme/issues/269 - Disabled 
test - WordpressRipperTest: various domains flaky in CI - public void test_buttsmithy() throws IOException { - WordpressComicRipper ripper = new WordpressComicRipper(new URL("http://buttsmithy.com/archives/comic/p1")); + public void test_buttsmithy() throws IOException, URISyntaxException { + WordpressComicRipper ripper = new WordpressComicRipper(new URI("http://buttsmithy.com/archives/comic/p1").toURL()); testRipper(ripper); } @Test @Tag("flaky") // https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI - public void test_themonsterunderthebed() throws IOException { + public void test_themonsterunderthebed() throws IOException, URISyntaxException { WordpressComicRipper ripper = new WordpressComicRipper( - new URL("http://themonsterunderthebed.net/?comic=test-post")); + new URI("http://themonsterunderthebed.net/?comic=test-post").toURL()); testRipper(ripper); } @Test - public void test_prismblush() throws IOException { + public void test_prismblush() throws IOException, URISyntaxException { WordpressComicRipper ripper = new WordpressComicRipper( - new URL("http://prismblush.com/comic/hella-trap-pg-01/")); + new URI("http://prismblush.com/comic/hella-trap-pg-01/").toURL()); testRipper(ripper); } @Test - public void test_konradokonski_1() throws IOException { + @Tag("flaky") + public void test_konradokonski_1() throws IOException, URISyntaxException { WordpressComicRipper ripper = new WordpressComicRipper( - new URL("http://www.konradokonski.com/sawdust/comic/get-up/")); + new URI("http://www.konradokonski.com/sawdust/comic/get-up/").toURL()); testRipper(ripper); } @Test - public void test_konradokonski_2() throws IOException { + @Tag("flaky") + public void test_konradokonski_2() throws IOException, URISyntaxException { WordpressComicRipper ripper = new WordpressComicRipper( - new URL("http://www.konradokonski.com/wiory/comic/08182008/")); + new 
URI("http://www.konradokonski.com/wiory/comic/08182008/").toURL()); testRipper(ripper); } @Test - public void test_konradokonski_getAlbumTitle() throws IOException { - URL url = new URL("http://www.konradokonski.com/sawdust/comic/get-up/"); + public void test_konradokonski_getAlbumTitle() throws IOException, URISyntaxException { + URL url = new URI("http://www.konradokonski.com/sawdust/comic/get-up/").toURL(); WordpressComicRipper ripper = new WordpressComicRipper(url); Assertions.assertEquals("konradokonski.com_sawdust", ripper.getAlbumTitle(url)); @@ -73,47 +77,48 @@ public void test_konradokonski_getAlbumTitle() throws IOException { @Test @Tag("flaky") // https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI - public void test_freeadultcomix() throws IOException { + public void test_freeadultcomix() throws IOException, URISyntaxException { WordpressComicRipper ripper = new WordpressComicRipper( - new URL("http://freeadultcomix.com/finders-feepaid-in-full-sparrow/")); + new URI("http://freeadultcomix.com/finders-feepaid-in-full-sparrow/").toURL()); testRipper(ripper); } @Test @Tag("flaky") - public void test_delvecomic() throws IOException { + public void test_delvecomic() throws IOException, URISyntaxException { WordpressComicRipper ripper = new WordpressComicRipper( - new URL("http://thisis.delvecomic.com/NewWP/comic/in-too-deep/")); + new URI("http://thisis.delvecomic.com/NewWP/comic/in-too-deep/").toURL()); testRipper(ripper); } @Test - public void test_Eightmuses_download() throws IOException { + public void test_Eightmuses_download() throws IOException, URISyntaxException { WordpressComicRipper ripper = new WordpressComicRipper( - new URL("https://8muses.download/lustomic-playkittens-josh-samuel-porn-comics-8-muses/")); + new URI("https://8muses.download/lustomic-playkittens-josh-samuel-porn-comics-8-muses/").toURL()); testRipper(ripper); } @Test - public void test_Eightmuses_getAlbumTitle() throws 
IOException { - URL url = new URL("https://8muses.download/lustomic-playkittens-josh-samuel-porn-comics-8-muses/"); + public void test_Eightmuses_getAlbumTitle() throws IOException, URISyntaxException { + URL url = new URI("https://8muses.download/lustomic-playkittens-josh-samuel-porn-comics-8-muses/").toURL(); WordpressComicRipper ripper = new WordpressComicRipper(url); Assertions.assertEquals("8muses.download_lustomic-playkittens-josh-samuel-porn-comics-8-muses", ripper.getAlbumTitle(url)); } @Test - public void test_spyingwithlana_download() throws IOException { + @Tag("flaky") + public void test_spyingwithlana_download() throws IOException, URISyntaxException { WordpressComicRipper ripper = new WordpressComicRipper( - new URL("http://spyingwithlana.com/comic/the-big-hookup/")); + new URI("http://spyingwithlana.com/comic/the-big-hookup/").toURL()); testRipper(ripper); } @Test - public void test_spyingwithlana_getAlbumTitle() throws IOException { - URL url = new URL("http://spyingwithlana.com/comic/the-big-hookup/"); + public void test_spyingwithlana_getAlbumTitle() throws IOException, URISyntaxException { + URL url = new URI("http://spyingwithlana.com/comic/the-big-hookup/").toURL(); WordpressComicRipper ripper = new WordpressComicRipper(url); Assertions.assertEquals("spyingwithlana_the-big-hookup", ripper.getAlbumTitle(url)); } @Test @Tag("flaky") // https://github.com/RipMeApp/ripme/issues/269 - Disabled test - WordpressRipperTest: various domains flaky in CI - public void test_pepsaga() throws IOException { - WordpressComicRipper ripper = new WordpressComicRipper(new URL("http://shipinbottle.pepsaga.com/?p=281")); + public void test_pepsaga() throws IOException, URISyntaxException { + WordpressComicRipper ripper = new WordpressComicRipper(new URI("http://shipinbottle.pepsaga.com/?p=281").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XcartxRipperTest.java 
b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XcartxRipperTest.java index 7b5ab870d..b4130cbb6 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XcartxRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XcartxRipperTest.java @@ -6,13 +6,14 @@ import org.junit.jupiter.api.Test; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; public class XcartxRipperTest extends RippersTest { @Test @Disabled("Broken ripper") - public void testAlbum() throws IOException { - XcartxRipper ripper = new XcartxRipper(new URL("http://xcartx.com/4937-tokimeki-nioi.html")); + public void testAlbum() throws IOException, URISyntaxException { + XcartxRipper ripper = new XcartxRipper(new URI("http://xcartx.com/4937-tokimeki-nioi.html").toURL()); testRipper(ripper); } } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XhamsterRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XhamsterRipperTest.java index e9475a1ed..24555e89e 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XhamsterRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XhamsterRipperTest.java @@ -1,63 +1,64 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.XhamsterRipper; import org.jsoup.nodes.Document; import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class XhamsterRipperTest extends RippersTest { @Test - public void testXhamsterAlbum1() throws IOException { - XhamsterRipper ripper = new XhamsterRipper(new URL("https://xhamster.com/photos/gallery/sexy-preggo-girls-9026608")); - testRipper(ripper); - } - @Test - public void testXhamster2Album() throws IOException { - XhamsterRipper ripper = new 
XhamsterRipper(new URL("https://xhamster2.com/photos/gallery/sexy-preggo-girls-9026608")); + @Tag("flaky") + public void testXhamsterAlbum1() throws IOException, URISyntaxException { + XhamsterRipper ripper = new XhamsterRipper(new URI("https://xhamster.com/photos/gallery/sexy-preggo-girls-9026608").toURL()); testRipper(ripper); } @Test - public void testXhamsterAlbum2() throws IOException { - XhamsterRipper ripper = new XhamsterRipper(new URL("https://xhamster.com/photos/gallery/japanese-dolls-4-asahi-mizuno-7254664")); + @Tag("flaky") + public void testXhamster2Album() throws IOException, URISyntaxException { + XhamsterRipper ripper = new XhamsterRipper(new URI("https://xhamster2.com/photos/gallery/sexy-preggo-girls-9026608").toURL()); testRipper(ripper); } @Test - public void testXhamsterAlbumOneDomain() throws IOException { - XhamsterRipper ripper = new XhamsterRipper(new URL("https://xhamster.one/photos/gallery/japanese-dolls-4-asahi-mizuno-7254664")); + @Tag("flaky") + public void testXhamsterAlbum2() throws IOException, URISyntaxException { + XhamsterRipper ripper = new XhamsterRipper(new URI("https://xhamster.com/photos/gallery/japanese-dolls-4-asahi-mizuno-7254664").toURL()); testRipper(ripper); } @Test @Tag("flaky") - public void testXhamsterAlbumDesiDomain() throws IOException { - XhamsterRipper ripper = new XhamsterRipper(new URL("https://xhamster5.desi/photos/gallery/japanese-dolls-4-asahi-mizuno-7254664")); + public void testXhamsterAlbumDesiDomain() throws IOException, URISyntaxException { + XhamsterRipper ripper = new XhamsterRipper(new URI("https://xhamster5.desi/photos/gallery/japanese-dolls-4-asahi-mizuno-7254664").toURL()); testRipper(ripper); } @Test - @Disabled("ripper broken?") - public void testXhamsterVideo() throws IOException { - XhamsterRipper ripper = new XhamsterRipper(new URL("https://xhamster.com/videos/brazzers-busty-big-booty-milf-lisa-ann-fucks-her-masseur-1492828")); + @Tag("flaky") + public void testXhamsterVideo() throws 
IOException, URISyntaxException { + XhamsterRipper ripper = new XhamsterRipper(new URI("https://xhamster.com/videos/brazzers-busty-big-booty-milf-lisa-ann-fucks-her-masseur-1492828").toURL()); testRipper(ripper); } @Test - public void testBrazilianXhamster() throws IOException { - XhamsterRipper ripper = new XhamsterRipper(new URL("https://pt.xhamster.com/photos/gallery/silvana-7105696")); + @Tag("flaky") + public void testBrazilianXhamster() throws IOException, URISyntaxException { + XhamsterRipper ripper = new XhamsterRipper(new URI("https://pt.xhamster.com/photos/gallery/cartoon-babe-15786301").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("https://xhamster5.desi/photos/gallery/japanese-dolls-4-asahi-mizuno-7254664"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("https://xhamster5.desi/photos/gallery/japanese-dolls-4-asahi-mizuno-7254664").toURL(); XhamsterRipper ripper = new XhamsterRipper(url); Assertions.assertEquals("7254664", ripper.getGID(url)); } @Test - public void testGetNextPage() throws IOException { - XhamsterRipper ripper = new XhamsterRipper(new URL("https://pt.xhamster.com/photos/gallery/mega-compil-6-10728626")); + @Tag("flaky") + public void testGetNextPage() throws IOException, URISyntaxException { + XhamsterRipper ripper = new XhamsterRipper(new URI("https://pt.xhamster.com/photos/gallery/mega-compil-6-10728626").toURL()); Document doc = ripper.getFirstPage(); try { ripper.getNextPage(doc); diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XlecxRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XlecxRipperTest.java index 807231e80..78eb5a3af 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XlecxRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XlecxRipperTest.java @@ -1,18 +1,19 @@ -package com.rarchives.ripme.tst.ripper.rippers; - -import java.io.IOException; 
-import java.net.URL; - -import com.rarchives.ripme.ripper.rippers.XlecxRipper; - -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; - -public class XlecxRipperTest extends RippersTest { - @Test - @Disabled("Broken ripper") - public void testAlbum() throws IOException { - XlecxRipper ripper = new XlecxRipper(new URL("http://xlecx.com/4274-black-canary-ravished-prey.html")); - testRipper(ripper); - } -} +package com.rarchives.ripme.tst.ripper.rippers; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import com.rarchives.ripme.ripper.rippers.XlecxRipper; + +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class XlecxRipperTest extends RippersTest { + @Test + @Disabled("Broken ripper") + public void testAlbum() throws IOException, URISyntaxException { + XlecxRipper ripper = new XlecxRipper(new URI("http://xlecx.com/4274-black-canary-ravished-prey.html").toURL()); + testRipper(ripper); + } +} diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XvideosRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XvideosRipperTest.java index 9446b6408..cde9d1119 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XvideosRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/XvideosRipperTest.java @@ -1,16 +1,16 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; -import java.net.URL; +import java.net.URI; +import java.net.URISyntaxException; import com.rarchives.ripme.ripper.rippers.XvideosRipper; -import com.rarchives.ripme.tst.ripper.rippers.RippersTest; import org.junit.jupiter.api.Test; public class XvideosRipperTest extends RippersTest { @Test - public void testXhamsterAlbum1() throws IOException { - XvideosRipper ripper = new XvideosRipper(new URL("https://www.xvideos.com/video23515878/dee_s_pool_toys")); + public void testXhamsterAlbum1() throws IOException, URISyntaxException { 
+ XvideosRipper ripper = new XvideosRipper(new URI("https://www.xvideos.com/video23515878/dee_s_pool_toys").toURL()); testRipper(ripper); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/YoupornRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/YoupornRipperTest.java index bce22d628..9520ee08c 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/YoupornRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/YoupornRipperTest.java @@ -5,6 +5,8 @@ import org.junit.jupiter.api.Test; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.util.ArrayList; import java.util.List; @@ -12,9 +14,11 @@ public class YoupornRipperTest extends RippersTest { @Test @Tag("flaky") - public void testYoupornRipper() throws IOException { + public void testYoupornRipper() throws IOException, URISyntaxException { List contentURLs = new ArrayList<>(); - contentURLs.add(new URL("http://www.youporn.com/watch/7669155/mrs-li-amateur-69-orgasm/?from=categ")); + // Video cannot be loaded: "Video has been flagged for verification" + //contentURLs.add(new URI("http://www.youporn.com/watch/7669155/mrs-li-amateur-69-orgasm/?from=categ").toURL()); + contentURLs.add(new URI("https://www.youporn.com/watch/13158849/smashing-star-slut-part-2/").toURL()); for (URL url : contentURLs) { YoupornRipper ripper = new YoupornRipper(url); testRipper(ripper); diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/YuvutuRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/YuvutuRipperTest.java index ec95a02cd..cc84c8d54 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/YuvutuRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/YuvutuRipperTest.java @@ -1,26 +1,30 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; import 
java.net.URL; import com.rarchives.ripme.ripper.rippers.YuvutuRipper; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; public class YuvutuRipperTest extends RippersTest { @Test - public void testYuvutuAlbum1() throws IOException { - YuvutuRipper ripper = new YuvutuRipper(new URL("http://www.yuvutu.com/modules.php?name=YuGallery&action=view&set_id=127013")); + @Tag("flaky") + public void testYuvutuAlbum1() throws IOException, URISyntaxException { + YuvutuRipper ripper = new YuvutuRipper(new URI("http://www.yuvutu.com/modules.php?name=YuGallery&action=view&set_id=127013").toURL()); testRipper(ripper); } @Test - public void testYuvutuAlbum2() throws IOException { - YuvutuRipper ripper = new YuvutuRipper(new URL("http://www.yuvutu.com/modules.php?name=YuGallery&action=view&set_id=420333")); + public void testYuvutuAlbum2() throws IOException, URISyntaxException { + YuvutuRipper ripper = new YuvutuRipper(new URI("http://www.yuvutu.com/modules.php?name=YuGallery&action=view&set_id=420333").toURL()); testRipper(ripper); } - public void testGetGID() throws IOException { - URL url = new URL("http://www.yuvutu.com/modules.php?name=YuGallery&action=view&set_id=420333"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("http://www.yuvutu.com/modules.php?name=YuGallery&action=view&set_id=420333").toURL(); YuvutuRipper ripper = new YuvutuRipper(url); Assertions.assertEquals("420333", ripper.getGID(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ZizkiRipperTest.java b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ZizkiRipperTest.java index 3d21df97a..adbd4c77c 100644 --- a/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ZizkiRipperTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ripper/rippers/ZizkiRipperTest.java @@ -1,6 +1,8 @@ package com.rarchives.ripme.tst.ripper.rippers; import java.io.IOException; +import java.net.URI; +import 
java.net.URISyntaxException; import java.net.URL; import com.rarchives.ripme.ripper.rippers.ZizkiRipper; @@ -12,22 +14,22 @@ public class ZizkiRipperTest extends RippersTest { @Test @Tag("flaky") - public void testRip() throws IOException { - ZizkiRipper ripper = new ZizkiRipper(new URL("http://zizki.com/dee-chorde/we-got-spirit")); + public void testRip() throws IOException, URISyntaxException { + ZizkiRipper ripper = new ZizkiRipper(new URI("http://zizki.com/dee-chorde/we-got-spirit").toURL()); testRipper(ripper); } @Test - public void testGetGID() throws IOException { - URL url = new URL("http://zizki.com/dee-chorde/we-got-spirit"); + public void testGetGID() throws IOException, URISyntaxException { + URL url = new URI("http://zizki.com/dee-chorde/we-got-spirit").toURL(); ZizkiRipper ripper = new ZizkiRipper(url); Assertions.assertEquals("dee-chorde", ripper.getGID(url)); } @Test @Tag("flaky") - public void testAlbumTitle() throws IOException { - URL url = new URL("http://zizki.com/dee-chorde/we-got-spirit"); + public void testAlbumTitle() throws IOException, URISyntaxException { + URL url = new URI("http://zizki.com/dee-chorde/we-got-spirit").toURL(); ZizkiRipper ripper = new ZizkiRipper(url); Assertions.assertEquals("zizki_Dee Chorde_We Got Spirit", ripper.getAlbumTitle(url)); } diff --git a/src/test/java/com/rarchives/ripme/tst/ui/LabelsBundlesTest.java b/src/test/java/com/rarchives/ripme/tst/ui/LabelsBundlesTest.java index 6189d86aa..d35ff49e0 100644 --- a/src/test/java/com/rarchives/ripme/tst/ui/LabelsBundlesTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ui/LabelsBundlesTest.java @@ -10,12 +10,12 @@ import com.rarchives.ripme.utils.Utils; -import org.apache.log4j.Logger; -import org.junit.jupiter.api.Assertions; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.junit.jupiter.api.Test; public class LabelsBundlesTest { - private Logger logger = Logger.getLogger(Utils.class); + private Logger logger = 
LogManager.getLogger(Utils.class); private static final String DEFAULT_LANG = "en_US"; @Test diff --git a/src/test/java/com/rarchives/ripme/tst/ui/RipStatusMessageTest.java b/src/test/java/com/rarchives/ripme/tst/ui/RipStatusMessageTest.java index e5fe8b43e..fbd1c6046 100644 --- a/src/test/java/com/rarchives/ripme/tst/ui/RipStatusMessageTest.java +++ b/src/test/java/com/rarchives/ripme/tst/ui/RipStatusMessageTest.java @@ -3,9 +3,11 @@ import com.rarchives.ripme.ui.RipStatusMessage; import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; public class RipStatusMessageTest { + @Test public void testConstructor() { RipStatusMessage.STATUS loadingResource = RipStatusMessage.STATUS.LOADING_RESOURCE; String path = "path/to/file"; diff --git a/src/test/java/com/rarchives/ripme/ui/UIContextMenuTests.java b/src/test/java/com/rarchives/ripme/ui/UIContextMenuTests.java new file mode 100644 index 000000000..32dcdd9bf --- /dev/null +++ b/src/test/java/com/rarchives/ripme/ui/UIContextMenuTests.java @@ -0,0 +1,190 @@ +package com.rarchives.ripme.ui; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import javax.swing.*; +import java.awt.*; +import java.awt.datatransfer.Clipboard; +import java.awt.datatransfer.StringSelection; +import java.awt.event.ActionEvent; +import java.lang.reflect.InvocationTargetException; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.junit.jupiter.api.Assertions.fail; + +// these tests do not run on a server, as it is headless +@Tag("flaky") +public class UIContextMenuTests { + + private JFrame frame; + private JTextField textField; + private ContextMenuMouseListener contextMenuMouseListener; + + @BeforeEach + void setUp() throws InterruptedException, InvocationTargetException { + AtomicBoolean notDone = new AtomicBoolean(true); + + SwingUtilities.invokeAndWait(() -> { + frame = new 
JFrame("ContextMenuMouseListener Example"); + textField = new JTextField("Hello, world!"); + + // Create an instance of ContextMenuMouseListener + contextMenuMouseListener = new ContextMenuMouseListener(textField); + + // Add ContextMenuMouseListener to JTextField + textField.addMouseListener(contextMenuMouseListener); + + frame.getContentPane().add(textField, BorderLayout.CENTER); + frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); + frame.setSize(300, 200); + frame.setVisible(true); + + notDone.set(false); + }); + + // Wait for the GUI to be fully initialized + while (notDone.get()) { + Thread.yield(); + } + } + + @AfterEach + void tearDown() { + frame.dispose(); + } + + @Test + void testCut() { + // Simulate a cut event + simulateCutEvent(); + // Add assertions if needed + } + + @Test + void testCopy() { + // Simulate a copy event + simulateCopyEvent(); + // Add assertions if needed + } + + @Test + void testPaste() { + // Simulate a paste event + simulatePasteEvent(); + // Add assertions if needed + } + + @Test + void testSelectAll() { + // Simulate a select all event + simulateSelectAllEvent(); + // Add assertions if needed + } + + @Test + void testUndo() { + // Simulate an undo event + simulateUndoEvent(); + // Add assertions if needed + } + + private void simulatePasteEvent() { + // Save the initial text content + String initialText = contextMenuMouseListener.getTextComponent().getText(); + + // Assume there is some text to paste + String textToPaste = "Text to paste"; + + // Set the text to the clipboard + Clipboard clipboard = Toolkit.getDefaultToolkit().getSystemClipboard(); + StringSelection stringSelection = new StringSelection(textToPaste); + clipboard.setContents(stringSelection, stringSelection); + + // Simulate a paste event + contextMenuMouseListener.getTextComponent().paste(); + + // Verify that the paste operation worked + String actualText = contextMenuMouseListener.getTextComponent().getText(); + + // Check if the text was appended after the 
initial text + if (actualText.equals(initialText + textToPaste)) { + System.out.println("Paste operation successful. Text content matches."); + } else { + fail("Paste operation failed. Text content does not match."); + } + } + + + + + private void simulateSelectAllEvent() { + // Simulate a select all event by invoking the selectAllAction + contextMenuMouseListener.getSelectAllAction().actionPerformed(new ActionEvent(contextMenuMouseListener.getTextComponent(), ActionEvent.ACTION_PERFORMED, "")); + + // Verify that all text is selected + int expectedSelectionStart = 0; + int expectedSelectionEnd = contextMenuMouseListener.getTextComponent().getText().length(); + int actualSelectionStart = contextMenuMouseListener.getTextComponent().getSelectionStart(); + int actualSelectionEnd = contextMenuMouseListener.getTextComponent().getSelectionEnd(); + + if (expectedSelectionStart == actualSelectionStart && expectedSelectionEnd == actualSelectionEnd) { + System.out.println("Select All operation successful. Text is selected."); + } else { + fail("Select All operation failed. Text is not selected as expected."); + } + } + + private void simulateUndoEvent() { + + // Simulate an undo event by invoking the undoAction + contextMenuMouseListener.getUndoAction().actionPerformed(new ActionEvent(contextMenuMouseListener.getTextComponent(), ActionEvent.ACTION_PERFORMED, "")); + + // Verify that the undo operation worked + String expectedText = contextMenuMouseListener.getSavedString(); // Assuming the undo reverts to the saved state + String actualText = contextMenuMouseListener.getTextComponent().getText(); + + if (expectedText.equals(actualText)) { + System.out.println("Undo operation successful. Text content matches."); + } else { + fail("Undo operation failed. 
Text content does not match."); + } + } + + + private void simulateCopyEvent() { + // Save the initial text content + String initialText = contextMenuMouseListener.getTextComponent().getText(); + + // Simulate a copy event by invoking the copyAction + contextMenuMouseListener.getCopyAction().actionPerformed(new ActionEvent(contextMenuMouseListener.getTextComponent(), ActionEvent.ACTION_PERFORMED, "")); + + // Verify that the copy operation worked + String actualText = contextMenuMouseListener.getDebugSavedString(); + + if (initialText.equals(actualText)) { + System.out.println("Copy operation successful. Text content matches."); + } else { + fail("Copy operation failed. Text content does not match."); + } + } + + private void simulateCutEvent() { + // Save the initial text content + String initialText = contextMenuMouseListener.getTextComponent().getText(); + + // Simulate a cut event by invoking the cutAction + contextMenuMouseListener.getCutAction().actionPerformed(new ActionEvent(contextMenuMouseListener.getTextComponent(), ActionEvent.ACTION_PERFORMED, "")); + + // Verify that the cut operation worked + String actualText = contextMenuMouseListener.getDebugSavedString(); + + if (initialText.equals(actualText)) { + System.out.println("Cut operation successful. Text content matches."); + } else { + fail("Cut operation failed. 
Text content does not match."); + } + } +} diff --git a/src/test/java/com/rarchives/ripme/ui/UpdateUtilsTest.java b/src/test/java/com/rarchives/ripme/ui/UpdateUtilsTest.java new file mode 100644 index 000000000..d28e6b078 --- /dev/null +++ b/src/test/java/com/rarchives/ripme/ui/UpdateUtilsTest.java @@ -0,0 +1,16 @@ +package com.rarchives.ripme.ui; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; + +public class UpdateUtilsTest { + + @Test + public void testIsNewerVersion() { + Assertions.assertFalse(UpdateUtils.isNewerVersion("1.7.94")); + Assertions.assertFalse(UpdateUtils.isNewerVersion("1.7.94-9-asdf")); + Assertions.assertTrue(UpdateUtils.isNewerVersion("1.7.94-11-asdf")); + Assertions.assertTrue(UpdateUtils.isNewerVersion("1.7.95")); + } + +} \ No newline at end of file diff --git a/utils/style.sh b/utils/style.sh deleted file mode 100644 index 45bb40e93..000000000 --- a/utils/style.sh +++ /dev/null @@ -1,27 +0,0 @@ -echo "" -echo "=====================================================" -echo "Tabs are not allowed" -echo "-----------------------------------------------------" -git grep -n -P "\t" -- :/*.java | sed -e "s/\t/\x1b[7m--->\x1b[m/g" -echo "=====================================================" - -echo "" -echo "=====================================================" -echo "Trailing whitespace is not allowed" -echo "-----------------------------------------------------" -git grep -n -P "[ \t]+$" -- :/*.java | sed -e "s/\t/\x1b[7m--->\x1b[m/g" | sed -e "s/ /\x1b[7m.\x1b[m/g" | sed -e "s/$/\x1b[7m$\x1b[m/g" -echo "=====================================================" - -echo "" -echo "=====================================================" -echo "'){' is not allowed. Place a space between ')' and '{', i.e. 
'if (a) {'" -echo "-----------------------------------------------------" -git grep -n -P "\)\{" -- :/*.java -echo "=====================================================" - -echo "" -echo "=====================================================" -echo "A space is required after keywords (if|else|for|while|do|try|catch|finally)" -echo "-----------------------------------------------------" -git grep -n -P "(\b(if|for|while|catch)\b[(])|(\b(else|do|try|finally)\b[{])" -- :/*.java | sed -r -e "s/(\b(if|for|while|catch)\b[(])|(\b(else|do|try|finally)\b[{])/\x1b[7m\0\x1b[m/g" -echo "=====================================================" diff --git a/utils/stylefix.sh b/utils/stylefix.sh deleted file mode 100644 index dbfad1e12..000000000 --- a/utils/stylefix.sh +++ /dev/null @@ -1,17 +0,0 @@ -echo "" -echo "=====================================================" -echo "Tabs are not allowed (please manually fix tabs)" -echo "-----------------------------------------------------" -git grep -n -P "\t" -- :/*.java | sed -e "s/\t/\x1b[7m--->\x1b[m/g" -echo "=====================================================" - -echo "Removing trailing whitespace..." -git grep -l -P "[ \t]+$" -- :/*.java | xargs -I % sed -i -r -e "s/[ \t]+$//g" % - -echo "Replacing '){' with ') {'..." -git grep -l -P "\)\{" -- :/*.java | xargs -I % sed -i -r -e "s/\)\{/) {/g" % - -echo "Adding space between keywords and punctuation..." 
-git grep -l -P "(\b(if|for|while|catch)\b[(])" -- :/*.java | xargs -I % sed -i -r -e "s/(\b(if|for|while|catch)\b[(])/\2 (/g" % -git grep -l -P "(\b(else|do|try|finally)\b[{])" -- :/*.java | xargs -I % sed -i -r -e "s/(\b(else|do|try|finally)\b[{])/\2 {/g" % - diff --git a/workspace.code-workspace b/workspace.code-workspace deleted file mode 100644 index 95b80106b..000000000 --- a/workspace.code-workspace +++ /dev/null @@ -1,16 +0,0 @@ -{ - "folders": [ - { - "path": "E:\\Downloads\\_Isaaku\\dev" - } - ], - "settings": { - "files.exclude": { - "**/.classpath": false, - "**/.project": true, - "**/.settings": true, - "**/.factorypath": true - }, - "java.configuration.updateBuildConfiguration": "automatic" - } -} \ No newline at end of file