diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000..ea27a58 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,27 @@ +{ + "name": "nfcore", + "image": "nfcore/gitpod:latest", + "remoteUser": "gitpod", + + // Configure tool-specific properties. + "customizations": { + // Configure properties specific to VS Code. + "vscode": { + // Set *default* container specific settings.json values on container create. + "settings": { + "python.defaultInterpreterPath": "/opt/conda/bin/python", + "python.linting.enabled": true, + "python.linting.pylintEnabled": true, + "python.formatting.autopep8Path": "/opt/conda/bin/autopep8", + "python.formatting.yapfPath": "/opt/conda/bin/yapf", + "python.linting.flake8Path": "/opt/conda/bin/flake8", + "python.linting.pycodestylePath": "/opt/conda/bin/pycodestyle", + "python.linting.pydocstylePath": "/opt/conda/bin/pydocstyle", + "python.linting.pylintPath": "/opt/conda/bin/pylint" + }, + + // Add the IDs of extensions you want installed when the container is created. + "extensions": ["ms-python.python", "ms-python.vscode-pylance", "nf-core.nf-core-extensionpack"] + } + } +} diff --git a/.editorconfig b/.editorconfig index b78de6e..b6b3190 100644 --- a/.editorconfig +++ b/.editorconfig @@ -8,7 +8,7 @@ trim_trailing_whitespace = true indent_size = 4 indent_style = space -[*.{md,yml,yaml,html,css,scss,js,cff}] +[*.{md,yml,yaml,html,css,scss,js}] indent_size = 2 # These files are edited and tested upstream in nf-core/modules diff --git a/.gitattributes b/.gitattributes index cf7aa95..7a2dabc 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,5 +1,4 @@ *.config linguist-language=nextflow +*.nf.test linguist-language=nextflow modules/nf-core/** linguist-generated -modules/sanger-tol/** linguist-generated subworkflows/nf-core/** linguist-generated -subworkflows/sanger-tol/** linguist-generated diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 43f890f..b889e95 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -93,3 +93,19 @@ If you are using a new feature from core Nextflow, you may bump the minimum requ ### Images and figures For overview images and other documents we follow the nf-core [style guidelines and examples](https://nf-co.re/developers/design_guidelines). + +## GitHub Codespaces + +This repo includes a devcontainer configuration which will create a GitHub Codespaces environment for Nextflow development! This is an online developer environment that runs in your browser, complete with VSCode and a terminal. + +To get started: + +- Open the repo in [Codespaces](https://github.com/sanger-tol/insdcdownload/codespaces) - Tools installed - nf-core - Nextflow + +Devcontainer specs: + +- [DevContainer config](.devcontainer/devcontainer.json) - [Dockerfile](.devcontainer/Dockerfile) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 716fdcc..ffd6c7c 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -38,13 +38,14 @@ body: id: system attributes: label: System information - description: "* Nextflow version _(eg. 22.04.0)_ + description: "* Nextflow version _(eg. 22.10.1)_ * Hardware _(eg. HPC, Desktop, Cloud)_ * Executor _(eg. slurm, local, awsbatch)_ - * Container engine: _(e.g. Docker, Singularity, Conda, Podman, Shifter or Charliecloud)_ + * Container engine: _(e.g. Docker, Singularity, Conda, Podman, Shifter, Charliecloud, + or Apptainer)_ * OS _(eg. 
CentOS Linux, macOS, Linux Mint)_ diff --git a/.github/workflows/branch.yml b/.github/workflows/branch.yml index c08e134..550c65a 100644 --- a/.github/workflows/branch.yml +++ b/.github/workflows/branch.yml @@ -1,19 +1,19 @@ name: nf-core branch protection -# This workflow is triggered on PRs to master branch on the repository -# It fails when someone tries to make a PR against the nf-core `master` branch instead of `dev` +# This workflow is triggered on PRs to main branch on the repository +# It fails when someone tries to make a PR against the nf-core `main` branch instead of `dev` on: pull_request_target: - branches: [master] + branches: [main] jobs: test: runs-on: ubuntu-latest steps: - # PRs to the nf-core repo master branch are only ok if coming from the nf-core repo `dev` or any `patch` branches + # PRs to the nf-core repo main branch are only ok if coming from the nf-core repo `dev` or any `patch` branches - name: Check PRs if: github.repository == 'sanger-tol/insdcdownload' run: | - { [[ ${{github.event.pull_request.head.repo.full_name }} == sanger-tol/insdcdownload ]] && [[ $GITHUB_HEAD_REF = "dev" ]]; } || [[ $GITHUB_HEAD_REF == "patch" ]] + { [[ ${{github.event.pull_request.head.repo.full_name }} == sanger-tol/insdcdownload ]] && [[ $GITHUB_HEAD_REF == "dev" ]]; } || [[ $GITHUB_HEAD_REF == "patch" ]] # If the above check failed, post a comment on the PR explaining the failure # NOTE - this doesn't currently work if the PR is coming from a fork, due to limitations in GitHub actions secrets @@ -22,7 +22,7 @@ jobs: uses: mshick/add-pr-comment@v1 with: message: | - ## This PR is against the `master` branch :x: + ## This PR is against the `main` branch :x: * Do not close this PR * Click _Edit_ and change the `base` to `dev` @@ -32,9 +32,9 @@ jobs: Hi @${{ github.event.pull_request.user.login }}, - It looks like this pull-request is has been made against the [${{github.event.pull_request.head.repo.full_name }}](https://github.com/${{github.event.pull_request.head.repo.full_name }}) `master` branch. - The `master` branch on nf-core repositories should always contain code from the latest release. - Because of this, PRs to `master` are only allowed if they come from the [${{github.event.pull_request.head.repo.full_name }}](https://github.com/${{github.event.pull_request.head.repo.full_name }}) `dev` branch. + It looks like this pull-request has been made against the [${{github.event.pull_request.head.repo.full_name }}](https://github.com/${{github.event.pull_request.head.repo.full_name }}) `main` branch. + The `main` branch on nf-core repositories should always contain code from the latest release. + Because of this, PRs to `main` are only allowed if they come from the [${{github.event.pull_request.head.repo.full_name }}](https://github.com/${{github.event.pull_request.head.repo.full_name }}) `dev` branch. You do not need to close this PR, you can change the target branch to `dev` by clicking the _"Edit"_ button at the top of this page. Note that even after this, the test will continue to show as failing until you push a new commit.
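The compound shell test in the branch-protection workflow above is terse. A minimal standalone sketch of the same logic follows, with the GitHub Actions expressions replaced by plain shell variables (the variable names and sample values are illustrative, not part of the workflow):

```bash
#!/usr/bin/env bash
# Stand-ins for the values the workflow reads at run time:
#   HEAD_REPO ~ ${{ github.event.pull_request.head.repo.full_name }}
#   HEAD_REF  ~ $GITHUB_HEAD_REF
HEAD_REPO="sanger-tol/insdcdownload"
HEAD_REF="dev"

# The check passes if the PR comes from this repository's `dev` branch,
# or from any branch named `patch`.
if { [[ "$HEAD_REPO" == "sanger-tol/insdcdownload" ]] && [[ "$HEAD_REF" == "dev" ]]; } || [[ "$HEAD_REF" == "patch" ]]; then
    echo "Branch-protection check passed"
else
    echo "PRs to main must come from dev or a patch branch" >&2
    exit 1
fi
```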
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8327968..d0f2625 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -11,6 +11,10 @@ on: env: NXF_ANSI_LOG: false +concurrency: + group: "${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}" + cancel-in-progress: true + jobs: test: name: Run pipeline with test data @@ -20,11 +24,11 @@ jobs: strategy: matrix: NXF_VER: - - "22.04.0" + - "22.10.1" - "latest-everything" steps: - name: Check out pipeline code - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Install Nextflow uses: nf-core/setup-nextflow@v1 diff --git a/.github/workflows/clean-up.yml b/.github/workflows/clean-up.yml new file mode 100644 index 0000000..694e90e --- /dev/null +++ b/.github/workflows/clean-up.yml @@ -0,0 +1,24 @@ +name: "Close user-tagged issues and PRs" +on: + schedule: + - cron: "0 0 * * 0" # Once a week + +jobs: + clean-up: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + steps: + - uses: actions/stale@v7 + with: + stale-issue-message: "This issue has been tagged as awaiting-changes or awaiting-feedback by an nf-core contributor. Remove stale label or add a comment otherwise this issue will be closed in 20 days." + stale-pr-message: "This PR has been tagged as awaiting-changes or awaiting-feedback by an nf-core contributor. Remove stale label or add a comment if it is still useful." + close-issue-message: "This issue was closed because it has been tagged as awaiting-changes or awaiting-feedback by an nf-core contributor and then staled for 20 days with no activity." + days-before-stale: 30 + days-before-close: 20 + days-before-pr-close: -1 + any-of-labels: "awaiting-changes,awaiting-feedback" + exempt-issue-labels: "WIP" + exempt-pr-labels: "WIP" + repo-token: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.github/workflows/fix-linting.yml b/.github/workflows/fix-linting.yml index 1c18c1d..324f123 100644 --- a/.github/workflows/fix-linting.yml +++ b/.github/workflows/fix-linting.yml @@ -8,23 +8,23 @@ jobs: # Only run if comment is on a PR with the main repo, and if it contains the magic keywords if: > contains(github.event.comment.html_url, '/pull/') && - contains(github.event.comment.body, '@nf-core-bot fix linting') && + contains(github.event.comment.body, '@sanger-tolsoft fix linting') && github.repository == 'sanger-tol/insdcdownload' runs-on: ubuntu-latest steps: - # Use the @nf-core-bot token to check out so we can push later + # Use the @sanger-tolsoft token to check out so we can push later - uses: actions/checkout@v3 with: - token: ${{ secrets.nf_core_bot_auth_token }} + token: ${{ secrets.sangertolsoft_access_token }} # Action runs on the issue comment, so we don't get the PR by default # Use the gh cli to check out the PR - name: Checkout Pull Request run: gh pr checkout ${{ github.event.issue.number }} env: - GITHUB_TOKEN: ${{ secrets.nf_core_bot_auth_token }} + GITHUB_TOKEN: ${{ secrets.sangertolsoft_access_token }} - - uses: actions/setup-node@v2 + - uses: actions/setup-node@v3 - name: Install Prettier run: npm install -g prettier @prettier/plugin-php @@ -34,9 +34,9 @@ jobs: id: prettier_status run: | if prettier --check ${GITHUB_WORKSPACE}; then - echo "::set-output name=result::pass" + echo "result=pass" >> $GITHUB_OUTPUT else - echo "::set-output name=result::fail" + echo "result=fail" >> $GITHUB_OUTPUT fi - name: Run 'prettier --write' @@ -46,8 +46,8 @@ jobs: - name: Commit & push changes if: steps.prettier_status.outputs.result == 'fail' 
run: | - git config user.email "core@nf-co.re" - git config user.name "nf-core-bot" + git config user.email "105875386+sanger-tolsoft@users.noreply.github.com" + git config user.name "sanger-tolsoft" git config push.default upstream git add . git status diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index 8a5ce69..7ebc310 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -4,6 +4,8 @@ name: nf-core linting # that the code meets the nf-core guidelines. on: push: + branches: + - dev pull_request: release: types: [published] @@ -12,22 +14,22 @@ jobs: EditorConfig: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - - uses: actions/setup-node@v2 + - uses: actions/setup-node@v3 - name: Install editorconfig-checker run: npm install -g editorconfig-checker - name: Run ECLint check - run: editorconfig-checker -exclude README.md $(find .* -type f | grep -v '.git\|.py\|.md\|json\|yml\|yaml\|html\|css\|work\|.nextflow\|build\|nf_core.egg-info\|log.txt\|Makefile') + run: editorconfig-checker -exclude README.md $(find .* -type f | grep -v '.git\|.py\|.md\|cff\|json\|yml\|yaml\|html\|css\|work\|.nextflow\|build\|nf_core.egg-info\|log.txt\|Makefile') Prettier: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - - uses: actions/setup-node@v2 + - uses: actions/setup-node@v3 - name: Install Prettier run: npm install -g prettier @@ -38,7 +40,7 @@ jobs: PythonBlack: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Check code lints with Black uses: psf/black@stable @@ -69,20 +71,20 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out pipeline code - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Install Nextflow uses: nf-core/setup-nextflow@v1 - - uses: actions/setup-python@v3 + - uses: actions/setup-python@v4 with: - python-version: "3.7" + python-version: "3.8" architecture: "x64" - name: Install dependencies run: | python -m pip install --upgrade pip - pip install nf-core + pip install nf-core==2.8 - name: Run nf-core lint env: @@ -97,7 +99,7 @@ jobs: - name: Upload linting log file artifact if: ${{ always() }} - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: linting-logs path: | diff --git a/.github/workflows/linting_comment.yml b/.github/workflows/linting_comment.yml index 04758f6..0bbcd30 100644 --- a/.github/workflows/linting_comment.yml +++ b/.github/workflows/linting_comment.yml @@ -18,7 +18,7 @@ jobs: - name: Get PR number id: pr_number - run: echo "::set-output name=pr_number::$(cat linting-logs/PR_number.txt)" + run: echo "pr_number=$(cat linting-logs/PR_number.txt)" >> $GITHUB_OUTPUT - name: Post PR comment uses: marocchino/sticky-pull-request-comment@v2 diff --git a/.github/workflows/sanger_test.yml b/.github/workflows/sanger_test.yml new file mode 100644 index 0000000..e69af1e --- /dev/null +++ b/.github/workflows/sanger_test.yml @@ -0,0 +1,29 @@ +name: sanger-tol LSF tests + +on: + workflow_dispatch: +jobs: + run-tower: + name: Run LSF tests + runs-on: ubuntu-latest + steps: + - name: Launch workflow via tower + uses: seqeralabs/action-tower-launch@v2 + with: + workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }} + access_token: ${{ secrets.TOWER_ACCESS_TOKEN }} + compute_env: ${{ secrets.TOWER_COMPUTE_ENV }} + revision: ${{ github.sha }} + workdir: ${{ secrets.TOWER_WORKDIR_PARENT }}/work/${{ github.repository }}/work-${{ github.sha }} + parameters: | + { + 
"outdir": "${{ secrets.TOWER_WORKDIR_PARENT }}/results/${{ github.repository }}/results-${{ github.sha }}", + } + profiles: test,sanger,singularity,cleanup + + - uses: actions/upload-artifact@v3 + with: + name: Tower debug log file + path: | + tower_action_*.log + tower_action_*.json diff --git a/.github/workflows/sanger_test_full.yml b/.github/workflows/sanger_test_full.yml new file mode 100644 index 0000000..e028c6b --- /dev/null +++ b/.github/workflows/sanger_test_full.yml @@ -0,0 +1,43 @@ +name: sanger-tol LSF full size tests + +on: + push: + branches: + - main + - dev + workflow_dispatch: +jobs: + run-tower: + name: Run LSF full size tests + runs-on: ubuntu-latest + steps: + - name: Sets env vars for push + run: | + echo "REVISION=${{ github.sha }}" >> $GITHUB_ENV + if: github.event_name == 'push' + + - name: Sets env vars for workflow_dispatch + run: | + echo "REVISION=${{ github.sha }}" >> $GITHUB_ENV + if: github.event_name == 'workflow_dispatch' + + - name: Launch workflow via tower + uses: seqeralabs/action-tower-launch@v2 + with: + workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }} + access_token: ${{ secrets.TOWER_ACCESS_TOKEN }} + compute_env: ${{ secrets.TOWER_COMPUTE_ENV }} + revision: ${{ env.REVISION }} + workdir: ${{ secrets.TOWER_WORKDIR_PARENT }}/work/${{ github.repository }}/work-${{ env.REVISION }} + parameters: | + { + "outdir": "${{ secrets.TOWER_WORKDIR_PARENT }}/results/${{ github.repository }}/results-${{ env.REVISION }}", + } + profiles: test_full,sanger,singularity,cleanup + + - uses: actions/upload-artifact@v3 + with: + name: Tower debug log file + path: | + tower_action_*.log + tower_action_*.json diff --git a/.nf-core.yml b/.nf-core.yml index 093639e..442b32c 100644 --- a/.nf-core.yml +++ b/.nf-core.yml @@ -1,7 +1,6 @@ repository_type: pipeline lint: files_exist: - - CODE_OF_CONDUCT.md - assets/multiqc_config.yml - assets/nf-core-insdcdownload_logo_light.png - conf/igenomes.config @@ -17,6 +16,7 @@ lint: - .github/workflows/linting.yml - assets/sendmail_template.txt - lib/NfcoreTemplate.groovy + - .github/workflows/branch.yml nextflow_config: - manifest.name - manifest.homePage diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..0c31cdb --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,5 @@ +repos: + - repo: https://github.com/pre-commit/mirrors-prettier + rev: "v2.7.1" + hooks: + - id: prettier diff --git a/.prettierignore b/.prettierignore index d0e7ae5..437d763 100644 --- a/.prettierignore +++ b/.prettierignore @@ -1,4 +1,6 @@ email_template.html +adaptivecard.json +slackreport.json .nextflow* work/ data/ @@ -7,3 +9,4 @@ results/ testing/ testing* *.pyc +bin/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 7b386e7..4a82368 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,38 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
-## v1.1.0 - [2022-10-07] +## [[2.0.0](https://github.com/sanger-tol/insdcdownload/releases/tag/2.0.0)] – Light elf – [2024-06-04] + +This version supports the new FTP structure of Ensembl + +### Enhancements & fixes + +- The sample-sheet column `species_dir` is replaced with the `outdir` column which + represents where the assembly and repeats are downloaded (in immediate sub-directories) +- Relative paths in the sample-sheet are now evaluated from the `--outdir` parameter +- Memory usage rules for `samtools dict` +- Appropriate use of `tabix`'s TBI and CSI indexing, depending on the sequence lengths +- `--outdir` is a _mandatory_ parameter + +### Parameters + +_In the samplesheet_ + +| Old parameter | New parameter | +| ------------- | ------------- | +| species_dir | outdir | + +> **NB:** Parameter has been **updated** if both old and new parameter information is present.
**NB:** Parameter has been **added** if just the new parameter information is present.
**NB:** Parameter has been **removed** if new parameter information isn't present. + +### Software dependencies + +Note, since the pipeline is using Nextflow DSL2, each process will be run with its own [Biocontainer](https://biocontainers.pro/#/registry). This means that on occasion it is entirely possible for the pipeline to be using different versions of the same tool. However, the overall software dependency changes compared to the last release have been listed below for reference. Only `Docker` or `Singularity` containers are supported; `conda` is not supported. + +| Dependency | Old version | New version | +| ---------- | ----------- | ----------- | +| multiqc | 1.13 | 1.14 | + +## [[1.1.0](https://github.com/sanger-tol/insdcdownload/releases/tag/1.1.0)] – Deciduous ent – [2022-10-07] Minor update that fixes a few bugs @@ -16,7 +47,7 @@ Minor update that fixes a few bugs - New `species_dir` column to indicate where to download the files to -## v1.0.0 - [2022-08-12] +## [[1.0.0](https://github.com/sanger-tol/insdcdownload/releases/tag/1.0.0)] – Flaming balrog – [2022-08-12] Initial release of sanger-tol/insdcdownload, created with the [nf-core](https://nf-co.re/) template. diff --git a/CITATION.cff b/CITATION.cff index 5e7a3c3..21a2422 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -2,25 +2,25 @@ # Visit https://bit.ly/cffinit to generate yours today! cff-version: 1.2.0 -title: sanger-tol/insdcdownload v1.1.0 - Deciduous ent +title: sanger-tol/insdcdownload message: >- - If you use this software, please cite it using the - metadata from this file. + If you use this software, please cite it using the + metadata from this file. type: software authors: - - given-names: Matthieu - family-names: Muffato - affiliation: Wellcome Sanger Institute - orcid: "https://orcid.org/0000-0002-7860-3560" - - given-names: Priyanka - family-names: Surana - orcid: "https://orcid.org/0000-0002-7167-0875" - affiliation: Wellcome Sanger Institute + - given-names: Matthieu + family-names: Muffato + affiliation: Wellcome Sanger Institute + orcid: "https://orcid.org/0000-0002-7860-3560" + - given-names: Priyanka + family-names: Surana + orcid: "https://orcid.org/0000-0002-7167-0875" + affiliation: Wellcome Sanger Institute identifiers: - - type: doi - value: 10.5281/zenodo.7155119 + - type: doi + value: 10.5281/zenodo.6983932 repository-code: "https://github.com/sanger-tol/insdcdownload" license: MIT commit: TODO -version: 1.1.0 +version: 2.0.0 date-released: "2022-10-07" diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..f4fd052 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,111 @@ +# Code of Conduct at nf-core (v1.0) + +## Our Pledge + +In the interest of fostering an open, collaborative, and welcoming environment, we as contributors and maintainers of nf-core, pledge to making participation in our projects and community a harassment-free experience for everyone, regardless of: + +- Age +- Body size +- Familial status +- Gender identity and expression +- Geographical location +- Level of experience +- Nationality and national origins +- Native language +- Physical and neurological ability +- Race or ethnicity +- Religion +- Sexual identity and orientation +- Socioeconomic status + +Please note that the list above is alphabetised and is therefore not ranked in any order of preference or importance. + +## Preamble + +> Note: This Code of Conduct (CoC) has been drafted by the nf-core Safety Officer and edited after input from members of the nf-core team and others.
"We", in this document, refers to the Safety Officer and members of the nf-core core team, both of whom are deemed to be members of the nf-core community and are therefore required to abide by this Code of Conduct. This document will amended periodically to keep it up-to-date, and in case of any dispute, the most current version will apply. + +An up-to-date list of members of the nf-core core team can be found [here](https://nf-co.re/about). Our current safety officer is Renuka Kudva. + +nf-core is a young and growing community that welcomes contributions from anyone with a shared vision for [Open Science Policies](https://www.fosteropenscience.eu/taxonomy/term/8). Open science policies encompass inclusive behaviours and we strive to build and maintain a safe and inclusive environment for all individuals. + +We have therefore adopted this code of conduct (CoC), which we require all members of our community and attendees in nf-core events to adhere to in all our workspaces at all times. Workspaces include but are not limited to Slack, meetings on Zoom, Jitsi, YouTube live etc. + +Our CoC will be strictly enforced and the nf-core team reserve the right to exclude participants who do not comply with our guidelines from our workspaces and future nf-core activities. + +We ask all members of our community to help maintain a supportive and productive workspace and to avoid behaviours that can make individuals feel unsafe or unwelcome. Please help us maintain and uphold this CoC. + +Questions, concerns or ideas on what we can include? Contact safety [at] nf-co [dot] re + +## Our Responsibilities + +The safety officer is responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behaviour. + +The safety officer in consultation with the nf-core core team have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +Members of the core team or the safety officer who violate the CoC will be required to recuse themselves pending investigation. They will not have access to any reports of the violations and be subject to the same actions as others in violation of the CoC. + +## When are where does this Code of Conduct apply? + +Participation in the nf-core community is contingent on following these guidelines in all our workspaces and events. This includes but is not limited to the following listed alphabetically and therefore in no order of preference: + +- Communicating with an official project email address. +- Communicating with community members within the nf-core Slack channel. +- Participating in hackathons organised by nf-core (both online and in-person events). +- Participating in collaborative work on GitHub, Google Suite, community calls, mentorship meetings, email correspondence. +- Participating in workshops, training, and seminar series organised by nf-core (both online and in-person events). This applies to events hosted on web-based platforms such as Zoom, Jitsi, YouTube live etc. +- Representing nf-core on social media. This includes both official and personal accounts. 
+ +## nf-core cares 😊 + +nf-core's CoC and expectations of respectful behaviours for all participants (including organisers and the nf-core team) include but are not limited to the following (listed in alphabetical order): + +- Ask for consent before sharing another community member’s personal information (including photographs) on social media. +- Be respectful of differing viewpoints and experiences. We are all here to learn from one another and a difference in opinion can present a good learning opportunity. +- Celebrate your accomplishments at events! (Get creative with your use of emojis 🎉 🥳 💯 🙌 !) +- Demonstrate empathy towards other community members. (We don’t all have the same amount of time to dedicate to nf-core. If tasks are pending, don’t hesitate to gently remind members of your team. If you are leading a task, ask for help if you feel overwhelmed.) +- Engage with and enquire after others. (This is especially important given the geographically remote nature of the nf-core community, so let’s do this the best we can) +- Focus on what is best for the team and the community. (When in doubt, ask) +- Graciously accept constructive criticism, yet be unafraid to question, deliberate, and learn. +- Introduce yourself to members of the community. (We’ve all been outsiders and we know that talking to strangers can be hard for some, but remember we’re interested in getting to know you and your visions for open science!) +- Show appreciation and **provide clear feedback**. (This is especially important because we don’t see each other in person and it can be harder to interpret subtleties. Also remember that not everyone understands a certain language to the same extent as you do, so **be clear in your communications to be kind.**) +- Take breaks when you feel like you need them. +- Use welcoming and inclusive language. (Participants are encouraged to display their chosen pronouns on Zoom or in communication on Slack.) + +## nf-core frowns on 😕 + +The following behaviours from any participants within the nf-core community (including the organisers) will be considered unacceptable under this code of conduct. Engaging or advocating for any of the following could result in expulsion from nf-core workspaces. + +- Deliberate intimidation, stalking or following and sustained disruption of communication among participants of the community. This includes hijacking shared screens through actions such as using the annotate tool in conferencing software such as Zoom. +- “Doxing” i.e. posting (or threatening to post) another person’s personal identifying information online. +- Spamming or trolling of individuals on social media. +- Use of sexual or discriminatory imagery, comments, or jokes and unwelcome sexual attention. +- Verbal and text comments that reinforce social structures of domination related to gender, gender identity and expression, sexual orientation, ability, physical appearance, body size, race, age, religion or work experience. + +### Online Trolling + +The majority of nf-core interactions and events are held online. Unfortunately, holding events online comes with the added issue of online trolling. This is unacceptable; reports of such behaviour will be taken very seriously, and perpetrators will be excluded from activities immediately. + +All community members are required to ask members of the group they are working within for explicit consent prior to taking screenshots of individuals during video calls.
+ +## Procedures for Reporting CoC violations + +If someone makes you feel uncomfortable through their behaviours or actions, report it as soon as possible. + +You can reach out to members of the [nf-core core team](https://nf-co.re/about) and they will forward your concerns to the safety officer(s). + +Issues directly concerning members of the core team will be dealt with by other members of the core team and the safety manager, and possible conflicts of interest will be taken into account. nf-core is also in discussions about having an ombudsperson, and details will be shared in due course. + +All reports will be handled with utmost discretion and confidentiality. + +## Attribution and Acknowledgements + +- The [Contributor Covenant, version 1.4](http://contributor-covenant.org/version/1/4) +- The [OpenCon 2017 Code of Conduct](http://www.opencon2017.org/code_of_conduct) (CC BY 4.0 OpenCon organisers, SPARC and Right to Research Coalition) +- The [eLife innovation sprint 2020 Code of Conduct](https://sprint.elifesciences.org/code-of-conduct/) +- The [Mozilla Community Participation Guidelines v3.1](https://www.mozilla.org/en-US/about/governance/policies/participation/) (version 3.1, CC BY-SA 3.0 Mozilla) + +## Changelog + +### v1.0 - March 12th, 2021 + +- Complete rewrite from original [Contributor Covenant](http://contributor-covenant.org/) CoC. diff --git a/README.md b/README.md index f3d6b5d..2e62502 100644 --- a/README.md +++ b/README.md @@ -4,9 +4,9 @@ -[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.7155119-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.7155119) +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.6983932-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.6983932) -[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.04.0-23aa62.svg)](https://www.nextflow.io/) +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.10.1-23aa62.svg)](https://www.nextflow.io/) [![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/) [![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/) [![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/) @@ -19,14 +19,6 @@ **sanger-tol/insdcdownload** is a pipeline that downloads assemblies from INSDC into a Tree of Life directory structure. -The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community! - -On release, automated continuous integration tests run the pipeline on a full-sized dataset on the GitHub CI infrastructure. This ensures that the pipeline runs in a third-party environment, and has sensible resource allocation defaults set to run on real-world datasets.
- -## Pipeline summary - -## Overview - The pipeline takes an assembly accession number, as well as the assembly name, and downloads it. It also builds a set of common indices (such as `samtools faidx`), and extracts the repeat-masking performed by the NCBI. Steps involved: @@ -41,38 +33,34 @@ Steps involved: - Extract the coordinates of the masked regions into a BED file. - Compress and index the BED file with `bgzip` and `tabix`. -## Quick Start - -1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.04.0`) +## Usage -2. Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_. +> **Note** +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how +> to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline) +> with `-profile test` before running the workflow on actual data. -3. Download the pipeline and test it on a minimal dataset with a single command: +The easiest way is to provide the exact name and accession number of the assembly like this: - ```bash - nextflow run sanger-tol/insdcdownload -profile test,YOURPROFILE --outdir - ``` +```console +nextflow run sanger-tol/insdcdownload --assembly_accession GCA_927399515.1 --assembly_name gfLaeSulp1.1 --outdir gfLaeSulp1.1_data +``` - Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). You can chain multiple config profiles in a comma-separated string. +> **Warning:** +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. Custom config files including those +> provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_; +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files). - > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`. - > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment. - > - If you are using `singularity`, please use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline.
Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs. - > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs. +The pipeline also supports bulk downloads through a sample-sheet. +More information about this mode is available on our [pipeline website](https://pipelines.tol.sanger.ac.uk/insdcdownload/usage). -4. Start running your own analysis! - - ```console - nextflow run sanger-tol/insdcdownload --assembly_accession GCA_927399515.1 --assembly_name gfLaeSulp1.1 --outdir results - ``` - -## Documentation +## Credits -The sanger-tol/insdcdownload pipeline comes with documentation about the pipeline [usage](docs/usage.md) and [output](docs/output.md). +sanger-tol/insdcdownload was mainly written by [Matthieu Muffato](https://github.com/muffato), with major borrowings from [Priyanka Surana](https://github.com/priyanka-surana)'s [read-mapping](https://github.com/sanger-tol/readmapping) pipeline, e.g. the script to remove the repeat-masking, and the overall structure and layout of the sub-workflows. -## Credits +We thank the following people for their assistance in the development of this pipeline: -sanger-tol/insdcdownload was mainly written by @muffato, with major borrowings from @priyanka-surana's [read-mapping](https://github.com/sanger-tol/readmapping) pipeline, e.g. the script to remove the repeat-masking, and the overall structure and layout of the sub-workflows. +- [Priyanka Surana](https://github.com/priyanka-surana) for providing reviews. ## Contributions and Support @@ -82,7 +70,7 @@ For further information or help, don't hesitate to get in touch on the [Slack `# ## Citations -If you use sanger-tol/insdcdownload for your analysis, please cite it using the following doi: [10.5281/zenodo.7155119](https://doi.org/10.5281/zenodo.7155119) +If you use sanger-tol/insdcdownload for your analysis, please cite it using the following doi: [10.5281/zenodo.6983932](https://doi.org/10.5281/zenodo.6983932) An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file. diff --git a/assets/adaptivecard.json b/assets/adaptivecard.json new file mode 100644 index 0000000..44934f8 --- /dev/null +++ b/assets/adaptivecard.json @@ -0,0 +1,67 @@ +{ + "type": "message", + "attachments": [ + { + "contentType": "application/vnd.microsoft.card.adaptive", + "contentUrl": null, + "content": { + "\$schema": "http://adaptivecards.io/schemas/adaptive-card.json", + "msteams": { + "width": "Full" + }, + "type": "AdaptiveCard", + "version": "1.2", + "body": [ + { + "type": "TextBlock", + "size": "Large", + "weight": "Bolder", + "color": "<% if (success) { %>Good<% } else { %>Attention<%} %>", + "text": "sanger-tol/insdcdownload v${version} - ${runName}", + "wrap": true + }, + { + "type": "TextBlock", + "spacing": "None", + "text": "Completed at ${dateComplete} (duration: ${duration})", + "isSubtle": true, + "wrap": true + }, + { + "type": "TextBlock", + "text": "<% if (success) { %>Pipeline completed successfully!<% } else { %>Pipeline completed with errors.
The full error message was: ${errorReport}.<% } %>", + "wrap": true + }, + { + "type": "TextBlock", + "text": "The command used to launch the workflow was as follows:", + "wrap": true + }, + { + "type": "TextBlock", + "text": "${commandLine}", + "isSubtle": true, + "wrap": true + } + ], + "actions": [ + { + "type": "Action.ShowCard", + "title": "Pipeline Configuration", + "card": { + "type": "AdaptiveCard", + "\$schema": "http://adaptivecards.io/schemas/adaptive-card.json", + "body": [ + { + "type": "FactSet", + "facts": [<% out << summary.collect{ k,v -> "{\"title\": \"$k\", \"value\" : \"$v\"}"}.join(",\n") %> + ] + } + ] + } + } + ] + } + } + ] +} diff --git a/assets/samplesheet.csv b/assets/samplesheet.csv index 49f1be2..0f2f8be 100644 --- a/assets/samplesheet.csv +++ b/assets/samplesheet.csv @@ -1,7 +1,7 @@ -species_dir,assembly_name,assembly_accession -25g/data/echinoderms/Asterias_rubens,eAstRub1.3,GCA_902459465.3 -25g/data/insects/Osmia_bicornis,iOsmBic2.1,GCA_907164935.1 -25g/data/insects/Osmia_bicornis,iOsmBic2.1_alternate_haplotype,GCA_907164925.1 -darwin/data/fungi/Laetiporus_sulphureus,gfLaeSulp1.1,GCA_927399515.1 -darwin/data/insects/Noctua_fimbriata,ilNocFimb1.1,GCA_905163415.1 -darwin/data/mammals/Meles_meles,mMelMel3.2_paternal_haplotype,GCA_922984935.2 +outdir,assembly_name,assembly_accession +Asterias_rubens/eAstRub1.3,eAstRub1.3,GCA_902459465.3 +Osmia_bicornis/iOsmBic2.1,iOsmBic2.1,GCA_907164935.1 +Osmia_bicornis/iOsmBic2.1_alternate_haplotype,iOsmBic2.1_alternate_haplotype,GCA_907164925.1 +Laetiporus_sulphureus/gfLaeSulp1.1,gfLaeSulp1.1,GCA_927399515.1 +Noctua_fimbriata/ilNocFimb1.1,ilNocFimb1.1,GCA_905163415.1 +Meles_meles/mMelMel3.2_paternal_haplotype,mMelMel3.2_paternal_haplotype,GCA_922984935.2 diff --git a/assets/sanger-tol-insdcdownload_logo_light.png b/assets/sanger-tol-insdcdownload_logo_light.png new file mode 100755 index 0000000..aab5faf Binary files /dev/null and b/assets/sanger-tol-insdcdownload_logo_light.png differ diff --git a/assets/schema_input.json b/assets/schema_input.json index ed91197..dafbfaa 100644 --- a/assets/schema_input.json +++ b/assets/schema_input.json @@ -7,7 +7,7 @@ "items": { "type": "object", "properties": { - "species_dir": { + "outdir": { "type": "string", "pattern": "^\\S+$", "errorMessage": "Species directory must be provided and exist" @@ -23,6 +23,6 @@ "errorMessage": "Assembly accession number must be provided and be of the form GCA_*" } }, - "required": ["species_dir", "assembly_name", "assembly_accession"] + "required": ["outdir", "assembly_name", "assembly_accession"] } } diff --git a/assets/sendmail_template.txt b/assets/sendmail_template.txt index 7e07ad0..6703a88 100644 --- a/assets/sendmail_template.txt +++ b/assets/sendmail_template.txt @@ -9,7 +9,7 @@ Content-Type: text/html; charset=utf-8 $email_html --nfcoremimeboundary -Content-Type: image/png;name="sanger-tol-insdcdownload_logo.png" +Content-Type: image/png;name="sanger-tol-insdcdownload_logo_light.png" Content-Transfer-Encoding: base64 Content-ID: Content-Disposition: inline; filename="sanger-tol-insdcdownload_logo_light.png" diff --git a/assets/slackreport.json b/assets/slackreport.json new file mode 100644 index 0000000..ea96df5 --- /dev/null +++ b/assets/slackreport.json @@ -0,0 +1,34 @@ +{ + "attachments": [ + { + "fallback": "Plain-text summary of the attachment.", + "color": "<% if (success) { %>good<% } else { %>danger<%} %>", + "author_name": "sanger-tol/insdcdownload v${version} - ${runName}", + "author_icon": 
"https://www.nextflow.io/docs/latest/_static/favicon.ico", + "text": "<% if (success) { %>Pipeline completed successfully!<% } else { %>Pipeline completed with errors<% } %>", + "fields": [ + { + "title": "Command used to launch the workflow", + "value": "```${commandLine}```", + "short": false + } + <% + if (!success) { %> + , + { + "title": "Full error message", + "value": "```${errorReport}```", + "short": false + }, + { + "title": "Pipeline configuration", + "value": "<% out << summary.collect{ k,v -> k == "hook_url" ? "_${k}_: (_hidden_)" : ( ( v.class.toString().contains('Path') || ( v.class.toString().contains('String') && v.contains('/') ) ) ? "_${k}_: `${v}`" : (v.class.toString().contains('DateTime') ? ("_${k}_: " + v.format(java.time.format.DateTimeFormatter.ofLocalizedDateTime(java.time.format.FormatStyle.MEDIUM))) : "_${k}_: ${v}") ) }.join(",\n") %>", + "short": false + } + <% } + %> + ], + "footer": "Completed at <% out << dateComplete.format(java.time.format.DateTimeFormatter.ofLocalizedDateTime(java.time.format.FormatStyle.MEDIUM)) %> (duration: ${duration})" + } + ] +} diff --git a/bin/check_samplesheet.py b/bin/check_samplesheet.py index 92561e7..aa8bfc4 100755 --- a/bin/check_samplesheet.py +++ b/bin/check_samplesheet.py @@ -28,7 +28,7 @@ class RowChecker: def __init__( self, - dir_col="species_dir", + dir_col="outdir", name_col="assembly_name", accession_col="assembly_accession", **kwargs, @@ -38,7 +38,7 @@ def __init__( Args: dir_col (str): The name of the column that contains the species directory - (default "species_dir"). + (default "outdir"). name_col (str): The name of the column that contains the assembly name (default "assembly_name"). accession_col (str): The name of the column that contains the accession @@ -78,9 +78,7 @@ def _validate_accession(self, row): if not row[self._accession_col]: raise AssertionError("Accession number is required.") if not self._regex_accession.match(row[self._accession_col]): - raise AssertionError( - "Accession numbers must match %s." % self._regex_accession - ) + raise AssertionError("Accession numbers must match %s." % self._regex_accession) def _validate_name(self, row): """Assert that the assembly name is non-empty and has no space.""" @@ -125,9 +123,6 @@ def sniff_format(handle): peek = read_head(handle) handle.seek(0) sniffer = csv.Sniffer() - # if not sniffer.has_header(peek): - # logger.critical(f"The given sample sheet does not appear to contain a header.") - # sys.exit(1) dialect = sniffer.sniff(peek) return dialect @@ -147,12 +142,12 @@ def check_samplesheet(file_in, file_out): Example: This function checks that the samplesheet follows the following structure:: - species_dir,assembly_name,assembly_accession - darwin/data/fungi/Laetiporus_sulphureus,gfLaeSulp1.1,GCA_927399515.1 - darwin/data/mammals/Meles_meles,mMelMel3.2_paternal_haplotype,GCA_922984935.2 + outdir,assembly_name,assembly_accession + Laetiporus_sulphureus/gfLaeSulp1.1,gfLaeSulp1.1,GCA_927399515.1 + Meles_meles/mMelMel3.2_paternal_haplotype,mMelMel3.2_paternal_haplotype,GCA_922984935.2 """ required_columns = { - "species_dir", + "outdir", "assembly_name", "assembly_accession", } @@ -161,9 +156,8 @@ def check_samplesheet(file_in, file_out): reader = csv.DictReader(in_handle, dialect=sniff_format(in_handle)) # Validate the existence of the expected header columns. if not required_columns.issubset(reader.fieldnames): - logger.critical( - f"The sample sheet **must** contain the column headers: {', '.join(required_columns)}." 
- ) + req_cols = ", ".join(required_columns) + logger.critical(f"The sample sheet **must** contain these column headers: {req_cols}.") sys.exit(1) # Validate each row. checker = RowChecker() diff --git a/bin/repeats_bed.py b/bin/repeats_bed.py index 71043ea..e4bd8cd 100755 --- a/bin/repeats_bed.py +++ b/bin/repeats_bed.py @@ -2,15 +2,15 @@ # This script was originally conceived by @muffato import argparse +import gzip import sys __doc__ = "This script prints a BED file of the masked regions a fasta file." def fasta_to_bed(fasta): - in_gap = None - with open(sys.argv[1]) as fh: + with gzip.open(fasta, "rt") if fasta.endswith(".gz") else open(fasta) as fh: for line in fh: line = line[:-1] if line.startswith(">"): @@ -38,10 +38,9 @@ def fasta_to_bed(fasta): if __name__ == "__main__": - parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("fasta", help="Input Fasta file.") - parser.add_argument("--version", action="version", version="%(prog)s 1.0") + parser.add_argument("--version", action="version", version="%(prog)s 1.1") args = parser.parse_args() fasta_to_bed(args.fasta) diff --git a/conf/base.config b/conf/base.config index aa29932..316a11d 100644 --- a/conf/base.config +++ b/conf/base.config @@ -6,7 +6,7 @@ process { - errorStrategy = { task.exitStatus in [130,143,137,104,134,139] ? 'retry' : 'finish' } + errorStrategy = { task.exitStatus in ((130..145) + 104) ? 'retry' : 'finish' } maxRetries = 2 maxErrors = '-1' @@ -16,10 +16,16 @@ process { memory = { check_max( 50.MB * task.attempt, 'memory' ) } time = { check_max( 30.min * task.attempt, 'time' ) } - // samtools dict takes more memory on larger genomes + // tabix needs pointers to the sequences in memory + withName: '.*:.*:PREPARE_REPEATS:TABIX_TABIX_.*' { + // 50 MB per 25,000 sequences + memory = { check_max( 50.MB * task.attempt * Math.ceil(meta.n_sequences / 25000), 'memory' ) } + } + + // samtools dict loads entire sequences in memory withName: 'SAMTOOLS_DICT' { - // 50 MB per 500 Mbp - memory = { check_max( 50.MB + 50.MB * task.attempt * Math.ceil(fasta.size() / 500000000), 'memory' ) } + // 50 MB per 50 Mbp + memory = { check_max( 50.MB + 50.MB * task.attempt * Math.ceil(meta.max_length / 50000000), 'memory' ) } } withName:CUSTOM_DUMPSOFTWAREVERSIONS { diff --git a/conf/modules.config b/conf/modules.config index 091bce5..c0bcbe4 100644 --- a/conf/modules.config +++ b/conf/modules.config @@ -15,7 +15,7 @@ process { withName: 'NCBI_DOWNLOAD' { maxForks = 3 publishDir = [ - path: { "${meta.species_dir}/assembly/release/${meta.assembly_name}/insdc" }, + path: { "${meta.outdir}/assembly" }, mode: 'copy', saveAs: { filename -> filename.endsWith('assembly_report.txt') || filename.endsWith('assembly_stats.txt') || filename.endsWith("ACCESSION") ? filename : null } ] @@ -23,7 +23,7 @@ process { withName: '.*:.*:PREPARE_UNMASKED_FASTA:.*' { publishDir = [ - path: { "${meta.species_dir}/assembly/release/${meta.assembly_name}/insdc" }, + path: { "${meta.outdir}/assembly" }, mode: 'copy', saveAs: { filename -> filename.equals('versions.yml') ? null : filename } ] @@ -31,7 +31,7 @@ process { withName: '.*:.*:(PREPARE_REPEAT_MASKED_FASTA:.*|PREPARE_REPEATS:TABIX_.*)' { publishDir = [ - path: { "${meta.species_dir}/analysis/${meta.assembly_name}/repeats/ncbi" }, + path: { "${meta.outdir}/repeats/ncbi" }, mode: 'copy', saveAs: { filename -> filename.equals('versions.yml') ? 
null : filename } ] } diff --git a/docs/output.md b/docs/output.md index d5ccd56..312f315 100644 --- a/docs/output.md +++ b/docs/output.md @@ -4,7 +4,9 @@ This document describes the output produced by the pipeline. -The directories listed below will be created in the results directory after the pipeline has finished. All paths are relative to the top-level results directory. +The directories listed below will be created in a directory based on the `--outdir` command-line parameter (and the `outdir` column of the samplesheet) after the pipeline has finished. +All paths are relative to the top-level results directory. The directories comply with Tree of Life's canonical directory structure. @@ -22,20 +24,17 @@ Here are the files you can expect in the `assembly/` sub-directory. ```text assembly -└── release - └── gfLaeSulp1.1 - └── insdc - ├── ACCESSION - ├── GCA_927399515.1.assembly_report.txt - ├── GCA_927399515.1.assembly_stats.txt - ├── GCA_927399515.1.fa.dict - ├── GCA_927399515.1.fa.gz - ├── GCA_927399515.1.fa.gz.fai - ├── GCA_927399515.1.fa.gz.gzi - └── GCA_927399515.1.fa.gz.sizes +├── ACCESSION +├── GCA_927399515.1.assembly_report.txt +├── GCA_927399515.1.assembly_stats.txt +├── GCA_927399515.1.fa.dict +├── GCA_927399515.1.fa.gz +├── GCA_927399515.1.fa.gz.fai +├── GCA_927399515.1.fa.gz.gzi +└── GCA_927399515.1.fa.gz.sizes ``` -The directory structure includes the assembly name, e.g. `gfLaeSulp1.1`, and all files are named after the assembly accession, e.g. `GCA_927399515.1`. +All files are named after the assembly accession, e.g. `GCA_927399515.1`. - `GCA_*.assembly_report.txt` and `GCA_*.assembly_stats.txt`: report and statistics files, straight from the NCBI FTP - `GCA_*.fa.gz`: Unmasked assembly in Fasta format, compressed with `bgzip` (whose index is `GCA_*.fa.gz.gzi`) @@ -47,35 +46,34 @@ with the exception of `ACCESSION`, which contains a single line of text: the ass ### Primary analysis files -Here are the files you can expect in the `analysis/` sub-directory. +Here are the files you can expect in the `repeats/` sub-directory. ```text -analysis -└── gfLaeSulp1.1 - └── repeats - └── ncbi - ├── GCA_927399515.1.masked.ncbi.bed.gz - ├── GCA_927399515.1.masked.ncbi.bed.gz.csi - ├── GCA_927399515.1.masked.ncbi.bed.gz.tbi - ├── GCA_927399515.1.masked.ncbi.fa.dict - ├── GCA_927399515.1.masked.ncbi.fa.gz - ├── GCA_927399515.1.masked.ncbi.fa.gz.fai - ├── GCA_927399515.1.masked.ncbi.fa.gz.gzi - └── GCA_927399515.1.masked.ncbi.fa.gz.sizes +repeats +└── ncbi + ├── GCA_927399515.1.masked.ncbi.bed.gz + ├── GCA_927399515.1.masked.ncbi.bed.gz.csi + ├── GCA_927399515.1.masked.ncbi.bed.gz.tbi + ├── GCA_927399515.1.masked.ncbi.fa.dict + ├── GCA_927399515.1.masked.ncbi.fa.gz + ├── GCA_927399515.1.masked.ncbi.fa.gz.fai + ├── GCA_927399515.1.masked.ncbi.fa.gz.gzi + └── GCA_927399515.1.masked.ncbi.fa.gz.sizes ``` They all correspond to the repeat-masking analysis run by the NCBI themselves. Like for the `assembly/` sub-directory, -the directory structure includes the assembly name, e.g. `gfLaeSulp1.1`, and all files are named after the assembly accession, e.g. `GCA_927399515.1`. +all files are named after the assembly accession, e.g. `GCA_927399515.1`.
- `GCA_*.masked.ncbi.fa.gz`: Masked assembly in Fasta format, compressed with `bgzip` (whose index is `GCA_*.fa.gz.gzi`) - `GCA_*.masked.ncbi.fa.gz.fai`: `samtools faidx` index, which allows accessing any region of the assembly in constant time - `GCA_*.masked.ncbi.fa.dict`: `samtools dict` index, which allows identifying a sequence by its MD5 checksum -- `GCA_*.masked.ncbi.bed.gz`: BED file with the coordinates of the regions masked by the NCBI pipeline, with accompanying `tabix` indices (`.csi` and `.tbi`) +- `GCA_*.masked.ncbi.bed.gz`: BED file with the coordinates of the regions masked by the NCBI pipeline, with accompanying `tabix` indices (`.csi`, and `.tbi` if the sequence lengths allow) ### Pipeline information -- `pipeline_info/` +- `pipeline_info/insdcdownload/` - Reports generated by Nextflow: `execution_report.html`, `execution_timeline.html`, `execution_trace.txt` and `pipeline_dag.dot`/`pipeline_dag.svg`. - Reports generated by the pipeline: `pipeline_report.html`, `pipeline_report.txt` and `software_versions.yml`. The `pipeline_report*` files will only be present if the `--email` / `--email_on_fail` parameters are used when running the pipeline. + - Reformatted samplesheet files used as input to the pipeline: `samplesheet.valid.csv`. [Nextflow](https://www.nextflow.io/docs/latest/tracing.html) provides excellent functionality for generating various reports relevant to the running and execution of the pipeline. This will allow you to troubleshoot errors with the running of the pipeline, and also provide you with other information such as launch commands, run times and resource usage. diff --git a/docs/usage.md b/docs/usage.md index 0217a00..f785a05 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -1,53 +1,93 @@ # sanger-tol/insdcdownload: Usage +## :warning: Please read this documentation on the sanger-tol pipelines website: [https://pipelines.tol.sanger.ac.uk/insdcdownload/usage](https://pipelines.tol.sanger.ac.uk/insdcdownload/usage) + +> _Documentation of pipeline parameters is generated automatically from the pipeline schema and can no longer be found in markdown files._ + ## Introduction The pipeline takes an assembly accession number, as well as the assembly name, and downloads it in a given directory. It also extracts the repeat-masking performed by the NCBI, and builds a set of common indices (such as `samtools faidx`). -## Running the pipeline +## One-off downloads -The typical command for running the pipeline is as follows: +The pipeline accepts command-line arguments to specify a single genome to download: + +- `--assembly_name`: The name of the assembly, +- `--assembly_accession`: The accession number of the assembly, +- `--outdir`: Where the pipeline runtime information will be stored, and where data will be downloaded (except if absolute paths are given in the samplesheet). ```console -nextflow run sanger-tol/insdcdownload --assembly_accession GCA_927399515.1 --assembly_name gfLaeSulp1.1 --outdir +nextflow run sanger-tol/insdcdownload --assembly_accession GCA_927399515.1 --assembly_name gfLaeSulp1.1 --outdir gfLaeSulp1.1_data ``` -This will launch the pipeline and download the `gfLaeSulp1.1` assembly (accession `GCA_927399515.1`) into the `/` directory, +This will launch the pipeline and download the `gfLaeSulp1.1` assembly (accession `GCA_927399515.1`) into the `gfLaeSulp1.1_data/` directory, which will be created if needed.
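+For routine use you would typically also pin a pipeline release with `-r` and select a container profile with `-profile` (both options are described further below). A sketch of such an invocation — the release number and profile are illustrative:
+
+```console
+nextflow run sanger-tol/insdcdownload -r 2.0.0 -profile singularity \
+    --assembly_accession GCA_927399515.1 \
+    --assembly_name gfLaeSulp1.1 \
+    --outdir gfLaeSulp1.1_data
+```
+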
+## Bulk download + +The pipeline can download multiple assemblies at once, by providing them in a `.csv` file through the `--input` parameter. +It has to be a comma-separated file with three columns, and a header row as shown in the examples below. + +```console +outdir,assembly_name,assembly_accession +darwin/data/fungi/Laetiporus_sulphureus,gfLaeSulp1.1,GCA_927399515.1 +darwin/data/mammals/Meles_meles,mMelMel3.2_paternal_haplotype,GCA_922984935.2 +``` + +| Column | Description | +| -------------------- | -------------------------------------------------------------------------------- | +| `outdir` | Base download directory for this species. Evaluated from `--outdir` if relative. | +| `assembly_name` | Name of the assembly, as on the NCBI website, e.g. `gfLaeSulp1.1`. | +| `assembly_accession` | Accession number of the assembly to download. Typically of the form `GCA_*.*`. | + +A samplesheet may contain: + +- multiple assemblies of the same species +- multiple assemblies in the same output directory +- but only one row per assembly + +Each samplesheet column corresponds exactly to the command-line parameter of the same name, +except `outdir` which overrides or complements `--outdir`. + +An [example samplesheet](../assets/samplesheet.csv) has been provided with the pipeline. + +```bash +nextflow run sanger-tol/insdcdownload -profile singularity --input /path/to/samplesheet.csv --outdir /path/to/results +``` + +## Nextflow outputs + Note that the pipeline will create the following files in your working directory: ```bash work # Directory containing the nextflow working files # Finished results in specified location (defined with --outdir) .nextflow_log # Log file from Nextflow +.nextflow # Directory where Nextflow keeps track of jobs # Other nextflow hidden files, eg. history of pipeline runs and old logs. ``` -## Bulk download +If you wish to repeatedly use the same parameters for multiple runs, rather than specifying each flag in the command, you can specify these in a params file. -The pipeline can download multiple assemblies at once, by providing them in a `.csv` file through the `--input` parameter. -It has to be a comma-separated file with 2 columns, and a header row as shown in the examples below. +Pipeline settings can be provided in a `yaml` or `json` file via `-params-file `. -```console -nextflow run sanger-tol/insdcdownload --input '[path to samplesheet file]' --outdir +> ⚠️ Do not use `-c ` to specify parameters as this will result in errors. Custom config files specified with `-c` must only be used for [tuning process resource specifications](https://nf-co.re/docs/usage/configuration#tuning-workflow-resources), other infrastructural tweaks (such as output directories), or module arguments (args). +The above pipeline run specified with a params file in yaml format: + +```bash +nextflow run sanger-tol/insdcdownload -profile docker -params-file params.yaml ``` -The values in the file must correspond to the values you would add to the `--assembly_accession` and `--assembly_name` parameters. +with `params.yaml` containing: -```console -assembly_accession,assembly_name -GCA_927399515.1,gfLaeSulp1.1 -GCA_922984935.2,mMelMel3.2_paternal_haplotype +```yaml +assembly_name: "gfLaeSulp1.1" +assembly_accession: "GCA_927399515.1" +outdir: "./results/" ``` -| Column | Description | -| -------------------- | ------------------------------------------------------------------------------ | -| `assembly_accession` | Accession number of the assembly to download. Typically of the form `GCA_*.*`.
-| `assembly_name`      | Name of the assembly, as on the NCBI website, e.g. `gfLaeSulp1.1`. |
-
-An [example samplesheet](../assets/samplesheet.csv) has been provided with the pipeline.
+You can also generate such `YAML`/`JSON` files via [nf-core/launch](https://nf-co.re/launch).

 ### Updating the pipeline

@@ -61,9 +101,13 @@ nextflow pull sanger-tol/insdcdownload

 It is a good idea to specify a pipeline version when running the pipeline on your data. This ensures that a specific version of the pipeline code and software are used when you run your pipeline. If you keep using the same tag, you'll be running the same version of the pipeline, even if there have been changes to the code since.

-First, go to the [sanger-tol/insdcdownload releases page](https://github.com/sanger-tol/insdcdownload/releases) and find the latest version number - numeric only (eg. `1.3.1`). Then specify this when running the pipeline with `-r` (one hyphen) - eg. `-r 1.3.1`.
+First, go to the [sanger-tol/insdcdownload releases page](https://github.com/sanger-tol/insdcdownload/releases) and find the latest pipeline version - numeric only (eg. `1.3.1`). Then specify this when running the pipeline with `-r` (one hyphen) - eg. `-r 1.3.1`. Of course, you can switch to another version by changing the number after the `-r` flag.

-This version number will be logged in reports when you run the pipeline, so that you'll know what you used when you look back in the future.
+This version number will be logged in reports when you run the pipeline, so that you'll know what you used when you look back in the future. For example, at the bottom of the MultiQC reports.
+
+To further assist in reproducibility, you can share and re-use [parameter files](#running-the-pipeline) to repeat pipeline runs with the same settings without having to write out a command with every single parameter.
+
+> 💡 If you wish to share such files (e.g. as supplementary material for academic publications), make sure to NOT include cluster-specific paths to files, nor institution-specific profiles.

 ## Core Nextflow arguments

@@ -73,7 +117,7 @@ This version number will be logged in reports when you run the pipeline, so that

 ### `-profile`

 Use this parameter to choose a configuration profile. Profiles can give configuration presets for different compute environments.

-Several generic profiles are bundled with the pipeline which instruct the pipeline to use software packaged using different methods (Docker, Singularity, Podman, Shifter, Charliecloud, Conda) - see below. When using Biocontainers, most of these software packaging methods pull Docker containers from quay.io except for Singularity which directly downloads Singularity images via https hosted by the [Galaxy project](https://depot.galaxyproject.org/singularity/) and Conda which downloads and installs software locally from [Bioconda](https://bioconda.github.io/).
+Several generic profiles are bundled with the pipeline which instruct the pipeline to use software packaged using different methods (Docker, Singularity, Podman, Shifter, Charliecloud, Apptainer, Conda) - see below.

 > We highly recommend the use of Docker or Singularity containers for full pipeline reproducibility, however when this is not possible, Conda is also supported.

@@ -82,7 +126,7 @@ The pipeline also dynamically loads configurations from [https://github.com/nf-c

 Note that multiple profiles can be loaded, for example: `-profile test,docker` - the order of arguments is important!
 They are loaded in sequence, so later profiles can overwrite earlier profiles.
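For instance, a sketch combining the bundled `test` profile (which, as described below, already defines the assembly to download) with Docker; the output directory name is an illustrative choice:

```console
nextflow run sanger-tol/insdcdownload -profile test,docker --outdir test_results
```

Because `docker` is listed after `test`, its container settings take precedence over anything the `test` profile may have set.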
-If `-profile` is not specified, the pipeline will run locally and expect all software to be installed and available on the `PATH`. This is _not_ recommended.
+If `-profile` is not specified, the pipeline will run locally and expect all software to be installed and available on the `PATH`. This is _not_ recommended, since it can lead to different results on different machines dependent on the computer environment.

- `docker`
  - A generic configuration profile to be used with [Docker](https://docker.com/)
@@ -94,8 +138,10 @@ If `-profile` is not specified, the pipeline will run locally and expect all sof
  - A generic configuration profile to be used with [Shifter](https://nersc.gitlab.io/development/shifter/how-to-use/)
- `charliecloud`
  - A generic configuration profile to be used with [Charliecloud](https://hpc.github.io/charliecloud/)
+- `apptainer`
+  - A generic configuration profile to be used with [Apptainer](https://apptainer.org/)
- `conda`
-  - A generic configuration profile to be used with [Conda](https://conda.io/docs/). Please only use Conda as a last resort i.e. when it's not possible to run the pipeline with Docker, Singularity, Podman, Shifter or Charliecloud.
+  - A generic configuration profile to be used with [Conda](https://conda.io/docs/). Please only use Conda as a last resort i.e. when it's not possible to run the pipeline with Docker, Singularity, Podman, Shifter, Charliecloud, or Apptainer.
- `test`
  - A profile with a minimal configuration for automated testing
  - Corresponds to defining the assembly to download as command-line parameters so needs no other parameters

@@ -119,96 +165,19 @@ Specify the path to a specific config file (this is a core Nextflow command). Se

 Whilst the default requirements set within the pipeline will hopefully work for most people and with most input data, you may find that you want to customise the compute resources that the pipeline requests. Each step in the pipeline has a default set of requirements for number of CPUs, memory and time. For most of the steps in the pipeline, if the job exits with any of the error codes specified [here](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/conf/base.config#L18) it will automatically be resubmitted with higher requests (2 x original, then 3 x original). If it still fails after the third attempt then the pipeline execution is stopped.

+To change the resource requests, please see the [max resources](https://nf-co.re/docs/usage/configuration#max-resources) and [tuning workflow resources](https://nf-co.re/docs/usage/configuration#tuning-workflow-resources) section of the nf-core website.
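As a sketch of what such a custom config could look like for this pipeline (the `NCBI_DOWNLOAD` process name is real, but the values below are illustrative placeholders rather than recommendations), saved as `custom.config` and passed with `-c custom.config`:

```nextflow
// custom.config - illustrative resource overrides for one process
process {
    withName: 'NCBI_DOWNLOAD' {
        memory = 4.GB   // placeholder value
        time   = 12.h   // placeholder value
    }
}
```

Module arguments (`ext.args`) can be overridden through the same `withName` selector.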
-For example, if the nf-core/rnaseq pipeline is failing after multiple re-submissions of the `STAR_ALIGN` process due to an exit code of `137` this would indicate that there is an out of memory issue:
-
-```console
-[62/149eb0] NOTE: Process `NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN (WT_REP1)` terminated with an error exit status (137) -- Execution is retried (1)
-Error executing process > 'NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN (WT_REP1)'
-
-Caused by:
-    Process `NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN (WT_REP1)` terminated with an error exit status (137)
-
-Command executed:
-    STAR \
-        --genomeDir star \
-        --readFilesIn WT_REP1_trimmed.fq.gz \
-        --runThreadN 2 \
-        --outFileNamePrefix WT_REP1. \
-        <other_args>
-
-Command exit status:
-    137
-
-Command output:
-    (empty)
-
-Command error:
-    .command.sh: line 9:  30 Killed    STAR --genomeDir star --readFilesIn WT_REP1_trimmed.fq.gz --runThreadN 2 --outFileNamePrefix WT_REP1.
-Work dir:
-    /home/pipelinetest/work/9d/172ca5881234073e8d76f2a19c88fb
-
-Tip: you can replicate the issue by changing to the process work dir and entering the command `bash .command.run`
-```
-
-To bypass this error you would need to find exactly which resources are set by the `STAR_ALIGN` process. The quickest way is to search for `process STAR_ALIGN` in the [nf-core/rnaseq Github repo](https://github.com/nf-core/rnaseq/search?q=process+STAR_ALIGN).
-We have standardised the structure of Nextflow DSL2 pipelines such that all module files will be present in the `modules/` directory and so, based on the search results, the file we want is `modules/nf-core/software/star/align/main.nf`.
-If you click on the link to that file you will notice that there is a `label` directive at the top of the module that is set to [`label process_high`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/modules/nf-core/software/star/align/main.nf#L9).
-The [Nextflow `label`](https://www.nextflow.io/docs/latest/process.html#label) directive allows us to organise workflow processes in separate groups which can be referenced in a configuration file to select and configure subset of processes having similar computing requirements.
-The default values for the `process_high` label are set in the pipeline's [`base.config`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/conf/base.config#L33-L37) which in this case is defined as 72GB.
-Providing you haven't set any other standard nf-core parameters to **cap** the [maximum resources](https://nf-co.re/usage/configuration#max-resources) used by the pipeline then we can try and bypass the `STAR_ALIGN` process failure by creating a custom config file that sets at least 72GB of memory, in this case increased to 100GB.
-The custom config below can then be provided to the pipeline via the [`-c`](#-c) parameter as highlighted in previous sections.
-
-```nextflow
-process {
-    withName: 'NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN' {
-        memory = 100.GB
-    }
-}
-```
-
-> **NB:** We specify the full process name i.e. `NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN` in the config file because this takes priority over the short name (`STAR_ALIGN`) and allows existing configuration using the full process name to be correctly overridden.
->
-> If you get a warning suggesting that the process selector isn't recognised check that the process name has been specified correctly.
-
-### Updating containers
-
-The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. If for some reason you need to use a different version of a particular tool with the pipeline then you just need to identify the `process` name and override the Nextflow `container` definition for that process using the `withName` declaration. For example, in the [nf-core/viralrecon](https://nf-co.re/viralrecon) pipeline a tool called [Pangolin](https://github.com/cov-lineages/pangolin) has been used during the COVID-19 pandemic to assign lineages to SARS-CoV-2 genome sequenced samples. Given that the lineage assignments change quite frequently it doesn't make sense to re-release the nf-core/viralrecon everytime a new version of Pangolin has been released. However, you can override the default container used by the pipeline by creating a custom config file and passing it as a command-line argument via `-c custom.config`.
+### Custom Containers

-1. Check the default version used by the pipeline in the module file for [Pangolin](https://github.com/nf-core/viralrecon/blob/a85d5969f9025409e3618d6c280ef15ce417df65/modules/nf-core/software/pangolin/main.nf#L14-L19)
-2. Find the latest version of the Biocontainer available on [Quay.io](https://quay.io/repository/biocontainers/pangolin?tag=latest&tab=tags)
-3. Create the custom config accordingly:
+In some cases you may wish to change which container or conda environment a step of the pipeline uses for a particular tool. By default nf-core pipelines use containers and software from the [biocontainers](https://biocontainers.pro/) or [bioconda](https://bioconda.github.io/) projects. However in some cases the pipeline specified version may be out of date.

-    - For Docker:
+To use a different container from the default container or conda environment specified in a pipeline, please see the [updating tool versions](https://nf-co.re/docs/usage/configuration#updating-tool-versions) section of the nf-core website.

-      ```nextflow
-      process {
-          withName: PANGOLIN {
-              container = 'quay.io/biocontainers/pangolin:3.0.5--pyhdfd78af_0'
-          }
-      }
-      ```
+### Custom Tool Arguments

-    - For Singularity:
+A pipeline might not always support every possible argument or option of a particular tool used in the pipeline. Fortunately, nf-core pipelines provide some freedom to users to insert additional parameters that the pipeline does not include by default.

-      ```nextflow
-      process {
-          withName: PANGOLIN {
-              container = 'https://depot.galaxyproject.org/singularity/pangolin:3.0.5--pyhdfd78af_0'
-          }
-      }
-      ```
-
-    - For Conda:
-
-      ```nextflow
-      process {
-          withName: PANGOLIN {
-              conda = 'bioconda::pangolin=3.0.5'
-          }
-      }
-      ```
-
-> **NB:** If you wish to periodically update individual tool-specific results (e.g. Pangolin) generated by the pipeline then you must ensure to keep the `work/` directory otherwise the `-resume` ability of the pipeline will be compromised and it will restart from scratch.
+To learn how to provide additional arguments to a particular tool of the pipeline, please see the [customising tool arguments](https://nf-co.re/docs/usage/configuration#customising-tool-arguments) section of the nf-core website.

 ### nf-core/configs

@@ -218,6 +187,14 @@ See the main [Nextflow documentation](https://www.nextflow.io/docs/latest/config

 If you have any questions or issues please send us a message on [Slack](https://nf-co.re/join/slack) on the [`#configs` channel](https://nfcore.slack.com/channels/configs).

+## Azure Resource Requests
+
+To be used with the `azurebatch` profile by specifying the `-profile azurebatch`.
+We recommend providing a compute VM type via `params.vm_type` (`Standard_D16_v3` by default), but this option can be changed if required.
+
+Note that the choice of VM size depends on your quota and the overall workload during the analysis.
+For a thorough list, please refer to the [Azure Sizes for virtual machines in Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes).
+
 ## Running in the background

 Nextflow handles job submissions and supervises the running jobs. The Nextflow process must run until the pipeline is finished.
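A minimal sketch of two standard ways of doing so (these rely on core Nextflow and shell facilities rather than anything specific to this pipeline; the params file is assumed to exist):

```bash
# Nextflow's built-in background flag
nextflow run sanger-tol/insdcdownload -profile singularity -params-file params.yaml -bg

# or detach with nohup so the run survives closing the terminal
nohup nextflow run sanger-tol/insdcdownload -profile singularity -params-file params.yaml > insdcdownload.log 2>&1 &
```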
diff --git a/lib/NfcoreSchema.groovy b/lib/NfcoreSchema.groovy index b3d092f..9b34804 100755 --- a/lib/NfcoreSchema.groovy +++ b/lib/NfcoreSchema.groovy @@ -2,6 +2,7 @@ // This file holds several functions used to perform JSON parameter validation, help and summary rendering for the nf-core pipeline template. // +import nextflow.Nextflow import org.everit.json.schema.Schema import org.everit.json.schema.loader.SchemaLoader import org.everit.json.schema.ValidationException @@ -46,7 +47,6 @@ class NfcoreSchema { 'quiet', 'syslog', 'v', - 'version', // Options for `nextflow run` command 'ansi', @@ -84,6 +84,7 @@ class NfcoreSchema { 'stub-run', 'test', 'w', + 'with-apptainer', 'with-charliecloud', 'with-conda', 'with-dag', @@ -178,7 +179,7 @@ class NfcoreSchema { } if (has_error) { - System.exit(1) + Nextflow.error('Exiting!') } } diff --git a/lib/NfcoreTemplate.groovy b/lib/NfcoreTemplate.groovy index 78966e9..85785ef 100755 --- a/lib/NfcoreTemplate.groovy +++ b/lib/NfcoreTemplate.groovy @@ -32,6 +32,25 @@ class NfcoreTemplate { } } + // + // Generate version string + // + public static String version(workflow) { + String version_string = "" + + if (workflow.manifest.version) { + def prefix_v = workflow.manifest.version[0] != 'v' ? 'v' : '' + version_string += "${prefix_v}${workflow.manifest.version}" + } + + if (workflow.commitId) { + def git_shortsha = workflow.commitId.substring(0, 7) + version_string += "-g${git_shortsha}" + } + + return version_string + } + // // Construct and send completion email // @@ -61,7 +80,7 @@ class NfcoreTemplate { misc_fields['Nextflow Compile Timestamp'] = workflow.nextflow.timestamp def email_fields = [:] - email_fields['version'] = workflow.manifest.version + email_fields['version'] = NfcoreTemplate.version(workflow) email_fields['runName'] = workflow.runName email_fields['success'] = workflow.success email_fields['dateComplete'] = workflow.complete @@ -113,7 +132,7 @@ class NfcoreTemplate { } // Write summary e-mail HTML to a file - def output_d = new File("${params.outdir}/pipeline_info/") + def output_d = new File("${params.tracedir}/") if (!output_d.exists()) { output_d.mkdirs() } @@ -123,6 +142,64 @@ class NfcoreTemplate { output_tf.withWriter { w -> w << email_txt } } + // + // Construct and send a notification to a web server as JSON + // e.g. 
Microsoft Teams and Slack + // + public static void IM_notification(workflow, params, summary_params, projectDir, log) { + def hook_url = params.hook_url + + def summary = [:] + for (group in summary_params.keySet()) { + summary << summary_params[group] + } + + def misc_fields = [:] + misc_fields['start'] = workflow.start + misc_fields['complete'] = workflow.complete + misc_fields['scriptfile'] = workflow.scriptFile + misc_fields['scriptid'] = workflow.scriptId + if (workflow.repository) misc_fields['repository'] = workflow.repository + if (workflow.commitId) misc_fields['commitid'] = workflow.commitId + if (workflow.revision) misc_fields['revision'] = workflow.revision + misc_fields['nxf_version'] = workflow.nextflow.version + misc_fields['nxf_build'] = workflow.nextflow.build + misc_fields['nxf_timestamp'] = workflow.nextflow.timestamp + + def msg_fields = [:] + msg_fields['version'] = NfcoreTemplate.version(workflow) + msg_fields['runName'] = workflow.runName + msg_fields['success'] = workflow.success + msg_fields['dateComplete'] = workflow.complete + msg_fields['duration'] = workflow.duration + msg_fields['exitStatus'] = workflow.exitStatus + msg_fields['errorMessage'] = (workflow.errorMessage ?: 'None') + msg_fields['errorReport'] = (workflow.errorReport ?: 'None') + msg_fields['commandLine'] = workflow.commandLine.replaceFirst(/ +--hook_url +[^ ]+/, "") + msg_fields['projectDir'] = workflow.projectDir + msg_fields['summary'] = summary << misc_fields + + // Render the JSON template + def engine = new groovy.text.GStringTemplateEngine() + // Different JSON depending on the service provider + // Defaults to "Adaptive Cards" (https://adaptivecards.io), except Slack which has its own format + def json_path = hook_url.contains("hooks.slack.com") ? "slackreport.json" : "adaptivecard.json" + def hf = new File("$projectDir/assets/${json_path}") + def json_template = engine.createTemplate(hf).make(msg_fields) + def json_message = json_template.toString() + + // POST + def post = new URL(hook_url).openConnection(); + post.setRequestMethod("POST") + post.setDoOutput(true) + post.setRequestProperty("Content-Type", "application/json") + post.getOutputStream().write(json_message.getBytes("UTF-8")); + def postRC = post.getResponseCode(); + if (! 
postRC.equals(200)) { + log.warn(post.getErrorStream().getText()); + } + } + // // Print pipeline summary on completion // @@ -132,7 +209,7 @@ class NfcoreTemplate { if (workflow.stats.ignoredCount == 0) { log.info "-${colors.purple}[$workflow.manifest.name]${colors.green} Pipeline completed successfully${colors.reset}-" } else { - log.info "-${colors.purple}[$workflow.manifest.name]${colors.red} Pipeline completed successfully, but with errored process(es) ${colors.reset}-" + log.info "-${colors.purple}[$workflow.manifest.name]${colors.yellow} Pipeline completed successfully, but with errored process(es) ${colors.reset}-" } } else { log.info "-${colors.purple}[$workflow.manifest.name]${colors.red} Pipeline completed with errors${colors.reset}-" @@ -220,18 +297,19 @@ class NfcoreTemplate { // public static String logo(workflow, monochrome_logs) { Map colors = logColours(monochrome_logs) + String workflow_version = NfcoreTemplate.version(workflow) String.format( """\n ${dashedLine(monochrome_logs)} - ${colors.blue} _____ ${colors.green} _______ ${colors.red} _${colors.reset} - ${colors.blue} / ____| ${colors.green}|__ __| ${colors.red}| |${colors.reset} - ${colors.blue} | (___ __ _ _ __ __ _ ___ _ __ ${colors.reset}______${colors.green}| |${colors.yellow} ___ ${colors.red}| |${colors.reset} - ${colors.blue} \\___ \\ / _` | '_ \\ / _` |/ _ \\ '__|${colors.reset}______${colors.green}| |${colors.yellow}/ _ \\${colors.red}| |${colors.reset} - ${colors.blue} ____) | (_| | | | | (_| | __/ | ${colors.green}| |${colors.yellow} (_) ${colors.red}| |____${colors.reset} - ${colors.blue} |_____/ \\__,_|_| |_|\\__, |\\___|_| ${colors.green}|_|${colors.yellow}\\___/${colors.red}}|______|${colors.reset} + ${colors.blue} _____ ${colors.green} _______ ${colors.red} _${colors.reset} + ${colors.blue} / ____| ${colors.green}|__ __| ${colors.red}| |${colors.reset} + ${colors.blue} | (___ __ _ _ __ __ _ ___ _ __ ${colors.reset} ___ ${colors.green}| |${colors.yellow} ___ ${colors.red}| |${colors.reset} + ${colors.blue} \\___ \\ / _` | '_ \\ / _` |/ _ \\ '__|${colors.reset}|___|${colors.green}| |${colors.yellow}/ _ \\${colors.red}| |${colors.reset} + ${colors.blue} ____) | (_| | | | | (_| | __/ | ${colors.green}| |${colors.yellow} (_) ${colors.red}| |____${colors.reset} + ${colors.blue} |_____/ \\__,_|_| |_|\\__, |\\___|_| ${colors.green}|_|${colors.yellow}\\___/${colors.red}|______|${colors.reset} ${colors.blue} __/ |${colors.reset} ${colors.blue} |___/${colors.reset} - ${colors.purple} ${workflow.manifest.name} v${workflow.manifest.version}${colors.reset} + ${colors.purple} ${workflow.manifest.name} ${workflow_version}${colors.reset} ${dashedLine(monochrome_logs)} """.stripIndent() ) diff --git a/lib/Utils.groovy b/lib/Utils.groovy old mode 100755 new mode 100644 index 28567bd..8d030f4 --- a/lib/Utils.groovy +++ b/lib/Utils.groovy @@ -21,19 +21,26 @@ class Utils { } // Check that all channels are present - def required_channels = ['conda-forge', 'bioconda', 'defaults'] - def conda_check_failed = !required_channels.every { ch -> ch in channels } + // This channel list is ordered by required channel priority. 
+ def required_channels_in_order = ['conda-forge', 'bioconda', 'defaults'] + def channels_missing = ((required_channels_in_order as Set) - (channels as Set)) as Boolean // Check that they are in the right order - conda_check_failed |= !(channels.indexOf('conda-forge') < channels.indexOf('bioconda')) - conda_check_failed |= !(channels.indexOf('bioconda') < channels.indexOf('defaults')) + def channel_priority_violation = false + def n = required_channels_in_order.size() + for (int i = 0; i < n - 1; i++) { + channel_priority_violation |= !(channels.indexOf(required_channels_in_order[i]) < channels.indexOf(required_channels_in_order[i+1])) + } - if (conda_check_failed) { + if (channels_missing | channel_priority_violation) { log.warn "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n" + " There is a problem with your Conda configuration!\n\n" + " You will need to set-up the conda-forge and bioconda channels correctly.\n" + - " Please refer to https://bioconda.github.io/user/install.html#set-up-channels\n" + - " NB: The order of the channels matters!\n" + + " Please refer to https://bioconda.github.io/\n" + + " The observed channel order is \n" + + " ${channels}\n" + + " but the following channel order is required:\n" + + " ${required_channels_in_order}\n" + "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" } } diff --git a/lib/WorkflowInsdcdownload.groovy b/lib/WorkflowInsdcdownload.groovy index 81b679d..d536dde 100755 --- a/lib/WorkflowInsdcdownload.groovy +++ b/lib/WorkflowInsdcdownload.groovy @@ -2,6 +2,8 @@ // This file holds several functions specific to the workflow/insdcdownload.nf in the sanger-tol/insdcdownload pipeline // +import nextflow.Nextflow + class WorkflowInsdcdownload { // @@ -13,15 +15,16 @@ class WorkflowInsdcdownload { if (params.input) { def f = new File(params.input); if (!f.exists()) { - log.error "'${params.input}' doesn't exist" - System.exit(1) + Nextflow.error "'${params.input}' doesn't exist" } } else { - if (!params.assembly_accession || !params.assembly_name || !params.outdir) { - log.error "Either --input, or --assembly_accession, --assembly_name, and --outdir must be provided" - System.exit(1) + if (!params.assembly_accession || !params.assembly_name) { + Nextflow.error "Either --input, or --assembly_accession and --assembly_name must be provided" } } + if (!params.outdir) { + Nextflow.error "--outdir is mandatory" + } } } diff --git a/lib/WorkflowMain.groovy b/lib/WorkflowMain.groovy index 71fac17..a572108 100755 --- a/lib/WorkflowMain.groovy +++ b/lib/WorkflowMain.groovy @@ -2,6 +2,8 @@ // This file holds several functions specific to the main.nf workflow in the sanger-tol/insdcdownload pipeline // +import nextflow.Nextflow + class WorkflowMain { // @@ -10,7 +12,7 @@ class WorkflowMain { public static String citation(workflow) { return "If you use ${workflow.manifest.name} for your analysis please cite:\n\n" + "* The pipeline\n" + - " https://doi.org/10.5281/zenodo.7155119\n\n" + + " https://doi.org/10.5281/zenodo.6983932\n\n" + "* The nf-core framework\n" + " https://doi.org/10.1038/s41587-020-0439-x\n\n" + "* Software dependencies\n" + @@ -18,9 +20,9 @@ class WorkflowMain { } // - // Print help to screen if required + // Generate help string // - public static String help(workflow, params, log) { + public static String help(workflow, params) { def command = "nextflow run ${workflow.manifest.name} --input samplesheet.csv --fasta reference.fa -profile docker" def help_string = '' help_string 
+= NfcoreTemplate.logo(workflow, params.monochrome_logs) @@ -31,9 +33,9 @@ class WorkflowMain { } // - // Print parameter summary log to screen + // Generate parameter summary log string // - public static String paramsSummaryLog(workflow, params, log) { + public static String paramsSummaryLog(workflow, params) { def summary_log = '' summary_log += NfcoreTemplate.logo(workflow, params.monochrome_logs) summary_log += NfcoreSchema.paramsSummaryLog(workflow, params) @@ -48,24 +50,30 @@ class WorkflowMain { public static void initialise(workflow, params, log) { // Print help to screen if required if (params.help) { - log.info help(workflow, params, log) + log.info help(workflow, params) System.exit(0) } - // Validate workflow parameters via the JSON schema - if (params.validate_params) { - NfcoreSchema.validateParameters(workflow, params, log) + // Print workflow version and exit on --version + if (params.version) { + String workflow_version = NfcoreTemplate.version(workflow) + log.info "${workflow.manifest.name} ${workflow_version}" + System.exit(0) } // Print parameter summary log to screen + log.info paramsSummaryLog(workflow, params) - log.info paramsSummaryLog(workflow, params, log) + // Validate workflow parameters via the JSON schema + if (params.validate_params) { + NfcoreSchema.validateParameters(workflow, params, log) + } // Check that a -profile or Nextflow config has been provided to run the pipeline NfcoreTemplate.checkConfigProvided(workflow, log) // Check that conda channels are set-up correctly - if (params.enable_conda) { + if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) { Utils.checkCondaChannels(log) } diff --git a/modules.json b/modules.json index 4b6e41a..b3f059d 100644 --- a/modules.json +++ b/modules.json @@ -2,32 +2,39 @@ "name": "sanger-tol/insdcdownload", "homePage": "https://github.com/sanger-tol/insdcdownload", "repos": { - "nf-core/modules": { - "git_url": "https://github.com/nf-core/modules.git", + "https://github.com/nf-core/modules.git": { "modules": { - "custom/dumpsoftwareversions": { - "branch": "master", - "git_sha": "82501fe6d0d12614db67751d30af98d16e63dc59" - }, - "custom/getchromsizes": { - "branch": "master", - "git_sha": "d75b37fef175f241230ee25c485bd574c768e282" - }, - "samtools/dict": { - "branch": "master", - "git_sha": "31409f5e727ec932f0b3a399c7a3847d70b21374" - }, - "samtools/faidx": { - "branch": "master", - "git_sha": "3eb99152cedbb7280258858e5df08478a4670696" - }, - "tabix/bgzip": { - "branch": "master", - "git_sha": "31c0b49f6527ef196e89eca49a36af2de71711f8" - }, - "tabix/tabix": { - "branch": "master", - "git_sha": "5e7b1ef9a5a2d9258635bcbf70fcf37dacd1b247" + "nf-core": { + "custom/dumpsoftwareversions": { + "branch": "master", + "git_sha": "911696ea0b62df80e900ef244d7867d177971f73", + "installed_by": ["modules"] + }, + "custom/getchromsizes": { + "branch": "master", + "git_sha": "911696ea0b62df80e900ef244d7867d177971f73", + "installed_by": ["modules"] + }, + "samtools/dict": { + "branch": "master", + "git_sha": "911696ea0b62df80e900ef244d7867d177971f73", + "installed_by": ["modules"] + }, + "samtools/faidx": { + "branch": "master", + "git_sha": "fd742419940e01ba1c5ecb172c3e32ec840662fe", + "installed_by": ["modules"] + }, + "tabix/bgzip": { + "branch": "master", + "git_sha": "911696ea0b62df80e900ef244d7867d177971f73", + "installed_by": ["modules"] + }, + "tabix/tabix": { + "branch": "master", + "git_sha": "911696ea0b62df80e900ef244d7867d177971f73", + "installed_by": ["modules"] + } } } } diff --git 
a/modules/local/ncbi_download.nf b/modules/local/ncbi_download.nf index 829e3bc..ae63e52 100644 --- a/modules/local/ncbi_download.nf +++ b/modules/local/ncbi_download.nf @@ -6,13 +6,13 @@ process NCBI_DOWNLOAD { tag "$assembly_accession" label 'process_single' - conda (params.enable_conda ? "bioconda::wget=1.18" : null) + conda "bioconda::gnu-wget=1.18" container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 'https://depot.galaxyproject.org/singularity/gnu-wget:1.18--h7132678_6' : - 'quay.io/biocontainers/gnu-wget:1.18--h7132678_6' }" + 'biocontainers/gnu-wget:1.18--h7132678_6' }" input: - tuple val(assembly_accession), val(assembly_name), val(species_dir) + tuple val(assembly_accession), val(assembly_name), val(outdir) output: tuple val(meta), path(filename_fasta) , emit: fasta @@ -36,7 +36,7 @@ process NCBI_DOWNLOAD { meta = [ id : assembly_accession, assembly_name : assembly_name, - species_dir : species_dir, + outdir : outdir, ] def prefix = task.ext.prefix ?: "${meta.id}" filename_assembly_report = "${prefix}.assembly_report.txt" @@ -45,14 +45,13 @@ process NCBI_DOWNLOAD { filename_accession = "ACCESSION" """ - #export https_proxy=http://wwwcache.sanger.ac.uk:3128 - #export http_proxy=http://wwwcache.sanger.ac.uk:3128 wget ${ftp_path}/${remote_filename_stem}_assembly_report.txt wget ${ftp_path}/${remote_filename_stem}_assembly_stats.txt wget ${ftp_path}/${remote_filename_stem}_genomic.fna.gz wget ${ftp_path}/md5checksums.txt - grep "\\(_assembly_report\\.txt\$\\|_assembly_stats\\.txt\$\\|_genomic\\.fna\\.gz\$\\)" md5checksums.txt > md5checksums_restricted.txt + grep "\\(_assembly_report\\.txt\$\\|_assembly_stats\\.txt\$\\|_genomic\\.fna\\.gz\$\\)" md5checksums.txt \ + | grep -v "\\(_from_genomic\\.fna\\.gz\$\\)"> md5checksums_restricted.txt md5sum -c md5checksums_restricted.txt mv ${remote_filename_stem}_assembly_report.txt ${filename_assembly_report} mv ${remote_filename_stem}_assembly_stats.txt ${filename_assembly_stats} diff --git a/modules/local/remove_masking.nf b/modules/local/remove_masking.nf index b5d2cf3..7a865f7 100644 --- a/modules/local/remove_masking.nf +++ b/modules/local/remove_masking.nf @@ -3,10 +3,10 @@ process REMOVE_MASKING { tag "$genome" label 'process_single' - conda (params.enable_conda ? "conda-forge::gawk=5.1.0" : null) + conda "conda-forge::gawk=5.1.0" container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 'https://depot.galaxyproject.org/singularity/gawk:5.1.0' : - 'quay.io/biocontainers/gawk:5.1.0' }" + 'biocontainers/gawk:5.1.0' }" input: tuple val(meta), path(genome) diff --git a/modules/sanger-tol/nf-core-pipeline/repeats_bed.nf b/modules/local/repeats_bed.nf similarity index 87% rename from modules/sanger-tol/nf-core-pipeline/repeats_bed.nf rename to modules/local/repeats_bed.nf index 8f66714..4a7c166 100644 --- a/modules/sanger-tol/nf-core-pipeline/repeats_bed.nf +++ b/modules/local/repeats_bed.nf @@ -4,10 +4,10 @@ process REPEATS_BED { tag "$genome" label 'process_single' - conda (params.enable_conda ? "conda-forge::python=3.9.1" : null) + conda "conda-forge::python=3.9.1" container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
'https://depot.galaxyproject.org/singularity/python:3.9--1' :
-        'quay.io/biocontainers/python:3.9--1' }"
+        'biocontainers/python:3.9--1' }"

     input:
     tuple val(meta), path(genome)

diff --git a/modules/local/samplesheet_check.nf b/modules/local/samplesheet_check.nf
index 6fc7f27..0308af3 100644
--- a/modules/local/samplesheet_check.nf
+++ b/modules/local/samplesheet_check.nf
@@ -3,10 +3,10 @@ process SAMPLESHEET_CHECK {
     tag "$samplesheet"
     label 'process_single'

-    conda (params.enable_conda ? "conda-forge::python=3.8.3" : null)
+    conda "conda-forge::python=3.8.3"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/python:3.8.3' :
-        'quay.io/biocontainers/python:3.8.3' }"
+        'biocontainers/python:3.8.3' }"

     input:
     path samplesheet

diff --git a/modules/nf-core/modules/custom/dumpsoftwareversions/main.nf b/modules/nf-core/custom/dumpsoftwareversions/main.nf
similarity index 82%
rename from modules/nf-core/modules/custom/dumpsoftwareversions/main.nf
rename to modules/nf-core/custom/dumpsoftwareversions/main.nf
index cebb6e0..ebc8727 100644
--- a/modules/nf-core/modules/custom/dumpsoftwareversions/main.nf
+++ b/modules/nf-core/custom/dumpsoftwareversions/main.nf
@@ -2,10 +2,10 @@ process CUSTOM_DUMPSOFTWAREVERSIONS {
     label 'process_single'

     // Requires `pyyaml` which does not have a dedicated container but is in the MultiQC container
-    conda (params.enable_conda ? 'bioconda::multiqc=1.13' : null)
+    conda "bioconda::multiqc=1.14"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/multiqc:1.13--pyhdfd78af_0' :
-        'quay.io/biocontainers/multiqc:1.13--pyhdfd78af_0' }"
+        'https://depot.galaxyproject.org/singularity/multiqc:1.14--pyhdfd78af_0' :
+        'biocontainers/multiqc:1.14--pyhdfd78af_0' }"

     input:
     path versions

diff --git a/modules/nf-core/modules/custom/dumpsoftwareversions/meta.yml b/modules/nf-core/custom/dumpsoftwareversions/meta.yml
similarity index 88%
rename from modules/nf-core/modules/custom/dumpsoftwareversions/meta.yml
rename to modules/nf-core/custom/dumpsoftwareversions/meta.yml
index 60b546a..c32657d 100644
--- a/modules/nf-core/modules/custom/dumpsoftwareversions/meta.yml
+++ b/modules/nf-core/custom/dumpsoftwareversions/meta.yml
@@ -1,7 +1,9 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/yaml-schema.json
 name: custom_dumpsoftwareversions
 description: Custom module used to dump software versions within the nf-core pipeline template
 keywords:
   - custom
+  - dump
   - version
 tools:
   - custom:

diff --git a/modules/nf-core/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py b/modules/nf-core/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py
new file mode 100755
index 0000000..da03340
--- /dev/null
+++ b/modules/nf-core/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+
+
+"""Provide functions to merge multiple versions.yml files."""
+
+
+import yaml
+import platform
+from textwrap import dedent
+
+
+def _make_versions_html(versions):
+    """Generate a tabular HTML output of all versions for MultiQC."""
+    html = [
+        dedent(
+            """\\
+            <style>
+            #nf-core-versions tbody:nth-child(even) {
+                background-color: #f2f2f2;
+            }
+            </style>
+            <table class="table" style="width:100%" id="nf-core-versions">
+                <thead>
+                    <tr>
+                        <th> Process Name </th>
+                        <th> Software </th>
+                        <th> Version  </th>
+                    </tr>
+                </thead>
+            """
+        )
+    ]
+    for process, tmp_versions in sorted(versions.items()):
+        html.append("<tbody>")
+        for i, (tool, version) in enumerate(sorted(tmp_versions.items())):
+            html.append(
+                dedent(
+                    f"""\\
+                    <tr>
+                        <td><samp>{process if (i == 0) else ''}</samp></td>
+                        <td><samp>{tool}</samp></td>
+                        <td><samp>{version}</samp></td>
+                    </tr>
+                    """
+                )
+            )
+        html.append("</tbody>")
+    html.append("</table>")
+    return "\\n".join(html)
+
+
+def main():
+    """Load all version files and generate merged output."""
+    versions_this_module = {}
+    versions_this_module["${task.process}"] = {
+        "python": platform.python_version(),
+        "yaml": yaml.__version__,
+    }
+
+    with open("$versions") as f:
+        versions_by_process = yaml.load(f, Loader=yaml.BaseLoader) | versions_this_module
+
+    # aggregate versions by the module name (derived from fully-qualified process name)
+    versions_by_module = {}
+    for process, process_versions in versions_by_process.items():
+        module = process.split(":")[-1]
+        try:
+            if versions_by_module[module] != process_versions:
+                raise AssertionError(
+                    "We assume that software versions are the same between all modules. "
+                    "If you see this error-message it means you discovered an edge-case "
+                    "and should open an issue in nf-core/tools. "
+                )
+        except KeyError:
+            versions_by_module[module] = process_versions
+
+    versions_by_module["Workflow"] = {
+        "Nextflow": "$workflow.nextflow.version",
+        "$workflow.manifest.name": "$workflow.manifest.version",
+    }
+
+    versions_mqc = {
+        "id": "software_versions",
+        "section_name": "${workflow.manifest.name} Software Versions",
+        "section_href": "https://github.com/${workflow.manifest.name}",
+        "plot_type": "html",
+        "description": "are collected at run time from the software output.",
+        "data": _make_versions_html(versions_by_module),
+    }
+
+    with open("software_versions.yml", "w") as f:
+        yaml.dump(versions_by_module, f, default_flow_style=False)
+    with open("software_versions_mqc.yml", "w") as f:
+        yaml.dump(versions_mqc, f, default_flow_style=False)
+
+    with open("versions.yml", "w") as f:
+        yaml.dump(versions_this_module, f, default_flow_style=False)
+
+
+if __name__ == "__main__":
+    main()

diff --git a/modules/nf-core/modules/custom/getchromsizes/main.nf b/modules/nf-core/custom/getchromsizes/main.nf
similarity index 88%
rename from modules/nf-core/modules/custom/getchromsizes/main.nf
rename to modules/nf-core/custom/getchromsizes/main.nf
index 8e1693d..060a2e8 100644
--- a/modules/nf-core/modules/custom/getchromsizes/main.nf
+++ b/modules/nf-core/custom/getchromsizes/main.nf
@@ -2,10 +2,10 @@ process CUSTOM_GETCHROMSIZES {
     tag "$fasta"
     label 'process_single'

-    conda (params.enable_conda ? "bioconda::samtools=1.15.1" : null)
+    conda "bioconda::samtools=1.16.1"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
- 'https://depot.galaxyproject.org/singularity/samtools:1.15.1--h1170115_0' : - 'quay.io/biocontainers/samtools:1.15.1--h1170115_0' }" + 'https://depot.galaxyproject.org/singularity/samtools:1.16.1--h6899075_1' : + 'biocontainers/samtools:1.16.1--h6899075_1' }" input: tuple val(meta), path(fasta) diff --git a/modules/nf-core/modules/custom/getchromsizes/meta.yml b/modules/nf-core/custom/getchromsizes/meta.yml similarity index 100% rename from modules/nf-core/modules/custom/getchromsizes/meta.yml rename to modules/nf-core/custom/getchromsizes/meta.yml diff --git a/modules/nf-core/modules/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py b/modules/nf-core/modules/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py deleted file mode 100644 index 7c2abfa..0000000 --- a/modules/nf-core/modules/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py +++ /dev/null @@ -1,90 +0,0 @@ -#!/usr/bin/env python - -import yaml -import platform -from textwrap import dedent - - -def _make_versions_html(versions): - html = [ - dedent( - """\\ - - - - - - - - - - """ - ) - ] - for process, tmp_versions in sorted(versions.items()): - html.append("") - for i, (tool, version) in enumerate(sorted(tmp_versions.items())): - html.append( - dedent( - f"""\\ - - - - - - """ - ) - ) - html.append("") - html.append("
</table>
") - return "\\n".join(html) - - -versions_this_module = {} -versions_this_module["${task.process}"] = { - "python": platform.python_version(), - "yaml": yaml.__version__, -} - -with open("$versions") as f: - versions_by_process = yaml.load(f, Loader=yaml.BaseLoader) | versions_this_module - -# aggregate versions by the module name (derived from fully-qualified process name) -versions_by_module = {} -for process, process_versions in versions_by_process.items(): - module = process.split(":")[-1] - try: - if versions_by_module[module] != process_versions: - raise AssertionError( - "We assume that software versions are the same between all modules. " - "If you see this error-message it means you discovered an edge-case " - "and should open an issue in nf-core/tools. " - ) - except KeyError: - versions_by_module[module] = process_versions - -versions_by_module["Workflow"] = { - "Nextflow": "$workflow.nextflow.version", - "$workflow.manifest.name": "$workflow.manifest.version", -} - -versions_mqc = { - "id": "software_versions", - "section_name": "${workflow.manifest.name} Software Versions", - "section_href": "https://github.com/${workflow.manifest.name}", - "plot_type": "html", - "description": "are collected at run time from the software output.", - "data": _make_versions_html(versions_by_module), -} - -with open("software_versions.yml", "w") as f: - yaml.dump(versions_by_module, f, default_flow_style=False) -with open("software_versions_mqc.yml", "w") as f: - yaml.dump(versions_mqc, f, default_flow_style=False) - -with open("versions.yml", "w") as f: - yaml.dump(versions_this_module, f, default_flow_style=False) diff --git a/modules/nf-core/modules/samtools/dict/main.nf b/modules/nf-core/samtools/dict/main.nf similarity index 86% rename from modules/nf-core/modules/samtools/dict/main.nf rename to modules/nf-core/samtools/dict/main.nf index 91f782b..f5b469b 100644 --- a/modules/nf-core/modules/samtools/dict/main.nf +++ b/modules/nf-core/samtools/dict/main.nf @@ -2,10 +2,10 @@ process SAMTOOLS_DICT { tag "$fasta" label 'process_single' - conda (params.enable_conda ? "bioconda::samtools=1.15.1" : null) + conda "bioconda::samtools=1.17" container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? - 'https://depot.galaxyproject.org/singularity/samtools:1.15.1--h1170115_0' : - 'quay.io/biocontainers/samtools:1.15.1--h1170115_0' }" + 'https://depot.galaxyproject.org/singularity/samtools:1.17--h00cdaf9_0' : + 'biocontainers/samtools:1.17--h00cdaf9_0' }" input: tuple val(meta), path(fasta) diff --git a/modules/nf-core/modules/samtools/dict/meta.yml b/modules/nf-core/samtools/dict/meta.yml similarity index 100% rename from modules/nf-core/modules/samtools/dict/meta.yml rename to modules/nf-core/samtools/dict/meta.yml diff --git a/modules/nf-core/modules/samtools/faidx/main.nf b/modules/nf-core/samtools/faidx/main.nf similarity index 59% rename from modules/nf-core/modules/samtools/faidx/main.nf rename to modules/nf-core/samtools/faidx/main.nf index ef940db..59ed308 100644 --- a/modules/nf-core/modules/samtools/faidx/main.nf +++ b/modules/nf-core/samtools/faidx/main.nf @@ -2,18 +2,20 @@ process SAMTOOLS_FAIDX { tag "$fasta" label 'process_single' - conda (params.enable_conda ? "bioconda::samtools=1.15.1" : null) + conda "bioconda::samtools=1.17" container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
- 'https://depot.galaxyproject.org/singularity/samtools:1.15.1--h1170115_0' : - 'quay.io/biocontainers/samtools:1.15.1--h1170115_0' }" + 'https://depot.galaxyproject.org/singularity/samtools:1.17--h00cdaf9_0' : + 'biocontainers/samtools:1.17--h00cdaf9_0' }" input: tuple val(meta), path(fasta) + tuple val(meta2), path(fai) output: - tuple val(meta), path ("*.fai"), emit: fai - tuple val(meta), path ("*.gzi"), emit: gzi, optional: true - path "versions.yml" , emit: versions + tuple val(meta), path ("*.{fa,fasta}") , emit: fa , optional: true + tuple val(meta), path ("*.fai") , emit: fai, optional: true + tuple val(meta), path ("*.gzi") , emit: gzi, optional: true + path "versions.yml" , emit: versions when: task.ext.when == null || task.ext.when @@ -23,8 +25,8 @@ process SAMTOOLS_FAIDX { """ samtools \\ faidx \\ - $args \\ - $fasta + $fasta \\ + $args cat <<-END_VERSIONS > versions.yml "${task.process}": @@ -33,8 +35,12 @@ process SAMTOOLS_FAIDX { """ stub: + def match = (task.ext.args =~ /-o(?:utput)?\s(.*)\s?/).findAll() + def fastacmd = match[0] ? "touch ${match[0][1]}" : '' """ + ${fastacmd} touch ${fasta}.fai + cat <<-END_VERSIONS > versions.yml "${task.process}": diff --git a/modules/nf-core/modules/samtools/faidx/meta.yml b/modules/nf-core/samtools/faidx/meta.yml similarity index 79% rename from modules/nf-core/modules/samtools/faidx/meta.yml rename to modules/nf-core/samtools/faidx/meta.yml index fe2fe9a..957b25e 100644 --- a/modules/nf-core/modules/samtools/faidx/meta.yml +++ b/modules/nf-core/samtools/faidx/meta.yml @@ -3,6 +3,7 @@ description: Index FASTA file keywords: - index - fasta + - faidx tools: - samtools: description: | @@ -17,12 +18,21 @@ input: - meta: type: map description: | - Groovy Map containing sample information - e.g. [ id:'test', single_end:false ] + Groovy Map containing reference information + e.g. [ id:'test' ] - fasta: type: file description: FASTA file pattern: "*.{fa,fasta}" + - meta2: + type: map + description: | + Groovy Map containing reference information + e.g. [ id:'test' ] + - fai: + type: file + description: FASTA index file + pattern: "*.{fai}" output: - meta: type: map diff --git a/modules/nf-core/modules/tabix/bgzip/main.nf b/modules/nf-core/tabix/bgzip/main.nf similarity index 65% rename from modules/nf-core/modules/tabix/bgzip/main.nf rename to modules/nf-core/tabix/bgzip/main.nf index aaef785..8c47d9e 100644 --- a/modules/nf-core/modules/tabix/bgzip/main.nf +++ b/modules/nf-core/tabix/bgzip/main.nf @@ -2,10 +2,10 @@ process TABIX_BGZIP { tag "$meta.id" label 'process_single' - conda (params.enable_conda ? 'bioconda::tabix=1.11' : null) + conda "bioconda::tabix=1.11" container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 'https://depot.galaxyproject.org/singularity/tabix:1.11--hdfd78af_0' : - 'quay.io/biocontainers/tabix:1.11--hdfd78af_0' }" + 'biocontainers/tabix:1.11--hdfd78af_0' }" input: tuple val(meta), path(input) @@ -22,15 +22,29 @@ process TABIX_BGZIP { def args = task.ext.args ?: '' prefix = task.ext.prefix ?: "${meta.id}" in_bgzip = ["gz", "bgz", "bgzf"].contains(input.getExtension()) - output = in_bgzip ? input.getBaseName() : "${prefix}.${input.getExtension()}.gz" - command1 = in_bgzip ? '-d' : '-c' - command2 = in_bgzip ? '' : " > ${output}" + extension = in_bgzip ? input.getBaseName().tokenize(".")[-1] : input.getExtension() + output = in_bgzip ? "${prefix}.${extension}" : "${prefix}.${extension}.gz" + command = in_bgzip ? 
'-d' : '' // Name the index according to $prefix, unless a name has been requested if ((args.matches("(^| )-i\\b") || args.matches("(^| )--index(\$| )")) && !args.matches("(^| )-I\\b") && !args.matches("(^| )--index-name\\b")) { args = args + " -I ${output}.gzi" } """ - bgzip $command1 $args -@${task.cpus} $input $command2 + bgzip $command -c $args -@${task.cpus} $input > ${output} + + cat <<-END_VERSIONS > versions.yml + "${task.process}": + tabix: \$(echo \$(tabix -h 2>&1) | sed 's/^.*Version: //; s/ .*\$//') + END_VERSIONS + """ + + stub: + prefix = task.ext.prefix ?: "${meta.id}" + in_bgzip = ["gz", "bgz", "bgzf"].contains(input.getExtension()) + output = in_bgzip ? input.getBaseName() : "${prefix}.${input.getExtension()}.gz" + + """ + touch ${output} cat <<-END_VERSIONS > versions.yml "${task.process}": diff --git a/modules/nf-core/modules/tabix/bgzip/meta.yml b/modules/nf-core/tabix/bgzip/meta.yml similarity index 98% rename from modules/nf-core/modules/tabix/bgzip/meta.yml rename to modules/nf-core/tabix/bgzip/meta.yml index 72f0abc..c3ea210 100644 --- a/modules/nf-core/modules/tabix/bgzip/meta.yml +++ b/modules/nf-core/tabix/bgzip/meta.yml @@ -44,3 +44,4 @@ authors: - "@joseespinosa" - "@drpatelh" - "@maxulysse" + - "@nvnieuwk" diff --git a/modules/nf-core/modules/tabix/tabix/main.nf b/modules/nf-core/tabix/tabix/main.nf similarity index 89% rename from modules/nf-core/modules/tabix/tabix/main.nf rename to modules/nf-core/tabix/tabix/main.nf index 21b2e79..5bf332e 100644 --- a/modules/nf-core/modules/tabix/tabix/main.nf +++ b/modules/nf-core/tabix/tabix/main.nf @@ -2,10 +2,10 @@ process TABIX_TABIX { tag "$meta.id" label 'process_single' - conda (params.enable_conda ? 'bioconda::tabix=1.11' : null) + conda "bioconda::tabix=1.11" container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
'https://depot.galaxyproject.org/singularity/tabix:1.11--hdfd78af_0' : - 'quay.io/biocontainers/tabix:1.11--hdfd78af_0' }" + 'biocontainers/tabix:1.11--hdfd78af_0' }" input: tuple val(meta), path(tab) diff --git a/modules/nf-core/modules/tabix/tabix/meta.yml b/modules/nf-core/tabix/tabix/meta.yml similarity index 100% rename from modules/nf-core/modules/tabix/tabix/meta.yml rename to modules/nf-core/tabix/tabix/meta.yml diff --git a/nextflow.config b/nextflow.config index 8263dc8..c281633 100644 --- a/nextflow.config +++ b/nextflow.config @@ -16,18 +16,19 @@ params { ftp_root = "https://ftp.ncbi.nlm.nih.gov/genomes/all/GCA" // Boilerplate options - outdir = null - tracedir = "${params.outdir}/pipeline_info" + outdir = 'results' + tracedir = "${params.outdir}/pipeline_info/insdcdownload" publish_dir_mode = 'copy' email = null email_on_fail = null plaintext_email = false monochrome_logs = false + hook_url = null help = false + version = false validate_params = true show_hidden_params = false schema_ignore_params = 'genomes' - enable_conda = false // Config options @@ -66,62 +67,92 @@ try { // } - profiles { - debug { process.beforeScript = 'echo $HOSTNAME' } + cleanup { cleanup = true } + debug { + dumpHashes = true + process.beforeScript = 'echo $HOSTNAME' + cleanup = false + } conda { - params.enable_conda = true + conda.enabled = true docker.enabled = false singularity.enabled = false podman.enabled = false shifter.enabled = false charliecloud.enabled = false + apptainer.enabled = false } mamba { - params.enable_conda = true + conda.enabled = true conda.useMamba = true docker.enabled = false singularity.enabled = false podman.enabled = false shifter.enabled = false charliecloud.enabled = false + apptainer.enabled = false } docker { docker.enabled = true + docker.registry = 'quay.io' docker.userEmulation = true + conda.enabled = false singularity.enabled = false podman.enabled = false shifter.enabled = false charliecloud.enabled = false + apptainer.enabled = false + } + arm { + docker.runOptions = '-u $(id -u):$(id -g) --platform=linux/amd64' } singularity { singularity.enabled = true singularity.autoMounts = true + conda.enabled = false docker.enabled = false podman.enabled = false shifter.enabled = false charliecloud.enabled = false + apptainer.enabled = false } podman { podman.enabled = true + podman.registry = 'quay.io' + conda.enabled = false docker.enabled = false singularity.enabled = false shifter.enabled = false charliecloud.enabled = false + apptainer.enabled = false } shifter { shifter.enabled = true + conda.enabled = false docker.enabled = false singularity.enabled = false podman.enabled = false charliecloud.enabled = false + apptainer.enabled = false } charliecloud { charliecloud.enabled = true + conda.enabled = false docker.enabled = false singularity.enabled = false podman.enabled = false shifter.enabled = false + apptainer.enabled = false + } + apptainer { + apptainer.enabled = true + conda.enabled = false + docker.enabled = false + singularity.enabled = false + podman.enabled = false + shifter.enabled = false + charliecloud.enabled = false } gitpod { executor.name = 'local' @@ -168,12 +199,13 @@ dag { manifest { name = 'sanger-tol/insdcdownload' - author = '@muffato' + author = """@muffato""" homePage = 'https://github.com/sanger-tol/insdcdownload' - description = 'Pipeline that downloads assemblies from INSDC into a Tree of Life directory structure' + description = """Pipeline that downloads assemblies from INSDC into a Tree of Life directory structure""" mainScript = 
'main.nf' - nextflowVersion = '!>=22.04.0' - version = '1.1.0' + nextflowVersion = '!>=22.10.1' + version = '2.0.0' + doi = '10.5281/zenodo.6983932' } // Load modules.config for DSL2 module specific options diff --git a/nextflow_schema.json b/nextflow_schema.json index 51de569..bc19c9b 100644 --- a/nextflow_schema.json +++ b/nextflow_schema.json @@ -27,7 +27,8 @@ "outdir": { "type": "string", "format": "directory-path", - "description": "The output directory where the results will be saved. Not considered when running the pipeline with a .csv file as input.", + "description": "The output directory where the results will be saved. Not considered for sample-sheet entries that have an absolute path.", + "default": "results", "fa_icon": "fas fa-folder-open" }, "input": { @@ -37,7 +38,7 @@ "pattern": "^\\S+\\.csv$", "schema": "assets/schema_input.json", "description": "Path to comma-separated file containing information about the assemblies to download. Used for bulk download of many assemblies.", - "help_text": "The file has to be a comma-separated file with three columns, and a header row. The columns names must be `species_dir`, `assembly_accession`, and `assembly_name`.", + "help_text": "The file has to be a comma-separated file with three columns, and a header row. The columns names must be `outdir`, `assembly_accession`, and `assembly_name`.", "fa_icon": "fas fa-file-csv" }, "ftp_root": { @@ -153,6 +154,12 @@ "fa_icon": "fas fa-question-circle", "hidden": true }, + "version": { + "type": "boolean", + "description": "Display version and exit.", + "fa_icon": "fas fa-question-circle", + "hidden": true + }, "publish_dir_mode": { "type": "string", "default": "copy", @@ -182,10 +189,17 @@ "fa_icon": "fas fa-palette", "hidden": true }, + "hook_url": { + "type": "string", + "description": "Incoming hook URL for messaging service", + "fa_icon": "fas fa-people-group", + "help_text": "Incoming hook URL for messaging service. Currently, MS Teams and Slack are supported.", + "hidden": true + }, "tracedir": { "type": "string", "description": "Directory to keep pipeline Nextflow logs and reports.", - "default": "${params.outdir}/pipeline_info", + "default": "${params.outdir}/pipeline_info/insdcdownload", "fa_icon": "fas fa-cogs", "hidden": true }, @@ -202,12 +216,6 @@ "description": "Show all params when using `--help`", "hidden": true, "help_text": "By default, parameters set as _hidden_ in the schema are not shown on the command line when a user runs with `--help`. Specifying this option will tell the pipeline to show all parameters." - }, - "enable_conda": { - "type": "boolean", - "description": "Run this workflow with Conda. You can also use '-profile conda' instead of providing this parameter.", - "hidden": true, - "fa_icon": "fas fa-bacon" } } } diff --git a/pipeline_template.yml b/pipeline_template.yml new file mode 100644 index 0000000..0aa7398 --- /dev/null +++ b/pipeline_template.yml @@ -0,0 +1,3 @@ +prefix: sanger-tol +skip: + - igenomes diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..0d62beb --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,10 @@ +# Config file for Python. Mostly used to configure linting of bin/check_samplesheet.py with Black. +# Should be kept the same as nf-core/tools to avoid fighting with template synchronisation. 
+[tool.black] +line-length = 120 +target_version = ["py37", "py38", "py39", "py310"] + +[tool.isort] +profile = "black" +known_first_party = ["nf_core"] +multi_line_output = 3 diff --git a/subworkflows/local/download_genome.nf b/subworkflows/local/download_genome.nf index 0a57503..dd4da55 100644 --- a/subworkflows/local/download_genome.nf +++ b/subworkflows/local/download_genome.nf @@ -9,7 +9,7 @@ include { REMOVE_MASKING } from '../../modules/local/remove_masking' workflow DOWNLOAD_GENOME { take: - assembly_params // tuple(assembly_accession, assembly_name, species_dir) + assembly_params // tuple(assembly_accession, assembly_name, outdir) main: diff --git a/subworkflows/local/params_check.nf b/subworkflows/local/params_check.nf index e1fcf63..e9e4458 100644 --- a/subworkflows/local/params_check.nf +++ b/subworkflows/local/params_check.nf @@ -7,13 +7,13 @@ include { SAMPLESHEET_CHECK } from '../../modules/local/samplesheet_check' workflow PARAMS_CHECK { take: - inputs // tuple, see below + samplesheet // file + cli_params // tuple, see below + outdir // file output directory main: - def (samplesheet, assembly_accession, assembly_name, outdir) = inputs - ch_versions = Channel.empty() ch_inputs = Channel.empty() @@ -21,33 +21,27 @@ workflow PARAMS_CHECK { SAMPLESHEET_CHECK ( file(samplesheet, checkIfExists: true) ) .csv - // Provides species_dir, assembly_accession, and assembly_name + // Provides outdir, assembly_accession, and assembly_name .splitCsv ( header:true, sep:',' ) // Convert to tuple, as required by the download subworkflow .map { [ it["assembly_accession"], it["assembly_name"], - it["species_dir"], + (it["outdir"].startsWith("/") ? "" : outdir + "/") + it["outdir"], ] } .set { ch_inputs } - ch_versions = ch_versions.mix(SAMPLESHEET_CHECK.out.versions.first()) + ch_versions = ch_versions.mix(SAMPLESHEET_CHECK.out.versions) } else { - - ch_inputs = Channel.of( - [ - assembly_accession, - assembly_name, - outdir, - ] - ) - + // Add the other input channel in, as it's expected to have all the parameters in the right order + // except the output directory which must be appended + ch_inputs = ch_inputs.mix(cli_params.map { it + [outdir] } ) } emit: - assembly_params = ch_inputs // channel: tuple(assembly_accession, assembly_name, species_dir) + assembly_params = ch_inputs // channel: tuple(assembly_accession, assembly_name, outdir) versions = ch_versions // channel: versions.yml } diff --git a/subworkflows/local/prepare_fasta.nf b/subworkflows/local/prepare_fasta.nf new file mode 100644 index 0000000..c615e63 --- /dev/null +++ b/subworkflows/local/prepare_fasta.nf @@ -0,0 +1,76 @@ +// +// Prepare all the indexes for a Fasta file +// + +include { CUSTOM_GETCHROMSIZES } from '../../modules/nf-core/custom/getchromsizes/main' +include { SAMTOOLS_DICT } from '../../modules/nf-core/samtools/dict/main' +include { TABIX_BGZIP } from '../../modules/nf-core/tabix/bgzip/main' + + +workflow PREPARE_FASTA { + + take: + fasta // file: /path/to/genome.fa + + + main: + ch_versions = Channel.empty() + + // Compress the Fasta file + ch_compressed_fasta = TABIX_BGZIP (fasta).output + ch_versions = ch_versions.mix(TABIX_BGZIP.out.versions) + + // Generate Samtools index and chromosome sizes file + ch_samtools_faidx = CUSTOM_GETCHROMSIZES (ch_compressed_fasta).fai + ch_versions = ch_versions.mix(CUSTOM_GETCHROMSIZES.out.versions) + + // Read the .fai file, extract sequence statistics, and make an extended meta map + sequence_map = ch_samtools_faidx.map { + meta, fai -> [meta, meta + 
diff --git a/subworkflows/local/prepare_fasta.nf b/subworkflows/local/prepare_fasta.nf
new file mode 100644
index 0000000..c615e63
--- /dev/null
+++ b/subworkflows/local/prepare_fasta.nf
@@ -0,0 +1,76 @@
+//
+// Prepare all the indexes for a Fasta file
+//
+
+include { CUSTOM_GETCHROMSIZES } from '../../modules/nf-core/custom/getchromsizes/main'
+include { SAMTOOLS_DICT        } from '../../modules/nf-core/samtools/dict/main'
+include { TABIX_BGZIP          } from '../../modules/nf-core/tabix/bgzip/main'
+
+
+workflow PREPARE_FASTA {
+
+    take:
+    fasta // file: /path/to/genome.fa
+
+
+    main:
+    ch_versions = Channel.empty()
+
+    // Compress the Fasta file
+    ch_compressed_fasta = TABIX_BGZIP (fasta).output
+    ch_versions = ch_versions.mix(TABIX_BGZIP.out.versions)
+
+    // Generate Samtools index and chromosome sizes file
+    ch_samtools_faidx = CUSTOM_GETCHROMSIZES (ch_compressed_fasta).fai
+    ch_versions = ch_versions.mix(CUSTOM_GETCHROMSIZES.out.versions)
+
+    // Read the .fai file, extract sequence statistics, and make an extended meta map
+    sequence_map = ch_samtools_faidx.map {
+        meta, fai -> [meta, meta + get_sequence_map(fai)]
+    }
+
+    // Update all channels to use the extended meta map
+    fasta_gz       = ch_compressed_fasta.join(sequence_map).map { [it[2], it[1]] }
+    faidx          = ch_samtools_faidx.join(sequence_map).map { [it[2], it[1]] }
+    gzi            = CUSTOM_GETCHROMSIZES.out.gzi.join(sequence_map).map { [it[2], it[1]] }
+    sizes          = CUSTOM_GETCHROMSIZES.out.sizes.join(sequence_map).map { [it[2], it[1]] }
+    expanded_fasta = fasta.join(sequence_map).map { [it[2], it[1]] }
+
+    // Generate Samtools dictionary
+    ch_samtools_dict = SAMTOOLS_DICT (expanded_fasta).dict
+    ch_versions = ch_versions.mix(SAMTOOLS_DICT.out.versions)
+
+
+    emit:
+    fasta_gz = fasta_gz                  // path: genome.fa.gz
+    faidx    = faidx                     // path: genome.fa.gz.fai
+    dict     = ch_samtools_dict          // path: genome.fa.dict
+    gzi      = gzi                       // path: genome.fa.gz.gzi
+    sizes    = sizes                     // path: genome.fa.gz.sizes
+    versions = ch_versions.ifEmpty(null) // channel: [ versions.yml ]
+}
+
+// Read the .fai file to extract the number of sequences, and the maximum and total sequence lengths
+// Inspired by https://github.com/nf-core/rnaseq/blob/3.10.1/lib/WorkflowRnaseq.groovy
+def get_sequence_map(fai_file) {
+    def n_sequences = 0
+    def max_length = 0
+    def total_length = 0
+    fai_file.eachLine { line ->
+        def lspl = line.split('\t')
+        def chrom = lspl[0]
+        def length = lspl[1].toInteger()
+        n_sequences ++
+        total_length += length
+        if (length > max_length) {
+            max_length = length
+        }
+    }
+
+    def sequence_map = [:]
+    sequence_map.n_sequences = n_sequences
+    sequence_map.total_length = total_length
+    if (n_sequences) {
+        sequence_map.max_length = max_length
+    }
+    return sequence_map
+}
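Note: `get_sequence_map` above parses the samtools `.fai` index, which has one tab-separated line per sequence, with the name in column 1 and the length in column 2 (the remaining columns are ignored here). A worked example with a made-up two-sequence index:

    // Hypothetical index: chr1 is 3,000 bp and chr2 is 1,000 bp
    def fai = new File('toy.fai')
    fai.text = 'chr1\t3000\t6\t60\t61\nchr2\t1000\t3062\t60\t61\n'
    assert get_sequence_map(fai) == [n_sequences: 2, total_length: 4000, max_length: 3000]

The returned map is merged into the meta map (`meta + get_sequence_map(fai)`), which is how `meta.max_length` becomes available for the index-format branching in PREPARE_REPEATS below.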
diff --git a/subworkflows/sanger-tol/prepare_repeats.nf b/subworkflows/local/prepare_repeats.nf
similarity index 53%
rename from subworkflows/sanger-tol/prepare_repeats.nf
rename to subworkflows/local/prepare_repeats.nf
index 7b25e03..df42c52 100644
--- a/subworkflows/sanger-tol/prepare_repeats.nf
+++ b/subworkflows/local/prepare_repeats.nf
@@ -3,10 +3,10 @@
 // and prepare indexes for it
 //

-include { REPEATS_BED                    } from '../../modules/sanger-tol/nf-core-pipeline/repeats_bed'
-include { TABIX_BGZIP                    } from '../../modules/nf-core/modules/tabix/bgzip/main'
-include { TABIX_TABIX as TABIX_TABIX_CSI } from '../../modules/nf-core/modules/tabix/tabix/main'
-include { TABIX_TABIX as TABIX_TABIX_TBI } from '../../modules/nf-core/modules/tabix/tabix/main'
+include { REPEATS_BED                    } from '../../modules/local/repeats_bed'
+include { TABIX_BGZIP                    } from '../../modules/nf-core/tabix/bgzip/main'
+include { TABIX_TABIX as TABIX_TABIX_CSI } from '../../modules/nf-core/tabix/tabix/main'
+include { TABIX_TABIX as TABIX_TABIX_TBI } from '../../modules/nf-core/tabix/tabix/main'


 workflow PREPARE_REPEATS {
@@ -26,10 +26,23 @@ workflow PREPARE_REPEATS {
     ch_compressed_bed = TABIX_BGZIP ( ch_bed ).output
     ch_versions = ch_versions.mix(TABIX_BGZIP.out.versions)

-    // Index the BED file in two formats for maximum compatibility
-    ch_indexed_bed_csi = TABIX_TABIX_CSI ( ch_compressed_bed ).csi
+    // Try indexing the BED file in two formats for maximum compatibility,
+    // but each format has its own limitations
+    tabix_selector = ch_compressed_bed.branch { meta, bed ->
+        tbi_and_csi: meta.max_length < 2**29
+        only_csi: meta.max_length < 2**32
+        no_tabix: true
+    }
+
+    // Output channels to tell the downstream subworkflows which indexes are missing
+    // (therefore, only meta is available)
+    no_csi = tabix_selector.no_tabix.map { it[0] }
+    no_tbi = tabix_selector.only_csi.mix(tabix_selector.no_tabix).map { it[0] }
+
+    // Do the indexing on the compatible BED files
+    ch_indexed_bed_csi = TABIX_TABIX_CSI ( tabix_selector.tbi_and_csi.mix(tabix_selector.only_csi) ).csi
     ch_versions = ch_versions.mix(TABIX_TABIX_CSI.out.versions)
-    ch_indexed_bed_tbi = TABIX_TABIX_TBI ( ch_compressed_bed ).tbi
+    ch_indexed_bed_tbi = TABIX_TABIX_TBI ( tabix_selector.tbi_and_csi ).tbi
     ch_versions = ch_versions.mix(TABIX_TABIX_TBI.out.versions)

@@ -37,5 +50,7 @@ workflow PREPARE_REPEATS {
     bed_gz  = ch_compressed_bed     // path: genome.bed.gz
     bed_csi = ch_indexed_bed_csi    // path: genome.bed.gz.csi
     bed_tbi = ch_indexed_bed_tbi    // path: genome.bed.gz.tbi
+    no_csi  = no_csi                // (only meta)
+    no_tbi  = no_tbi                // (only meta)
     versions = ch_versions.ifEmpty(null) // channel: [ versions.yml ]
 }
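Note: `.branch` routes each genome to the first condition that matches, so the three cases above are mutually exclusive. The `2**29` cut-off comes from the TBI format, which cannot index sequences of 2^29 bp (~537 Mbp) or longer; `2**32` is the limit this pipeline applies to CSI indexing. A minimal sketch of the same routing, with made-up meta maps and file names:

    workflow {
        Channel
            .of(
                [ [id: 'small',  max_length:   100_000_000 ], file('small.bed.gz')  ],   // -> tbi_and_csi
                [ [id: 'medium', max_length: 1_000_000_000 ], file('medium.bed.gz') ],   // -> only_csi
                [ [id: 'huge',   max_length: 5_000_000_000 ], file('huge.bed.gz')   ]    // -> no_tabix
            )
            .branch { meta, bed ->
                tbi_and_csi: meta.max_length < 2**29    // short enough for both .tbi and .csi
                only_csi:    meta.max_length < 2**32    // too long for .tbi, but .csi still works
                no_tabix:    true                       // too long for either index format
            }
            .set { tabix_selector }

        tabix_selector.no_tabix.view { "no tabix index possible for ${it[0].id}" }
    }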
diff --git a/subworkflows/sanger-tol/prepare_fasta.nf b/subworkflows/sanger-tol/prepare_fasta.nf
deleted file mode 100644
index c2c576d..0000000
--- a/subworkflows/sanger-tol/prepare_fasta.nf
+++ /dev/null
@@ -1,39 +0,0 @@
-//
-// Prepare all the indexes for a Fasta file
-//
-
-include { CUSTOM_GETCHROMSIZES } from '../../modules/nf-core/modules/custom/getchromsizes/main'
-include { SAMTOOLS_DICT        } from '../../modules/nf-core/modules/samtools/dict/main'
-include { TABIX_BGZIP          } from '../../modules/nf-core/modules/tabix/bgzip/main'
-
-
-workflow PREPARE_FASTA {
-
-    take:
-    fasta // file: /path/to/genome.fa
-
-
-    main:
-    ch_versions = Channel.empty()
-
-    // Compress the Fasta file
-    ch_compressed_fasta = TABIX_BGZIP (fasta).output
-    ch_versions = ch_versions.mix(TABIX_BGZIP.out.versions)
-
-    // Generate Samtools index and chromosome sizes file
-    ch_samtools_faidx = CUSTOM_GETCHROMSIZES (ch_compressed_fasta).fai
-    ch_versions = ch_versions.mix(CUSTOM_GETCHROMSIZES.out.versions)
-
-    // Generate Samtools dictionary
-    ch_samtools_dict = SAMTOOLS_DICT (fasta).dict
-    ch_versions = ch_versions.mix(SAMTOOLS_DICT.out.versions)
-
-
-    emit:
-    fasta_gz = ch_compressed_fasta            // path: genome.fa.gz
-    faidx    = ch_samtools_faidx              // path: genome.fa.gz.fai
-    dict     = ch_samtools_dict               // path: genome.fa.dict
-    gzi      = CUSTOM_GETCHROMSIZES.out.gzi   // path: genome.fa.gz.gzi
-    sizes    = CUSTOM_GETCHROMSIZES.out.sizes // path: genome.fa.gz.sizes
-    versions = ch_versions.ifEmpty(null)      // channel: [ versions.yml ]
-}
diff --git a/workflows/insdcdownload.nf b/workflows/insdcdownload.nf
index cfcb5cf..67793b7 100644
--- a/workflows/insdcdownload.nf
+++ b/workflows/insdcdownload.nf
@@ -20,9 +20,9 @@ WorkflowInsdcdownload.initialise(params, log)
 //
 include { DOWNLOAD_GENOME } from '../subworkflows/local/download_genome'
 include { PARAMS_CHECK    } from '../subworkflows/local/params_check'
-include { PREPARE_FASTA as PREPARE_UNMASKED_FASTA      } from '../subworkflows/sanger-tol/prepare_fasta'
-include { PREPARE_FASTA as PREPARE_REPEAT_MASKED_FASTA } from '../subworkflows/sanger-tol/prepare_fasta'
-include { PREPARE_REPEATS } from '../subworkflows/sanger-tol/prepare_repeats'
+include { PREPARE_FASTA as PREPARE_UNMASKED_FASTA      } from '../subworkflows/local/prepare_fasta'
+include { PREPARE_FASTA as PREPARE_REPEAT_MASKED_FASTA } from '../subworkflows/local/prepare_fasta'
+include { PREPARE_REPEATS } from '../subworkflows/local/prepare_repeats'

 /*
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -33,7 +33,7 @@ include { PREPARE_REPEATS } from '../subworkflows/s
 //
 // MODULE: Installed directly from nf-core/modules
 //
-include { CUSTOM_DUMPSOFTWAREVERSIONS } from '../modules/nf-core/modules/custom/dumpsoftwareversions/main'
+include { CUSTOM_DUMPSOFTWAREVERSIONS } from '../modules/nf-core/custom/dumpsoftwareversions/main'

 /*
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -46,12 +46,14 @@ workflow INSDCDOWNLOAD {
     ch_versions = Channel.empty()

     PARAMS_CHECK (
-        [
-            params.input,
-            params.assembly_accession,
-            params.assembly_name,
-            params.outdir,
-        ]
+        params.input,
+        Channel.of(
+            [
+                params.assembly_accession,
+                params.assembly_name,
+            ]
+        ),
+        params.outdir,
     )
     ch_versions = ch_versions.mix(PARAMS_CHECK.out.versions)
@@ -73,7 +75,7 @@ workflow INSDCDOWNLOAD {
     )
     ch_versions = ch_versions.mix(PREPARE_REPEAT_MASKED_FASTA.out.versions)

     PREPARE_REPEATS (
-        DOWNLOAD_GENOME.out.fasta_masked
+        PREPARE_REPEAT_MASKED_FASTA.out.fasta_gz
     )
     ch_versions = ch_versions.mix(PREPARE_REPEATS.out.versions)
@@ -93,6 +95,9 @@ workflow.onComplete {
         NfcoreTemplate.email(workflow, params, summary_params, projectDir, log)
     }
     NfcoreTemplate.summary(workflow, params, log)
+    if (params.hook_url) {
+        NfcoreTemplate.IM_notification(workflow, params, summary_params, projectDir, log)
+    }
 }

 /*
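Note: the `params.hook_url` guard in `workflow.onComplete` above sends a completion notification through `NfcoreTemplate.IM_notification` when an incoming webhook is configured. A minimal sketch of how a user might set it (the URL is a placeholder, not a real webhook):

    // e.g. in a personal -c config file; also settable on the command line as --hook_url
    params {
        hook_url = 'https://hooks.slack.com/services/XXX/YYY/ZZZ'  // placeholder URL
    }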