diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 0000000000..606d5fa35f --- /dev/null +++ b/.coveragerc @@ -0,0 +1,42 @@ +# .coveragerc to control coverage.py +[run] +branch = True +source = + ./tutor + ./bin +omit = + */templates/* + +[report] +# Regexes for lines to exclude from consideration +exclude_lines = + # Have to re-enable the standard pragma + pragma: no cover + + # Don't complain about missing debug-only code: + def __repr__ + if self\.debug + + # Don't complain if tests don't hit defensive assertion code: + raise AssertionError + raise NotImplementedError + + # Don't complain if non-runnable code isn't run: + if 0: + if __name__ == .__main__.: + + # Don't complain about abstract methods, they aren't run: + @(abc\.)?abstractmethod + +ignore_errors = True +show_missing = True +skip_empty = True +precision = 2 + +[html] +skip_empty = True +show_contexts = True + +[json] +pretty_print = True +show_contexts = True \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 69c4738060..ce5d14dfd2 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -7,7 +7,7 @@ assignees: '' --- - + **Bug description** diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md index 74f40d0433..570e640ce6 100644 --- a/.github/ISSUE_TEMPLATE/question.md +++ b/.github/ISSUE_TEMPLATE/question.md @@ -4,6 +4,6 @@ about: This is not the appropriate channel --- -Please post on our forums: https://discuss.overhang.io for questions about using `tutor`. +Please post on our forums: https://discuss.openedx.org/tag/tutor for questions about using `tutor`. Posts that are not a bug report or a feature/enhancement request will not be addressed on this issue tracker. 
diff --git a/.github/workflows/auto-add-to-project.yml b/.github/workflows/auto-add-to-project.yml new file mode 100644 index 0000000000..7dadeb6633 --- /dev/null +++ b/.github/workflows/auto-add-to-project.yml @@ -0,0 +1,17 @@ +name: Auto Add Issues to Project + +on: + issues: + types: + - opened + +jobs: + # https://github.com/actions/add-to-project + add-to-project: + name: Add issue and bugs to project + runs-on: ubuntu-latest + steps: + - uses: actions/add-to-project@v0.5.0 + with: + project-url: https://github.com/orgs/overhangio/projects/4 + github-token: ${{ secrets.GH_PROJECT_PERSONAL_ACCESS_TOKEN }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000000..c7069dfb22 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,95 @@ +# This script can be tested locally with act: +# +# act --secret GITHUB_TOKEN=... --job release +# +# https://github.com/nektos/act/ +# To generate a token: https://github.com/settings/tokens (add r/w permissions for "Contents") +name: Release + +on: + push: + tags: + - '*' + workflow_dispatch: + +jobs: + release: + runs-on: ${{ matrix.os }} + strategy: + matrix: + include: + # https://github.com/actions/runner-images#available-images + # It's important that we build the tutor binaries with the *oldest* possible + # OS releases and Python version. 
See these docs for more information: + # https://pyinstaller.org/en/stable/usage.html#making-gnu-linux-apps-forward-compatible + - os: ubuntu-20.04 + locale: C.UTF-8 + # https://endoflife.date/macos + - os: macos-12 + locale: en_US.UTF-8 + env: + LC_ALL: ${{ matrix.locale }} + LANG: ${{ matrix.locale }} + steps: + ##### Setup environment + # https://github.com/actions/checkout + - uses: actions/checkout@v3 + - name: Set up Python + # https://github.com/actions/setup-python + uses: actions/setup-python@v3 + with: + python-version: 3.9 + cache: 'pip' + cache-dependency-path: requirements/dev.txt + - name: Upgrade pip and setuptools + # https://pypi.org/project/pip/ + # https://pypi.org/project/setuptools/ + # https://pypi.org/project/wheel/ + run: python -m pip install --upgrade pip setuptools==65.6.3 wheel + - name: Print info about the current python installation + run: make ci-info + - name: Install requirements + run: make bootstrap-dev-plugins + + ##### Run tests, generate bundle + # - name: Run tests + # run: make test + - name: Create bundle + run: make bundle + # - name: Test bundle + # run: make ci-test-bundle + + ##### Download gh utility: https://github.com/cli/cli/releases + # This is unnecessary on GitHub, but useful when running locally with act. + # WARNING: this will only work on amd64 + - name: Check or download gh utility + run: | + if ! 
which gh; then + echo "Downloading 'gh' utility" + if [ "$(uname -s)" = "Linux" ]; then + curl -L -o gh.tar.gz https://github.com/cli/cli/releases/download/v2.28.0/gh_2.28.0_linux_amd64.tar.gz + tar xzf gh.tar.gz + mv ./gh_2.28.0_linux_amd64/bin/gh /usr/local/bin/gh + else + curl -L -o gh.zip https://github.com/cli/cli/releases/download/v2.28.0/gh_2.28.0_macOS_amd64.zip + unzip gh.zip + mv ./gh_2.28.0_macOS_amd64/bin/gh /usr/local/bin/gh + fi + which gh + fi + + ##### Create release on GitHub + - name: Create or update GitHub release + run: scriv github-release --repo=overhangio/tutor + env: + GITHUB_TOKEN: ${{ github.token }} + # scriv command will fail when not on a tag, such as running with act or a + # manual trigger. + if: ${{ github.ref_type == 'tag' }} + - name: Upload release asset to GitHub + run: | + export FILENAME="tutor-$(uname -s)_$(uname -m)" + mv ./dist/tutor $FILENAME + gh release upload --clobber v$(make version) $FILENAME + env: + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml new file mode 100644 index 0000000000..df0be4886e --- /dev/null +++ b/.github/workflows/sync.yml @@ -0,0 +1,17 @@ +name: Sync with private repo + +on: + push: + branches: [ master, main, nightly ] + +jobs: + sync: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Add remote + run: git remote add overhangio https://${{ secrets.GIT_USERNAME }}:${{ secrets.GIT_PASSWORD }}@git.overhang.io/core/tutor.git + - name: Push + run: git push overhangio $GITHUB_REF diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000000..b982239229 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,40 @@ +name: Run tests + +on: + push: + branches: [ master, main, nightly ] + pull_request: + branches: [ master, main, nightly ] + +jobs: + tests: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ['3.9', '3.12'] + steps: + - uses:
actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + cache-dependency-path: requirements/dev.txt + - name: Upgrade pip + run: python -m pip install --upgrade pip setuptools + - name: Install dependencies + run: pip install -r requirements/dev.txt + - name: Static code analysis + run: make test-lint + - name: Python unit tests + run: make test-unit + - name: Static type checks + run: make test-types + - name: Code formatting + run: make test-format + - name: Package tests + run: make test-pythonpackage + - name: Install docs dependencies + run: pip install -r requirements/docs.txt + - name: Build docs + run: make docs diff --git a/.gitignore b/.gitignore index 4a24ea2430..d01f6874b3 100644 --- a/.gitignore +++ b/.gitignore @@ -6,4 +6,7 @@ __pycache__ /build/ /dist/ -/releases/ + +# Unit test/ coverage reports +.coverage +/htmlcov/ diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 932deffbe4..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,32 +0,0 @@ -language: python -matrix: - include: - - os: linux - # We need an older version of python in order to have compatibility with - # older versions of libc - dist: xenial - python: 3.6 - - os: osx - language: generic - -env: - jobs: - - PATH=/tmp/bin/:$PATH - -script: - # In Mac OS python and pip binaries are v2, so we create symlinks - - mkdir /tmp/bin && ln -s $(which python3) /tmp/bin/python && ln -s $(which pip3) /tmp/bin/pip - - pip install --upgrade pip setuptools==44.0.0 - - make ci-info - - make bootstrap-dev-plugins - - make test - - make bundle - - make ci-test-bundle - -deploy: - # Create github release and push binary to github - - provider: script - script: make ci-push-bundle - skip_cleanup: true - on: - tags: true \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 68eed7232d..4556c4f873 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,29 +1,870 @@ # Changelog -Note: 
Breaking changes between versions are indicated by "πŸ’₯". +This file includes a history of past releases. Changes that were not yet added to a release are in the [changelog.d/](./changelog.d) folder. -## Unreleased + + + + + +## v18.1.4 (2024-10-24) + +- [Improvement] Set `EDXAPP_TEST_MONGO_HOST` env var in the openedx-dev image so that it no longer needs to be set by hand when running edx-platform unit tests (by @kdmccormick). + +- [Feature] Added `-c` or `--clean` option to tutor config save: For plugin developers and advanced users, this option cleans the `env/` folder before saving, ensuring a fresh environment for testing and development. (by @CodeWithEmad) + +- [Feature] Add a `patches show my-patch-name`. This is a convenient command for the troubleshooting of plugins. (by @regisb) + +- [Improvement] Fixes an issue which caused 502 errors by a premature closed connection by uwsgi, it also improves the handling of SIGTERM in docker and other uwsgi improvements (by @Ian2012). + +- [Improvement] Do not run useless celery workers (lms-worker, cms-worker) in development. This should save us ~700MB memory. (by @arbrandes, @regisb). + +- [Bugfix] Fixed an issue where the site name was not limited to 50 characters when creating a new site configuration. (by @CodeWithEmad) + +- [Feature] Update Open edX version to redwood.3 (by @dawoudsheraz) + + +## v18.1.3 (2024-08-13) + +- [Feature] Update to redwood.2 tag (by @dawoudsheraz) + + +## v18.1.2 (2024-07-26) + +- [Security] Add upstream security fix as patch in Open edX image (by @dawoudsheraz) + + +## v18.1.1 (2024-07-04) + +- [Bugfix] Fix `mysql-native-password is not loaded` error in MySQL 8.4 when upgrading from tutor 15 or an earlier release to tutor 18 by enabling the plugin. (by @Danyal-Faheem) + + +## v18.1.0 (2024-06-25) + +- πŸ’₯[Feature] Upgrade default charset and collation of mysql to "utf8mb4" and "utf8mb4_unicode_ci" respectively. This upgrade should be automatic for most users. 
However, if you are running a third-party MySQL (i.e. `RUN_MYSQL=false`), you are expected to upgrade manually. Please refer to the third-party provider's documentation for detailed upgrade instructions. Ensuring that your MySQL version is up-to-date is crucial for maintaining compatibility and security. (by @Danyal-Faheem) +- [Bugfix] Do not fail on start when there are no persistent volume claims to apply. (by @snglth) +- [Bugfix] Fix legacy warnings during Docker build. (by @regisb) + + +## v18.0.0 (2024-06-19) + +- πŸ’₯[Feature] Upgrade to Redwood (by @dawoudsheraz) +- [Bugfix] Wrap Django5 warning imports in try-except block to avoid failures in django3 that's still in use in edx-platform's master branch (by @mariajgrimaldi). +- πŸ’₯[Feature] Pull translations via `atlas` during Docker build. This breaks the `openedx-i18n` custom locale Tutor feature in favor of [OEP-58](https://docs.openedx.org/en/latest/developers/concepts/oep58.html). (by @omarithawi) +- πŸ’₯[Feature] The `openedx-assets` command is replaced with `npm run` subcommands. This will slightly reduce the build time for edx-platform assets and comprehensive themes. It will also open up the door for more significant build time reductions in the future.
Here is a migration guide, where each command is to be run in the `lms` or `cms` container: + + **Before** | **After** + -----------------------------------------|------------------------------------------------------------------------------------- + `openedx-assets build --env=prod ARGS` | `npm run build -- ARGS` + `openedx-assets build --env=dev ARGS` | `npm run build-dev -- ARGS` + `openedx-assets common --env=prod ARGS` | `npm run compile-sass -- --skip-themes ARGS` + `openedx-assets common --env=dev ARGS` | `npm run compile-sass-dev -- --skip-themes ARGS` + `openedx-assets webpack --env=prod ARGS` | `npm run webpack -- ARGS` + `openedx-assets webpack --env=dev ARGS` | `npm run webpack-dev -- ARGS` + `openedx-assets npm` | `npm run postinstall` (`npm clean-install` runs this automatically) + `openedx-assets xmodule` | (no longer necessary) + `openedx-assets collect ARGS` | `./manage.py lms collectstatic --noinput ARGS && ./manage.py cms collectstatic ARGS` + `openedx-assets watch-themes ARGS` | `npm run watch-themes -- ARGS` + +For more details, see the [deprecation notice for paver](https://github.com/openedx/edx-platform/issues/34467) +and the [static assets reference](https://github.com/openedx/edx-platform/tree/open-release/redwood.master/docs/references/static-assets.rst) +in edx-platform. + +- πŸ’₯[Feature] Update MongoDB to v7.0.7 (by @dawoudsheraz) MongoDB is upgraded from version 4.4 to 7.0. Since there have been major releases since 4.4, the upgrade will need to go through them before running Mongo 7. MongoDB would need to follow 4.4 --> 5.0 --> 6.0 --> 7.0 upgrade path to work correctly. The container will keep on restarting with featureCompatibility error if the upgrade path is not followed. To upgrade mongo, run the following command based in the appropriate environment: + + tutor upgrade --from=quince + +For k8s only, the above command will not perform the upgrade automatically. 
Instead, the command will output a series of commands that would need to be run manually to carry out the upgrade. + +- [Improvement] Upgrade Nodejs from 16.14.0 to 18.20.1 in edx-platform. (by @regisb) +- [Improvement] Auto-detect bind mounts of openedx-learning for edx-platform (by @bradenmacdonald) +- [Feature] Upgrade Open edX image to use Python 3.11 (by @dawoudsheraz) +- [Bugfix] Remove CORS_ALLOW_HEADERS setting from the LMS/Studio config template. This setting, which holds site-agnostic application logic, is now consistently set to a reasonable value upstream by LMS and CMS config. Using the upstream values fixes a bug where course import in Studio using the new Course Authoring MFE was broken in Tutor deployments because it required additional headers to be allowed (content-range and content-disposition) (by @ormsbee) +- [Improvement] Made Docker cache hits more frequent during the openedx image build via BuildKit's `COPY --link` feature (by @kdmccormick). +- πŸ’₯[Improvement] Upgrade MySQL to 8.4.0. The upgrade should be automatic for most users. However, if you are running a third-party MySQL (i.e., RUN_MYSQL=false), you are expected to upgrade manually. Please refer to the third-party provider's documentation for detailed upgrade instructions. Ensuring that your MySQL version is up-to-date is crucial for maintaining compatibility and security. (by @rohansaeed) +- πŸ’₯[Improvement] Ensure that the edx-platform repository git checkout is cached by Docker during image build. This means that the cache will automatically be cleared any time there is an upstream change. Thus, it is no longer necessary to run `tutor images build --no-cache` just to fetch the latest edx-platform changes. For this to work, any GitHub repository referenced by `EDX_PLATFORM_REPOSITORY` needs to end with ".git". Make sure that this is the case if you have modified the value of this setting in the past. 
(by @regisb) + + +## v17.0.6 (2024-06-13) + +- [Feature] Introduces the IS_FILE_RENDERED Filter, which allows developers to specify files that should be copied directly without rendering. This update empowers developers to manage special file types, ensuring that they are transferred intact without undergoing template processing. (by @Abdul-Muqadim-Arbisoft) + +- [Improvement] Remove the obsolete `version` property from all Docker Compose files and remove the DOCKER_COMPOSE_VERSION config setting. This addresses the deprecation warning from docker-compose (version 1.27 and above) regarding `version` being obsolete. (by @jasonmokk) + +- [Bugfix] Fix permissions error on windows when running `tutor dev start` (by @Danyal-Faheem) + + +## v17.0.5 (2024-05-22) + +- [Feature] Introduces the CONFIG_USER Filter. Used to declare unique key:value pairs in config.yml that will be overwritten when running tutor config save. Useful for injecting secrets from an external secrets manager into edx, or other values that may change over time that you need to programmatically fetch. (by @abonnell) + +- [Improvement] Add ability to patch proxy configuration for Caddy (by @ravikhetani) + +- [Security] Add Upstream "Privilege re-escalation in Studio after staff access removed" git security patch in Open edX Image(by @dawoudsheraz) + + +## v17.0.4 (2024-04-09) + +- [Security] Update Redis to 7.2.4 (by @dawoudsheraz) + +- [Improvement] Update release to open-release/quince.3 (by @dawoudsheraz) + + +## v17.0.3 (2024-03-26) + +- πŸ’₯[Bugfix] Prevent infinite growth of course structure cache in Redis. (by @regisb) + - Redis is now configured with a maximum memory size of 4GB. If this is too low for your platform, you should increase this value using the new "redis-conf" patch. + - Make sure that course structure cache keys have an actual timeout. +- [Feature] Introduce the "redis-conf" patch. 
(by @regisb) +- [Bugfix] Fix merge conflicts in nightly when trying to apply patches from the master branch. (by @regisb) +- [Bugfix] Ensure mounted installable packages are installed as expected upon initialization. (by @dawoudsheraz) + + +## v17.0.2 (2024-02-09) + +- [Feature] Several enhancements to the Demo Course (by @kdmccormick): + - The [Open edX Demo Course](https://github.com/openedx/openedx-demo-course) has been re-built from scratch with up-to-date instruction-focused content. Its directory structure has changed. + - In order to support both the old and new structures of the Demo Course's repository, the command `tutor local do importdemocourse` will now auto-determine the course root based on the location of `course.xml`. Use the `--repo-dir` argument to override this behavior. + - The new command `tutor local do importdemolibraries` will import any content libraries defined within the Demo Course repository. At the moment, that is just the "Respiratory System Question Bank", which is an optional but helpful extension to the new Demo Course. + - To try out the new Demo Course now, run: `tutor local do importdemocourse --version master`. + - To try out the demo Respiratory System Question Bank now, run: `tutor local do importdemolibraries --version master`. + - To revert back to an older Demo Course version at any point, run: `tutor local do importdemocourse --version open-release/quince.2`, replacing `quince.2` with your preferred course version. +- [Bugfix] Remove duplicate volume declarations that cause `docker compose` v2.24.1 to fail. +- [Bugfix] Actually update the environment on `tutor plugins enable ...`. (by @regisb) +- [Feature] Introduce a `tutor.hooks.lru_cache` decorator that is automatically cleared whenever a plugin is loaded or unloaded. This is useful, in particular when a plugin implements a costly function that depends on tutor hooks. 
(by @regisb) +- [Bugfix] Fix compatibility with Python 3.12 by replacing pkg_resources with importlib_metadata and importlib_resources. (by @Danyal-Faheem) +- [Improvement] Upgrade base release to open-release/quince.2. (by @regisb) + + +## v17.0.1 (2024-01-25) + +- [Bugfix] Error "'Crypto.PublicKey.RSA.RsaKey object' has no attribute 'dq'" during `tutor config save` was caused by outdated minimum version of the pycryptodome package. To resolve this issue, run `pip install --upgrade pycryptodome`. (by @regisb) +- [Feature] add `CONFIG_INTERACTIVE` action that allows tutor plugins to interact with the configuration at the time of the interactive questionnaire that happens during tutor local launch. (by @Alec4r). +- [Improvement] Add `.webp` and `.otf` extensions to list of binary extensions to ignore when rendering templates. +- [Security] Fix JWT scopes in XBlock callbacks. (by @regisb) + + +## v17.0.0 (2023-12-09) + +- πŸ’₯[Feature] Upgrade to Quince. (by @regisb) +- πŸ’₯[Feature] Replace "*.local.overhang.io" hostnames by "*.local.edly.io". (by @regisb) +- πŸ’₯[Feature] Enable the Indigo theme by default, if no other theme is set. (by @regisb) +- πŸ’₯[Deprecation] Tutor no longer supports the legacy Docker builder, which was previously available by setting `DOCKER_BUILDKIT=0` in the host environment. Going forward, Tutor will always use BuildKit (a.k.a. `docker buildx` in Docker v19-v22, or just `docker build` in Docker v23). This transition will improve build performance and should be seamless for Tutor users who are running a supported Docker version (by @kdmccormick). +- πŸ’₯[Deprecation] The template variable `is_buildkit_enabled`, which now always returns True, is deprecated. Plugin authors should assume BuildKit is enabled and remove the variable from their templates (by @kdmccormick). +- πŸ’₯[Deprecation] Adding Python packages to edx-platform via `private.txt` is no longer supported.
Instead, users should bind-mount their requirement directories with `tutor mounts add ...`. (by @regisb) +- [Bugfix] Updated how the Tutor setting `JWT_RSA_PRIVATE_KEY` is rendered into the LMS Django setting `JWT_AUTH['JWT_PRIVATE_SIGNING_JWK']` as required by a recent breaking upstream change. The new representation of the `JWT_PRIVATE_SIGNING_JWK` simply adds the `dq`, `dp`, and `qi` parameters. Without this fix, LMS would encounter an `InvalidKeyError` on all logins. (by @kdmccormick) +- [Improvement] You don't have to run `tutor config save` every time you enable or disable a plugin anymore. (by @CodeWithEmad) + + +## v16.1.8 (2023-12-10) + +- [Feature] Make it easy to work on 3rd-party edx-platform Python packages with `tutor mounts add /path/to/my/package`. (by @regisb) +- [Improvement] When configured with `RUN_MYSQL: true`, run `mysqld` with binlog expiry set to 3 days (rather than the default of 30). +- [Improvement] Fix `ulimits` error for elasticsearch in Docker rootless mode (by @OmarIthawi) +- [Improvement] Do not hardcode `OPENEDX_COMMON_VERSION = master` in the nightly branch. This removes git conflicts whenever we bump the common version in the master branch. (by @regisb) +- [Improvement] The ``iter_mounts`` template function can now take multiple image names as argument. This should concern only very advanced users. (by @regisb) + + +## v16.1.7 (2023-11-17) + +- [Feature] Upgrade to open-release/palm.4. It is strongly recommended to upgrade to this release for as long as possible before upgrading to Quince. Otherwise, many users will be logged out after the Quince upgrade and will have to log in again -- see the Quince release notes. (by @regisb) +- [Bugfix] Fix installation of tutor due to missing dev.txt file in Python package. (by @regisb) + + +## v16.1.6 (2023-11-16) + +- [Feature] Upgrade to open-release/palm.4. (by @regisb) +- [Improvement] Install tutor development tools with `pip install tutor[dev]`. 
(by @CodeWithEmad) + + +## v16.1.5 (2023-10-30) + +- [Bugfix] Fix ORA2 file uploads in CMS. As the cache settings are shared between CMS and LMS, the settings are moved from `common_lms.py` to `common_all.py`. (by @FatemeKhodayari) + + +## v16.1.4 (2023-10-13) + +- [Improvement] No more large dev images. This was fixed by adding `--no-log-init` option to useradd command and reducing space usage of `/var/log/faillog`. (by @CodeWithEmad) +- [Improvement] Upgrade the Open edX default version to open-release/palm.3. (by @regisb) + + +## v16.1.3 (2023-10-10) + +- [Improvement] Adds `connect=False` to the LMS and CMS' MongoDB connection to prevent `ServerSelectionError` after a cluster failover. (by @open-craft) +- [Bugfix] Override `CMS_BASE` setting in Studio for the development environment. Without this, parts of Studio will try to use the devstack default of localhost:8010 instead. (by @ormsbee) +- [Bugfix] Fix build error caused by py2neo package that was abruptly pulled from pypi and GitHub. (by @regisb) + + +## v16.1.2 (2023-10-02) + +- [Bugfix] Render config settings that are inside lists. (by @regisb) +- [Bugfix] Correctly parse strings prefixed with pound "#" key in `tutor config save --set KEY=#value` commands. (by @regisb) +- [Feature] New action introduced: `CONFIG_LOADED`. This action is called whenever the config is loaded and makes it possible to verify the validity of configuration settings at runtime. (by @CodeWithEmad) +- [Bugfix] Fix file upload in open response assessments. (by @regisb) + + +## v16.1.1 (2023-08-29) + +- πŸ’₯[Bugfix] Apply "fix mysql crash after upgrade to Palm" from 16.1.0 to `tutor k8s` deployments, as well. Users previously running `tutor k8s` with `RUN_MYSQL: true`, with any version between 16.0.0 and 16.1.0 including, might have to fix their data manually. For users running `tutor local`, this change has no effect, as the underlying issue was already fixed in 16.1.0. 
For users running `tutor k8s` with `RUN_MYSQL: false`, this change is also a no-op. (by @fghaas) + + +## v16.1.0 (2023-08-16) + +- [Improvement] Improve support of legacy non-BuildKit mode: (by @regisb) + - [Bugfix] Fix building of openedx Docker image. + - [Improvement] Remove `--cache-from` build option. + - [Improvement] Add a warning concerning the lack of support of the `--build-context` option. +- πŸ’₯[Bugfix] Fix mysql crash after upgrade to Palm. After an upgrade to Palm, the mysql client run by Django defaults to a utf8mb4 character set and collation, but the mysql server still runs with utf8mb3. This causes broken data during migration from Olive to Palm, and more generally when data is written to the database. To resolve this issue, we explicitely set the utf8mb3 charset and collation in the client. Users who were running Palm might have to fix their data manually. In the future we will upgrade the mysql server to utf8mb4. (by @regisb) +- [Improvement] We upgrade to MySQL 8.1.0 to avoid having to restart the server after the upgrade. +- [Bugfix] Ask whether user wants to run locally during `tutor local launch`. (by @regisb) +- [Bugfix] Fix a race condition that could prevent a newly provisioned Studio container from starting due to a FileExistsError when creating logs directory. + + +## v16.0.5 (2023-08-09) + +- [Improvement] Upgrade the Open edX default version to open-release/palm.2. (by @regisb) + + +## v16.0.4 (2023-08-03) + +- [Feature] Add support for HTTP/3, which considerably improves performance for Open edX. (by @regisb and @ghassanmas) +- [Bugfix] Do not display discussion units when the forum is not enabled. (by @regisb) +- [Improvement] Remove references to the wizard edition. (by @CodeWithEmad) + + +## v16.0.3 (2023-07-28) + +- [Bugfix] Improve `tutor ... do settheme default` so that it reverts to the default theme rather than trying to switch to a nonexistent theme named "default". This will clear up some error noise from LMS/CMS logs. 
(by @kdmccormick) +- [Security] Fix content libraries creation by unprivileged users in studio (see [security advisory](https://github.com/openedx/edx-platform/security/advisories/GHSA-3q74-3rfh-g37j)). (by @regisb) + + +## v16.0.2 (2023-06-22) + +- [Bugfix] On Kubernetes, fix mysql deployment by removing the `--ignore-db-dir` option, which no longer exists on MySQL 8. (by @regisb) + + +## v16.0.1 (2023-06-16) + +- [Bugfix] Fix loading default Kubernetes config. (by @regisb) + + +## v16.0.0 (2023-06-14) +- πŸ’₯[Feature] Upgrade to Palm. (by @regisb) + - [Bugfix] Rename ORA2 file upload folder from "SET-ME-PLEASE (ex. bucket-name)" to "openedxuploads". This has the effect of moving the corresponding folder from the `/data/lms/ora2` directory. MinIO users were not affected by this bug. + - πŸ’₯[Improvement] During registration, the honor code and terms of service links are no longer visible by default. For most platforms, these links did not work anyway. + - πŸ’₯[Deprecation] Halt support for Python 3.7. The binary release of Tutor is also no longer compatible with macOS 10. + - πŸ’₯[Deprecation] Drop support for `docker-compose`, also known as Compose V1. The `docker compose` (no hyphen) plugin must be installed. + - πŸ’₯[Refactor] We simplify the hooks API by getting rid of the `ContextTemplate`, `FilterTemplate` and `ActionTemplate` classes. As a consequences, the following changes occur: + - `APP` was previously a ContextTemplate, and is now a dictionary of contexts indexed by name. Developers who implemented this context should replace `Contexts.APP(...)` by `Contexts.app(...)`. + - Removed the `ENV_PATCH` filter, which was for internal use only anyway. + - The `PLUGIN_LOADED` ActionTemplate is now an Action which takes a single argument. (the plugin name) + - πŸ’₯[Refactor] We refactored the hooks API further by removing the static hook indexes and the hooks names. 
As a consequence, the syntactic sugar functions from the "filters" and "actions" modules were all removed: `get`, `add*`, `iterate*`, `apply*`, `do*`, etc. + - πŸ’₯[Deprecation] The obsolete filters `COMMANDS_PRE_INIT` and `COMMANDS_INIT` have been removed. Plugin developers should instead use `CLI_DO_INIT_TASKS` (with suitable priorities). + - πŸ’₯[Feature] The "openedx" Docker image is no longer built with docker-compose in development on `tutor dev start`. This used to be the case to make sure that it was always up-to-date, but it introduced a discrepancy in how images were build (`docker compose build` vs `docker build`). As a consequence: + - The "openedx" Docker image in development can be built with `tutor images build openedx-dev`. + - The `tutor dev/local start --skip-build` option is removed. It is replaced by opt-in `--build`. + - [Improvement] The `IMAGES_BUILD` filter now supports relative paths as strings, and not just as tuple of strings. + - [Improvement] Auto-complete the image names in the `images build/pull/push/printtag` commands. + - [Deprecation] For local installations, Docker v20.10.15 and Compose v2.0.0 are now the minimum required versions. + - [Bugfix] Make `tutor config printvalue ...` print actual yaml-formatted values, such as "true" and "null" + - πŸ’₯[Improvement] MongoDb was upgraded to 4.4. +- πŸ’₯[Improvement] Deprecate the `RUN_LMS` and `RUN_CMS` tutor settings, which should be mostly unused. (by @regisb) +- [Improvement] Greatly simplify ownership of bind-mounted volumes with docker-compose. Instead of running one service per application, we run just a single "permissions" service. This change should be backward-compatible. (by @regisb) +- [Feature] Add a `config save -a/--append -A/--remove` options to conveniently append and remove values to/from list entries. (by @regisb) +- [Improvement] Considerably accelerate building the "openedx" Docker image with `RUN --mount=type=cache`. 
This feature is only for Docker with BuildKit, so detection is performed at build-time. (by @regisb) +- [Improvement] Automatically pull Docker image cache from the remote registry. Again, this will considerably improve image build-time, particularly in "cold-start" scenarios, where the images need to be built from scratch. The registry cache can be disabled with the `tutor images build --no-registry-cache` option. (by @regisb) +- [Feature] Automatically mount host folders *at build time*. This is a really important feature, as it allows us to transparently build images using local forks of remote repositories. (by @regisb) +- πŸ’₯[Deprecation] Remove the various `--mount` options. These options are replaced by persistent mounts, which are managed by the `tutor mounts` commands. (by @regisb) +- [Feature] Add the `do importdemocourse --repo-dir=...` option, to import courses from subdirectories of git repositories. This allows us to import the openedx-test-course in Palm with: `tutor local do importdemocourse --repo=https://github.com/openedx/openedx-test-course --version=o +pen-release/palm.master --repo-dir=test-course/course`. (by @regisb) + + +## v15.3.7 (2023-06-13) + +- [Bugfix] Change `authSource` to `authsource`(LOWERCASE) in mongo db parameters. This allow to authenticate with credentials in openedx code.(by @johanv26) + - [Feature] Add support for loading in-cluster config when running inside a pod. In certain scenarios, Tutor may operate within a pod that has access to a cluster through role binding and a service account. In these instances, the ./kube/config file may not be present, but kubectl commands can still execute without any problems. (by @CodeWithEmad) +- [Improvement] Bump the default MongoDB Docker image reference from version 4.2.17 to 4.2.24. (by @fghaas) + + +## v15.3.6 (2023-05-22) + +- [Feature] Upgrade to open-release/olive.4. 
(by @regisb) + + +## v15.3.5 (2023-04-28) + +- [Feature] Make it possible to import the demo course from a different git repository or version. (by @regisb) +- [Feature] Add a convenient `do print-edx-platform-setting` command to print the value of an edx-platform setting. (by @regisb) +- [Improvement] Improve edx-platform logging by silencing a couple deprecation warnings. (by @regisb) +- [Feature] Add a convenient `do sqlshell` command to enter a SQL shell as root. (by @regisb) + + +## v15.3.4 (2023-04-13) + +- [Feature] Upgrade to open-release/olive.3. (by @regisb) + + +## v15.3.3 (2023-03-22) + +- [Improvement] Make it possible to extend or override the configuration of the uWSGI server. (by @MoisesGSalas) +- [Improvement] Running `tutor dev launch --mount=edx-platform` now performs all necessary setup for a local edx-platform development. This includes running setup.py, installing node modules, and building assets; previously, those steps had to be run explicitly after bind-mounting a local copy of edx-platform (by @kdmccormick). +- [Bugfix] Running jobs in development mode with `tutor dev do ...` will now correctly use the development image. Previously, it used the production image, just like `tutor local do ...`. (by @kdmccormick) +- [Improvement] Faster build with `npm clean-install` instead of `npm install` in the openedx Docker image. This may change the version of npm packages installed next to edx-platform. (by @regisb) +- [Feature] Introduce the `DOCKER_BUILD_COMMAND` filter which makes it possible to customize the `docker build` command. (by @regisb) +- [Improvement] During openedx image build, copy `dockerize` utility from Docker registry for better efficiency. (by @regisb) +- [Improvement] Better highlight enabled plugins in `tutor plugins list`. (by @regisb) + +- [Bugfix] Make sure that v0 plugin patches are applied in the same order as plugins are listed. 
(by @regisb) + + +## v15.3.2 (2023-03-13) + +- [Bugfix] Use supported YouTube API for transcripts imports. (by @mariajgrimaldi) +- [Feature] Add `tutor config patches list` CLI for listing available patches. (by @mafermazu) +- [Bugfix] Add the missing `UWSGI_WORKERS` env variables to the lms and cms k8s deployments. (by @MoisesGSalas) + + +## v15.3.1 (2023-02-28) + +- [Bugfix] `patchStrategicMerge` can now be applied to jobs. (by @keithgg) + + +## v15.3.0 (2023-02-10) + +- [Feature] Introduce plugin indexes, described in this [Tutor enhancement proposal](https://discuss.openedx.org/t/tutor-enhancement-proposal-tep-plugin-indices/8182). This new feature introduces a lot of new ``plugins`` commands. See the docs for more information. (by @regisb) +- [Improvement] Add the `plugins list --enabled` option. (by @regisb) +- πŸ’₯[Improvement] Modify the output of `plugins list`. Enabled plugins are indicated as "enabled". Installed but not enabled plugins are no longer indicated as "disabled" but as "installed". + +- πŸ’₯[Feature] Simplify the hooks API. The modules `tutor.hooks.actions`, `tutor.hooks.filters`, and `tutor.hooks.contexts` are no longer part of the API. This change should not affect most developers, who only use the `Actions` and `Filters` classes (notice the plural) from `tutor.hooks`. (by @regisb) + - Instead of `tutor.hooks.actions.get("some:action")`, use `tutor.hooks.Actions.SOME_ACTION`. + - Instead of `tutor.hooks.filters.get("some:filter")`, use `tutor.hooks.Filters.SOME_FILTER`. + - Instead of `tutor.hooks.actions.add("some:action")`, use `tutor.hooks.Actions.SOME_ACTION.add()`. The same applies to the `do` method. + - Instead of `tutor.hooks.filters.add("some:filter")`, use `tutor.hooks.Filters.SOME_FILTER.add()`. The same applies to the `add_item`, `add_items`, `apply`, and `iterate` methods. + - Instead of `tutor.hooks.contexts.enter`, use `tutor.core.hooks.contexts.enter`. 
+ +- [Improvement] Make it possible to override the max upload size in the LMS and the CMS. This is achieved by moving the "caddyfile-lms" and "caddyfile-cms" patches just before the `import proxy` declarations. We also wrap the `request_body` directives within `handle` statements, which means that the `max_body` sizes can be overridden for specific paths. (by @regisb) + +- [Security] Fix grading issue in LTI consumer XBlock. See [security advisory](https://github.com/openedx/xblock-lti-consumer/security/advisories/GHSA-7j9p-67mm-5g87). (by @regisb) + +- [Feature] Upgrade all Open edX services to open-release/olive.2. (by @regisb) + + +## v15.2.0 (2023-01-19) + +- πŸ’₯[Bugfix] Fix "example.com" links in registration emails. This is a breaking change for platforms that have modified the "id" field of the LMS site object in the database. These platforms should set `SITE_ID=1` in the common settings via a plugin. (by @regisb) +- [Bugfix] Running `tutor k8s upgrade --from=maple` won't apply and won't wait for the MySQL deployment to be ready if `RUN_MYSQL: false` (When you host your MySQL somewhere else like RDS) (by @CodeWithEmad) +- [Bugfix] Fix HTML component editing in studio by cherry-picking [upstream fix](https://github.com/openedx/edx-platform/pull/31500). (by @regisb) +- [Improvement] Changes annotations from `typing` to use built-in generic types from `__future__.annotations` (by @Carlos-Muniz) +- [Improvement] Resolve `CORS_ORIGIN_WHITELIST` warnings that pollute the LMS and CMS logs. As far as we know they were not causing any issue, apart from being a nuisance. (by @regisb) + + +## v15.1.0 (2022-12-13) + +- [Improvement] Upgrade ipdb and ipython packages in the openedx development image. (by @regisb) +- [Improvement] Skip unnecessary image building in development. This should make `tutor dev launch` slightly faster. (by @regisb) +- [Bugfix] Fix Authn MFE login in development by disabling enterprise integration. 
(by @regisb) +- [Bugfix] Fix "Invalid value for β€˜--from’" when running `tutor local upgrade --from=nutmeg`. If you are facing this error, just run `tutor local launch` and your platform should be automatically upgraded. +- [Bugfix] Fix "TypeError: Parameters to Generic[...] must all be type variables" error. This error may occur when upgrading from a very old installation of Tutor. It is due to an old version of the typing-extensions package. +- πŸ’₯[Deprecation] Get rid of the `quickstart` command. v15.0.0 introduced a deprecation warning, but we actually want users to stop using this command. Instead, use `launch` (by @regisb). +- [Improvement] Backfill persistent grades during upgrade from Nutmeg. If you observe missing grades after the upgrade from Nutmeg, run `tutor local upgrade --from=nutmeg`. (by @regisb) + + +## v15.0.0 (2022-12-06) + +- πŸ’₯[Feature] Upgrade to Olive (by @regisb): + - Mypy type checking options were modified: developers are encouraged to add the `--implicit-reexport` option to their IDE. +- [Bugfix] Update problem templates according newer python versions. (by @mariajgrimaldi) +- [Improvement] Add the `-h` help option to all commands and subcommands. Previously, we could only use `--help`, which is quite long for lazy fingers. (by @regisb) +- πŸ’₯[Feature] Add an extensible `local/dev/k8s do ...` command to trigger custom job commands. These commands are used to run a series of bash scripts in designated containers. Any plugin can add custom jobs thanks to the `CLI_DO_COMMANDS` filter. This causes the following breaking changes: + - The "init", "createuser", "settheme", "importdemocourse" commands were all migrated to this new interface. For instance, `tutor local init` was replaced by `tutor local do init`. + - Plugin developers are encouraged to replace calls to the `COMMANDS_INIT` and `COMMANDS_PRE_INIT` filters by `CLI_DO_INIT_TASKS`. +- [Feature] Implement hook filter priorities, which work like action priorities. 
(by @regisb) +- πŸ’₯[Improvement] Remove the `local/dev bindmount` commands, which have been marked as deprecated for some time. The `--mount` option should be used instead. +- πŸ’₯[Bugfix] Fix local installation requirements. Plugins that implemented the "openedx-dockerfile-post-python-requirements" patch and that needed access to the edx-platform repo will no longer work. Instead, these plugins should implement the "openedx-dockerfile-pre-assets" patch. This scenario should be very rare, though. (by @regisb) +- πŸ’₯[Improvement] Rename the implementation of tutor quickstart to tutor launch. (by @Carlos-Muniz) +- πŸ’₯[Improvement] Remove the implementation of tutor dev runserver. (by @Carlos-Muniz) +- [Bugfix] Fix MongoDB replica set connection error resulting from edx-platform's pymongo (3.10.1 -> 3.12.3) upgrade ([edx-platform#30569](https://github.com/openedx/edx-platform/pull/30569)). (by @ormsbee) +- [Bugfix] Update ``celery`` invocations for lms-worker and cms-worker to be compatible with Celery 5 CLI. +- [Improvement] Point CMS at its config file using ``CMS_CFG`` environment variable instead of deprecated ``STUDIO_CFG``. + + +## v14.2.3 (2022-12-06) + +- [Security] Fix rotation of JWT tokens for disabled users. (by @regisb) + + +## v14.2.2 (2022-11-29) + +- [Bugfix] Fix `jinja2.exceptions.TemplateSyntaxError: expected token 'end of statement block', got '|'` error by bumping the minimum required version of the Jinja2 package. + +- [Feature] Add support for MongoDB SSL, authentication source, mechanism and replica set via the `MONGODB_USE_SSL`, `MONGODB_AUTH_MECHANISM`, `MONGODB_AUTH_SOURCE`, `MONGODB_REPLICA_SET` settings. (by @zakum1 and @regisb) + +- [Bugfix] Fix tag of "openedx" development Docker image. Previously, this Docker tag did not include the Tutor version. As a consequence, a different cached image could be used in some cases. For instance: when running `tutor dev run` commands. Now, the image tag is "openedx-dev:TUTOR_VERSION". 
+ +- [Bugfix] Fix name of Swahili locale: it is "sw-ke" and not "sw" (by @regisb). + +- [Security] Apply drag-n-drop v2 xblock [security patch](https://discuss.openedx.org/t/upcoming-security-release-xblock-drag-and-drop-v2/8768/7). (by @regisb) + + +## v14.2.1 (2022-11-24) + +- [Improvement] Auto-completion of `plugins` and `config` arguments: `plugins enable/disable NAME`, `plugins install PATH`, `config save --set KEY=VAL`, `config save --unset KEY`, `config printvalue KEY`. (by @regisb) +- [Bugfix] Fix minimum click version (>= 8.0.0) when installing tutor from pip. +- [Improvement] Enable CORS by default for both LMS and CMS by moving those settings to the `common_all` partial. (by @arbrandes) + +## v14.2.0 (2022-11-21) + +- [Improvement] Auto-complete implicit `local/dev --mount /path/to/...` options. (by @regisb) +- πŸ’₯[Feature] Strong typing of action and filter hooks: this allows us to detect incorrect calls to `actions.add` or `filters.add` early. Strong typing forces us to break the `do` and `apply` API by removing the `context` named argument. Developers should replace `do(context=...)` by `do_from_context(..., )` (and similar for `apply`). + +## v14.1.2 (2022-11-02) + +- [Security] Fix edx-platform XSS vulnerability on "next" parameter. (by @regisb) + +## v14.1.1 (2022-10-25) + +- [Deprecation] Tutor is no longer compatible with Python 3.6. (by @regisb) +- [Security] Fix xblock ajax handler vulnerability. (by @regisb) +- [Improvement] Use web proxy gzip encoding to improve bandwidth. We observe a 75% size reduction on the LMS dashboard. (by @ghassanmas) + +## v14.1.0 (2022-10-10) + +- [Improvement] Upgrade Scorm XBlock to v14.0.0. (by @regisb) +- πŸ’₯[Improvement] The Richie plugin was transferred to the Openfun organization; thus, it is no longer officially supported and it is removed from the default set of plugins that ships with `pip install tutor[full]` or the Tutor pre-compiled binary. 
Users are encouraged to uninstall the `tutor-richie` Python package and install the `tutor-contrib-richie` package instead. +- [Feature] Upgrade edx-platform i18n strings to nutmeg.2. (by @regisb) + +## v14.0.5 (2022-08-29) + +- [Bugfix] Fix MongoDB replica set connection error resulting from edx-platform's pymongo (3.10.1 -> 3.12.3) upgrade ([edx-platform#30569](https://github.com/openedx/edx-platform/pull/30569)). (by @ormsbee) +- [Feature] Upgrade all applications to open-release/nutmeg.2. (by @BbrSofiane) + +## v14.0.4 (2022-07-29) + +- [Feature] Add the `-m/--mount` option to `tutor dev quickstart`. (by @regisb) +- [Bugfix] Fix `tutor dev start -m /path/to/frontend-app-learning` by introducing dev-specific `COMPOSE_DEV_TMP` and `COMPOSE_DEV_JOBS_TMP` filters (by @regisb). +- [Bugfix] Log the shell commands that Tutor executes more accurately. (by @kdmccormick) +- [Bugfix] `tutor dev quickstart` would fail under certain versions of docker-compose due to a bug in the logic that handled volume mounting. (by @kdmccormick) +- [Bugfix] The `tutor k8s start` command will succeed even when `k8s-override` and `kustomization-patches-strategic-merge` are not specified. (by @edazzocaisser) +- [BugFix] `kubectl wait` checks deployments instead of pods as it could hang indefinitely if there are extra pods in a broken state. (by @keithgg) + +## v14.0.3 (2022-07-09) + +- [Bugfix] Build openedx-dev Docker image even when the host user is root, for instance on Windows. (by @regisb) +- [Bugfix] Patch nutmeg.1 release with [LTI 1.3 fix](https://github.com/openedx/edx-platform/pull/30716). (by @ormsbee) +- [Improvement] Make it possible to override k8s resources in plugins using `k8s-override` patch. (by @foadlind) + +## v14.0.2 (2022-06-27) + +- [Bugfix] Update problem with hint template so it works with newer python versions. 
(by @mariajgrimaldi) +- [Feature] Add default PYTHONBREAKPOINT to openedx/Dockerfile (by @Carlos-Muniz) +- [Bugfix] Fix smtp server port in `cms.yml` which was causing email sending failures in the Studio. (by @regisb) +- [Bugfix] Skip waiting for MongoDB if it is served using SRV records. (by @gabor-boros) +- [Improvement] Use `git am` instead of `cherry-pick` to simplify patching process. +- [Improvement] Tutor is now compatible with Docker Compose subcommand. + +## v14.0.1 (2022-06-13) + +- [Bugfix] Fix missing tables on migration from maple ([#689](https://github.com/overhangio/tutor/issues/689)). (by @ibrmora) +- [Bugfix] Fix plugin patches in cms.env.yml template. + +## v14.0.0 (2022-06-09) + +- πŸ’₯[Feature] Upgrade to Nutmeg: (by @regisb) + - πŸ’₯[Feature] Hide a course from the `/course` search page in the LMS when the course visibility is set to "none" in the Studio. (thanks @ghassanmas!) + - πŸ’₯[Improvement] The `lms.env.json` and `cms.env.json` files are moved to `lms.env.yml` and `cms.env.yml`. As a consequence, plugin developers must reformat the following patches to use YAML format, and not JSON: "common-env-features", "lms-env-features", "cms-env-features", "lms-env", "cms-env", "openedx-auth". + - πŸ’₯[Feature] Persistent grades are now enabled by default. + - [Bugfix] Remove edX references from bulk emails ([issue](https://github.com/openedx/build-test-release-wg/issues/100)). + - [Improvement] For Tutor Nightly (and only Nightly), official plugins are now installed from their nightly branches on GitHub instead of a version range on PyPI. This will allow Nightly users to install all official plugins by running ``pip install -e ".[full]"``. + - [Bugfix] Start MongoDB when running migrations, because a new data migration fails if MongoDB is not running + +## v13.3.1 (2022-06-06) + +- [Bugfix] Crashing celery workers in development (#681). (by @regisb) +- [Bugfix] Fix studio logout issue. 
(by @Alec4r) + +## v13.3.0 (2022-06-03) + +- [Security] Apply logout redirect url security fix. (by @regisb) +- [Feature] Make it possible to force the rendering of a given template, even when the template path matches an ignore pattern. (by @regisb) +- πŸ’₯[Bugfix] Get rid of the `tutor config render` command, which is useless now that themes can be implemented as plugins. (by @regisb) + +## v13.2.3 (2022-05-30) + +- [Bugfix] Truncate site display name to 50 characters with a warning, fixing data too long error for long site names. (by @navinkarkera) +- [Feature] Add patch to allow overriding final openedx docker image CMD. +- [Bugfix] Ignore Python plugins that cannot be loaded. (by @regisb) +- [Improvement] Faster and more reliable builds with `npm clean-install` instead of `npm install`. (by @regisb. Thanks @ghassanmas!) +- [Bugfix] Fix 500 error during studio login. (by @regisb) +- [Bugfix] Fix updates for the Caddy deployment in multi-node Kubernetes clusters (#660). Previously, Caddy configuration updates might fail if the Kubernetes cluster had more than one worker node. (by @fghaas) + +## v13.2.2 (2022-05-06) + +- [Bugfix] Mounts were broken in dev mode. (by @kdmccormick) + +## v13.2.1 (2022-05-06) + +- [Bugfix] Fix broken file upload in studio because of unpinned studio-frontend requirement (see [discussion](https://discuss.overhang.io/t/missing-js-css-files-missing-from-openedx-docker-image-in-studio/2629) and [pull request](https://github.com/openedx/edx-platform/pull/30309)) (by @regisb. Thanks @uetuluk!). +- [Bugfix] "The Compose file is invalid" error on mounting dev-only folders. (by @regisb) +- [Bugfix] CMS settings in development. (by @regisb) + +## v13.2.0 (2022-04-24) + +- [Improvement] Add the `COMPOSE_PROJECT_STARTED` action and run `dev stop` on `local start` (and vice versa). (by @regisb) +- [Feature] Introduce `local/dev copyfrom` command to copy contents from a container. 
(by @regisb) +- [Bugfix] Fix a race condition that could prevent a newly provisioned LMS container from starting due to a `FileExistsError` when creating data folders. +- [Deprecation] Mark `tutor dev runserver` as deprecated in favor of `tutor dev start`. Since `start` now supports bind-mounting and breakpoint debugging, `runserver` is redundant and will be removed in a future release. (by @kdmccormick) +- [Improvement] Allow breakpoint debugging when attached to a service via `tutor dev start SERVICE`. (by @kdmccormick) +- [Security] Apply rate limiting security fix (see [commit](https://github.com/overhangio/edx-platform/commit/b5723e416e628cac4fa84392ca13e1b72817674f)). (by @regisb) +- [Feature] Introduce the ``-m/--mount`` option in ``local`` and ``dev`` commands to auto-magically bind-mount folders from the host. (by @regisb) +- [Feature] Add `tutor dev quickstart` command, which is similar to `tutor local quickstart`, except that it uses dev containers instead of local production ones and includes some other small differences for the convience of Open edX developers. This should remove some friction from the Open edX development setup process, which previously required that users provision using local producation containers (`tutor local quickstart`) but then stop them and switch to dev containers (`tutor local stop && tutor dev start -d`). (by @kdmccormick) +- πŸ’₯[Improvement] Make it possible to run `tutor k8s exec ` (#636). As a consequence, it is no longer possible to run quoted commands: `tutor k8s exec ""`. Instead, you should remove the quotes: `tutor k8s exec `. (by @regisb) +- πŸ’₯[Deprecation] Drop support for the `TUTOR_EDX_PLATFORM_SETTINGS` environment variable. It is now recommended to create a plugin instead. (by @regisb) +- πŸ’₯[Improvement] Complete overhaul of the plugin extension mechanism. 
Tutor now has a hook-based Python API: actions can be triggered at different points of the application life cycle and data can be modified thanks to custom filters. The v0 plugin API is still supported, for backward compatibility, but plugin developers are encouraged to migrate their plugins to the new API. See the new plugin tutorial for more information. (by @regisb) +- [Improvement] Improved the output of `tutor plugins list`. (by @regisb) +- [Feature] Add `tutor [dev|local|k8s] status` command, which provides basic information about the platform's status. (by @kdmccormick) + +## v13.1.11 (2022-04-12) + +- [Security] Apply SAML security fix. +- [Improvement] In addition to the Docker build arguments `EDX_PLATFORM_REPOSITORY` and `NPM_REGISTRY`, also support two corresponding and identically-named `config.yml` values serving the same purpose. + +## v13.1.10 (2022-04-11) + +- [Feature] Upgrade all applications to open-release/maple.3. + +## v13.1.9 (2022-04-06) + +- [Security] Fix open redirect vulnerability in inactive user flow (see [commit](https://github.com/rgraber/edx-platform/commit/fbbcfe71832e700f16aad3636b0ccb35585d1c95)) + +## v13.1.8 (2022-03-18) + +- [Bugfix] Fix "evalsymlink failure" during `k8s quickstart` (#611). +- [Bugfix] Fix "TypeError: upgrade() got an unexpected keyword argument 'non_interactive'" during `local upgrade`. + +## v13.1.7 (2022-03-17) + +- [Bugfix] Fix dockerize on arm64 by switching to the [powerman/dockerize](https://github.com/powerman/dockerize) fork (#591). +- [Bugfix] Fix "Unexpected args" error during service initialisation on Kubernetes (#611). + +## v13.1.6 (2022-03-15) + +- [Bugfix] Fix `local/k8s quickstart` commands when upgrading from an older release (#595). +- [Bugfix] Fix running the default exim-relay SMTP server on arm64 (#600). +- [Feature] Add `tutor k8s apply` command, which is a direct interface with `kubectl apply`. 
+- [Feature] Add `openedx-dockerfile-minimal` patch, which you can use to install custom packages and run commands as root in the Docker image. + +## v13.1.5 (2022-02-14) + +- [Improvement] Upgrade all services to open-release/maple.2. + +## v13.1.4 (2022-02-08) + +- [Security] Fix vulnerability in redirect URL during authentication (see [commit](https://github.com/overhangio/edx-platform/commit/06550411e34c04376fa3d757e1f068f464f816e6)). + +## v13.1.3 (2022-02-01) + +- [Security] Fix vulnerability in call to invalid enrollment API (see [commit](https://github.com/openedx/edx-platform/commit/a140c674799c527e961e37c5e46cb7dc1ffef5ac)). +- [Bugfix] Fix "Internal Server Error / AttributeError / object has no attribute 'get_metadata'" in learning MFE. +- [Improvement] Replace all links to github.com/edx by github.com/openedx, following the migration of all repositories. +- [Bugfix] Fix `k8s start caddy` command. + +## v13.1.2 (2022-01-30) + +- [Bugfix] Fix auto-renewal of certificates revoked by Let's Encrypt (see [discussion](https://community.letsencrypt.org/t/questions-about-renewing-before-tls-alpn-01-revocations/170449/21)). + +## v13.1.1 (2022-01-25) + +- [Bugfix] Fix authentication in development due to missing SameSite policy on session ID cookie. +- [Bugfix] Display properly themed favicon.ico image in LMS, Studio, and microfrontends. +- [Bugfix] Fix "LazyStaticAbsoluteUrl is not JSON serializable" error when sending bulk emails. +- [Bugfix] Fix `tutor local importdemocourse` fails when the platform is not up. + +## v13.1.0 (2022-01-08) + +- [Improvement] Provide much more comprehensive instructions when upgrading. +- [Bugfix] During the upgrade, make sure that the environment is up-to-date before prompting to rebuild the custom images. +- [Bugfix] Fix ownership of MySQL data, in particular when upgrading a Kubernetes cluster to Maple. +- [Bugfix] Ensure that ``tutor k8s upgrade`` is run during ``tutor k8s quickstart``, when necessary. 
+- πŸ’₯[Bugfix] By default, detect the current version during ``tutor k8s/local upgrade``. +- [Bugfix] Fix upgrading from Lilac to Maple on Kubernetes by deleting deployments and services. + +## v13.0.3 (2022-01-04) + +- [Security] Upgrade Django to 3.2.11 in edx-platform. +- [Security] Prevent non-staff users from searching usernames by email by abusing the logout URL. + +## v13.0.2 (2021-12-22) + +- [Security] Prevent non-staff users from searching usernames by email. + +## v13.0.1 (2021-12-20) + +- [Bugfix] Missing requirements file in `pip install tutor[full]`. + +## v13.0.0 (2021-12-20) + +- πŸ’₯[Improvement] Upgrade to Maple + - Install all official plugins as part of the `tutor[full]` package. + - Don't print error messages about loading plugins during autocompletion. + - Prompt for image building when upgrading from one release to the next. + - πŸ’₯ Allow concurrent logins to the LMS and the CMS. + - Add `tutor local start --skip-build` option to skip building Docker images. +- [Feature] Better support of Caddy as a load balancer in Kubernetes: + - Make it possible to start/stop a selection of resources with ``tutor k8s start/stop [names...]``. + - Make it easy to deploy an independent LoadBalancer by converting the caddy service to a ClusterIP when ``ENABLE_WEB_PROXY=false``. + - Add a ``app.kubernetes.io/component: loadbalancer`` label to the LoadBalancer service. + - Add ``app.kubernetes.io/name`` labels to all services. + - Preserve the LoadBalancer service in ``tutor k8s stop`` commands. + - Wait for the caddy deployment to be ready before running initialisation jobs. + - Fix running Caddy container in k8s, which should always be the case even if `ENABLE_WEB_PROXY` is false. +- [Security] On Kubernetes, convert all NodePort services to ClusterIP to guarantee network isolation from outside the cluster. +- πŸ’₯[Improvement] Move the Open edX forum to a [dedicated plugin](https://github.com/overhangio/tutor-forum/) (#450). 
+- πŸ’₯[Improvement] Drop Python 3.5 compatibility. +- πŸ’₯[Bugfix] No longer track the Tutor version number in resource labels (and label selectors, which breaks the update of Deployment resources), but instead do so in resource annotations. +- πŸ’₯[Improvement] Get rid of the "tutor-openedx" package, which is no longer supported. +- πŸ’₯[Improvement] Run all services as unprivileged containers, for better security. This has multiple consequences: + - The "openedx-dev" image is now built with `tutor dev dc build lms`. + - The "smtp" service now runs the "devture/exim-relay" Docker image, which is unprivileged. Also, the default SMTP port is now 8025. +- πŸ’₯[Feature] Get rid of the Nginx container and service, which is now replaced by Caddy. this has the following consequences: + - Patches "nginx-cms", "nginx-lms", "nginx-extra", "local-docker-compose-nginx-aliases" are replaced by "caddyfile-cms", "caddyfile-lms", "caddyfile", " local-docker-compose-caddy-aliases". + - Patches "k8s-deployments-nginx-volume-mounts", "k8s-deployments-nginx-volumes" were obsolete and are removed. + - The `NGINX_HTTP_PORT` setting is renamed to `CADDY_HTTP_PORT`. +- [Bugfix] Fix building of the `openedx` image on ARM64 due to missing `libgeos-dev` + +## v12.2.0 (2021-12-08) + +- [Bugfix] Fix incorrect "from" address in course bulk emails (see [pull request](https://github.com/openedx/edx-platform/pull/29001)). +- πŸ’₯[Improvement] Fail on incorrect image name argument in `images build/pull/push/printtag` commands. +- [Bugfix] Remove trailing slashes in docker-compose files for [compatibility with docker-compose v2 in WSL](https://github.com/docker/compose/issues/8558). +- [Improvement] `settheme` now works with the preview domain. +- [Feature] Allow specifying extra pip packages through config.yml. + +## v12.1.7 (2021-11-18) + +- [Security] Timed exam security fix [29347](https://github.com/openedx/edx-platform/pull/29347). 
+- [Feature] Add [tutor-richie](https://github.com/overhangio/tutor-richie) to the plugins that are bundled with the tutor binary. +- [Improvement] Make `tutor plugins list` print plugins sorted by name. +- [Improvement] Ignore Python plugins that cannot be loaded. +- [Bugfix] When configured with `RUN_FORUM: false`, omit forum-related [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/job/) from the manifests that `tutor k8s` generates. (#525) + +## v12.1.6 (2021-11-02) + +- [Improvement] Upgrade all services to open-release/lilac.3. +- [Feature] Make it possible to override job configuration in development: if they exist, `dev/docker-compose.jobs.yml` and `dev/docker-compose.jobs.override.yml` will be loaded when running jobs. +- [Improvement] Faster `tutor local start` by building only necessary images. + +## v12.1.5 (2021-10-25) + +- πŸ’₯[Improvement] Change the `settheme` command such that, by default, a custom theme is assigned to the LMS and the CMS, both in production and development mode. +- [Bugfix] Change the `local quickstart` command to check for adequate Docker memory allocation on macOS (#463) + +## v12.1.4 (2021-10-11) + +- [Feature] Add configuration setting `PREVIEW_LMS_BASE` for custom preview domain. +- [Improvement] Enable milestones application flag `MILESTONES_APP` and prerequisite courses feature flags `ENABLE_PREREQUISITE_COURSES` by default. + +## v12.1.3 (2021-09-28) + +- [Bugfix] Fix 500 error during user registration. +- [Bugfix] Fix Mongodb compatibility version upgrade when upgrading from Koa to Lilac. +- [Feature] Ability to pass [docker build options](https://docs.docker.com/engine/reference/commandline/build/#options) via ``--docker-arg`` option in ``tutor images build`` command. + +## v12.1.2 (2021-09-18) + +- [Bugfix] Fix (again) forum starting issue: "NoMethodError: undefined method 'encode' for nil:NilClass". 
+ +## v12.1.1 (2021-09-17) + +- [Bugfix] Fix forum starting issue: "NoMethodError: undefined method 'encode' for nil:NilClass". + +## v12.1.0 (2021-09-17) + +- [Improvement] Make it easier to run edx-platform unit tests. +- [Bugfix] Fix segmentation fault during `tutor config save` on Mac OS M1 (#473). Thanks @ghassanmas! +- [Bugfix] Fix a bug that prevented connecting to external MongoDB instances. +- [Improvement] Make sure that the logo included in email notifications (including discussion responses) is the same as the site logo. +- [Bugfix] Install IPython directly from PyPI instead of installing it from source (the reason it was installed from source is no longer relevant). The effect of this shall speed up the process of building the openedx-dev Docker image. +- [Feature] Add "openedx-dockerfile-post-git-checkout" patch. +- [Improvement] In the "openedx" Docker images, convert git patches to cherry-picks for a cleaner source tree. +- πŸ’₯[Feature] Make it possible to override local job configuration. This deprecates the older model for running jobs which dates back from a long time ago. + +## v12.0.4 (2021-08-12) + +- [Security] Apply security patch [28442](https://github.com/openedx/edx-platform/pull/28442). + +## v12.0.3 (2021-08-10) + +- [Improvement] Upgrade all services to open-release/lilac.2. +- [Bugfix] Fix "`sh` is not a recognized command" in some plugins, including minio. +- [Improvement] Set the default contact mailing email address +- [Bugfix] Fix minio initialisation in Kubernetes. + +## v12.0.2 (2021-07-06) + +- [Bugfix] Fix "Invalid command argument" during upgrade from Koa to Lilac. +- [Bugfix] Fix MySQL initialisation in docker-compose==2.0.0beta4. +- [Improvement] Tutor is now published on PyPI as "tutor". + +## v12.0.1 (2021-06-22) + +- [Bugfix] Fix double pulling MongoDB image when upgrading from Koa to Lilac. +- [Improvement] Better logging during `plugins disable`. 
+- [Bugfix] Fix "upstream sent too big header" error during login of existing users after a Koa to Lilac upgrade. +- [Feature] Added the ability to skip `config.yml` file modification while running `tutor config save` command with `-e` or `--env-only` flag. +- [Feature] Add new config value `FORUM_MONGODB_DATABASE` to set the forum database name. + +## v12.0.0 (2021-06-09) + +- πŸ’₯[Improvement] Upgrade all services to open-release/lilac.master. +- πŸ’₯[Feature] Migrate Android app building and the WebUI frontend away from core Tutor and to dedicated plugins (see [TEP](https://discuss.overhang.io/c/community/tep/9)). The `DOCKER_IMAGE_ANDROID` setting is thus renamed to `ANDROID_DOCKER_IMAGE`. +- [Feature] Run `docker-compose build` as part of `tutor local start`. + +## v11.3.1 (2021-06-08) + +- [Improvement] Avoid permission issues in Kubernetes/Openshift for users who do not have the rights to edit their namespace. +- [Improvement] Better Kubernetes object creation. + +## v11.3.0 (2021-06-06) + +- πŸ’₯[Security] Disable python-evaluated input by default as we don't run codejail. +- [Bugfix] Fix missing discussion notifications. +- [Improvement] Better error logging when loading a plugin from an incompatible version. + +## v11.2.11 (2021-05-18) + +- [Feature] Add Redis database configuration for both cache and celery. + +## v11.2.10 (2021-05-17) + +- [Security] Apply Django security patches by upgrading from 2.2.20 to 2.2.23. +- [Bugfix] Fix video unit completion (see [pull request](https://github.com/openedx/edx-platform/pull/27230)). + +## v11.2.9 (2021-05-12) + +- [Bugfix] Fix crashing installation because of a major release of all Pallets projects. +- [Bugfix] Fix crash in `local quickstart -p` command. +- [Bugfix] Fix 502 error on request to lms with header larger than the maximum uwsgi buffer size. + +## v11.2.8 (2021-04-27) + +- [Bugfix] Fix parsing of YAML-formatted values in ``config save --set KEY=VALUE`` commands, in use for instance with Ecommerce. 
+ +## v11.2.7 (2021-04-23) + +- [Security] Apply security patch [27394](https://github.com/openedx/edx-platform/pull/27394). +- [Feature] Add patches to extend python requirements installation process in openedx and openedx-dev Dockerfiles. +- [Improvement] Apply edx-platform patches during Docker image build using tutor patch 'openedx-dockerfile-git-patches-default'. + +## v11.2.6 (2021-04-09) + +- [Improvement] Upgrade all services to open-release/koa.3. +- [Feature] Make it possible to build the openedx Docker image with a custom openedx-i18n version with the ``--build-arg OPENEDX_I18N_VERSION=custom`` command line argument. + +## v11.2.5 (2021-03-30) + +- [Bugfix] Fix edge case where `PLUGINS` entry is null in config.yml. +- [Bugfix] Fix missing py2neo dependency in `images build openedx` (#411). + +## v11.2.4 (2021-03-17) + +- [Bugfix] Fix "Invalid Request" error during SAML authentication (thanks @Ant1x!). +- [Feature] Add `make pull-base-images` command to update base images. +- [Improvement] Annotate types all over the Tutor code base. +- [Bugfix] Fix parsing of YAML CLI arguments that include equal "=" signs. +- [Bugfix] Fix minor edge case in `long_to_base64` utility function. +- [Improvement] Add openedx patches to add settings during the build process. + +## v11.2.3 (2021-02-20) + +- [Bugfix] Make LMS celery workers actually process LMS tasks and not CMS tasks. + +## v11.2.2 (2021-02-17) + +- [Security] Apply security patch [26592](https://github.com/openedx/edx-platform/pull/26592). + +## v11.2.1 (2021-02-16) + +- [Bugfix] Actually persist Redis data. + +## v11.2.0 (2021-02-10) + +- [Bugfix] Upgrade all services to open-release/koa.2. + +## v11.1.5 (2021-02-09) + +- [Security] Apply security patch [26432](https://github.com/openedx/edx-platform/pull/26432). +- [Bugfix] Print warnings to stderr. + +## v11.1.4 (2021-02-04) + +- [Security] Apply security patch [26358](https://github.com/openedx/edx-platform/pull/26358). 
+ +## v11.1.3 (2021-01-31) + +- [Security] Apply security patch [26253](https://github.com/openedx/edx-platform/pull/26253). + +## v11.1.2 (2021-01-29) + +- [Bugfix] Fix django setting value and static asset loading in openedx-dev image. + +## v11.1.1 (2021-01-20) + +- [Feature] Add a `tutor images build --target=...` argument for [multi-stage Docker builds](https://docs.docker.com/develop/develop-images/multistage-build/). +- [Feature] Create a test version of the openedx-dev Docker image for running edx-platform unit tests. +- [Security] Apply security patch [26112](https://github.com/openedx/edx-platform/pull/26112). +- [Bugfix] Fix `local exec` command which crashed with a `AttributeError`. + +## v11.1.0 (2021-01-13) + +- [Bugfix] Fix "Read-only file system" errors when running `tutor dev runserver` commands on Mac OS (again, see #392). +- [Feature] Automatically bind-mount volumes from the `volumes/` directory with the `--volume=/...` syntax. + +## v11.0.7 (2021-01-11) + +- [Security] Apply security patch [26029](https://github.com/openedx/edx-platform/pull/26029). + +## v11.0.6 (2021-01-05) + +- [Security] Apply security patch [25974](https://github.com/openedx/edx-platform/pull/25974). + +## v11.0.5 (2020-12-30) + +- [Bugfix] Fix "Invalid type for parameter ContentType" error on js upload in Scorm xblock ([openedx-scorm-xblock #16](https://github.com/overhangio/openedx-scorm-xblock/issues/16)). ## v11.0.4 (2020-12-17) -- [Bugfix] Fix "Read-only file system" errors when running `tutor dev runserver` commands on Mac OS. (#392) +- [Bugfix] Fix "Read-only file system" errors when running `tutor dev runserver` commands on Mac OS (#392). ## v11.0.3 (2020-12-15) -- [Bugfix] Fix upload of video transcripts to S3 (again) +- [Bugfix] Fix upload of video transcripts to S3 (again). 
## v11.0.2 (2020-12-12) -- [Bugfix] Fix missing celery tasks from edx-platform (see [upstream PR](https://github.com/edx/edx-platform/pull/25840)) +- [Bugfix] Fix missing celery tasks from edx-platform (see [upstream PR](https://github.com/openedx/edx-platform/pull/25840)). ## v11.0.1 (2020-12-10) -- [Security] Apply security patch [25834](https://github.com/edx/edx-platform/pull/25834) -- [Bugfix] Fix Android apk directory mount path +- [Security] Apply security patch [25834](https://github.com/openedx/edx-platform/pull/25834). +- [Bugfix] Fix Android apk directory mount path. ## v11.0.0 (2020-12-09) -- πŸ’₯[Improvement] Upgrade Open edX to Koa +- πŸ’₯[Improvement] Upgrade Open edX to Koa. - πŸ’₯ Setting changes: - The ``ACTIVATE_HTTPS`` setting was renamed to ``ENABLE_HTTPS``. - Other ``ACTIVATE_*`` variables were all renamed to ``RUN_*``. @@ -31,33 +872,33 @@ Note: Breaking changes between versions are indicated by "πŸ’₯". - The ``NGINX_HTTPS_PORT`` setting is deprecated. - Architectural changes: - Use Caddy as a web proxy for automated SSL/TLS certificate generation: - - Nginx no longer listens to port 443 for https traffic + - Nginx no longer listens to port 443 for HTTPS traffic. - The Caddy configuration file comes with a new ``caddyfile`` patch for much simpler SSL/TLS management. - Configuration files for web proxies are no longer provided. - Kubernetes deployment no longer requires setting up a custom Ingress resource or custom manager. - - Gunicorn and Whitenoise are replaced by uwsgi: this increases boostrap performance and makes it no longer necessary to mount media folders in the Nginx container. - - Replace memcached and rabbitmq by redis. + - Gunicorn and Whitenoise are replaced with uwsgi: this increases bootstrap performance and makes it no longer necessary to mount media folders in the Nginx container. + - Replace Memcached and RabbitMQ with Redis. 
- Additional features: - Make it possible to disable all plugins at once with ``plugins disable all``. - - Add ``tutor k8s wait`` command to wait for a pod to become ready - - Faster, more reliable static assets with local memory caching + - Add ``tutor k8s wait`` command to wait for a pod to become ready. + - Faster, more reliable static assets with local memory caching. - Deprecation: proxy files for Apache and Nginx are no longer provided out of the box. - Removed plugin `{{ patch (...) }}` statements: - "https-create", "k8s-ingress-rules", "k8s-ingress-tls-hosts": these are no longer necessary. Instead, declare your app in the "caddyfile" patch. - - "local-docker-compose-nginx-volumes": this patch was primarily used to serve media assets. The recommended is now to serve assets with uwsgi. + - "local-docker-compose-nginx-volumes": this patch was primarily used to serve media assets. The recommended solution is now to serve assets with uwsgi. ## v10.5.3 (2020-12-09) -- [Security] Apply upstream edx-platform [security patch](https://github.com/edx/edx-platform/pull/25782) +- [Security] Apply upstream edx-platform [security patch](https://github.com/openedx/edx-platform/pull/25782). ## v10.5.2 (2020-12-07) -- [Improvement] Increase the timeout of the gunicorn worker command in openedx Dockerfile +- [Improvement] Increase the timeout of the gunicorn worker command in openedx Dockerfile. ## v10.5.1 (2020-11-30) -- [Bugfix] Fix Dockerfile parsing on Windows -- [Improvement] Add option to patch lms and cms nginx server blocks +- [Bugfix] Fix Dockerfile parsing on Windows. +- [Improvement] Add option to patch lms and cms Nginx server blocks. ## v10.5.0 (2020-11-19) @@ -66,11 +907,11 @@ Note: Breaking changes between versions are indicated by "πŸ’₯". - πŸ’₯[Improvement] Remove the undocumented `local run_hook` command. Instead, run `local init --limit=...`. - πŸ’₯[Improvement] Remove `tutor android pullimage` command. Instead, run `tutor images pull android`. 
- [Bugfix] Fix `config save` on Windows. -- [Bugfix] Fix platform checking in user ID function +- [Bugfix] Fix platform checking in user ID function. ## v10.4.1 (2020-11-11) -- [Bugfix] Fix dependency error during `pip install tutor` due to urllib3 incompatibility +- [Bugfix] Fix dependency error during `pip install tutor` due to urllib3 incompatibility. - [Bugfix] Fix user ID checking under Windows. - [Bugfix] Fix template rendering for Windows users. - [Improvement] Switch to `bcrypt` for htpasswd password generation, for better portability on Windows. @@ -82,536 +923,536 @@ Note: Breaking changes between versions are indicated by "πŸ’₯". curl -fsSL https://overhang.io/tutor/ami/upgrade.sh | sh -- [Improvement] Mount config and setting files in read-only mode +- [Improvement] Mount config and setting files in read-only mode. - πŸ’₯[Improvement] Enable unit completion tracking by default. -- [Bugfix] Run `apt update` before `apt install` when installing deps in the openedx Dockerfile +- [Bugfix] Run `apt update` before `apt install` when installing deps in the openedx Dockerfile. ## v10.3.1 (2020-10-16) -- [Improvement] Make all commands considerably faster -- [Improvement] Make it easier to override Mongodb connection parameters -- [Bugfix] Add support for .woff and .woff2 font files in themes (thanks @mrtndwrd!) +- [Improvement] Make all commands considerably faster. +- [Improvement] Make it easier to override Mongodb connection parameters. +- [Bugfix] Add support for .woff and .woff2 font files in themes (thanks @mrtndwrd!). ## v10.3.0 (2020-10-13) -- πŸ’₯[Improvement] Simplify CORS configuration +- πŸ’₯[Improvement] Simplify CORS configuration. ## v10.2.4 (2020-10-06) -- [Bugfix] Fix Apache proxy configuration when HTTPS is enabled (#370) +- [Bugfix] Fix Apache proxy configuration when HTTPS is enabled (#370). 
## v10.2.3 (2020-10-02) -- [Feature] Add ``images printtag`` command -- [Improvement] Make it possible to override individual contact email addresses with plugins -- [Bugfix] Replace "no-reply@LMS_BASE" email address by regular contact email address -- [Bugfix] Disable learner records globally by default -- [Improvement] Upgrade to the latest release of MySQL 5.6 -- [Improvement] Non-plugin settings added by "set" directives are now automatically removed when the plugin is disabled (#241) +- [Feature] Add ``images printtag`` command. +- [Improvement] Make it possible to override individual contact email addresses with plugins. +- [Bugfix] Replace "no-reply@LMS_BASE" email address by regular contact email address. +- [Bugfix] Disable learner records globally by default. +- [Improvement] Upgrade to the latest release of MySQL 5.6. +- [Improvement] Non-plugin settings added by "set" directives are now automatically removed when the plugin is disabled (#241). ## v10.2.2 (2020-09-05) -- [Improvement] Add CORS basic configuration to LMS for subdomains of the LMS -- [Feature] Add support for `images build --add-host` option (thanks @grinderz!) -- [Bugfix] Fix podman compatibility by replacing `docker-compose rm` command by `docker-compose stop` when stopping containers -- [Improvement] Improve plugin data deletion -- [Improvement] Introduce the `OPENEDX_COMMON_VERSION` setting -- [Bugfix] Make it possible to run init jobs without starting the entire platform -- [Improvement] Reduce "openedx" Docker image size with static asset de-duplication +- [Improvement] Add CORS basic configuration to LMS for subdomains of the LMS. +- [Feature] Add support for `images build --add-host` option (thanks @grinderz!). +- [Bugfix] Fix podman compatibility by replacing `docker-compose rm` command with `docker-compose stop` when stopping containers. +- [Improvement] Improve plugin data deletion. +- [Improvement] Introduce the `OPENEDX_COMMON_VERSION` setting. 
+- [Bugfix] Make it possible to run init jobs without starting the entire platform. +- [Improvement] Reduce "openedx" Docker image size with static asset de-duplication. ## v10.2.1 (2020-08-27) -- [Bugfix] Upgrade all services to open-release/juniper.3 -- [Bugfix] Fix upload of video transcripts to S3 -- [Improvement] Memorize whether the user is running a production platform during interactive configuration +- [Bugfix] Upgrade all services to open-release/juniper.3. +- [Bugfix] Fix upload of video transcripts to S3. +- [Improvement] Memorize whether the user is running a production platform during interactive configuration. ## v10.2.0 (2020-08-16) -- [Bugfix] Fix incorrect loading of some resources from localhost:18000 in development -- [Bugfix] Fix Samesite=None Secure=False cookie error for users accessing the LMS with the latest release of Google Chrome -- [Security] Apply javascript security patch ([pull request](https://github.com/edx/edx-platform/pull/24762)) -- [Bugfix] Fix "FileError" on Scorm package upload in Scorm XBlock -- πŸ’₯[Improvement] Serve openedx static assets with [whitenoise](http://whitenoise.evans.io/en/stable/) instead of nginx. This removes the `k8s-deployments-nginx-init-containers` patch. Plugins are encouraged to implement static asset serving with Whitenoise as well. -- [Bugfix] Fix dependency on mysql service when mysql is not activated -- [Improvement] Improve openedx Docker image build time and size with multi-stage build -- πŸ’₯[Feature] Get rid of outdated sysadmin dashboard in LMS at /sysadmin +- [Bugfix] Fix incorrect loading of some resources from localhost:18000 in development. +- [Bugfix] Fix Samesite=None Secure=False cookie error for users accessing the LMS with the latest release of Google Chrome. +- [Security] Apply javascript security patch ([pull request](https://github.com/openedx/edx-platform/pull/24762)). +- [Bugfix] Fix "FileError" on Scorm package upload in Scorm XBlock. 
+- πŸ’₯[Improvement] Serve openedx static assets with [whitenoise](http://whitenoise.evans.io/en/stable/) instead of Nginx. This removes the `k8s-deployments-nginx-init-containers` patch. Plugins are encouraged to implement static asset serving with Whitenoise as well. +- [Bugfix] Fix dependency on MySQL service when MySQL is not activated. +- [Improvement] Improve openedx Docker image build time and size with the multi-stage build. +- πŸ’₯[Feature] Get rid of outdated sysadmin dashboard in LMS at /sysadmin. ## v10.1.0 (2020-07-23) -- [Security] Apply edx-platform upstream xss security fixes ([pull request](https://github.com/edx/edx-platform/pull/24568)) +- [Security] Apply edx-platform upstream xss security fixes ([pull request](https://github.com/openedx/edx-platform/pull/24568)). - πŸ’₯[Feature] Make it possible to override the docker registry for just a few services by setting `DOCKER_IMAGE_SERVICENAME` values. ## v10.0.11 (2020-07-16) -- [Feature] Upgrade all repositories to open-release/juniper.3 -- [Bugfix] Fix `reload-gunicorn` command -- [Feature] Enable sysadmin dashboard in LMS at /sysadmin +- [Feature] Upgrade all repositories to open-release/juniper.3. +- [Bugfix] Fix `reload-gunicorn` command. +- [Feature] Enable sysadmin dashboard in LMS at /sysadmin. ## v10.0.10 (2020-07-01) -- [Bugfix] Fix pycontracts installation error when building openedx Docker image -- [Bugfix] Fix access to dicussion forum in development mode +- [Bugfix] Fix pycontracts installation error when building openedx Docker image. +- [Bugfix] Fix access to the discussion forum in development mode. ## v10.0.9 (2020-07-01) -- [Bugfix] Share grade download settings between the LMS and the CMS +- [Bugfix] Share grade download settings between the LMS and the CMS. ## v10.0.8 (2020-06-23) -- [Bugfix] Fix android user creation during init -- [Bugfix] Fix undefined settings in k8s scripts, such as `createuser` +- [Bugfix] Fix android user creation during init. 
+- [Bugfix] Fix undefined settings in k8s scripts, such as `createuser`. ## v10.0.7 (2020-06-22) -- [Bugfix] Fix TypeError on viewing xblock -- [Bugfix] Fix authentication in Android mobile application +- [Bugfix] Fix TypeError on viewing xblock. +- [Bugfix] Fix authentication in Android mobile application. ## v10.0.6 (2020-06-22) -- [Bugfix] Fix unsent activation emails and other asynchronous tasks +- [Bugfix] Fix unsent activation emails and other asynchronous tasks. ## v10.0.5 (2020-06-21) -- [Security] Apply edx-platform upstream xss security fixes ([pull request](https://github.com/edx/edx-platform/pull/24258)) +- [Security] Apply edx-platform upstream xss security fixes ([pull request](https://github.com/openedx/edx-platform/pull/24258)). ## v10.0.4 (2020-06-19) -- [Bugfix] Fix broken `tutor ui` command in binary +- [Bugfix] Fix broken `tutor ui` command in binary. ## v10.0.3 (2020-06-19) -- [Bugfix] Fix error on android user creation +- [Bugfix] Fix error on android user creation. ## v10.0.2 (2020-06-17) -- [Bugfix] Fix crash when viewing problem in LMS -- [Bugfix] Fix missing webpack-stats.json in openedx Docker image +- [Bugfix] Fix crash when viewing the problem in LMS. +- [Bugfix] Fix missing webpack-stats.json in openedx Docker image. ## v10.0.1 (2020-06-15) -- [Bugfix] Fix KeyError when running ``local quickstart`` for the first time +- [Bugfix] Fix KeyError when running ``local quickstart`` for the first time. ## v10.0.0 (2020-06-15) -- πŸ’₯[Improvement] Upgrade to Juniper 🍾 -- [Bugfix] Fix nginx resolver address to address container restarts -- [Feature] Add `--limit=myplugin` option to `init` commands to limit execution of initialisation to certain services and plugins +- πŸ’₯[Improvement] Upgrade to Juniper 🍾. +- [Bugfix] Fix Nginx resolver address to address container restarts. +- [Feature] Add `--limit=myplugin` option to `init` commands to limit the execution of initialisation to certain services and plugins. 
## v3.12.6 (2020-06-01) -- [Improvement] Add `dig`, `ping` utilities to openedx-dev Docker image -- [Bugfix] Resolve "Can't connect to MySQL server" on init -- [Improvement] Make it possible to customize the MySQL root username, for connecting to external MySQL databases +- [Improvement] Add `dig`, `ping` utilities to openedx-dev Docker image. +- [Bugfix] Resolve "Can't connect to MySQL server" on init. +- [Improvement] Make it possible to customise the MySQL root username, for connecting to external MySQL databases. ## v3.12.5 (2020-05-20) -- [Improvement] Upgrade Android app to v2.21.1 and enable many features, such as downloading videos to SD card. Thanks for the help @ejklock! -- [Bugfix] Fix Android app crash when accessing course +- [Improvement] Upgrade Android app to v2.21.1 and enable many features, such as downloading videos to SD card. Thanks for the help @ejklock!. +- [Bugfix] Fix Android app crash when accessing the course. ## v3.12.4 (2020-05-18) -- [Improvement] Add ability to rescore SCORM units -- [Bugfix] Fix scoring of graded SCORM units +- [Improvement] Add ability to rescore SCORM units. +- [Bugfix] Fix scoring of graded SCORM units. - [Improvement] Increase maximum uploaded file size in the CMS from 10 to 100 Mb. ## v3.12.3 (2020-05-05) -- [Security] Apply most recent edx-platform [security patches](https://discuss.openedx.org/t/security-patch-for-edit-chapter-xss-lint-issues/2030) +- [Security] Apply most recent edx-platform [security patches](https://discuss.openedx.org/t/security-patch-for-edit-chapter-xss-lint-issues/2030). ## v3.12.2 (2020-04-29) -- [Bugfix] Fix oauth2 authentication with 3rd-party services, such as discovery -- [Bugfix] Upgrade scorm xblock to solve caching issue +- [Bugfix] Fix oauth2 authentication with 3rd-party services, such as discovery. +- [Bugfix] Upgrade scorm xblock to solve caching issue. 
## v3.12.1 (2020-04-27) -- [Improvement] Increase max upload allowed size to 250Mb in CMS +- [Improvement] Increase max upload allowed size to 250Mb in CMS. ## v3.12.0 (2020-04-26) -- πŸ’₯[Improvement] Do not deploy an ingress or SSL/TLS certificate issuer ressource by default in Kubernetes -- [Improvement] Fix tls certificate generation in k8s +- πŸ’₯[Improvement] Do not deploy an ingress or SSL/TLS certificate issuer resource by default in Kubernetes. +- [Improvement] Fix tls certificate generation in k8s. - πŸ’₯[Improvement] Radically change the way jobs are run: we no longer "exec", but instead run a dedicated container. -- πŸ’₯[Improvement] Upgrade k8s certificate issuer to cert-manager.io/v1alpha2 -- [Feature] Add SCORM XBlock to default openedx docker image +- πŸ’₯[Improvement] Upgrade k8s certificate issuer to cert-manager.io/v1alpha2. +- [Feature] Add SCORM XBlock to default openedx docker image. ## v3.11.12 (2020-04-16) -- [Feature] Make it easy to add custom translation strings to the openedx Docker image -- [Improvement] Make it possible to rely on a different npm registry for faster image building +- [Feature] Make it easy to add custom translation strings to the openedx Docker image. +- [Improvement] Make it possible to rely on a different npm registry for faster image building. ## v3.11.11 (2020-04-15) -- [Bugfix] Make sure all emails (including "password reset") are properly saved to a local file in development mode (#315) -- [Improvement] Add `openedx-development-settings` patch to patch the LMS and the CMS simultaneously in development -- [Bugfix] Fix missing celery tasks in the CMS +- [Bugfix] Make sure all emails (including "password reset") are properly saved to a local file in development mode (#315). +- [Improvement] Add `openedx-development-settings` patch to patch the LMS and the CMS simultaneously in development. +- [Bugfix] Fix missing celery tasks in the CMS. 
## v3.11.10 (2020-04-14) -- [Bugfix] Fix updates to the course structure in the LMS (#302) +- [Bugfix] Fix updates to the course structure in the LMS (#302). ## v3.11.9 (2020-04-07) -- [Bugfix] Fix Android app build for domain names that include a dash ("-") +- [Bugfix] Fix Android app build for domain names that include a dash ("-"). ## v3.11.8 (2020-04-06) -- [Feature] Add `encrypt` template filter to conveniently add htpasswd-based authentication to nginx -- [Bugfix] Fix "missing tty" during init in cron jobs +- [Feature] Add `encrypt` template filter to conveniently add htpasswd-based authentication to Nginx. +- [Bugfix] Fix "missing tty" during init in cron jobs. ## v3.11.7 (2020-04-01) -- [Bugfix] Fix missing js translations -- [Bugfix] Fix tls certificate generation in cron jobs +- [Bugfix] Fix missing js translations. +- [Bugfix] Fix tls certificate generation in cron jobs. ## v3.11.6 (2020-03-13) -- [Bugfix] Fix "Unable to resolve dependency" error during forum initialisation -- [Feature] Add `settheme` command to easily assign a theme to a domain name -- [Improvement] Modify nginx access logs to include request scheme and server name (plugin developers should use the "tutor" log format) -- [Bugfix] Fix DNS resolution of restarted service -- [Feature] Restart multiple services with `local restart` -- [Feature] Make it possible to easily reload openedx gunicorn process with `tutor local exec lms reload-gunicorn` -- [Improvement] Rename lms/cms_worker to lms/cms-worker in local deployment -- [Improvement] Add the management plugin to the rabbitmq container -- [Improvement] Make it possible to run an Elasticsearch service on https +- [Bugfix] Fix "Unable to resolve dependency" error during forum initialisation. +- [Feature] Add `settheme` command to easily assign a theme to a domain name. +- [Improvement] Modify Nginx access logs to include request scheme and server name (plugin developers should use the "tutor" log format). 
+- [Bugfix] Fix DNS resolution of restarted service. +- [Feature] Restart multiple services with `local restart`. +- [Feature] Make it possible to easily reload the openedx gunicorn process with `tutor local exec lms reload-gunicorn`. +- [Improvement] Rename lms/cms_worker to lms/cms-worker in local deployment. +- [Improvement] Add the management plugin to the RabbitMQ container. +- [Improvement] Make it possible to run an Elasticsearch service on HTTPS. ## v3.11.5 (2020-02-27) -- [Improvement] Switch edx-platform from open-release/ironwood.2 tag to the open-release/ironwood.master branch -- [Security] Upgrade django to 1.11.28 -- [Improvement] Make it possible to configure the elasticsearch heap size -- [Bugfix] Fix broken elasticsearch environment variables +- [Improvement] Switch edx-platform from open-release/ironwood.2 tag to the open-release/ironwood.master branch. +- [Security] Upgrade django to 1.11.28. +- [Improvement] Make it possible to configure the Elasticsearch heap size. +- [Bugfix] Fix broken Elasticsearch environment variables. - [Improvement] Restore more recent Android app version (#289). ## v3.11.4 (2020-02-16) -- [Bugfix] Fix auth problem in Android app (#289) +- [Bugfix] Fix auth problem in Android app (#289). -## 3.11.3 (2020-01-21) +## v3.11.3 (2020-01-21) -- [Bugfix] Fix incorrectly parsed empty strings for `config save --set ...=...` commands +- [Bugfix] Fix incorrectly parsed empty strings for `config save --set ...=...` commands. -## 3.11.2 (2020-01-17) +## v3.11.2 (2020-01-17) -- [Bugfix] Make sure `docker-compose.override.yml` are loaded in dev and local contexts +- [Bugfix] Make sure `docker-compose.override.yml` is loaded in dev and local contexts. -## 3.11.1 (2020-01-16) +## v3.11.1 (2020-01-16) -- [Feature] Add `config render` command +- [Feature] Add `config render` command. 
-## 3.11.0 (2020-01-14) +## v3.11.0 (2020-01-14) -- [Feature] Add support for simple, YAML-based plugins -- πŸ’₯[Improvement] The output of `plugins list` now includes plugin version numbers +- [Feature] Add support for simple, YAML-based plugins. +- πŸ’₯[Improvement] The output of `plugins list` now includes plugin version numbers. -## 3.10.1 (2020-01-13) +## v3.10.1 (2020-01-13) -- [Improvement] Explicitely point to docker.io images, when necessary, for [podman](https://podman.io/) compatibility +- [Improvement] Explicitly point to docker.io images, when necessary, for [podman](https://podman.io/) compatibility. -## 3.10.0 (2020-01-10) +## v3.10.0 (2020-01-10) -- [Bugfix] Fix oauth authentication in dev mode -- [Improvement] Upgrade to the 3.7 docker-compose syntax -- [Improvement] The `dev runserver` command can now be run for just any service -- πŸ’₯[Feature] `dev run/exec` commands now support generic options which are passed to docker-compose. Consequently, defining the `TUTOR_EDX_PLATFORM_PATH` environment variable no longer works. Instead, users are encouraged to explicitely pass the `-v` option, define a command alias or create a `docker-compose.override.yml` file. +- [Bugfix] Fix OAuth authentication in dev mode. +- [Improvement] Upgrade to the 3.7 docker-compose syntax. +- [Improvement] The `dev runserver` command can now be run for just any service. +- πŸ’₯[Feature] `dev run/exec` commands now support generic options which are passed to docker-compose. Consequently, defining the `TUTOR_EDX_PLATFORM_PATH` environment variable no longer works. Instead, users are encouraged to explicitly pass the `-v` option, define a command alias or create a `docker-compose.override.yml` file. 
-## 3.9.1 (2020-01-08) +## v3.9.1 (2020-01-08) -- [Improvement] Make it possible to override the project name in development mode -- [Bugfix] Fix user switching in development mode -- [Bugfix] Fix "k8s quickstart" crash +- [Improvement] Make it possible to override the project name in development mode. +- [Bugfix] Fix user switching in development mode. +- [Bugfix] Fix "k8s quickstart" crash. -## 3.9.0 (2019-12-21) +## v3.9.0 (2019-12-21) -- [Security] Apply django 1.11.21 -> 1.11.27 security patches -- [Bugfix] Fix running dev image with `sudo` -- [Improvement] Add `cms/lms-env-features` patches (#276) -- [Feature] Add plugin subcommands -- πŸ’₯[Improvement] Move ``-r/--root`` option to parent command level -- [Bugfix] Fix course about page visibility -- [Improvement] Print gunicorn access logs in the console -- πŸ’₯[Improvement] Get rid of the `indexcourses` and `portainer` command (#269) +- [Security] Apply django 1.11.21 -> 1.11.27 security patches. +- [Bugfix] Fix running dev image with `sudo`. +- [Improvement] Add `cms/lms-env-features` patches (#276). +- [Feature] Add plugin subcommands. +- πŸ’₯[Improvement] Move ``-r/--root`` option to parent command level. +- [Bugfix] Fix course about page visibility. +- [Improvement] Print gunicorn access logs in the console. +- πŸ’₯[Improvement] Get rid of the `indexcourses` and `portainer` command (#269). -## 3.8.0 (2019-11-22) +## v3.8.0 (2019-11-22) -- [Improvement] Add `k8s-deployments-nginx-volume-mounts` patch -- [Bugfix] Fix running forum locally when both elasticsearch and mongodb are not activated (#266) -- [Bugfix] Fix MongoDb url in forum when running separate service (#267) +- [Improvement] Add `k8s-deployments-nginx-volume-mounts` patch. +- [Bugfix] Fix running forum locally when both Elasticsearch and MongoDB are not activated (#266). +- [Bugfix] Fix MongoDB URL in the forum when running a separate service (#267). - πŸ’₯[Improvement] Better `dev` commands, with dedicated development docker image. 
One of the consequences is that the `dev watchthemes` command is replaced by `dev run lms watchthemes`. -- [Improvement] `images` commands now accept multiple `image` arguments +- [Improvement] `images` commands now accept multiple `image` arguments. -## 3.7.4 (2019-10-19) +## v3.7.4 (2019-10-19) -- [Bugfix] Fix missing requirements file in pypi package (#261) -- [Improvement] Add missing cms/lms production/development setting patches -- [Improvement] Allow SigV4 authentication for video upload to S3 -- [Bugfix] Fix cms development settings +- [Bugfix] Fix missing requirements file in PyPI package (#261). +- [Improvement] Add missing cms/lms production/development setting patches. +- [Improvement] Allow SigV4 authentication for video upload to S3. +- [Bugfix] Fix cms development settings. -## 3.7.3 (2019-10-17) +## v3.7.3 (2019-10-17) -- [Improvement] Upgrade openedx locales to Ironwood +- [Improvement] Upgrade openedx locales to Ironwood. -## 3.7.2 (2019-10-09) +## v3.7.2 (2019-10-09) -- [Improvement] Properly set studio title (#246) -- [Improvement] Enable Mysql strict mode +- [Improvement] Properly set studio title (#246). +- [Improvement] Enable Mysql strict mode. -## 3.7.1 (2019-10-06) +## v3.7.1 (2019-10-06) -- [Feature] Introduce tutor docker image +- [Feature] Introduce tutor docker image. - [Feature] Introduce `local hook` command for plugin development. -- [Bugfix] Persist `private.txt` file between two runs of `config save`. (#247) +- [Bugfix] Persist `private.txt` file between two runs of `config save` (#247). - [Improvement] Added configuration values to limit the number of gunicorn workers for the LMS and CMS. -## 3.7.0 (2019-09-03) +## v3.7.0 (2019-09-03) -- πŸ’₯[Improvement] Get rid of mysql-client container -- [Improvement] Add "local-docker-compose-lms/cms-dependencies" plugin patches -- [Improvement] Use "exec" instead of "run" to initialise local platform +- πŸ’₯[Improvement] Get rid of mysql-client container. 
+- [Improvement] Add "local-docker-compose-lms/cms-dependencies" plugin patches. +- [Improvement] Use "exec" instead of "run" to initialise local platform. -## 3.6.3 (2019-08-31) +## v3.6.3 (2019-08-31) -- [Security] Fix CustomTagModule mako template injection -- [Improvement] Move all plugins outside of the tutor repo -- [Bugfix/Improvement] Add all plugins (with data) into binary bundle (#242) +- [Security] Fix CustomTagModule mako template injection. +- [Improvement] Move all plugins outside of the tutor repo. +- [Bugfix/Improvement] Add all plugins (with data) into binary bundle (#242). -## 3.6.2 (2019-08-07) +## v3.6.2 (2019-08-07) -- [Bugfix] Fix missing templates in bundled plugins -- [Bugfix] Enable html certificate view +- [Bugfix] Fix missing templates in bundled plugins. +- [Bugfix] Enable HTML certificate view. -## 3.6.1 (2019-07-27) +## v3.6.1 (2019-07-27) -- [Bugfix] Fix missing patches from minio plugin (thanks @Wejie!) +- [Bugfix] Fix missing patches from minio plugin (thanks @Wejie!). 
-## 3.6.0 (2019-07-11) +## v3.6.0 (2019-07-11) -- [Feature] Modify ``createuser`` commands to define a password from the command line -- [Improvement] Better yaml value parsing from command line -- [Feature] Add `dev exec` command -- [Bugfix] Fix incorrect notes settings definition -- [Improvement] Make it possible to start/stop/reboot a selection of services -- [Improvement] Add `local/k8s reboot` commands -- [Improvement] Add `-U/--unset` option to `config save` -- [Bugfix] Fix insecure static asset loading when web proxy is enabled -- [Improvement] Rename `SECRET_KEY` configuration parameter to `OPENEDX_SECRET_KEY` -- [Improvement] Add support for SSL and TLS in external SMTP server (#231) -- [Bugfix] Fix missing video transcripts in LMS (#229) -- [Improvement] Make it possible to enable/disable multiple plugins at once -- [Improvement] Add a few local and k8s patches for plugins +- [Feature] Modify ``createuser`` commands to define a password from the command line. +- [Improvement] Better YAML value parsing from the command line. +- [Feature] Add `dev exec` command. +- [Bugfix] Fix incorrect notes settings definition. +- [Improvement] Make it possible to start/stop/reboot a selection of services. +- [Improvement] Add `local/k8s reboot` commands. +- [Improvement] Add `-U/--unset` option to `config save`. +- [Bugfix] Fix insecure static asset loading when web proxy is enabled. +- [Improvement] Rename `SECRET_KEY` configuration parameter to `OPENEDX_SECRET_KEY`. +- [Improvement] Add support for SSL and TLS in external SMTP server (#231). +- [Bugfix] Fix missing video transcripts in LMS (#229). +- [Improvement] Make it possible to enable/disable multiple plugins at once. +- [Improvement] Add a few local and k8s patches for plugins. -## 3.5.3 (2019-07-05) +## v3.5.3 (2019-07-05) -- [Bugfix] Add pre-init hook for correct initialisation of minio +- [Bugfix] Add pre-init hook for correct initialisation of minio. 
-## 3.5.2 (2019-07-05) +## v3.5.2 (2019-07-05) -- [Security] Apply certificate XSS security patch +- [Security] Apply certificate XSS security patch. -## 3.5.1 (2019-07-04) +## v3.5.1 (2019-07-04) -- [Feature] Make it possible for plugins to patch the build templates -- [Improvement] Move Xqueue and Student notes to a dedicated plugin +- [Feature] Make it possible for plugins to patch the build templates. +- [Improvement] Move Xqueue and Student notes to a dedicated plugin. -## 3.4.3 (2019-06-24) +## v3.4.3 (2019-06-24) -- [Bugfix] Fix missing password values from generated configuration +- [Bugfix] Fix missing password values from generated configuration. -## 3.4.2 (2019-06-23) +## v3.4.2 (2019-06-23) -- [Bugfix] Fix incorrect settings during lms/cms init (#224) +- [Bugfix] Fix incorrect settings during lms/cms init (#224). -## 3.4.1 (2019-06-23) +## v3.4.1 (2019-06-23) -- [Bugfix] Fix install from pypi -- [Improvement] Get rid of kubernetes python package dependency +- [Bugfix] Fix install from PyPI. +- [Improvement] Get rid of Kubernetes python package dependency. -## 3.4.0 (2019-06-17) +## v3.4.0 (2019-06-17) -- [Feature] Creation of a plugin system -- [Feature] Kubernetes support out of beta -- [Improvement] Switch to pinned image tags for easier upgrades +- [Feature] Creation of a plugin system. +- [Feature] Kubernetes support out of beta. +- [Improvement] Switch to pinned image tags for easier upgrades. - πŸ’₯[Improvement] Remove the `-y/--yes` option: `tutor config save` is now non-interactive by default. Use `-i/--interactive` to force interactive mode. -- πŸ’₯[Improvement] Replace the `databases` command by `init`. 
-- [Improvement] Upgrade to ironwood.2 -- [Improvement] Add `-y/--yes` option to `local quickstart` for non-interactive quickstart -- [Improvement] Persist LMS/CMS logs to disk by default (with collaboration from @silviot πŸ’ͺ) -- [Bugfix] Fix installing a locally cloned requirement repository -- [Improvement] Add `--no-cache` option to `images build` -- [Improvement] Make it possible to configure the notes service hostname -- [Improvement] Better, more robust MySQL initialisation +- πŸ’₯[Improvement] Replace the `databases` command with `init`. +- [Improvement] Upgrade to ironwood.2. +- [Improvement] Add `-y/--yes` option to `local quickstart` for non-interactive quickstart. +- [Improvement] Persist LMS/CMS logs to disk by default (with collaboration from @silviot πŸ’ͺ). +- [Bugfix] Fix installing a locally cloned requirement repository. +- [Improvement] Add `--no-cache` option to `images build`. +- [Improvement] Make it possible to configure the notes service hostname. +- [Improvement] Better, more robust MySQL initialisation. -## 3.3.10 (2019-05-15) +## v3.3.10 (2019-05-15) -- [Bugfix] Fix boolean configuration choices +- [Bugfix] Fix boolean configuration choices. -## 3.3.9 (2019-05-13) -- [Improvement] Add `local exec` command for running commands inside existing containers -- [Bugfix] Fix triple display of courses in LMS search (upstream patch, #189) +## v3.3.9 (2019-05-13) +- [Improvement] Add `local exec` command for running commands inside existing containers. +- [Bugfix] Fix triple display of courses in LMS search (upstream patch, #189). -## 3.3.8 (2019-04-28) +## v3.3.8 (2019-04-28) -- [Bugfix] Reduce the number of gunicorn workers to address RAM usage (#207) +- [Bugfix] Reduce the number of gunicorn workers to address RAM usage (#207). -## 3.3.7 (2019-04-28) +## v3.3.7 (2019-04-28) -- [Bugfix] Fix "Timeout after 20s" on running migrations +- [Bugfix] Fix "Timeout after 20s" on running migrations. 
-## 3.3.6 (2019-04-27) +## v3.3.6 (2019-04-27) -- [Bugfix] Fix KeyError on first quickstart -- [Improvement] De-duplication of prod/dev settings. Thanks @silviot! 😺 +- [Bugfix] Fix KeyError on the first quickstart. +- [Improvement] De-duplication of prod/dev settings. Thanks, @silviot! 😺. -## 3.3.5 (2019-04-22) +## v3.3.5 (2019-04-22) -- [Feature] Pluggable LMS/CMS/forum -- [Improvement] Safer environment overwrite. Thanks @silviot! πŸ‘ -- [Security] Fix Jinja2 vulnerability -- [Improvement] Improve CLI cold start performance -- [Improvement] Allow uppercase "Y" and "N" as answers to boolean questions +- [Feature] Pluggable LMS/CMS/forum. +- [Improvement] Safer environment overwrite. Thanks, @silviot! πŸ‘. +- [Security] Fix Jinja2 vulnerability. +- [Improvement] Improve CLI cold start performance. +- [Improvement] Allow uppercase "Y" and "N" as answers to boolean questions. -## 3.3.4 (2019-04-09) +## v3.3.4 (2019-04-09) -- [Improvement] Rename `--silent` option to `-y/--yes` -- [Bugfix] Fix (again) login from studio when https is activated (#193) +- [Improvement] Rename `--silent` option to `-y/--yes`. +- [Bugfix] Fix (again) login from studio when HTTPS is activated (#193). -## 3.3.3 (2019-03-29) +## v3.3.3 (2019-03-29) -- [Bugfix] Fix `pip install tutor-openedx` +- [Bugfix] Fix `pip install tutor-openedx`. -## 3.3.2 (2019-03-27) +## v3.3.2 (2019-03-27) -- [Bugfix] Fix login from localhost -- [Bugfix] Fix Android app image build +- [Bugfix] Fix login from localhost. +- [Bugfix] Fix Android app image build. -## 3.3.1 (2019-03-25) +## v3.3.1 (2019-03-25) -- [Improvement] Improve assets building for development -- [Improvement] Support CMS login when the CMS is not a subdomain of the LMS +- [Improvement] Improve assets building for development. +- [Improvement] Support CMS login when the CMS is not a subdomain of the LMS. -## 3.3.0 (2019-03-22) +## v3.3.0 (2019-03-22) -- [Feature] Upgrade from Hawthorn to Ironwood +- [Feature] Upgrade from Hawthorn to Ironwood. 
-## 3.2.2 (2019-03-21) +## v3.2.2 (2019-03-21) -- [Feature] Multiple platforms on a single server \o/ -- [Feature] Easily configure web proxy on the host -- [Bugfix] Fix `images pull all` command which failed on "all" image -- [Improvement] Add configurable mongodb, SMTP and rabbitmq authentication -- [Improvement] Harmonize mysql username/password configuration parameters -- [Feature] Configurable and pluggable data storage backends (#114) +- [Feature] Multiple platforms on a single server \o/. +- [Feature] Easily configure web proxy on the host. +- [Bugfix] Fix `images pull all` command which failed on "all" image. +- [Improvement] Add configurable MongoDB, SMTP and RabbitMQ authentication. +- [Improvement] Harmonize MySQL username/password configuration parameters. +- [Feature] Configurable and pluggable data storage backends (#114). -## 3.2.1 (2019-03-19) +## v3.2.1 (2019-03-19) -- [Feature] Enable grade downloads by default (#143) -- [Improvement] Remove orphan containers on `local start` +- [Feature] Enable grade downloads by default (#143). +- [Improvement] Remove orphan containers on `local start`. -## 3.2.0 (2019-03-18) +## v3.2.0 (2019-03-18) -- [Improvement] `images pull` now also pulls vendor images -- [Feature] Add convenient `config printvalue` command -- [Feature] Customize docker registry -- [Feature] Load configuration parameters from the system environment -- [Improvement] Automatic environment re-generation after re-configuration -- [Improvement] Error and interrupt handling in UI and web UI -- [Bugfix] Fix missing webui env directory +- [Improvement] `images pull` now also pulls vendor images. +- [Feature] Add convenient `config printvalue` command. +- [Feature] Customise docker registry. +- [Feature] Load configuration parameters from the system environment. +- [Improvement] Automatic environment re-generation after re-configuration. +- [Improvement] Error and interrupt handling in UI and web UI. +- [Bugfix] Fix missing webui env directory. 
-## 3.1.0 (2019-03-09) +## v3.1.0 (2019-03-09) -- [Improvement] Install python requirements in virtual env in docker image -- [Bugfix] Add missing volume for theme development -- [Improvement] Rename "config [non]interactive" command to "config save [--silent]" -- [Improvement] More explicit logging during environment generation -- [Improvement] Configurable docker images (#122) -- [Bugfix] Fix "android pullimage" command -- [Improvement] Do not upgrade images as part of quickstart -- [Bugfix] Fix USERID setup in development mode and various dev-related docs (#177) +- [Improvement] Install python requirements in virtual env in docker image. +- [Bugfix] Add missing volume for theme development. +- [Improvement] Rename "config [non]interactive" command to "config save [--silent]". +- [Improvement] More explicit logging during environment generation. +- [Improvement] Configurable docker images (#122). +- [Bugfix] Fix "android pullimage" command. +- [Improvement] Do not upgrade images as part of quickstart. +- [Bugfix] Fix USERID setup in development mode and various dev-related docs (#177). -## 3.0.6 (2019-02-26) +## v3.0.6 (2019-02-26) -- [Bugfix] Fix import/export of demo course (#175) +- [Bugfix] Fix import/export of demo course (#175). -## 3.0.5 (2019-02-14) +## v3.0.5 (2019-02-14) -- [Feature] Add cloud deployment script -- [Improvement] Run `images pull` command -- [Improvement] Run `indexcourses` on importing demo course -- [Improvement] Add `runserver stop` command +- [Feature] Add cloud deployment script. +- [Improvement] Run `images pull` command. +- [Improvement] Run `indexcourses` on importing demo course. +- [Improvement] Add `runserver stop` command. -## 3.0.4 (2019-02-13) +## v3.0.4 (2019-02-13) -- [Minor] Minimum required `click` version is 7.0 (#171) -- [Bugfix] Fix `runserver` dev command (#172) -- [Minor] Fix non-https link to documentation in pypi -- [Minor] Fix `createuser` documentation +- [Minor] Minimum required `click` version is 7.0 (#171). 
+- [Bugfix] Fix `runserver` dev command (#172). +- [Minor] Fix non-https link to documentation in PyPI. +- [Minor] Fix `createuser` documentation. -## 3.0.3 (2019-02-12) +## v3.0.3 (2019-02-12) -- [Bugfix] Add missing template data to pypi package -- [Bugfix] Fix quickstart on Kubernetes (#164) -- [Improvement] Add datatases task to Kubernetes quickstart (#167) +- [Bugfix] Add missing template data to the PyPI package. +- [Bugfix] Fix quickstart on Kubernetes (#164). +- [Improvement] Add databases task to Kubernetes quickstart (#167). -## 3.0.2 (2019-02-12) - -- [Bugfix] Fix import paths -- πŸš€ thanks @silviot! -- [Bugfix] Properly set docker project name in mysql logs -- 🦊 thanks again @silviot! - -## 3.0.1 (2019-02-11) - -- [Bugfix] fix mysql initialisation (#159, #160) -- [Improvement] Better handling of continuous integration -- [Bugfix] fix `tutor --version` (#156) -- [Improvement] Absolute settings imports -- πŸ“― thanks @tonytan4ever! - -## 3.0.0 (2019-02-09) - -- [Improvement] Complete rewrite of Tutor: switch from a make-based project to a single binary which runs all commands. -- [Feature] An web user interface can be created with `tutor webui start` -- [Bugfix] Add missing elasticsearch to Kubernetes deployment (#147) -- [Improvement] Upload `tutor-openedx` to pypi - -## Older changes - -- 2019-01-27 [Bugfix] Fix video transcript/srt upload and download of user-uploaded files. Thanks @dannielariola! 
-- 2019-01-20 [Improvement] Make it easy to load custom settings for the local production install -- 2019-01-19 [Improvement] Upgrade to Ironwood -- 2019-01-16 [Improvement] Switch license from MIT to AGPL -- 2019-01-04 [Bugfix] Fix xqueue consumer command -- 2018-12-26 [Improvement] Upgrade nodejs to 5.5.1 -- 2018-12-07 [Improvement] Bundle theme and production static assets in the openedx docker image -- 2018-12-02 [Feature] Download extra locales from [openedx-i18n](https://github.com/openedx/openedx-i18n/) to the Open edX Docker image -- 2018-11-28 [Feature] Easily change openedx docker image -- 2018-11-28 [Feature] Enable comprehensive theming! -- 2018-11-28 [Improvement] Get rid of datadog -- 2018-11-28 [Improvement] Upgrade docker images to ubuntu 18.04 for android, forum, notes, xqueue -- 2018-11-28 [Feature] Make it possible to define default platform language interactively -- 2018-11-26 [Improvement] Make it easier to run a forked version of edx-platform -- 2018-11-25 [Feature] Use local filesystem for open assessment file upload -- 2018-11-23 [Improvement] Faster container bootstrapping without "chmod", as suggested by @silviot -- 2018-11-20 [Bugfix] Fix cross-platform theme assets generation -- 2018-11-17 [Improvement] Custom nginx port mapping. :crossed_swords: @frob @frohro -- 2018-11-17 [Improvement] Add "make restart-openedx" command. :+1: @frob -- 2018-11-13 [Improvement] Facilitate install of extra XBlocks. Thanks @frob! 
-- 2018-10-30 [Bugfix] Fix rabbitmq restart policy -- 2018-10-03 [Improvement/Bugfix] Fix and accelerate Android application build -- 2018-10-02 [Improvement] Bump Open edX version to hawthorn.2 -- 2018-09-30 [Bugfix] Fix CMS celery worker, including export tasks -- 2018-09-30 [Improvement] Simplify boolean feature flags definition -- 2018-09-29 [Improvement] Add logging commands -- 2018-09-29 [Improvement] Add self-documented help with "make help" -- 2018-09-29 [Feature] Add [Portainer](https://portainer.io) as an optional web UI to administer docker containers -- 2018-09-15 [Feature] Add student notes as an optional feature -- 2018-09-15 [Feature] Add templates to configurator container, which can now be run separately -- 2018-09-15 [Improvement] Rename "up" and "daemon" commands to "run" and "daemonize" -- 2018-09-15 [Feature] Activate course search and discovery -- 2018-09-15 [Bugfix] Deduplicate console logs from lms/cms -- 2018-09-05 [Improvement] Use a single email address for all inbound email -- 2018-09-04 [Bugfix] Get make commands to work with 'sudo' -- 2018-09-02 [Bugfix] Get HTTPS to work for CMS. Thanks @flytreeleft! -- 2018-08-28 [Bugfix] Fix certbot image updating -- 2018-08-27 [Improvement] Add development requirements to openedx image -- 2018-08-27 [Bugfix] Upgrade mongodb -- 2018-08-19 [Improvement] Make Xqueue an optional feature -- 2018-08-16 [Feature] Add HTTPS support +## v3.0.2 (2019-02-12) + +- [Bugfix] Fix import paths -- πŸš€ thanks @silviot!. +- [Bugfix] Properly set docker project name in MySQL logs -- 🦊 thanks again @silviot!. + +## v3.0.1 (2019-02-11) + +- [Bugfix] fix MySQL initialisation (#159, #160). +- [Improvement] Better handling of continuous integration. +- [Bugfix] fix `tutor --version` (#156). +- [Improvement] Absolute settings imports -- πŸ“― thanks @tonytan4ever!. + +## v3.0.0 (2019-02-09) + +- [Improvement] Complete rewrite of Tutor: switch from a make-based project to a single binary that runs all commands. 
+- [Feature] A web user interface can be created with `tutor webui start`. +- [Bugfix] Add missing Elasticsearch to Kubernetes deployment (#147). +- [Improvement] Upload `tutor-openedx` to PyPI. + +## Older changes (< 3.0.0) + +- 2019-01-27 [Bugfix] Fix video transcript/srt upload and download of user-uploaded files. Thanks @dannielariola! +- 2019-01-20 [Improvement] Make it easy to load custom settings for the local production install. +- 2019-01-19 [Improvement] Upgrade to Ironwood. +- 2019-01-16 [Improvement] Switch license from MIT to AGPL. +- 2019-01-04 [Bugfix] Fix xqueue consumer command. +- 2018-12-26 [Improvement] Upgrade nodejs to 5.5.1. +- 2018-12-07 [Improvement] Bundle theme and production static assets in the openedx docker image. +- 2018-12-02 [Feature] Download extra locales from [openedx-i18n](https://github.com/openedx/openedx-i18n/) to the Open edX Docker image. +- 2018-11-28 [Feature] Easily change openedx docker image. +- 2018-11-28 [Feature] Enable comprehensive theming! +- 2018-11-28 [Improvement] Get rid of datadog. +- 2018-11-28 [Improvement] Upgrade docker images to ubuntu 18.04 for android, forum, notes, xqueue. +- 2018-11-28 [Feature] Make it possible to define default platform language interactively. +- 2018-11-26 [Improvement] Make it easier to run a forked version of edx-platform. +- 2018-11-25 [Feature] Use local filesystem for open assessment file upload. +- 2018-11-23 [Improvement] Faster container bootstrapping without "chmod", as suggested by @silviot. +- 2018-11-20 [Bugfix] Fix cross-platform theme assets generation. +- 2018-11-17 [Improvement] Custom Nginx port mapping. :crossed_swords: @frob @frohro. +- 2018-11-17 [Improvement] Add "make restart-openedx" command. :+1: @frob. +- 2018-11-13 [Improvement] Facilitate install of extra XBlocks. Thanks @frob! +- 2018-10-30 [Bugfix] Fix RabbitMQ restart policy. +- 2018-10-03 [Improvement/Bugfix] Fix and accelerate Android application build. 
+- 2018-10-02 [Improvement] Bump Open edX version to hawthorn.2. +- 2018-09-30 [Bugfix] Fix CMS celery worker, including export tasks. +- 2018-09-30 [Improvement] Simplify boolean feature flags definition. +- 2018-09-29 [Improvement] Add logging commands. +- 2018-09-29 [Improvement] Add self-documented help with "make help". +- 2018-09-29 [Feature] Add [Portainer](https://portainer.io) as an optional web UI to administer docker containers. +- 2018-09-15 [Feature] Add student notes as an optional feature. +- 2018-09-15 [Feature] Add templates to configurator container, which can now be run separately. +- 2018-09-15 [Improvement] Rename "up" and "daemon" commands to "run" and "daemonize". +- 2018-09-15 [Feature] Activate course search and discovery. +- 2018-09-15 [Bugfix] Deduplicate console logs from lms/cms. +- 2018-09-05 [Improvement] Use a single email address for all inbound email. +- 2018-09-04 [Bugfix] Get make commands to work with 'sudo'. +- 2018-09-02 [Bugfix] Get HTTPS to work for CMS. Thanks @flytreeleft! +- 2018-08-28 [Bugfix] Fix certbot image updating. +- 2018-08-27 [Improvement] Add development requirements to openedx image. +- 2018-08-27 [Bugfix] Upgrade MongoDB. +- 2018-08-19 [Improvement] Make Xqueue an optional feature. +- 2018-08-16 [Feature] Add HTTPS support. diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index ad357f5185..671a050b6d 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -1,4 +1,4 @@ Contribution Guidelines ======================= -Please check the relevant section of the Tutor docs: `https://docs.tutor.overhang.io/tutor.html#contributing `__. +Please check the relevant section of the Tutor docs: `https://docs.tutor.edly.io/tutor.html#contributing `__. diff --git a/Dockerfile b/Dockerfile index 03a7cdd6bd..a3d6d4ba53 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,15 +10,15 @@ # Because this image is still experimental, and we are not quite sure if it's going to # be very useful, we do not provide any usage documentation. 
-FROM docker.io/python:3.7-slim-stretch +FROM docker.io/python:3.8-slim-stretch # As per https://github.com/docker/compose/issues/3918 COPY --from=library/docker:19.03 /usr/local/bin/docker /usr/bin/docker COPY --from=docker/compose:1.24.0 /usr/local/bin/docker-compose /usr/bin/docker-compose -RUN pip install tutor-openedx +RUN pip install tutor RUN mkdir /opt/tutor -ENV TUTOR_ROOT /opt/tutor +ENV TUTOR_ROOT=/opt/tutor EXPOSE 80 EXPOSE 443 diff --git a/MANIFEST.in b/MANIFEST.in index a4d0269a51..7d53c0c3bd 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,2 +1,5 @@ include requirements/base.in +include requirements/plugins.txt +include requirements/dev.txt recursive-include tutor/templates * +include tutor/py.typed diff --git a/Makefile b/Makefile index 355924d9d0..b1755a5dc0 100644 --- a/Makefile +++ b/Makefile @@ -1,11 +1,11 @@ .DEFAULT_GOAL := help .PHONY: docs -SRC_DIRS = ./tutor ./tests ./bin +SRC_DIRS = ./tutor ./tests ./bin ./docs BLACK_OPTS = --exclude templates ${SRC_DIRS} ###### Development -docs: ## Build html documentation +docs: ## Build HTML documentation $(MAKE) -C docs compile-requirements: ## Compile requirements files @@ -18,65 +18,79 @@ upgrade-requirements: ## Upgrade requirements files pip-compile --upgrade requirements/dev.in pip-compile --upgrade requirements/docs.in -build-pythonpackage: ## Build a python package ready to upload to pypi +build-pythonpackage: build-pythonpackage-tutor ## Build Python packages ready to upload to pypi + +build-pythonpackage-tutor: ## Build the "tutor" python package for upload to pypi python setup.py sdist -push-pythonpackage: ## Push python packages to pypi - twine upload --skip-existing dist/tutor-*.tar.gz +push-pythonpackage: ## Push python package to pypi + twine upload --skip-existing dist/tutor-$(shell make version).tar.gz + +test: test-lint test-unit test-types test-format test-pythonpackage ## Run all tests by decreasing order of priority -test: test-lint test-unit test-format test-pythonpackage ## Run 
all tests by decreasing order or priority +test-static: test-lint test-types test-format ## Run only static tests test-format: ## Run code formatting tests black --check --diff $(BLACK_OPTS) test-lint: ## Run code linting tests - pylint --errors-only --ignore=templates ${SRC_DIRS} + pylint --errors-only --enable=unused-import,unused-argument --ignore=templates --ignore=docs/_ext ${SRC_DIRS} test-unit: ## Run unit tests python -m unittest discover tests +test-types: ## Check type definitions + mypy --exclude=templates --ignore-missing-imports --implicit-reexport --strict ${SRC_DIRS} + test-pythonpackage: build-pythonpackage ## Test that package can be uploaded to pypi - twine check dist/tutor-openedx-$(shell make version).tar.gz + twine check dist/tutor-$(shell make version).tar.gz + +test-k8s: ## Validate the k8s format with kubectl. Not part of the standard test suite. + tutor k8s apply --dry-run=client --validate=true format: ## Format code automatically black $(BLACK_OPTS) -bootstrap-dev: ## Install dev requirements - pip install . - pip install -r requirements/dev.txt +isort: ## Sort imports. This target is not mandatory because the output may be incompatible with black formatting. Provided for convenience purposes. 
+ isort --skip=templates ${SRC_DIRS} -bootstrap-dev-plugins: bootstrap-dev ## Install dev requirement and all supported plugins - pip install -r requirements/plugins.txt +changelog-entry: ## Create a new changelog entry + scriv create + +changelog: ## Collect changelog entries in the CHANGELOG.md file + scriv collect + +###### Code coverage + +coverage: ## Run unit-tests before analyzing code coverage and generate report + $(MAKE) --keep-going coverage-tests coverage-report + +coverage-tests: ## Run unit-tests and analyze code coverage + coverage run -m unittest discover -###### Deployment +coverage-report: ## Generate CLI report for the code coverage + coverage report + +coverage-html: coverage-report ## Generate HTML report for the code coverage + coverage html + +coverage-browse-report: coverage-html ## Open the HTML report in the browser + sensible-browser htmlcov/index.html + +###### Continuous integration tasks bundle: ## Bundle the tutor package in a single "dist/tutor" executable pyinstaller tutor.spec -dist/tutor: - $(MAKE) bundle - -nightly: ## Create a "nightly" release - $(MAKE) tag TAG=nightly - -release: test ## Create a release tag and push it to origin - $(MAKE) retag release-origin release-overhangio TAG=v$(shell make version) - -retag: - @echo "=== Creating tag $(TAG)" - git tag -d $(TAG) || true - git tag $(TAG) -release-origin: - @echo "=== Pushing tag $(TAG) to origin" - git push origin - git push origin :$(TAG) || true - git push origin $(TAG) -release-overhangio: - @echo "=== Pushing tag $(TAG) to overhangio" - git push overhangio - git push overhangio :$(TAG) || true - git push overhangio $(TAG) -###### Continuous integration tasks +bootstrap-dev: ## Install dev requirements + pip install . 
+ pip install -r requirements/dev.txt + +bootstrap-dev-plugins: bootstrap-dev ## Install dev requirements and all supported plugins + pip install -r requirements/plugins.txt + +pull-base-images: # Manually pull base images + docker image pull docker.io/ubuntu:20.04 ci-info: ## Print info about environment python --version @@ -89,35 +103,10 @@ ci-test-bundle: ## Run basic tests on bundle yes "" | ./dist/tutor config save --interactive ./dist/tutor config save ./dist/tutor plugins list - # ./dist/tutor plugins enable discovery ecommerce figures license minio notes xqueue - ./dist/tutor plugins enable discovery ecommerce license minio notes xqueue + ./dist/tutor plugins enable android discovery ecommerce forum license mfe minio notes webui xqueue ./dist/tutor plugins list ./dist/tutor license --help -./releases/github-release: ## Download github-release binary - mkdir -p releases/ - cd releases/ \ - && curl -sSL -o ./github-release.bz2 "https://github.com/meterup/github-release/releases/download/v0.7.5/$(shell uname -s | tr "[:upper:]" "[:lower:]")-amd64-github-release.bz2" \ - && bzip2 -d -f ./github-release.bz2 \ - && chmod a+x ./github-release - -ci-push-bundle: ./releases/github-release ## Upload assets to github - sed "s/TUTOR_VERSION/v$(shell make version)/g" docs/_release_description.md > releases/description.md - git log -1 --pretty=format:%b >> releases/description.md - ./releases/github-release release \ - --user overhangio \ - --repo tutor \ - --tag "v$(shell make version)" \ - --name "v$(shell make version)" \ - --description "$$(cat releases/description.md)" || true - ./releases/github-release upload \ - --user overhangio \ - --repo tutor \ - --tag "v$(shell make version)" \ - --name "tutor-$$(uname -s)_$$(uname -m)" \ - --file ./dist/tutor \ - --replace - ci-bootstrap-images: pip install . 
tutor config save @@ -125,10 +114,10 @@ ci-bootstrap-images: ###### Additional commands version: ## Print the current tutor version - @python -c 'import io, os; about = {}; exec(io.open(os.path.join("tutor", "__about__.py"), "rt", encoding="utf-8").read(), about); print(about["__version__"])' + @python -c 'import io, os; about = {}; exec(io.open(os.path.join("tutor", "__about__.py"), "rt", encoding="utf-8").read(), about); print(about["__package_version__"])' ESCAPE =  help: ## Print this help @grep -E '^([a-zA-Z_-]+:.*?## .*|######* .+)$$' Makefile \ - | sed 's/######* \(.*\)/\n $(ESCAPE)[1;31m\1$(ESCAPE)[0m/g' \ + | sed 's/######* \(.*\)/@ $(ESCAPE)[1;31m\1$(ESCAPE)[0m/g' | tr '@' '\n' \ | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[33m%-30s\033[0m %s\n", $$1, $$2}' diff --git a/README.rst b/README.rst index 5b64a1e079..fc0d3b2504 100644 --- a/README.rst +++ b/README.rst @@ -1,10 +1,6 @@ -.. _readme_intro_start: - -Tutor: the docker-based Open edX distribution designed for peace of mind +Tutor: the Docker-based Open edX distribution designed for peace of mind ======================================================================== -| - .. image:: https://overhang.io/static/img/tutor-logo.svg :alt: Tutor logo :width: 500px @@ -12,66 +8,76 @@ Tutor: the docker-based Open edX distribution designed for peace of mind | -.. image:: https://img.shields.io/travis/overhangio/tutor.svg?label=Release%20build&style=flat-square - :alt: Release build status - :target: https://travis-ci.org/overhangio/tutor +.. _readme_intro_start: + +.. image:: https://img.shields.io/static/v1?logo=github&label=Git&style=flat-square&color=brightgreen&message=Source%20code + :alt: Source code + :target: https://github.com/overhangio/tutor -.. image:: https://img.shields.io/badge/docs-current-blue.svg?style=flat-square - :alt: Documentation - :target: https://docs.tutor.overhang.io +.. 
image:: https://img.shields.io/static/v1?logo=discourse&label=Forums&style=flat-square&color=ff0080&message=discuss.openedx.org + :alt: Forums + :target: https://discuss.openedx.org/tag/tutor -.. image:: https://img.shields.io/github/issues/overhangio/tutor.svg?style=flat-square - :alt: GitHub issues - :target: https://github.com/overhangio/tutor/issues +.. image:: https://img.shields.io/static/v1?logo=readthedocs&label=Documentation&style=flat-square&color=blue&message=docs.tutor.edly.io + :alt: Documentation + :target: https://docs.tutor.edly.io -.. image:: https://img.shields.io/github/issues-closed/overhangio/tutor.svg?colorB=brightgreen&style=flat-square - :alt: GitHub closed issues - :target: https://github.com/overhangio/tutor/issues?q=is%3Aclosed +.. image:: https://img.shields.io/pypi/v/tutor?logo=python&logoColor=white + :alt: PyPI releases + :target: https://pypi.org/project/tutor .. image:: https://img.shields.io/github/license/overhangio/tutor.svg?style=flat-square - :alt: AGPL License - :target: https://www.gnu.org/licenses/agpl-3.0.en.html + :alt: AGPL License + :target: https://www.gnu.org/licenses/agpl-3.0.en.html -**Tutor** is a docker-based `Open edX `_ distribution, both for production and local development. The goal of Tutor is to make it easy to deploy, customize, upgrade and scale Open edX. Tutor is reliable, fast, extensible, and it is already used by dozens of Open edX platforms around the world. +.. image:: https://img.shields.io/static/v1?logo=youtube&label=YouTube&style=flat-square&color=ff0000&message=@tutor-edly + :alt: Follow us on Youtube + :target: https://www.youtube.com/@tutor-edly -Do you need professional assistance setting up or managing your Open edX platform? Overhang.IO provides online support as part of its `Long Term Support (LTS) offering `__. +**Tutor** is the official Docker-based `Open edX `_ distribution, both for production and local development. 
The goal of Tutor is to make it easy to deploy, customise, upgrade and scale Open edX. Tutor is reliable, fast, extensible, and it is already used to deploy hundreds of Open edX platforms around the world. + +Do you need professional assistance setting up or managing your Open edX platform? `Edly `__ provides online support as part of its `Open edX installation service `__. Features -------- * 100% `open source `__ * Runs entirely on Docker -* World-famous 1-click `installation and upgrades `__ -* Comes with batteries included: `theming `__, `SCORM `__, `HTTPS `__, `web-based administration interface `__, `mobile app `__, `custom translations `__... -* Extensible architecture with `plugins `__ -* Works with `Kubernetes `__ -* No technical skill required with the `1-click Tutor AWS image `__ -* Amazing plugins available with `Tutor Wizard Edition `__ +* World-famous 1-click `installation and upgrades `__ +* Comes with batteries included: `theming `__, `SCORM `__, `HTTPS `__, `web-based administration interface `__, `mobile app `__, `custom translations `__... +* Extensible architecture with `plugins `__ +* Works with `Kubernetes `__ +* No technical skill required with the `zero-click Tutor AWS image `__ .. _readme_intro_end: -.. image:: ./docs/img/quickstart.gif - :alt: Tutor local quickstart - :target: https://terminalizer.com/view/91b0bfdd557 +.. image:: ./docs/img/launch.webp + :alt: Tutor local launch + :target: https://www.terminalizer.com/view/3a8d55835686 Quickstart ---------- 1. Install the `latest stable release `_ of Tutor -2. Run ``tutor local quickstart`` +2. Run ``tutor local launch`` 3. You're done! Documentation ------------- -Extensive documentation is available online: https://docs.tutor.overhang.io/ +Extensive documentation is available: https://docs.tutor.edly.io/ + +Is there a problem? +------------------- + +Please follow the instructions from the `troubleshooting section `__ in the docs. .. 
_readme_support_start: Support ------- -To get community support, go to the official discussion forums: https://discuss.overhang.io. For official support, please subscribe to a Long Term Support (LTS) license at https://overhang.io/tutor/pricing. +To get community support, go to the official Open edX discussion forum: https://discuss.openedx.org. For official support, `Edly `__ provides professional assistance as part of its `Open edX installation service `__. .. _readme_support_end: @@ -80,6 +86,11 @@ To get community support, go to the official discussion forums: https://discuss. Contributing ------------ -We welcome contributions to Tutor! To learn how you can contribute, please check the relevant section of the Tutor docs: `https://docs.tutor.overhang.io/tutor.html#contributing `__. +We welcome contributions to Tutor! To learn how you can contribute, please check the relevant section of the Tutor docs: `https://docs.tutor.edly.io/tutor.html#contributing `__. + +.. _readme_contributing_end: + +License +------- -.. _readme_contributing_end: \ No newline at end of file +This work is licensed under the terms of the `GNU Affero General Public License (AGPL) `_. diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000..38079484f8 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,19 @@ +# Tutor Ethical Vulnerability Disclosure Policy + + +## Reporting a Vulnerability + +To ensure the health of the codebase and the larger Open edX and Tutor communities, please do not create GitHub issues for a security vulnerability. Report any security vulnerabilities or concerns by sending an email to [security.tutor@edly.io](mailto:security.tutor@edly.io). To ensure a timely triage and fix of the security issue, include as many details you can when reporting the vulnerability. Some pieces of information to consider: + +* The nature of the vulnerability, e.g. 
+ * Authentication and Authorization + * Data Integrity and Confidentiality + * Security Configurations + * Third-party dependencies +* The impact of the security risk +* A detailed description of the steps necessary to reproduce the issue +* The links to the vulnerable code +* The links to third-party libraries/packages if the vulnerability is present in such a dependency. + +## Bug Bounty +Edly/Tutor does not offer a bug bounty for reported vulnerabilities. diff --git a/bin/main.py b/bin/main.py index feaf04c2c7..d5e570f7e6 100755 --- a/bin/main.py +++ b/bin/main.py @@ -1,21 +1,17 @@ #!/usr/bin/env python3 -from tutor.plugins import OfficialPlugin +from tutor import hooks +from tutor.commands.cli import main +from tutor.plugins.v0 import OfficialPlugin -# Manually install plugins (this is for creating the bundle) -for plugin_name in [ - "discovery", - "ecommerce", - # "figures", - "license", - "minio", - "notes", - "xqueue", -]: - try: - OfficialPlugin.load(plugin_name) - except ImportError: - pass -from tutor.commands.cli import main +@hooks.Actions.CORE_READY.add() +def _discover_official_plugins() -> None: + # Manually discover plugins: that's because entrypoint plugins are not properly + # detected within the binary bundle. + with hooks.Contexts.PLUGINS.enter(): + OfficialPlugin.discover_all() + -main() +if __name__ == "__main__": + # Call the regular main function, which will not detect any entrypoint plugin + main() diff --git a/changelog.d/20240726_202449_danyal.faheem_mysql_upgrade_5_7_to_8_4.md b/changelog.d/20240726_202449_danyal.faheem_mysql_upgrade_5_7_to_8_4.md new file mode 100644 index 0000000000..01275c273d --- /dev/null +++ b/changelog.d/20240726_202449_danyal.faheem_mysql_upgrade_5_7_to_8_4.md @@ -0,0 +1,2 @@ +- [Bugfix] Do not directly upgrade MySQL from v5.7 to v8.4 when upgrading from quince as MySQL does not allow that. First, upgrade to v8.1 and then to v8.4. (by @Danyal-Faheem) + This process should be automatic for most users. 
However, if you are running a third-party MySQL (i.e., RUN_MYSQL=false), you are expected to perform this process yourself. Please refer to the third-party provider's documentation for detailed instructions. Ensuring that your MySQL version is up-to-date is crucial for maintaining compatibility and security. \ No newline at end of file diff --git a/changelog.d/20241018_122745_danyal.faheem_run_mysql_8_1_as_separate_container.md b/changelog.d/20241018_122745_danyal.faheem_run_mysql_8_1_as_separate_container.md new file mode 100644 index 0000000000..a4b5403c75 --- /dev/null +++ b/changelog.d/20241018_122745_danyal.faheem_run_mysql_8_1_as_separate_container.md @@ -0,0 +1 @@ +- [Bugfix] Run MySQL 8.1 as a separate container during upgrade from Olive to Redwood as it crashed otherwise due to the `--mysql-native-password` option not being present. (by @Danyal-Faheem) \ No newline at end of file diff --git a/changelog.d/20241031_144431_regis_no_delete_env.md b/changelog.d/20241031_144431_regis_no_delete_env.md new file mode 100644 index 0000000000..e84aac0782 --- /dev/null +++ b/changelog.d/20241031_144431_regis_no_delete_env.md @@ -0,0 +1 @@ +- [Improvement] Do not prompt for environment deletion by default on `tutor config save --interactive`. (by @regisb) diff --git a/changelog.d/20241111_163102_dawoud.sheraz_remove_py38_references.md b/changelog.d/20241111_163102_dawoud.sheraz_remove_py38_references.md new file mode 100644 index 0000000000..aeead7e20c --- /dev/null +++ b/changelog.d/20241111_163102_dawoud.sheraz_remove_py38_references.md @@ -0,0 +1 @@ +- πŸ’₯ [Deprecation] Drop support for python 3.8 and set Python 3.9 as the minimum supported python version. 
(by @DawoudSheraz) diff --git a/changelog.d/20241119_100430_crisgarta8_celery_queues.md b/changelog.d/20241119_100430_crisgarta8_celery_queues.md new file mode 100644 index 0000000000..d0ec11de84 --- /dev/null +++ b/changelog.d/20241119_100430_crisgarta8_celery_queues.md @@ -0,0 +1 @@ +- πŸ’₯ [Feature] Add a filter to define the celery workers startup command. (by @Ian2012) diff --git a/changelog.d/20241119_111602_fghaas_image_manifest.md b/changelog.d/20241119_111602_fghaas_image_manifest.md new file mode 100644 index 0000000000..28e6cc901c --- /dev/null +++ b/changelog.d/20241119_111602_fghaas_image_manifest.md @@ -0,0 +1,8 @@ +- [Improvement] When building images with + `tutorΒ imagesΒ buildΒ --cache-to-registry`, use an OCI-compliant cache + artifact format that should be universally compatible with all + registries. This enables the use of that option when working with + third-party registries such as [Harbor](https://goharbor.io/) or + [ECR](https://aws.amazon.com/ecr/). Requires + [BuildKitΒ 0.12](https://github.com/moby/buildkit/releases/tag/v0.12.0) + or later. 
(by @angonz and @fghaas) diff --git a/changelog.d/scriv.ini b/changelog.d/scriv.ini new file mode 100644 index 0000000000..6333f3ac5c --- /dev/null +++ b/changelog.d/scriv.ini @@ -0,0 +1,8 @@ +[scriv] +version = literal: tutor/__about__.py: __version__ +categories = +format = md +md_header_level = 2 +new_fragment_template = file: changelog.d/scriv/new_fragment.${config:format}.j2 +entry_title_template = file: changelog.d/scriv/entry_title.${config:format}.j2 +ghrel_template = file: changelog.d/scriv/github_release.${config:format}.j2 diff --git a/changelog.d/scriv/entry_title.md.j2 b/changelog.d/scriv/entry_title.md.j2 new file mode 100644 index 0000000000..7afe89477c --- /dev/null +++ b/changelog.d/scriv/entry_title.md.j2 @@ -0,0 +1 @@ +{% if version %}v{{ version }} {% endif %}({{ date.strftime('%Y-%m-%d') }}) diff --git a/changelog.d/scriv/github_release.md.j2 b/changelog.d/scriv/github_release.md.j2 new file mode 100644 index 0000000000..8d78d8ca2a --- /dev/null +++ b/changelog.d/scriv/github_release.md.j2 @@ -0,0 +1,14 @@ +Install this version from pip with: + + pip install "tutor[full]=={{ version.vtext[1:] }}" + +Or download the compiled binaries: + + sudo curl -L "https://github.com/overhangio/tutor/releases/download/{{ version }}/tutor-$(uname -s)_$(uname -m)" -o /usr/local/bin/tutor + sudo chmod 0755 /usr/local/bin/tutor + +See the [installation docs](https://docs.tutor.edly.io/install.html) for more installation options and instructions. + +## Changes + +{{ body }} diff --git a/changelog.d/scriv/new_fragment.md.j2 b/changelog.d/scriv/new_fragment.md.j2 new file mode 100644 index 0000000000..78988a42c7 --- /dev/null +++ b/changelog.d/scriv/new_fragment.md.j2 @@ -0,0 +1,11 @@ + + + + diff --git a/docs/Makefile b/docs/Makefile index 8b29286b31..8c3882be42 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -1,11 +1,17 @@ .DEFAULT_GOAL := html .PHONY: help +build: + sphinx-build -b html -a -E -n $(BUILD_ARGS) "." 
"_build/html" + html: - sphinx-build -b html -a -E "." "_build/html" + $(MAKE) build BUILD_ARGS="-W --keep-going" browse: sensible-browser _build/html/index.html -watch: html browse - while true; do inotifywait -e modify *.rst */*.rst ../*.rst conf.py; $(MAKE) html; done +watch: build browse + while true; do $(MAKE) wait-for-change build || true; done + +wait-for-change: + inotifywait -e modify $(shell find . -name "*.rst") ../*.rst ../tutor/hooks/*.py conf.py diff --git a/docs/__init__.py b/docs/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/_ext/tutordocs.py b/docs/_ext/tutordocs.py new file mode 100644 index 0000000000..eb8758770b --- /dev/null +++ b/docs/_ext/tutordocs.py @@ -0,0 +1,15 @@ +""" +This module is heavily inspired by Django's djangodocs.py: +https://github.com/django/django/blob/main/docs/_ext/djangodocs.py +""" + +from sphinx.application import Sphinx + + +def setup(app: Sphinx) -> None: + # https://www.sphinx-doc.org/en/master/extdev/appapi.html#sphinx.application.Sphinx.add_crossref_type + app.add_crossref_type( + directivename="patch", + rolename="patch", + indextemplate="pair: %s; patch", + ) diff --git a/docs/_release_description.md b/docs/_release_description.md deleted file mode 100644 index 0e34d97c6f..0000000000 --- a/docs/_release_description.md +++ /dev/null @@ -1,8 +0,0 @@ -Tutor can be installed simply by downloading the compiled binaries: - - sudo curl -L "https://github.com/overhangio/tutor/releases/download/TUTOR_VERSION/tutor-$(uname -s)_$(uname -m)" -o /usr/local/bin/tutor - sudo chmod 0755 /usr/local/bin/tutor - -See the [installation docs](https://docs.tutor.overhang.io/install.html) for more installation options and instructions. 
- -## Changes diff --git a/docs/conf.py b/docs/conf.py index a29183d896..f73d34cd6f 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,5 +1,9 @@ +from __future__ import annotations + import io import os +import sys +from typing import Any, Dict, List import docutils import docutils.parsers.rst @@ -7,8 +11,8 @@ # -- Project information ----------------------------------------------------- project = "Tutor" -copyright = "" -author = "Overhang.io" +copyright = "" # pylint: disable=redefined-builtin +author = "Overhang.IO" # The short X.Y version version = "" @@ -21,10 +25,60 @@ templates_path = ["_templates"] source_suffix = ".rst" master_doc = "index" -language = None +language = "en" exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] pygments_style = None +# Autodocumentation of modules +extensions.append("sphinx.ext.autodoc") +autodoc_typehints = "description" +# For the life of me I can't get the docs to compile in nitpicky mode without these +# ignore statements. You are most welcome to try and remove them. +# To make matters worse, some ignores are only required for some versions of Python, +# from 3.8 to 3.10... 
+nitpick_ignore = [ + # Sphinx does not handle ParamSpec arguments + ("py:class", "T.args"), + ("py:class", "T.kwargs"), + ("py:class", "T2.args"), + ("py:class", "T2.kwargs"), + # Sphinx doesn't know about the following classes + ("py:class", "click.Command"), + ("py:class", "t.Any"), + ("py:class", "t.Callable"), + ("py:class", "t.Iterator"), + ("py:class", "t.Optional"), + # python 3.10 + ("py:class", "NoneType"), + ("py:class", "click.core.Command"), + # Python 3.12 + ("py:class", "FilterCallbackFunc"), +] +# Resolve type aliases here +# https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_type_aliases +autodoc_type_aliases: dict[str, str] = { + # python 3.10 + "T": "tutor.core.hooks.actions.T", + "T2": "tutor.core.hooks.filters.T2", + # # python 3.12 + "L": "tutor.core.hooks.filters.L", + "FilterCallbackFunc": "tutor.core.hooks.filters.FilterCallbackFunc", + # https://stackoverflow.com/questions/73223417/type-aliases-in-type-hints-are-not-preserved + # https://github.com/sphinx-doc/sphinx/issues/10455 + # https://github.com/sphinx-doc/sphinx/issues/10785 + # https://github.com/emdgroup/baybe/pull/67 + "Action": "tutor.core.hooks.actions.Action", + "Filter": "tutor.core.hooks.filters.Filter", +} + + +# -- Sphinx-Click configuration +# https://sphinx-click.readthedocs.io/ +extensions.append("sphinx_click") +# This is to avoid the addition of the local username to the docs +os.environ["HOME"] = "~" +# Make sure that sphinx-click can find the tutor module +sys.path.append(os.path.join(os.path.dirname(__file__), "..")) # -- Options for HTML output ------------------------------------------------- html_theme = "sphinx_rtd_theme" @@ -42,7 +96,7 @@ html_static_path = ["img"] # Custom settings -html_logo = "./img/tutor-logo.png" +html_logo = "https://overhang.io/static/img/tutor-logo.svg" html_favicon = "./img/favicon.png" html_show_sourcelink = False html_display_github = True @@ -55,49 +109,51 @@ # Custom variables here = 
os.path.abspath(os.path.dirname(__file__)) -about = {} +about: Dict[str, str] = {} with io.open( os.path.join(here, "..", "tutor", "__about__.py"), "rt", encoding="utf-8" ) as f: + # pylint: disable=exec-used exec(f.read(), about) -rst_prolog = """ -.. |tutor_version| replace:: {} -""".format( - about["__version__"], -) +rst_prolog = f""" +.. |tutor_version| replace:: {about["__version__"]} +""" # Custom directives def youtube( - _name, - _args, - _options, - content, - _lineno, - _contentOffset, - _blockText, - _state, - _stateMachine, -): - """ Restructured text extension for inserting youtube embedded videos """ + _name: Any, + _args: Any, + _options: Any, + content: List[str], + _lineno: Any, + _contentOffset: Any, + _blockText: Any, + _state: Any, + _stateMachine: Any, +) -> Any: + """Restructured text extension for inserting youtube embedded videos""" if not content: return [] video_id = content[0] return [ docutils.nodes.raw( "", - """ + f""" """.format( - video_id=video_id - ), +""", format="html", ) ] -youtube.content = True -docutils.parsers.rst.directives.register_directive("youtube", youtube) +# Tutor's own extension +sys.path.append(os.path.join(os.path.dirname(__file__), "_ext")) +extensions.append("tutordocs") + + +setattr(youtube, "content", True) +docutils.parsers.rst.directives.register_directive("youtube", youtube) # type: ignore diff --git a/docs/configuration.rst b/docs/configuration.rst index d61d9494b0..6e40797a14 100644 --- a/docs/configuration.rst +++ b/docs/configuration.rst @@ -3,19 +3,19 @@ Configuration and customisation =============================== -Tutor offers plenty of possibilities for platform customisation out of the box. There are two main ways in which the base Open edX installation can be customized: +Tutor offers plenty of possibilities for platform customisation out of the box. There are two main ways in which the base Open edX installation can be customised: a. Modifying the Tutor :ref:`configuration parameters `. b. 
Modifying the :ref:`Open edX docker image ` that runs the Open edX platform. -This section does not cover :ref:`plugin development `. For simple changes, such as modifying the ``*.env.json`` files or the edx-platform settings, *you should not fork edx-platform or tutor*! Instead, you should create a simple :ref:`plugin for Tutor `. +This section does not cover :ref:`plugin development `. For simple changes, such as modifying the ``*.env.yml`` files or the edx-platform settings, *you should not fork edx-platform or tutor*! Instead, you should create a simple :ref:`plugin for Tutor `. .. _configuration: Configuration ------------- -With Tutor, all Open edX deployment parameters are stored in a single ``config.yml`` file. This is the file that is generated when you run ``tutor local quickstart`` or ``tutor config save``. To view the content of this file, run:: +With Tutor, all Open edX deployment parameters are stored in a single ``config.yml`` file. This is the file that is generated when you run ``tutor local launch`` or ``tutor config save``. To view the content of this file, run:: cat "$(tutor config printroot)/config.yml" @@ -31,7 +31,7 @@ Or from the system environment:: export TUTOR_PARAM1=VALUE1 -Once the base configuration is created or updated, the environment is automatically re-generated. The environment is the set of all files required to manage an Open edX platform: Dockerfile, ``lms.env.json``, settings files, etc. You can view the environment files in the ``env`` folder:: +Once the base configuration is created or updated, the environment is automatically re-generated. The environment is the set of all files required to manage an Open edX platform: Dockerfile, ``lms.env.yml``, settings files, etc. 
You can view the environment files in the ``env`` folder:: ls "$(tutor config printroot)/env" @@ -40,9 +40,6 @@ With an up-to-date environment, Tutor is ready to launch an Open edX platform an Individual service activation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- ``RUN_LMS`` (default: ``true``) -- ``RUN_CMS`` (default: ``true``) -- ``RUN_FORUM`` (default: ``true``) - ``RUN_ELASTICSEARCH`` (default: ``true``) - ``RUN_MONGODB`` (default: ``true``) - ``RUN_MYSQL`` (default: ``true``) @@ -61,10 +58,48 @@ Custom images ************* - ``DOCKER_IMAGE_OPENEDX`` (default: ``"{{ DOCKER_REGISTRY }}overhangio/openedx:{{ TUTOR_VERSION }}"``) -- ``DOCKER_IMAGE_ANDROID`` (default: ``"{{ DOCKER_REGISTRY }}overhangio/openedx-android:{{ TUTOR_VERSION }}"``) -- ``DOCKER_IMAGE_FORUM`` (default: ``"{{ DOCKER_REGISTRY }}overhangio/openedx-forum:{{ TUTOR_VERSION }}"``) -These configuration parameters define which image to run for each service. By default, the docker image tag matches the Tutor version it was built with. +This configuration parameter defines the name of the Docker image to run for the lms and cms containers. By default, the Docker image tag matches the Tutor version it was built with. + +- ``DOCKER_IMAGE_OPENEDX_DEV`` (default: ``"openedx-dev:{{ TUTOR_VERSION }}"``) + +This configuration parameter defines the name of the Docker image to run the development version of the lms and cms containers. By default, the Docker image tag matches the Tutor version it was built with. + +.. https://hub.docker.com/r/devture/exim-relay/tags + +- ``DOCKER_IMAGE_CADDY`` (default: ``"docker.io/caddy:2.6.2"``) + +This configuration parameter defines which Caddy Docker image to use. + +- ``DOCKER_IMAGE_ELASTICSEARCH`` (default: ``"docker.io/elasticsearch:7.17.9"``) + +This configuration parameter defines which Elasticsearch Docker image to use. + +- ``DOCKER_IMAGE_MONGODB`` (default: ``"docker.io/mongo:7.0.7"``) + +This configuration parameter defines which MongoDB Docker image to use. + +.. 
https://hub.docker.com/_/mysql/tags?page=1&name=8.0 + +- ``DOCKER_IMAGE_MYSQL`` (default: ``"docker.io/mysql:8.4.0"``) + +This configuration parameter defines which MySQL Docker image to use. + +.. https://hub.docker.com/_/redis/tags + +- ``DOCKER_IMAGE_REDIS`` (default: ``"docker.io/redis:7.2.4"``) + +This configuration parameter defines which Redis Docker image to use. + +.. https://hub.docker.com/r/devture/exim-relay/tags + +- ``DOCKER_IMAGE_SMTP`` (default: ``"docker.io/devture/exim-relay:4.96-r1-0``) + +This configuration parameter defines which Simple Mail Transfer Protocol (SMTP) Docker image to use. + +- ``DOCKER_IMAGE_PERMISSIONS`` (default: ``"{{ DOCKER_REGISTRY }}overhangio/openedx-permissions:{{ TUTOR_VERSION }}"``) + +This configuration parameter defines the Docker image to be used for setting file permissions. The default image sets all containers to be run as unprivileged users. Custom registry *************** @@ -77,35 +112,102 @@ You may want to pull/push images from/to a custom docker registry. For instance, (the trailing ``/`` is important) +.. _openedx_configuration: + +Compose +******* + +- ``DEV_PROJECT_NAME`` (default: ``"{{ TUTOR_APP }}_dev"``) + +This configuration parameter sets the Development version of the Docker Compose project name. + +- ``LOCAL_PROJECT_NAME`` (default: ``"{{ TUTOR_APP }}_local"``) + +This configuration parameter sets the Local version of the Docker Compose project name. + Open edX customisation ~~~~~~~~~~~~~~~~~~~~~~ -- ``OPENEDX_COMMON_VERSION`` (default: ``"open-release/koa.1"``) +- ``EDX_PLATFORM_REPOSITORY`` (default: ``"https://github.com/openedx/edx-platform.git"``) + +This defines the git repository from which you install Open edX platform code. If you run an Open edX fork with custom patches, set this to your own git repository. You may also override this configuration parameter at build time, by providing a ``--build-arg`` option. 
+ +- ``OPENEDX_COMMON_VERSION`` (default: ``"open-release/redwood.3"``, or ``master`` in :ref:`nightly `) This defines the default version that will be pulled from all Open edX git repositories. +- ``EDX_PLATFORM_VERSION`` (default: the value of ``OPENEDX_COMMON_VERSION``) + +This defines the version that will be pulled from just the Open edX platform git repositories. You may also override this configuration parameter at build time, by providing a ``--build-arg`` option. + - ``OPENEDX_CMS_UWSGI_WORKERS`` (default: ``2``) - ``OPENEDX_LMS_UWSGI_WORKERS`` (default: ``2``) -By default there are 2 `uwsgi worker processes `__ to serve requests for the LMS and the CMS. However, each workers requires upwards of 500 Mb of RAM. You should reduce this value to 1 if your computer/server does not have enough memory. +By default, there are 2 `uwsgi worker processes `__ to serve requests for the LMS and the CMS. However, each worker requires upwards of 500 Mb of RAM. You should reduce this value to 1 if your computer/server does not have enough memory. +- ``OPENEDX_CELERY_REDIS_DB`` (default: ``0``) +- ``OPENEDX_CACHE_REDIS_DB`` (default: ``1``) -Vendor services -~~~~~~~~~~~~~~~ +These two configuration parameters define which Redis database to use for Open edX cache and celery task. -Caddy -***** +.. _openedx_extra_pip_requirements: + +- ``OPENEDX_EXTRA_PIP_REQUIREMENTS`` (default: ``[]``) + +Define extra pip packages that are going to be installed for edx-platform. + +- ``NPM_REGISTRY`` (default: ``"https://registry.npmjs.org/"``) + +This defines the registry from which you'll be pulling NPM packages when building Docker images. Like ``EDX_PLATFORM_REPOSITORY``, this can be overridden at build time with a ``--build-arg`` option. + +- ``OPENEDX_AWS_ACCESS_KEY`` (default: ``""``) + +This configuration parameter sets the Django setting ``AWS_ACCESS_KEY_ID`` in edx-platform's LMS, CMS, envs, and production.py for use by the library django-storages with Amazon S3. 
+ +- ``OPENEDX_AWS_SECRET_ACCESS_KEY`` (default: ``""``) + +This configuration parameter sets the Django setting ``AWS_SECRET_ACCESS_KEY`` in edx-platform's LMS, CMS, envs, and production.py for use by the library django-storages with Amazon S3. + +- ``OPENEDX_MYSQL_DATABASE`` (default: ``"openedx"``) + +This configuration parameter sets the name of the MySQL Database to be used by the Open edX Instance. + +- ``OPENEDX_MYSQL_USERNAME`` (default: ``"openedx"``) + +This configuration parameter sets the username associated with the MySQL Database. + +CMS OAUTH2 SSO +~~~~~~~~~~~~~~ + +- ``CMS_OAUTH2_KEY_SSO`` (default: ``"cms-sso"``) + +This defines the Studio's (CMS) OAUTH 2.0 Login (Key or Client ID) for SSO in the production environment. + +- ``CMS_OAUTH2_KEY_SSO_DEV`` (default: ``"cms-sso-dev"``) + +This defines the Studio's (CMS) OAUTH 2.0 Login (Key or Client ID) for SSO in the development environment. + +For more information, see `Enabling OAuth for Studio login `__. -- ``RUN_CADDY`` (default: ``true``) +JWTs +~~~~ -`Caddy `__ is a web server used in Tutor as a web proxy for the generation of SSL/TLS certificates at runtime. If ``RUN_CADDY`` is set to ``false`` then we assume that SSL termination does not occur in the Caddy container, and thus the ``caddy`` container is not started. +- ``JWT_COMMON_AUDIENCE`` (default: ``"openedx"``) +- ``JWT_COMMON_ISSUER`` (default: ``"{% if ENABLE_HTTPS %}https{% else %}http{% endif %}://{{ LMS_HOST }}/oauth2"``) +- ``JWT_COMMON_SECRET_KEY`` (default: ``"{{ OPENEDX_SECRET_KEY }}"``) -Nginx +These configuration parameters are rendered into the ``JWT_AUTH`` dictionary with keys ``JWT_AUDIENCE``, ``JWT_ISSUER``, and ``JWT_SECRET_KEY``, respectively. These parameters may be changed in order to create a custom user login for testing purposes. 
+ +Vendor services +~~~~~~~~~~~~~~~ + +Caddy ***** -- ``NGINX_HTTP_PORT`` (default: ``80``) +- ``CADDY_HTTP_PORT`` (default: ``80``) +- ``ENABLE_WEB_PROXY`` (default: ``true``) -Nginx is used to route web traffic to the various applications and to serve static assets. When ``RUN_CADDY`` is false, the ``NGINX_HTTP_PORT`` is exposed on the host. +`Caddy `__ is a web server used in Tutor both as a web proxy and for the generation of SSL/TLS certificates at runtime. Port indicated by ``CADDY_HTTP_PORT`` is exposed on the host, in addition to port 443. If ``ENABLE_WEB_PROXY`` is set to ``false`` then we assume that SSL termination does not occur in the Caddy container and only ``CADDY_HTTP_PORT`` is exposed on the host. MySQL ***** @@ -116,13 +218,16 @@ MySQL - ``MYSQL_ROOT_USERNAME`` (default: ``"root"``) - ``MYSQL_ROOT_PASSWORD`` (default: randomly generated) Note that you are responsible for creating the root user if you are using a managed database. -By default, a running Open edX platform deployed with Tutor includes all necessary 3rd-party services, such as MySQL, MongoDb, etc. But it's also possible to store data on a separate database, such as `Amazon RDS `_. For instance, to store data on an external MySQL database, set the following configuration:: +By default, a running Open edX platform deployed with Tutor includes all necessary 3rd-party services, such as MySQL, MongoDb, etc. But it's also possible to store data on a separate database, such as `Amazon RDS `_. For instance, to store data on an external MySQL database set the following configuration:: RUN_MYSQL: false MYSQL_HOST: yourhost MYSQL_ROOT_USERNAME: MYSQL_ROOT_PASSWORD: +.. note:: + When configuring an external MySQL database, please make sure it is using version 8.4. 
+ Elasticsearch ************* @@ -131,22 +236,28 @@ Elasticsearch - ``ELASTICSEARCH_PORT`` (default: ``9200``) - ``ELASTICSEARCH_HEAP_SIZE`` (default: ``"1g"``) -Mongodb +MongoDB ******* - ``RUN_MONGODB`` (default: ``true``) -- ``MONGODB_HOST`` (default: ``"mongodb"``) - ``MONGODB_DATABASE`` (default: ``"openedx"``) +- ``MONGODB_HOST`` (default: ``"mongodb"``) +- ``MONGODB_PASSWORD`` (default: ``""``) - ``MONGODB_PORT`` (default: ``27017``) - ``MONGODB_USERNAME`` (default: ``""``) -- ``MONGODB_PASSWORD`` (default: ``""``) +- ``MONGODB_USE_SSL`` (default: ``false``) +- ``MONGODB_REPLICA_SET`` (default: ``""``) +- ``MONGODB_AUTH_MECHANISM`` (default: ``""``) +- ``MONGODB_AUTH_SOURCE`` (default: ``"admin"``) + +Note that most of these settings will have to be modified to connect to a MongoDB cluster that runs separately of Tutor, such as `Atlas `__. In particular, the authentication source, mechanism and the SSL connection parameters should not be specified as part of the `host URI `__ but as separate Tutor settings. Supported values for ``MONGODB_AUTH_MECHANISM`` are the same as for pymongo (see the `pymongo documentation `__). Redis ***** - ``RUN_REDIS`` (default: ``true``) - ``REDIS_HOST`` (default: ``"redis"``) -- ``REDIS_HOST`` (default: ``6379``) +- ``REDIS_PORT`` (default: ``6379``) - ``REDIS_USERNAME`` (default: ``""``) - ``REDIS_PASSWORD`` (default: ``""``) @@ -157,7 +268,7 @@ SMTP - ``RUN_SMTP`` (default: ``true``) - ``SMTP_HOST`` (default: ``"smtp"``) -- ``SMTP_PORT`` (default: ``25``) +- ``SMTP_PORT`` (default: ``8025``) - ``SMTP_USERNAME`` (default: ``""``) - ``SMTP_PASSWORD`` (default: ``""``) - ``SMTP_USE_TLS`` (default: ``false``) @@ -170,22 +281,40 @@ SSL/TLS certificates for HTTPS access - ``ENABLE_HTTPS`` (default: ``false``) -By activating this feature, a free SSL/TLS certificate from the `Let's Encrypt `_ certificate authority will be created for your platform. With this feature, **your platform will no longer be accessible in HTTP**. 
Calls to http urls will be redirected to https url. +When ``ENABLE_HTTPS`` is ``true``, the whole Open edX platform will be reconfigured to work with "https" URIs. Calls to "http" URIs will be redirected to "https". By default, SSL/TLS certificates will automatically be generated by Tutor (thanks to `Caddy `__) from the `Let's Encrypt `_ certificate authority. The following DNS records must exist and point to your server:: LMS_HOST (e.g: myopenedx.com) - preview.LMS_HOST (e.g: preview.myopenedx.com) + PREVIEW_LMS_HOST (e.g: preview.myopenedx.com) CMS_HOST (e.g: studio.myopenedx.com) Thus, **this feature will (probably) not work in development** because the DNS records will (probably) not point to your development machine. -The SSL/TLS certificates will automatically be generated and updated by the Caddy proxy server container at runtime. Thus, as of v11.0.0 you no longer have to generate the certificates manually. +If you would like to perform SSL/TLS termination with your own custom certificates, you will have to keep ``ENABLE_HTTPS=true`` and turn off the Caddy load balancing with ``ENABLE_WEB_PROXY=false``. See the corresponding :ref:`tutorial ` for more information. .. _customise: .. _custom_openedx_docker_image: +Kubernetes +~~~~~~~~~~ + +- ``K8S_NAMESPACE`` (default: ``"openedx"``) + +This configuration parameter sets the Kubernetes Namespace. + +Miscellaneous Project Settings +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- ``CONTACT_EMAIL`` (default: ``"contact@{{ LMS_HOST }}"``) + +This configuration parameter sets the Contact Email. + +- ``PLATFORM_NAME`` (default: ``"My Open edX"``) + +This configuration parameter sets the Platform Name. + Custom Open edX docker image ---------------------------- @@ -199,17 +328,16 @@ The following sections describe how to modify various aspects of the docker imag tutor local stop -The custom image will be used the next time you run ``tutor local quickstart`` or ``tutor local start``. Do not attempt to run ``tutor local restart``! 
Restarting will not pick up the new image and will continue to use the old image. +The custom image will be used the next time you run ``tutor local launch`` or ``tutor local start``. Do not attempt to run ``tutor local restart``! Restarting will not pick up the new image and will continue to use the old image. -openedx Docker Image build arguments -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +"openedx" Docker image build arguments +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When building the "openedx" Docker image, it is possible to specify a few `arguments `__: -- ``EDX_PLATFORM_REPOSITORY`` (default: ``"https://github.com/edx/edx-platform.git"``) -- ``EDX_PLATFORM_VERSION`` (default: ``"open-release/koa.1"``) -- ``EDX_PLATFORM_VERSION_DATE`` (default: ``"20200227"``) -- ``NPM_REGISTRY`` (default: ``"https://registry.npmjs.org/"``) +- ``EDX_PLATFORM_REPOSITORY`` (default: ``"{{ EDX_PLATFORM_REPOSITORY }}"``) +- ``EDX_PLATFORM_VERSION`` (default: ``"{{ EDX_PLATFORM_VERSION }}"``, which if unset defaults to ``{{ OPENEDX_COMMON_VERSION }}``) +- ``NPM_REGISTRY`` (default: ``"{{ NPM_REGISTRY }}"``) These arguments can be specified from the command line, `very much like Docker `__. For instance:: @@ -218,56 +346,27 @@ These arguments can be specified from the command line, `very much like Docker < Adding custom themes ~~~~~~~~~~~~~~~~~~~~ -Comprehensive theming is enabled by default, but only the default theme is compiled. `Indigo `__ is a better, ready-to-run theme which you can start using today. - -To compile your own theme, add it to the ``env/build/openedx/themes/`` folder:: - - git clone https://github.com/me/myopenedxtheme.git "$(tutor config printroot)/env/build/openedx/themes/myopenedxtheme" - -The ``themes`` folder should have the following structure:: - - openedx/themes/ - mycustomtheme1/ - cms/ - ... - lms/ - ... - mycustomtheme2/ - ... 
- -Then you must rebuild the openedx Docker image:: - - tutor images build openedx - -Finally, you should enable your theme with the :ref:`settheme command `. +See :ref:`the corresponding tutorial `. .. _custom_extra_xblocks: Installing extra xblocks and requirements ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Would you like to include custom xblocks, or extra requirements to your Open edX platform? Additional requirements can be added to the ``env/build/openedx/requirements/private.txt`` file. For instance, to include the `polling xblock from Opencraft `_:: +Would you like to include custom xblocks, or extra requirements to your Open edX platform? Additional requirements can be added to the ``OPENEDX_EXTRA_PIP_REQUIREMENTS`` parameter in the :ref:`config file `. For instance, to include the `polling xblock from Opencraft `_:: - echo "git+https://github.com/open-craft/xblock-poll.git" >> "$(tutor config printroot)/env/build/openedx/requirements/private.txt" + tutor config save --append OPENEDX_EXTRA_PIP_REQUIREMENTS=git+https://github.com/open-craft/xblock-poll.git Then, the ``openedx`` docker image must be rebuilt:: tutor images build openedx -To install xblocks from a private repository that requires authentication, you must first clone the repository inside the ``openedx/requirements`` folder on the host:: - - git clone git@github.com:me/myprivaterepo.git "$(tutor config printroot)/env/build/openedx/requirements/myprivaterepo" - -Then, declare your extra requirements with the ``-e`` flag in ``openedx/requirements/private.txt``:: - - echo "-e ./myprivaterepo" >> "$(tutor config printroot)/env/build/openedx/requirements/private.txt" - .. _edx_platform_fork: Running a fork of ``edx-platform`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -You may want to run your own flavor of edx-platform instead of the `official version `_. 
To do so, you will have to re-build the openedx image with the proper environment variables pointing to your repository and version:: +You may want to run your own flavor of edx-platform instead of the `official version `_. To do so, you will have to re-build the openedx image with the proper environment variables pointing to your repository and version:: tutor images build openedx \ --build-arg EDX_PLATFORM_REPOSITORY=https://mygitrepo/edx-platform.git \ @@ -279,58 +378,28 @@ Note that your edx-platform version must be a fork of the latest release **tag** If you don't create your fork from this tag, you *will* have important compatibility issues with other services. In particular: -- Do not try to run a fork from an older (pre-Koa) version of edx-platform: this will simply not work. +- Do not try to run a fork from an older (pre-Redwood) version of edx-platform: this will simply not work. - Do not try to run a fork from the edx-platform master branch: there is a 99% probability that it will fail. -- Do not try to run a fork from the open-release/koa.master branch: Tutor will attempt to apply security and bug fix patches that might already be included in the open-release/koa.master but which were not yet applied to the latest release tag. Patch application will thus fail if you base your fork from the open-release/koa.master branch. +- Do not try to run a fork from the open-release/redwood.master branch: Tutor will attempt to apply security and bug fix patches that might already be included in the open-release/redwood.master but which were not yet applied to the latest release tag. Patch application will thus fail if you base your fork from the open-release/redwood.master branch. .. _i18n: -Adding custom translations -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you are not running Open edX in English, chances are that some strings will not be properly translated. In most cases, this is because not enough contributors have helped translate Open edX in your language. 
It happens! With Tutor, available translated languages include those that come bundled with `edx-platform `__ as well as those from `openedx-i18n `__. - -Tutor offers a relatively simple mechanism to add custom translations to the openedx Docker image. You should create a folder that corresponds to your language code in the "build/openedx/locale" folder of the Tutor environment. This folder should contain a "LC_MESSAGES" folder. For instance:: - - mkdir -p "$(tutor config printroot)/env/build/openedx/locale/fr/LC_MESSAGES" - -The language code should be similar to those used in edx-platform or openedx-i18n (see links above). - -Then, add a "django.po" file there that will contain your custom translations:: - - msgid "String to translate" - msgstr "δ½ ηΏ»θ―‘ηš„δΈœθ₯Ώ la traduction de votre bidule" - -The "String to translate" part should match *exactly* the string that you would like to translate. You cannot make it up! The best way to find this string is to copy-paste it from the `upstream django.po file for the English language `__. - -If you cannot find the string to translate in this file, then it means that you are trying to translate a string that is used in some piece of javascript code. Those strings are stored in a different file named "djangojs.po". You can check it out `in the edx-platform repo as well `__. Your custom javascript strings should also be stored in a "djangojs.po" file that should be placed in the same directory. - -To recap, here is an example. To translate a few strings in French, both from django.po and djangojs.po, we would have the following file hierarchy:: - - $(tutor config printroot)/env/build/openedx/locale/ - fr/ - LC_MESSAGES/ - django.po - djangojs.po - -With django.po containing:: - - msgid "It works! Powered by Open edX{registered_trademark}" - msgstr "Γ‡a marche ! 
PropulsΓ© by Open edX{registered_trademark}" - -And djangojs.po:: +Getting and customizing Translations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - msgid "%(num_points)s point possible (graded, results hidden)" - msgid_plural "%(num_points)s points possible (graded, results hidden)" - msgstr[0] "%(num_points)s point possible (notΓ©, rΓ©sultats cachΓ©s)" - msgstr[1] "%(num_points)s points possibles (notΓ©s, rΓ©sultats cachΓ©s)" +Tutor builds images with the latest translations using the ``atlas pull`` `command `_. -Then you will have to re-build the openedx Docker image:: +By default the translations are pulled from the `openedx-translations repository `_ +from the ``ATLAS_REVISION`` branch. You can use custom translations on your fork of the openedx-translations repository by setting the following configuration parameters: - tutor images build openedx openedx-dev +- ``ATLAS_REVISION`` (default: ``"main"`` on nightly and ``"{{ OPENEDX_COMMON_VERSION }}"`` if a named release is used) +- ``ATLAS_REPOSITORY`` (default: ``"openedx/openedx-translations"``). There's a feature request to `support GitLab and other providers `_. +- ``ATLAS_OPTIONS`` (default: ``""``) Pass additional arguments to ``atlas pull``. Refer to the `atlas documentations `_ for more information. -Beware that this will take a long time! Unfortunately it's difficult to accelerate this process, as translation files need to be compiled prior to collecting the assets. In development it's possible to accelerate the iteration loop -- but that exercise is left to the reader. +If you are not running Open edX in English (``LANGUAGE_CODE`` default: ``"en"``), chances are that some strings will not be properly translated. In most cases, this is because not enough contributors have helped translate Open edX into your language. It happens! 
+With ``atlas``, it's possible to add custom translations by either `contributing to the Translations project in Transifex `_ or forking the `openedx-translations repository `_ +and making custom changes as explained in `the repository docs `_. Running a different ``openedx`` Docker image ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -343,4 +412,18 @@ By default, Tutor runs the `overhangio/openedx `.) -The customised Docker image tag value will then be used by Tutor to run the platform, for instance when running ``tutor local quickstart``. +The customised Docker image tag value will then be used by Tutor to run the platform, for instance when running ``tutor local launch``. + + +Passing custom docker build options +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can set a limited set of Docker build options via ``tutor images build`` command. In some situations it might be necessary to tweak the docker build command, ex- setting up build caching using buildkit. +In these situations, you can set ``--docker-arg`` flag in the ``tutor images build`` command. You can set any `supported options `_ in the docker build command, For example:: + + tutor images build openedx \ + --build-arg BUILDKIT_INLINE_CACHE=1 \ + --docker-arg="--cache-from" \ + --docker-arg="docker.io/myusername/openedx:mytag" + +This will result in passing the ``--cache-from`` option with the value ``docker.io/myusername/openedx:mytag`` to the docker build command. diff --git a/docs/dev.rst b/docs/dev.rst index e2a42995e0..4d6c2249e2 100644 --- a/docs/dev.rst +++ b/docs/dev.rst @@ -5,37 +5,84 @@ Open edX development In addition to running Open edX in production, Tutor can be used for local development of Open edX. This means that it is possible to hack on Open edX without setting up a Virtual Machine. Essentially, this replaces the devstack provided by edX. -The following commands assume you have previously launched a :ref:`local ` Open edX platform. 
If you have not done so already, you should run:: +For detailed explanations on how to work on edx-platform and its dependencies, see the :ref:`edx_platform` tutorial. - tutor local quickstart +.. _edx_platform_dev_env: -In order to run the platform in development mode, you **must** answer no ("n") to the question "Are you configuring a production platform". +First-time setup +---------------- -Note that the local.overhang.io `domain `__ and its `subdomains `__ all point to 127.0.0.1. This is just a domain name that was setup to conveniently access a locally running Open edX platform. +Firstly, either :ref:`install Tutor ` (for development against the named releases of Open edX) or :ref:`install Tutor Nightly ` (for development against Open edX's master branches). -Once the local platform has been configured, you should stop it so that it does not interfere with the development environment:: +Then, optionally, tell Tutor to use a local fork of edx-platform:: - tutor local stop + tutor mounts add ./edx-platform -Finally, you should build the ``openedx-dev`` docker image:: +Then, launch the developer platform setup process:: tutor images build openedx-dev + tutor dev launch -This ``openedx-dev`` development image differs from the ``openedx`` production image: +This will perform several tasks. It will: -- The user that runs inside the container has the same UID as the user on the host, in order to avoid permission problems inside mounted volumes (and in particular in the edx-platform repository). -- Additional python and system requirements are installed for convenient debugging: `ipython `__, `ipdb `__, vim, telnet. -- The edx-platform `development requirements `__ are installed. 
+* build the "openedx-dev" Docker image, which is based on the "openedx" production image but is `specialized for developer usage`_ (eventually with your fork), +* stop any existing locally-running Tutor containers, +* disable HTTPS, +* set ``LMS_HOST`` to `local.edly.io `_ (a convenience domain that simply `points at 127.0.0.1 `_), +* prompt for platform details (with suitable defaults), +* start LMS, CMS, supporting services, and any plugged-in services, +* ensure databases are created and migrated, and +* run service initialization scripts, such as service user creation and Waffle configuration. -Since the ``openedx-dev`` is based upon the ``openedx`` docker image, it should be re-built every time the ``openedx`` docker image is modified. Additionally, when a local clone of edx-platform is bind-mounted, it will: -Run a local development webserver --------------------------------- +* re-run setup.py, +* clean-reinstall Node modules, and +* regenerate static assets. -:: +Once setup is complete, the platform will be running in the background: - tutor dev runserver lms # Access the lms at http://local.overhang.io:8000 - tutor dev runserver cms # Access the cms at http://studio.local.overhang.io:8001 +* LMS will be accessible at `http://local.edly.io:8000 `_. +* CMS will be accessible at `http://studio.local.edly.io:8001 `_. +* Plugged-in services should be accessible at their documented URLs. + +Now, use the ``tutor dev ...`` command-line interface to manage the development environment. Some common commands are described below. + +.. note:: + + If you've added your edx-platform to the bind-mounted folders, you can remove it at any time by running:: + + tutor mounts remove ./edx-platform + + At any time, check your configuration by running:: + + tutor mounts list + + Read more about bind-mounts :ref:`below `. 
+ +Stopping the platform +--------------------- + +To bring down the platform's containers, simply run:: + + tutor dev stop + +Starting the platform back up +----------------------------- + +Once first-time setup has been performed with ``launch``, the platform can be started going forward with the lighter-weight ``start -d`` command, which brings up containers *detached* (that is: in the background), but does not perform any initialization tasks:: + + tutor dev start -d + +Or, to start the platform with containers *attached* (that is: in the foreground, the current terminal), omit the ``-d`` flag:: + + tutor dev start + +When running containers attached, stop the platform with ``Ctrl+c``, or switch to detached mode using ``Ctrl+z``. + +Finally, the platform can also be started back up with ``launch``. It will take longer than ``start``, but it will ensure that config is applied, databases are provisioned & migrated, plugins are fully initialized, and (if applicable) the bind-mounted edx-platform is set up. Notably, ``launch`` is idempotent, so it is always safe to run it again without risk to data. Including the ``--pullimages`` flag will also ensure that container images are up-to-date:: + + tutor dev launch --pullimages Running arbitrary commands -------------------------- @@ -52,136 +99,160 @@ To open a python shell in the LMS or CMS, run:: You can then import edx-platform and django modules and execute python code. -To collect assets, you can use the ``openedx-assets`` command that ships with Tutor:: - tutor dev run lms openedx-assets --env=dev +To rebuild assets, you can run the ``build-dev`` NPM script that comes with edx-platform:: - tutor dev run lms npm run build-dev -Point to a local edx-platform ------------------------------ -If you have one, you can point to a local version of `edx-platform `_ on your host machine. To do so, pass a ``-v/--volume`` option to the ``run`` and runserver commands. For instance:: +.. 
_specialized for developer usage: - tutor dev run -v /path/to/edx-platform:/openedx/edx-platform lms bash +Rebuilding the openedx-dev image +-------------------------------- -If you don't want to rewrite this option every time, you can define a command alias:: +The ``openedx-dev`` Docker image is based on the same ``openedx`` image used by ``tutor local ...`` to run LMS and CMS. However, it has a few differences to make it more convenient for developers: - alias tutor-dev-run-lms="tutor dev run -v /path/to/edx-platform:/openedx/edx-platform lms" +- The user that runs inside the container has the same UID as the user on the host, to avoid permission problems inside mounted volumes (and in particular in the edx-platform repository). +- Additional Python and system requirements are installed for convenient debugging: `ipython `__, `ipdb `__, vim, telnet. +- The edx-platform `development requirements `__ are installed. -For technical reasons, the ``-v`` option is only supported for the ``run`` and ``runserver`` commands. With these commands, only one service is started. But there are cases where you may want to launch and debug a complete Open edX platform with ``tutor dev start`` and mount a custom edx-platform fork. For instance, this might be needed when testing the interaction between multiple services. To do so, you should create a ``docker-compose.override.yml`` file that will specify a custom volume to be used with all ``dev`` commands:: - vim "$(tutor config printroot)/env/dev/docker-compose.override.yml" +If you are using a custom ``openedx`` image, then you will need to rebuild ``openedx-dev`` every time you modify ``openedx``. 
To do so, run:: -Then, add the following content:: + tutor images build openedx-dev - version: "3.7" - services: - lms: - volumes: - - /path/to/edx-platform/:/openedx/edx-platform - cms: - volumes: - - /path/to/edx-platform/:/openedx/edx-platform - lms-worker: - volumes: - - /path/to/edx-platform/:/openedx/edx-platform - cms-worker: - volumes: - - /path/to/edx-platform/:/openedx/edx-platform +Alternatively, the image will be automatically rebuilt every time you run:: -This override file will be loaded when running any ``tutor dev ..`` command. The edx-platform repo mounted at the specified path will be automaticall mounted inside all LMS and CMS containers. With this file, you should no longer specify the ``-v`` option from the command line with the ``run`` or ``runserver`` commands. + tutor dev launch -**Note:** containers are built on the Koa release. If you are working on a different version of Open edX, you will have to rebuild the ``openedx`` docker images with the version. See the :ref:`fork edx-platform section `. -Prepare the edx-platform repo -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. _bind_mounts: -In order to run a fork of edx-platform, dependencies need to be properly installed and assets compiled in that repo. To do so, run:: +Sharing directories with containers +----------------------------------- - tutor dev run -v /path/to/edx-platform:/openedx/edx-platform lms bash - pip install --requirement requirements/edx/development.txt - python setup.py install - npm install - openedx-assets build --env=dev +It may sometimes be convenient to mount container directories on the host, for instance: for editing and debugging. Tutor provides different solutions to this problem. -Debug edx-platform -~~~~~~~~~~~~~~~~~~ +.. 
_persistent_mounts: +Persistent bind-mounted volumes with ``tutor mounts`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To debug a local edx-platform repository, add a ``import ipdb; ipdb.set_trace()`` breakpoint anywhere in your code and run:: +``tutor mounts`` is a set of Tutor commands to manage bind-mounted host directories. Directories are mounted `both` at build time and run time: - tutor dev runserver -v /path/to/edx-platform:/openedx/edx-platform lms +- At build time: some of the host directories will be added to the `Docker build context `__. This makes it possible to transparently build a Docker image using a locally checked-out repository. -XBlock and edx-platform plugin development ------------------------------------------ +- At run time: host directories will be bind-mounted in running containers, using either an automatic or a manual configuration. -In some cases you will have to develop features for packages that are pip-installed next to edx-platform. This is quite easy with Tutor. Just add your packages to the ``$(tutor config printroot)/env/build/openedx/requirements/private.txt`` file. To avoid re-building the openedx Docker image at every change, you should add your package in editable mode. For instance:: - echo "-e ./mypackage" >> "$(tutor config printroot)/env/build/openedx/requirements/private.txt" +After some directories have been added with ``tutor mounts add``, all ``tutor dev`` and ``tutor local`` commands will make use of these bind-mount volumes. -The ``requirements`` folder should have the following content:: +Values passed to ``tutor mounts add ...`` can take one of two forms. The first is explicit:: - env/build/openedx/requirements/ - private.txt - mypackage/ - setup.py - ... 
+ tutor mounts add lms:/path/to/edx-platform:/openedx/edx-platform -You will have to re-build the openedx Docker image once:: +The second is implicit:: - tutor images build openedx + tutor mounts add /path/to/edx-platform -You should then run the development server as usual, with ``runserver``. Every change made to the ``mypackage`` folder will be picked up and the development server will be automatically reloaded. -.. _theming: +With the explicit form, the value means "bind-mount the host folder /path/to/edx-platform to /openedx/edx-platform in the lms container at run time". -Customised themes ----------------- +If you use the explicit format, you will quickly realise that you usually want to bind-mount folders in multiple containers at a time. For instance, you will want to bind-mount the edx-platform repository in the "cms" container, but also the "lms-worker" and "cms-worker" containers. To do that, write instead:: -With Tutor, it's pretty easy to develop your own themes. Start by placing your files inside the ``env/build/openedx/themes`` directory. For instance, you could start from the ``edx.org`` theme present inside the ``edx-platform`` repository:: + # each service is added to a comma-separated list + tutor mounts add lms,cms,lms-worker,cms-worker:/path/to/edx-platform:/openedx/edx-platform +This command line is a bit cumbersome. In addition, with this explicit form, the edx-platform repository will *not* be added to the build context at build time. But Tutor can be smart about bind-mounting folders to the right containers in the right place when you use the implicit form of the ``tutor mounts add`` command. For instance, the following implicit form can be used instead of the explicit form above:: - cp -r /path/to/edx-platform/themes/edx.org "$(tutor config printroot)/env/build/openedx/themes/" + tutor mounts add /path/to/edx-platform -.. warning:: - You should not create a soft link here. 
If you do, it will trigger a ``Theme not found in any of the themes dirs`` error. This is because soft links are not properly resolved from inside docker containers. +With this implicit form, the edx-platform repo will be bind-mounted in the containers at run time, just like with the explicit form. But in addition, the edx-platform will also automatically be added to the Docker image at build time. -Then, run a local webserver:: +To check whether you have used the correct syntax, you should run ``tutor mounts list``. This command will indicate whether your folders will be bind-mounted at build time, run time, or both. For instance:: - tutor dev runserver lms + $ tutor mounts add /path/to/edx-platform + $ tutor mounts list + - name: /path/to/edx-platform + build_mounts: + - image: openedx + context: edx-platform + - image: openedx-dev + context: edx-platform + compose_mounts: + - service: lms + container_path: /openedx/edx-platform + - service: cms + container_path: /openedx/edx-platform + - service: lms-worker + container_path: /openedx/edx-platform + - service: cms-worker + container_path: /openedx/edx-platform + - service: lms-job + container_path: /openedx/edx-platform + - service: cms-job + container_path: /openedx/edx-platform -The LMS can then be accessed at http://local.overhang.io:8000. You will then have to :ref:`enable that theme ` for the development domain names:: +So, when should you *not* be using the implicit form? That would be when Tutor does not know where to bind-mount your host folders. For instance, if you wanted to bind-mount your edx-platform virtual environment located in ``~/venvs/edx-platform``, you should not write ``mounts add ~/venvs/edx-platform``, because that folder would be mounted in a way that would override the edx-platform repository in the container. 
Instead, you should write:: - tutor dev settheme mythemename local.overhang.io:8000 studio.local.overhang.io:8001 + tutor mounts add lms:~/venvs/edx-platform:/openedx/venv -Re-build development docker image (and compile assets):: +Verify the configuration with the ``list`` command:: - tutor images build openedx-dev + $ tutor mounts list + - name: lms:~/venvs/edx-platform:/openedx/venv + build_mounts: [] + compose_mounts: + - service: lms + container_path: /openedx/venv + +.. note:: Remember to setup your edx-platform repository for development! See :ref:`edx_platform_dev_env`. -Watch the themes folders for changes (in a different terminal):: +Copy files from containers to the local filesystem +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - tutor dev run watchthemes +Sometimes, you may want to modify some of the files inside a container for which you don't have a copy on the host. A typical example is when you want to troubleshoot a Python dependency that is installed inside the application virtual environment. In such cases, you want to first copy the contents of the virtual environment from the container to the local filesystem. To that end, Tutor provides the ``tutor dev copyfrom`` command. First, copy the contents of the container folder to the local filesystem:: -Make changes to some of the files inside the theme directory: the theme assets should be automatically recompiled and visible at http://local.overhang.io:8000. + tutor dev copyfrom lms /openedx/venv ~ -Custom edx-platform settings ----------------------------- +Then, bind-mount that folder back in the container with the ``MOUNTS`` setting (described :ref:`above `):: -By default, tutor settings files are mounted inside the docker images at ``/openedx/edx-platform/lms/envs/tutor/`` and ``/openedx/edx-platform/cms/envs/tutor/``. In the various ``dev`` commands, the default ``edx-platform`` settings module is set to ``tutor.development`` and you don't have to do anything to set up these settings. 
+ tutor mounts add lms:~/venv:/openedx/venv -If, for some reason, you want to use different settings, you will need to pass the ``-e SETTINGS=mycustomsettings`` option to each command. Alternatively, you can define the ``TUTOR_EDX_PLATFORM_SETTINGS`` environment variable. +You can then edit the files in ``~/venv`` on your local filesystem and see the changes live in your "lms" container. -For instance, let's assume you have created the ``/path/to/edx-platform/lms/envs/mysettings.py`` and ``/path/to/edx-platform/cms/envs/mysettings.py`` modules. These settings should be pretty similar to the following files:: +Manual bind-mount to any directory +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - $(tutor config printroot)/env/apps/openedx/tutor/lms/development.py - $(tutor config printroot)/env/apps/openedx/tutor/cms/development.py +.. warning:: Manually bind-mounting volumes with the ``--volume`` option makes it difficult to simultaneously bind-mount to multiple containers. Also, the ``--volume`` options are not compatible with ``start`` commands. As an alternative, you should consider following the instructions above: :ref:`persistent_mounts`. -Alternatively, the ``mysettings.py`` files can import the tutor development settings:: +The above solution may not work for you if you already have an existing directory, outside of the "volumes/" directory, which you would like mounted in one of your containers. For instance, you may want to mount your copy of the `edx-platform `__ repository. In such cases, you can simply use the ``-v/--volume`` `Docker option `__:: - # Beginning of mysettings.py - from .tutor.development import * + tutor dev run --volume=/path/to/edx-platform:/openedx/edx-platform lms bash -You should then specify the settings to use on the host:: +Override docker-compose volumes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - export TUTOR_EDX_PLATFORM_SETTINGS=mysettings +.. warning:: While the option described below "works", it will only bind-mount directories at run-time. 
In many cases you really want to bind-mount directories at build-time. For instance: when working on edx-platform requirements. As an alternative, you should consider following the instructions above: :ref:`persistent_mounts`. + +Adding items to the ``MOUNTS`` setting effectively adds new bind-mount volumes to the ``docker-compose.yml`` files. But you might want to have more control over your volumes, such as adding read-only options, or customising other fields of the different services. To address these issues, you can create a ``docker-compose.override.yml`` file that will specify custom volumes to be used with all ``dev`` commands:: + + vim "$(tutor config printroot)/env/dev/docker-compose.override.yml" + +You are then free to bind-mount any directory to any container. For instance, to mount your own edx-platform fork:: + + services: + lms: + volumes: + - /path/to/edx-platform:/openedx/edx-platform + cms: + volumes: + - /path/to/edx-platform:/openedx/edx-platform + lms-worker: + volumes: + - /path/to/edx-platform:/openedx/edx-platform + cms-worker: + volumes: + - /path/to/edx-platform:/openedx/edx-platform -From then on, all ``dev`` commands will use the ``mysettings`` module. For instance:: +This override file will be loaded when running any ``tutor dev ..`` command. The edx-platform repo mounted at the specified path will be automatically mounted inside all LMS and CMS containers. - tutor dev runserver -v /path/to/edx-platform:/openedx/edx-platform lms +.. note:: + The ``tutor local`` commands load the ``docker-compose.override.yml`` file from the ``$(tutor config printroot)/env/local/docker-compose.override.yml`` directory. One-time jobs from initialisation commands load the ``local/docker-compose.jobs.override.yml`` and ``dev/docker-compose.jobs.override.yml``. 
diff --git a/docs/cli_download.rst b/docs/download/binary.rst similarity index 82% rename from docs/cli_download.rst rename to docs/download/binary.rst index a71fd60c79..69c1c75ccf 100644 --- a/docs/cli_download.rst +++ b/docs/download/binary.rst @@ -3,4 +3,4 @@ .. parsed-literal:: sudo curl -L "\ https\ ://github.com/overhangio/tutor/releases/download/v\ |tutor_version|/tutor-$(uname -s)_$(uname -m)" -o /usr/local/bin/tutor - sudo chmod 0755 /usr/local/bin/tutor + sudo chmod 0755 /usr/local/bin/tutor \ No newline at end of file diff --git a/docs/download/pip.rst b/docs/download/pip.rst new file mode 100644 index 0000000000..015dce421e --- /dev/null +++ b/docs/download/pip.rst @@ -0,0 +1,3 @@ +.. parsed-literal:: + + pip install "tutor[full]" diff --git a/docs/extra.rst b/docs/extra.rst deleted file mode 100644 index 5da269bb85..0000000000 --- a/docs/extra.rst +++ /dev/null @@ -1,76 +0,0 @@ -.. _extra: - -Extra features -============== - -.. _webui: - -Web UI ------- - -Tutor comes with a web user interface (UI) that allows you to administer your Open edX platform remotely. It's especially convenient for remote administration of the platform. - -Launching the web UI -~~~~~~~~~~~~~~~~~~~~ - -:: - - tutor webui start - -You can then access the interface at http://localhost:3737, or http://youserverurl:3737. - -.. image:: img/webui.png - -All ``tutor`` commands can be executed from this web UI: you just don't need to prefix the commands with ``tutor``. For instance, to deploy a local Open edX instance, run:: - - local quickstart - -instead of ``tutor local quickstart``. - -Authentication -~~~~~~~~~~~~~~ - -**WARNING** Once you launch the web UI, it is accessible by everyone, which means that your Open edX platform is at risk. If you are planning to leave the web UI up for a long time, you should setup a user and password for authentication:: - - tutor webui configure - -.. 
_mobile: - -Mobile Android application --------------------------- - -With Tutor, you can build an Android mobile application for your platform. To build the application in debug mode, run:: - - tutor android build debug - -The ``.apk`` file will then be available in ``$(tutor config printroot)/data/android``. Transfer it to an Android phone to install the application. You should be able to sign in and view available courses. - -Building a custom Android app -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The Android app is built from the `official edx-app-android repository `__. To change this repository or the app version, you can simply build a different docker image with:: - - tutor images build \ - --build-arg ANDROID_APP_REPOSITORY=https://github.com/mycustomfork/edx-app-android \ - --build-arg ANDROID_APP_VERSION=master \ - android - -Releasing an Android app -~~~~~~~~~~~~~~~~~~~~~~~~ - -**Note**: this is an untested feature. - -Releasing an Android app on the Play Store requires to build the app in release mode. To do so, edit the ``$TUTOR_ROOT/config.yml`` configuration file and define the following variables:: - - ANDROID_RELEASE_STORE_PASSWORD - ANDROID_RELEASE_KEY_PASSWORD - ANDROID_RELEASE_KEY_ALIAS - -Then, place your keystore file in ``$(tutor config printroot)/env/android/app.keystore``. Finally, build the application with:: - - tutor android build release - -Customising the Android app -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Customising the application, such as the logo or the background image, is currently not supported. If you are interested by this feature, please tell us about it in the Tutor `discussion forums `_. diff --git a/docs/faq.rst b/docs/faq.rst index bf82a7199a..373519f9c0 100644 --- a/docs/faq.rst +++ b/docs/faq.rst @@ -6,58 +6,40 @@ FAQ What is Tutor? -------------- -Tutor is an open source distribution of `Open edX `_. It uses the original code from the various Open edX repositories, such as `edx-platform `_, `cs_comments_service `_, etc. 
and packages everything in a way that makes it very easy to install, administer and upgrade Open edX. In particular, all services are run inside Docker containers. +Tutor is an open source distribution of the `Open edX platform `_. It uses the original code from the various Open edX repositories, such as `edx-platform `_, `cs_comments_service `_, etc. and packages everything in a way that makes it very easy to install, administer and upgrade an Open edX installation. In particular, all services are run inside Docker containers. -Tutor makes it possible to deploy Open edX locally, with `docker-compose `_ or on an existing `Kubernetes cluster `_. +Tutor makes it possible to deploy an Open edX instance locally, with `docker-compose `_ or on an existing `Kubernetes cluster `_. Want to learn more? Take a look at the :ref:`getting started concepts `. What is the purpose of Tutor? ----------------------------- -To make it possible to deploy, administer and upgrade Open edX anywhere, easily. +To make it possible to deploy, administer and upgrade an Open edX installation anywhere, easily. .. _native: -What's the difference with the official "native" installation? --------------------------------------------------------------- - -The `native installation `_ maintained by edX relies on `Ansible scripts `_ to deploy Open edX on one or multiple servers. These scripts suffer from a couple issues that Tutor tries to address: - -1. Complexity: the scripts contain close to 35k lines of code spread over 780 files. They are really hard to understand, debug, and modify, and they are extremly slow. As a consequence, Open edX is often wrongly perceived as a project that is overly complex to manage. In contrast, Tutor generates mostly ``Dockerfile`` and ``docker-compose.yml`` files that make it easy to understand what is going on. Also, the whole installation should take about 10 minutes. -2. 
Isolation from the OS: Tutor barely needs to touch your server because the entire platform is packaged inside Docker containers. You are thus free to run other services on your server without fear of indirectly crashing your Open edX platform. -3. Compatibility: Open edX is only compatible with Ubuntu 16.04, but that shouldn't mean you are forced to run this specific OS. With Tutor, you can deploy Open edX on just any server you like: Ubuntu 18.04, Red Hat, Debian... All docker-compatible platforms are supported. -4. Security: because you are no longer bound to a single OS, with Tutor you are now free to install security-related upgrades as soon as they become available. -5. Portability: Tutor makes it easy to move your platform from one server to another. Just zip-compress your Tutor project root, send it to another server and you're done. - -There are also many features that are not included in the native installation, such as a :ref:`web user interface ` for remotely installing the platform, :ref:`Kubernetes deployment `, additional languages, etc. You'll discover these differences as you explore Tutor :) - -What's the difference with the official devstack? +What's the difference between Tutor and Devstack? ------------------------------------------------- -The `devstack `_ is meant for development only, not for production deployment. Tutor can be used both for production deployment and :ref:`locally hacking on Open edX `. +The `devstack `_ was a tool meant only for local development; it is now deprecated, in favor of Tutor. Tutor can be used both for production deployment and :ref:`locally hacking on Open edX `. -Is Tutor officially supported by edX? -------------------------------------- +Is Tutor officially supported by the Open edX project? +------------------------------------------------------ -No. Tutor is developed independently from edX. That means that the folks at edX.org are *not* responsible for troubleshooting issues of this project. 
Please don't bother Ned ;-) +As of the Open edX Maple release (December 9th 2021), Tutor is the only community-supported installation method for the Open edX platform: see the `official installation instructions `__. What features are missing from Tutor? ------------------------------------- -Tutor tries very hard to support all major Open edX features, notably in the form of :ref:`plugins `. In particular, the discovery and ecommerce services, once unavailable in Tutor, can now be easily installed via plugins. If you are interested in sponsoring the development of a new plugin, please `get in touch `__! +Tutor tries very hard to support all major Open edX platform features, notably in the form of :ref:`plugins `. If you are interested in sponsoring the development of a new plugin, please `get in touch `__! -It should be noted that the `Analytics `__ stack is currently unsupported, and will likely stay so in the future, as it would require a tremendous amount of work to containerize all the components. For generating great-looking analytics reports, we recommend the `Figures plugin `__. +It should be noted that the `Insights `__ stack is currently unsupported, because of its complexity, lack of support, and extensibility. To replace it, Edly has developed `Cairn `__ the next-generation analytics solution for the Open edX platform, and the Open edX community is working on a new analytics project, in beta as of the Redwood release, called `Aspects `_. You should check them out πŸ˜‰ Are there people already running this in production? ---------------------------------------------------- -Yes, many of them. There is no way to count precisely how many running Open edX platforms were deployed with Tutor, but from feedback collected directly from real users, there must be dozens, if not hundreds. Tutor is also used by some Open edX providers who are hosting platforms for their customers. - -Why should I trust software written by some random guy on the Internet? 
------------------------------------------------------------------------ +Yes: system administrators all around the world use Tutor to run their Open edX platforms, from single-class school teachers to renowned universities, Open edX SaaS providers, and nation-wide learning platforms. -You shouldn't :) Tutor is actively maintained by `Overhang.IO `_, a France-based company founded by `RΓ©gis Behmo `_. RΓ©gis has been working on Tutor since early 2018; he has been a contributor of the Open edX project since 2015. In particular, he has worked for 2 years at `FUN-MOOC `_, one of the top 5 largest Open edX platforms in the world. He presented several talks at the Open edX conferences: +Why should I trust your software? +--------------------------------- -- *Deploying a robust, scalable Open edX platform in 1 click (or less) with Tutor*, March 2019 (`video `_, `slides `_) -- *Videofront: a Self-Hosted YouTube*, June 2017 (`video `__, `slides `__) -- *Open edX 101: A Source Code Review*, June 2016 (`video `__, `slides `__) -- *FUN: Life in the Avant-Garde*, Oct. 2015 (`video `__, `slides `__) +You shouldn't :) Tutor is actively maintained by `Edly `__, a US-based ed-tech company facilitating over 40 million learners worldwide through its eLearning solutions. With a credible engineering team that has won clients' hearts globally led by `RΓ©gis Behmo `__, Tutor has empowered numerous edtech ventures over the years. Additionally, Tutor is a `community-led project `__ with many contributions from its :ref:`project maintainers `. diff --git a/docs/gettingstarted.rst b/docs/gettingstarted.rst index b33914dbe8..3341091c78 100644 --- a/docs/gettingstarted.rst +++ b/docs/gettingstarted.rst @@ -5,8 +5,8 @@ Getting started .. 
toctree:: :maxdepth: 2 - + + install intro quickstart - install - whatnext \ No newline at end of file + whatnext diff --git a/docs/img/favicon.png b/docs/img/favicon.png index 662c44611f..3e169ce6ee 100644 Binary files a/docs/img/favicon.png and b/docs/img/favicon.png differ diff --git a/docs/img/launch.webp b/docs/img/launch.webp new file mode 100644 index 0000000000..1520accd15 Binary files /dev/null and b/docs/img/launch.webp differ diff --git a/docs/img/logo.png b/docs/img/logo.png deleted file mode 100644 index fa447e3eda..0000000000 Binary files a/docs/img/logo.png and /dev/null differ diff --git a/docs/img/portainer.png b/docs/img/portainer.png new file mode 100644 index 0000000000..7b9b6992a3 Binary files /dev/null and b/docs/img/portainer.png differ diff --git a/docs/img/quickstart.gif b/docs/img/quickstart.gif deleted file mode 100644 index 901f9e50de..0000000000 Binary files a/docs/img/quickstart.gif and /dev/null differ diff --git a/docs/img/tutor-logo.png b/docs/img/tutor-logo.png deleted file mode 100644 index 59ea2c4237..0000000000 Binary files a/docs/img/tutor-logo.png and /dev/null differ diff --git a/docs/img/webui.png b/docs/img/webui.png deleted file mode 100644 index 4f52c60058..0000000000 Binary files a/docs/img/webui.png and /dev/null differ diff --git a/docs/index.rst b/docs/index.rst index c9d7b089ac..4efaf54ff6 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,10 +1,13 @@ +Tutor: the Docker-based Open edX distribution designed for peace of mind +======================================================================== + .. include:: ../README.rst :start-after: _readme_intro_start: :end-before: _readme_intro_end: - -.. image:: ./img/quickstart.gif - :alt: Tutor local quickstart - :target: https://terminalizer.com/view/91b0bfdd557 + +.. 
image:: ./img/launch.webp + :alt: Tutor local launch + :target: https://www.terminalizer.com/view/3a8d55835686 ---------------------------------- @@ -22,12 +25,23 @@ gettingstarted run configuration - plugins - extra + plugins/index + reference/index + tutorials/index troubleshooting tutor faq + +.. toctree:: + :maxdepth: 2 + :caption: Project links + + Source code + Community forums + Pypi releases + Changelog + Source code ----------- @@ -36,11 +50,11 @@ The complete source code for Tutor is available on Github: https://github.com/ov .. include:: ../README.rst :start-after: _readme_support_start: :end-before: _readme_support_end: - + .. include:: ../README.rst :start-after: _readme_contributing_start: :end-before: _readme_contributing_end: - + License ------- @@ -48,6 +62,6 @@ This work is licensed under the terms of the `GNU Affero General Public License The AGPL license covers the Tutor code, including the Dockerfiles, but not the content of the Docker images which can be downloaded from https://hub.docker.com. Software other than Tutor provided with the docker images retain their original license. -The Tutor plugin system is licensed under the terms of the `Apache License, Version 2.0 `__. +The Tutor plugin and hooks system is licensed under the terms of the `Apache License, Version 2.0 `__. -The :ref:`Tutor Web UI ` depends on the `Gotty `_ binary, which is provided under the terms of the `MIT license `_. +Β© 2021 Tutor is a registered trademark of SASU NULI NULI. All Rights Reserved. diff --git a/docs/install.rst b/docs/install.rst index 4d27a0d019..b5d8ccadb3 100644 --- a/docs/install.rst +++ b/docs/install.rst @@ -1,23 +1,24 @@ .. _install: -Install Tutor -============= +Installing Tutor +================ .. _requirements: Requirements ------------ -* Supported OS: Tutor runs on any 64-bit, UNIX-based system. It was also reported to work on Windows. +* Supported OS: Tutor runs on any 64-bit, UNIX-based OS. 
It was also reported to work on Windows (with `WSL 2 `__). +* Architecture: Both AMD64 and ARM64 are supported. * Required software: - - `Docker `__: v18.06.0+ - - `Docker Compose `__: v1.22.0+ + - `Docker `__: v24.0.5+ (with BuildKit 0.11+) + - `Docker Compose `__: v2.0.0+ .. warning:: Do not attempt to simply run ``apt-get install docker docker-compose`` on older Ubuntu platforms, such as 16.04 (Xenial), as you will get older versions of these utilities. -* Ports 80 and 443 should be open. If other web services run on these ports, check the section on :ref:`how to setup a web proxy `. +* Ports 80 and 443 should be open. If other web services run on these ports, check the tutorial on :ref:`how to setup a web proxy `. * Hardware: - Minimum configuration: 4 GB RAM, 2 CPU, 8 GB disk space @@ -26,56 +27,56 @@ Requirements .. note:: On Mac OS, by default, containers are allocated 2 GB of RAM, which is not enough. You should follow `these instructions from the official Docker documentation `__ to allocate at least 4-5 GB to the Docker daemon. If the deployment fails because of insufficient memory during database migrations, check the :ref:`relevant section in the troubleshooting guide `. -.. _install_binary: +Download +-------- -Direct binary download ----------------------- +Choose **one** of the installation methods below. If you install Tutor in different ways, you will end up with multiple ``tutor`` executables, which is going to be very confusing. At any time, you can check the path to your ``tutor`` executable by running ``which tutor``. -The latest binaries can be downloaded from https://github.com/overhangio/tutor/releases. From the command line: +Python package +~~~~~~~~~~~~~~ -.. include:: cli_download.rst +.. include:: download/pip.rst -This is the simplest and recommended installation method for most people. Note however that you will not be able to use custom plugins with this pre-compiled binary. 
The only plugins you can use with this approach are those that are already bundled with the binary: see the :ref:`existing plugins `. +Check the "tutor" package on Pypi: https://pypi.org/project/tutor. You will need Python >= 3.6 with pip and the libyaml development headers. On Ubuntu, these requirements can be installed by running:: -.. _install_source: + sudo apt install python3 python3-pip libyaml-dev -Alternative installation methods --------------------------------- +.. _install_binary: -If you would like to inspect the Tutor source code, you are most welcome to install Tutor from `Pypi `_ or directly from `the Github repository `_. You will need python >= 3.6 with pip and the libyaml development headers. On Ubuntu, these requirements can be installed by running:: +Binary release +~~~~~~~~~~~~~~ - sudo apt install python3 python3-pip libyaml-dev +The latest binaries can be downloaded from https://github.com/overhangio/tutor/releases. From the command line: -Installing from pypi -~~~~~~~~~~~~~~~~~~~~ +.. include:: download/binary.rst -:: +This is the simplest and recommended installation method for most people who do not have Python 3 on their machine. Note however that **you will not be able to use custom plugins** with this pre-compiled binary. The only plugins you can use with this approach are those that are already bundled with the binary: see the :ref:`existing plugins `. - pip install tutor-openedx +.. _install_source: Installing from source ~~~~~~~~~~~~~~~~~~~~~~ -:: +To inspect the Tutor source code, install Tutor from `the Github repository `__:: git clone https://github.com/overhangio/tutor cd tutor pip install -e . -DNS records ------------ +Configuring DNS records +----------------------- -When running a server in production, it is necessary to define `DNS records `__ which will make it possible to access your Open edX platform by name in your browser. 
The precise procedure to create DNS records vary from one provider to the next and is beyond the scope of these docs. You should create a record of type A with a name equal to your LMS hostname (given by ``tutor config printvalue LMS_HOST``) and a value that indicates the IP address of your server. Applications other than the LMS, such as the studio, ecommerce, etc. typically reside in subdomains of the LMS. Thus, you should also create a CNAME record to point all subdomains of the LMS to the LMS_HOST. +When running a server in production, it is necessary to define `DNS records `__ which will make it possible to access your Open edX platform by name in your browser. The precise procedure to create DNS records varies from one provider to the next and is beyond the scope of these docs. You should create a record of type A with a name equal to your LMS hostname (given by ``tutor config printvalue LMS_HOST``) and a value that indicates the IP address of your server. Applications other than the LMS, such as the studio, ecommerce, etc. typically reside in subdomains of the LMS. Thus, you should also create a CNAME record to point all subdomains of the LMS to the LMS_HOST. -For instance, the demo Open edX server that runs at http://demo.openedx.overhang.io has the following DNS records:: +For instance, to run an Open edX server at https://learn.mydomain.com on a server with IP address 1.1.1.1, you would need to configure the following DNS records:: - demo.openedx 1800 IN A 172.105.89.208 - *.demo.openedx 1800 IN CNAME demo.openedx.overhang.io. + learn 1800 IN A 1.1.1.1 + *.learn 1800 IN CNAME learn.mydomain.com. .. _cloud_install: Zero-click AWS installation -~~~~~~~~~~~~~~~~~~~~~~~~~~~ +--------------------------- Tutor can be launched on Amazon Web Services very quickly with the `official Tutor AMI `__. Shell access is not required, as all configuration will happen through the Tutor web user interface. 
For detailed installation instructions, we recommend watching the following video: @@ -86,42 +87,105 @@ Tutor can be launched on Amazon Web Services very quickly with the `official Tut Upgrading --------- -With Tutor, it is very easy to upgrade to a more recent Open edX or Tutor release. Just install the latest ``tutor`` version (using either methods above) and run the ``quickstart`` command again. If you have :ref:`customised ` your docker images, you will have to re-build them prior to running ``quickstart``. +To upgrade your Open edX site or benefit from the latest features and bug fixes, you should simply upgrade Tutor. Start by backing up your data and reading the `release notes `_ for the current release. + +Next, upgrade the "tutor" package and its dependencies:: + + pip install --upgrade "tutor[full]" + +Then run the ``launch`` command again. Depending on your deployment target, run one of:: -``quickstart`` should take care of automatically running the upgrade process. If for some reason you need to *manually* upgrade from an Open edX release to the next, you should run ``tutor local upgrade``. For instance, to upgrade from Juniper to Koa, run:: + tutor local launch # for local installations + tutor dev launch # for local development installations + tutor k8s launch # for Kubernetes installation - tutor local upgrade --from=juniper +Upgrading with custom Docker images +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you run :ref:`customised ` Docker images, you need to rebuild them before running ``launch``:: + + tutor config save + tutor images build all # specify here the images that you need to build + tutor local launch + +Upgrading to a new Open edX release +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Major Open edX releases are published twice a year, in June and December, by the Open edX `Build/Test/Release working group `__. When a new Open edX release comes out, Tutor gets a major version bump (see :ref:`versioning`). 
Such an upgrade typically includes multiple breaking changes. Any upgrade is final because downgrading is not supported. Thus, when upgrading your platform from one major version to the next, it is strongly recommended to do the following: + +1. Read the changes listed in the `CHANGELOG.md `__ file. Breaking changes are identified by a "πŸ’₯". +2. Perform a backup (see the :ref:`backup tutorial `). On a local installation, this is typically done with:: + + tutor local stop + sudo rsync -avr "$(tutor config printroot)"/ /tmp/tutor-backup/ + +3. If you created custom plugins, make sure that they are compatible with the newer release. +4. Test the new release in a sandboxed environment. +5. If you are running edx-platform, or some other repository from a custom branch, then you should rebase (and test) your changes on top of the latest release tag (see :ref:`edx_platform_fork`). + +The process for upgrading from one major release to the next works similarly to any other upgrade, with the ``launch`` command (see above). The single difference is that if the ``launch`` command detects that your tutor environment was generated with an older release, it will perform a few release-specific upgrade steps. These extra upgrade steps will be performed just once. But they will be ignored if you updated your local environment (for instance: with ``tutor config save``) before running ``launch``. This situation typically occurs if you need to re-build some Docker images (see above). In such a case, you should make use of the ``upgrade`` command. For instance, to upgrade a local installation from Quince to Redwood and rebuild some Docker images, run:: + + tutor config save + tutor images build all # list the images that should be rebuilt here + tutor local upgrade --from=quince + tutor local launch + + +Running older releases of Open edX +------------------------------------- + +Instructions for installing the appropriate Tutor version for older Open edX releases. 
Each command ensures compatibility between Open edX and its corresponding Tutor version. For more details on versioning conventions in Tutor, see the :ref:`versioning` section. + ++-------------------+---------------+--------------------------------------------+ +| Open edX Release | Tutor version | Installation command | ++===================+===============+============================================+ +| Juniper | v10 | pip install 'tutor[full]>=10.0.0,<11.0.0' | ++-------------------+---------------+--------------------------------------------+ +| Koa | v11 | pip install 'tutor[full]>=11.0.0,<12.0.0' | ++-------------------+---------------+--------------------------------------------+ +| Lilac | v12 | pip install 'tutor[full]>=12.0.0,<13.0.0' | ++-------------------+---------------+--------------------------------------------+ +| Maple | v13 | pip install 'tutor[full]>=13.0.0,<14.0.0' | ++-------------------+---------------+--------------------------------------------+ +| Nutmeg | v14 | pip install 'tutor[full]>=14.0.0,<15.0.0' | ++-------------------+---------------+--------------------------------------------+ +| Olive | v15 | pip install 'tutor[full]>=15.0.0,<16.0.0' | ++-------------------+---------------+--------------------------------------------+ +| Palm | v16 | pip install 'tutor[full]>=16.0.0,<17.0.0' | ++-------------------+---------------+--------------------------------------------+ +| Quince | v17 | pip install 'tutor[full]>=17.0.0,<18.0.0' | ++-------------------+---------------+--------------------------------------------+ +| Redwood | v18 | pip install 'tutor[full]>=18.0.0,<19.0.0' | ++-------------------+---------------+--------------------------------------------+ .. _autocomplete: -Autocomplete ------------- +Shell autocompletion +-------------------- -Tutor is built on top of `Click `_, which is a great library for building command line interface (CLI) tools. As such, Tutor benefits from all Click features, including `auto-completion `_. 
After installing Tutor, auto-completion can be enabled by running:: +Tutor is built on top of `Click `_, which is a great library for building command line interface (CLI) tools. As such, Tutor benefits from all Click features, including `auto-completion `_. After installing Tutor, auto-completion can be enabled in bash by running:: - _TUTOR_COMPLETE=source tutor >> ~/.bashrc + _TUTOR_COMPLETE=bash_source tutor >> ~/.bashrc If you are running zsh, run instead:: - _TUTOR_COMPLETE=source_zsh tutor >> ~/.zshrc + _TUTOR_COMPLETE=zsh_source tutor >> ~/.zshrc After opening a new shell, you can test auto-completion by typing:: tutor -.. include:: podman.rst - Uninstallation -------------- -It is fairly easy to completely uninstall Tutor and to delete the Open edX platforms that is running locally. +It is fairly easy to completely uninstall Tutor and to delete the Open edX platforms that are running locally. -First of all, stop any locally-running platform:: +First of all, stop any locally-running platform and remove all Tutor containers:: - tutor local stop - tutor dev stop + tutor local dc down --remove-orphans + tutor dev dc down --remove-orphans -Then, delete all data associated to your Open edX platform:: +Then, delete all data associated with your Open edX platform:: # WARNING: this step is irreversible sudo rm -rf "$(tutor config printroot)" @@ -129,7 +193,13 @@ Then, delete all data associated to your Open edX platform:: Finally, uninstall Tutor itself:: # If you installed tutor from source - pip uninstall tutor-openedx + pip uninstall tutor # If you downloaded the tutor binary sudo rm /usr/local/bin/tutor + + # Optionally, you may want to remove Tutor plugins installed. 
+ # You can get a list of the installed plugins: + pip freeze | grep tutor + # You can then remove them using the following command: + pip uninstall diff --git a/docs/intro.rst b/docs/intro.rst index c6b4a23d85..ee96c66439 100644 --- a/docs/intro.rst +++ b/docs/intro.rst @@ -6,7 +6,7 @@ Concepts What is Open edX? ----------------- -`Open edX `_ is a thriving open source project, backed by a great community, for running an online learning platform at scale. Open edX comes with an LMS (Learning Management System) where students access course contents, a CMS (Content Management System) that course staff uses to design courses, and a few other components to provide more services to students, course staff and platform administrators. +`Open edX `_ is a thriving open source project, backed by a great community, for running an online learning platform at scale. Open edX comes with an LMS (Learning Management System) where students access course contents, a CMS (Content Management System) that course staff uses to design courses, and a few other components to provide more services to students, course staff, and platform administrators. Should I use Open edX? ---------------------- @@ -18,80 +18,120 @@ Open edX competitors include `Moodle `__, `Instructure's Ca * Multiple extension points for comprehensive customization * Modern, intuitive user interface to keep students engaged -Open edX is a safe bet: it is backed by edX.org, a US-based non-profit that is committed to open source and which runs Open edX to service its millions of learners. With Open edX you can be sure that the features you need will be available. If it's good enough for Harvard, the MIT or the French government, then it will probably also work for you. +Open edX is a safe bet: it is backed by edX.org, a US-based non-profit that is committed to open source and which runs Open edX to service its millions of learners. With Open edX you can be sure that the features you need will be available. 
If it's good enough for Harvard, MIT, or the French government, then it will probably also work for you.
As a consequence, people tend not to upgrade their platform and keep running on deprecated releases. Tutor makes it possible even to non-technical users to launch, manage and upgrade Open edX at any scale. Should you choose at some point that Tutor is not the right solution for you, you always have an escape route: because Tutor is open source software, you can easily dump your data and switch to a different installation method. But we are confident you will not do that πŸ˜‰ +Running software on-premises is cheaper only if your management costs don't go through the roof. You do not want to hire a full-time devops team just for managing your online learning platform. This is why we created Tutor: to make it easy to run a state-of-the-art online learning platform without breaking the bank. Historically, it's always been difficult to install Open edX with native installation scripts. For instance, there are no official instructions for upgrading an existing Open edX platform: the `recommended approach `__ is to backup all data, trash the server, and create a new one. As a consequence, people tend not to upgrade their platform and keep running on deprecated releases. Tutor makes it possible even for non-technical users to launch, manage and upgrade Open edX at any scale. Should you choose at some point that Tutor is not the right solution for you, you always have an escape route: because Tutor is open source software, you can easily dump your data and switch to a different installation method. But we are confident you will not do that πŸ˜‰ To learn more about Tutor, watch this 7-minute lightning talk that was made at the 2019 Open edX conference in San Diego, CA (with `slides `_): .. youtube:: Oqc7c-3qFc4 -How does Tutor work, technically speaking? ------------------------------------------- +How does Tutor simplify Open edX deployment? +-------------------------------------------- Tutor simplifies the deployment of Open edX by: 1. 
Separating the configuration logic from the deployment platforms. 2. Running application processes in cleanly separated `docker containers `_. 3. Providing user-friendly, reliable commands for common administration tasks, including upgrades and monitoring. -4. Using a simple :ref:`plugin system ` that makes it easy to extend and customize Open edX. +4. Using a simple :ref:`plugin system ` that makes it easy to extend and customise Open edX. .. image:: https://overhang.io/static/img/openedx-plus-docker-is-tutor.png :alt: Open edX + Docker = Tutor :width: 500px :align: center -Because Docker containers are becoming an industry-wide standard, that means that with Tutor it becomes possible to run Open edX anywhere: for now, Tutor supports deploying on a local server, with `docker-compose `_, and in a large cluster, with `Kubernetes `_. But in the future, Tutor may support other deployment platforms. +Because Docker containers are becoming an industry-wide standard, that means that with Tutor it becomes possible to run Open edX anywhere: for now, Tutor supports deploying on a local server, with `docker compose `_, and in a large cluster, with `Kubernetes `_. But in the future, Tutor may support other deployment platforms. Where can I try Open edX and Tutor? ----------------------------------- -A demo Open edX platform is available at https://demo.openedx.overhang.io. This platform was deployed using Tutor and the `Indigo theme `__. Feel free to play around with the following credentials: +A sandbox Open edX platform is available at https://sandbox.openedx.edly.io. This platform was deployed using Tutor and the `Indigo theme `__. 
Feel free to play around with the following credentials: * Admin user: username=admin email=admin@overhang.io password=admin * Student user: username=student email=student@overhang.io password=student -The Android mobile application for this website can be downloaded at this url: http://demo.openedx.overhang.io/static/mobile/app.apk +The Android mobile application for this demo platform can be downloaded at this url: https://mobile.sandbox.openedx.edly.io/app.apk Urls: -* LMS: https://demo.openedx.overhang.io -* Analytics (from the `Figures plugin `__): https://demo.openedx.overhang.io/figures -* Studio (CMS): https://studio.demo.openedx.overhang.io +* LMS: https://sandbox.openedx.edly.io +* Studio (CMS): https://studio.sandbox.openedx.edly.io The platform is reset every day at 9:00 AM, `Paris (France) time `__, so feel free to try and break things as much as you want. +.. _how_does_tutor_work: + How does Tutor work? -------------------- +Tutor is a piece of software that takes care of exactly three things: + +1. Project configuration: user-specific settings (such as secrets) are stored in a single ``config.yml`` file. +2. Template rendering: all the files that are necessary to run your platform are generated from a set of templates and user-specific settings. +3. Command-line interface (CLI): frequently-used administration commands are gathered in a convenient, unified CLI. + You can experiment with Tutor very quickly: start by `installing `_ Tutor. Then run:: - tutor config save --interactive + $ tutor config save --interactive + +Then, to view the result of the above command:: + + $ cd "$(tutor config printroot)" + $ ls + config.yml env + +The ``config.yml`` file contains your user-specific Open edX settings (item #1 above). The ``env/`` folder contains the rendered templates which will be used to run your Open edX platform (item #2). For instance, the ``env/local`` folder contains the ``docker-compose.yml`` file to run Open edX locally. 
+ +The values from ``config.yml`` are used to generate the environment files in ``env/``. As a consequence, **every time the values from** ``config.yml`` **are modified, the environment must be regenerated** with ``tutor config save``. + +Because the Tutor environment is generated entirely from the values in ``config.yml``, you can ``rm -rf`` the ``env/`` folder at any time and re-create it with ``tutor config save``. Another consequence is that **any manual change made to a file in** ``env/`` **will be overwritten by** ``tutor config save`` **commands**. Consider yourself warned! + +You can now take advantage of the Tutor-powered CLI (item #3) to bootstrap your Open edX platform:: + + tutor local launch + +Under the hood, Tutor simply runs ``docker compose`` and ``docker`` commands to launch your platform. These commands are printed in the standard output, such that you are free to replicate the same behaviour by simply copying/pasting the same commands. + +How do I navigate Tutor's command-line interface? +------------------------------------------------- + +Tutor commands are structured in an easy-to-follow hierarchy. At the top level, there are command trees for image and configuration management:: + + tutor config ... + tutor images ... + +as well as command trees for each mode in which Tutor can run:: + + tutor local ... # Commands for managing a local Open edX deployment. + tutor k8s ... # Commands for managing a Kubernetes Open edX deployment. + tutor dev ... # Commands for hacking on Open edX in development mode. + +Within each mode, Tutor has subcommands for managing that type of Open edX instance. Many of them are common between modes, such as ``launch``, ``start``, ``stop``, ``exec``, and ``logs``. For example:: -This command does two things: + tutor local logs # View logs of a local deployment. + tutor k8s logs # View logs of a Kubernetes-managed deployment. + tutor dev logs # View logs of a development platform. -1.
Generate a ``config.yml`` configuration file: this file contains core :ref:`configuration parameters ` for your Open edX platforms, such as passwords and feature flags. -2. Generate an ``env/`` folder, which we call the Tutor "environment", and which contains all the files that are necessary to run an Open edX platform: these are mostly Open edX configuration files. +Many commands can be further parameterized to specify their target and options, for example:: -All these files are stored in a single folder, called the Tutor project root. On Linux, this folder is in ``~/.local/share/tutor``. On Mac OS it is ``~/Library/Application Support/tutor``. + tutor local logs cms # View logs of the CMS container in a local deployment. + tutor k8s logs mysql # View logs of MySQL in Kubernetes-managed deployment. + tutor dev logs lms --tail 10 # View ten lines of logs of the LMS container in development mode. -The values from ``config.yml`` are used to generate the environment files in ``env/``. As a consequence, **every time the values from** ``config.yml`` **are modified, the environment must be regenerated**. This can be done with:: - - tutor config save - -Another consequence is that **any manual change made to a file in** ``env/`` **will be overwritten by** ``tutor config save`` **commands**. Consider yourself warned! +And that's it! You do not need to understand Tutor's entire command-line interface to get started. Using the ``--help`` option that's available on every command, it is easy to learn as you go. For an in-depth guide, you can also explore the `CLI Reference `_. I'm ready, where do I start? ---------------------------- -Right :ref:`here `! \ No newline at end of file +Right :ref:`here `! diff --git a/docs/k8s.rst b/docs/k8s.rst index 087d93da40..ee57ea6154 100644 --- a/docs/k8s.rst +++ b/docs/k8s.rst @@ -20,24 +20,36 @@ Tutor was tested with server version 1.14.1 and client 1.14.3. 
Memory ~~~~~~ -In the following, we assume you have access to a working Kubernetes cluster. `kubectl` should use your cluster configuration by default. To launch a cluster locally, you may try out Minikube. Just follow the `official installation instructions `_. +In the following, we assume you have access to a working Kubernetes cluster. ``kubectl`` should use your cluster configuration by default. To launch a cluster locally, you may try out Minikube. Just follow the `official installation instructions `__. -The Kubernetes cluster should have at least 4Gb of RAM on each node. When running Minikube, the virtual machine should have that much allocated memory. See below for an example with VirtualBox: +The Kubernetes cluster should have at least 4Gb of RAM on each node. When running Minikube, the virtual machine should have that much allocated memory. See below for an example with VirtualBox: .. image:: img/virtualbox-minikube-system.png :alt: Virtualbox memory settings for Minikube -Ingress controller and SSL/TLS certificates -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Load Balancer and SSL/TLS certificates +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -As of Tutor v11, it is no longer required to setup an Ingress controller to access your platform. Instead Caddy exposes a LoadBalancer service and SSL/TLS certificates are transparently generated at runtime. +By default, Tutor deploys a `LoadBalancer `__ service that exposes the Caddy deployment to the outside world. As in the local installation, this service is responsible for transparently generating SSL/TLS certificates at runtime. You will need to point your DNS records to this LoadBalancer object before the platform can work correctly. Thus, you should first start the Caddy load balancer, with:: -S3-like object storage with `MinIO `_ -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + tutor k8s start caddy
In particular, it needs to persist files uploaded by students and course designers. In the local installation, these files are persisted to disk, on the host filesystem. But on Kubernetes, it is difficult to share a single filesystem between different pods. This would require persistent volume claims with `ReadWriteMany` access mode, and these are difficult to setup. +Get the external IP of this service:: -Luckily, there is another solution: at `edx.org `_, uploaded files are persisted on AWS S3: Open edX is compatible out-of-the-box with the S3 API for storing user-generated files. The problem with S3 is that it introduces a dependency on AWS. To solve this problem, Tutor comes with a plugin that emulates the S3 API but stores files on premises. This is achieved thanks to `MinIO `_. If you want to deploy a production platform to Kubernetes, you will most certainly need to enable the ``minio`` plugin:: + kubectl --namespace openedx get services/caddy + +Use this external IP to configure your DNS records. Once the DNS records are configured, you should verify that the Caddy container has properly generated the SSL/TLS certificates by checking the container logs:: + + tutor k8s logs -f caddy + +If for some reason, you would like to deploy your own load balancer, you should set ``ENABLE_WEB_PROXY=false`` just like in the :ref:`local installation `. Then, point your load balancer at the "caddy" service, which will be a `ClusterIP `__. + +S3-like object storage with `MinIO `__ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Like many web applications, Open edX needs to persist data. In particular, it needs to persist files uploaded by students and course designers. In the local installation, these files are persisted to disk, on the host filesystem. But on Kubernetes, it is difficult to share a single filesystem between different pods. This would require persistent volume claims with `ReadWriteMany` access mode, and these are difficult to set up. 
+ +Luckily, there is another solution: at `edx.org `_, uploaded files are persisted on AWS S3: Open edX is compatible out-of-the-box with the S3 API for storing user-generated files. The problem with S3 is that it introduces a dependency on AWS. To solve this problem, Tutor comes with a plugin that emulates the S3 API but stores files on-premises. This is achieved thanks to `MinIO `__. If you want to deploy a production platform to Kubernetes, you will most certainly need to enable the ``minio`` plugin:: tutor plugins enable minio @@ -46,18 +58,22 @@ The "minio.LMS_HOST" domain name will have to point to your Kubernetes cluster. Kubernetes dashboard ~~~~~~~~~~~~~~~~~~~~ -This is not a requirement per se, but it's very convenient to have a visual interface of the Kubernetes cluster. We suggest the official `Kubernetes dashboard `_. Depending on your Kubernetes provider, you may need to install a dashboard yourself. There are generic instructions on the `project's README `_. AWS provides `specific instructions `_. +This is not a requirement per se, but it's very convenient to have a visual interface of the Kubernetes cluster. We suggest the official `Kubernetes dashboard `__. Depending on your Kubernetes provider, you may need to install a dashboard yourself. There are general instructions on the `project's README `__. AWS provides `specific instructions `__. On Minikube, the dashboard is already installed. To access the dashboard, run:: minikube dashboard +Lastly, Tutor itself provides a rudimentary listing of your cluster's nodes, workloads, and services:: + + tutor k8s status + Technical details ----------------- -Under the hood, Tutor wraps ``kubectl`` commands to interact with the cluster. The various commands called by Tutor are printed in the console, so that you can reproduce and modify them yourself. +Under the hood, Tutor wraps ``kubectl`` commands to interact with the cluster. 
The various commands called by Tutor are printed in the console so that you can reproduce and modify them yourself. -Basically, the whole platform is described in manifest files stored in ``$(tutor config printroot)/env/k8s``. There is also a ``kustomization.yml`` file at the project root for `declarative application management `_. This allows us to start and update resources with commands similar to ``kubectl apply -k $(tutor config printroot) --selector=...`` (see the ``kubectl apply`` `official documentation `_). +Basically, the whole platform is described in manifest files stored in ``$(tutor config printroot)/env/k8s``. There is also a ``kustomization.yml`` file at the project root for `declarative application management `__. This allows us to start and update resources with commands similar to ``kubectl apply -k $(tutor config printroot) --selector=...`` (see the ``kubectl apply`` `official documentation `__). The other benefit of ``kubectl apply`` is that it allows you to customise the Kubernetes resources as much as you want. For instance, the default Tutor configuration can be extended by a ``kustomization.yml`` file stored in ``$(tutor config printroot)/env-custom/`` and which would start with:: @@ -67,21 +83,21 @@ The other benefit of ``kubectl apply`` is that it allows you to customise the Ku - ../env/ ... -To learn more about "kustomizations", refer to the `official documentation `__. +To learn more about "kustomizations", refer to the `official documentation `__. Quickstart ---------- Launch the platform on Kubernetes in one command:: - tutor k8s quickstart + tutor k8s launch -All Kubernetes resources are associated to the "openedx" namespace. If you don't see anything in the Kubernetes dashboard, you are probably looking at the wrong namespace... πŸ˜‰ +All Kubernetes resources are associated with the "openedx" namespace. If you don't see anything in the Kubernetes dashboard, you are probably looking at the wrong namespace... πŸ˜‰ .. 
image:: img/k8s-dashboard.png :alt: Kubernetes dashboard ("openedx" namespace) -The same ``tutor k8s quickstart`` command can be used to upgrade the cluster to the latest version. +The same ``tutor k8s launch`` command can be used to upgrade the cluster to the latest version. Other commands -------------- @@ -90,15 +106,40 @@ As with the :ref:`local installation `, there are multiple commands to ru tutor k8s -h -In particular, the `tutor k8s start` command restarts and reconfigures all services by running ``kubectl apply``. That means that you can delete containers, deployments or just any other kind of resources, and Tutor will re-create them automatically. You should just beware of not deleting any persistent data stored in persistent volume claims. For instance, to restart from a "blank slate", run:: +In particular, the ``tutor k8s start`` command restarts and reconfigures all services by running ``kubectl apply``. That means that you can delete containers, deployments, or just any other kind of resources, and Tutor will re-create them automatically. You should just beware of not deleting any persistent data stored in persistent volume claims. For instance, to restart from a "blank slate", run:: tutor k8s stop tutor k8s start All non-persisting data will be deleted, and then re-created. -Guides ------- +Common tasks +------------ + +Using `tutor k8s apply` with Dry Run +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The ``tutor k8s apply`` command acts as a wrapper around ``kubectl apply``, facilitating the application of Kubernetes resource configurations stored in ``$(tutor config printroot)/env/k8s``. To apply the configurations, execute the following command:: + + tutor k8s apply + +Before making any changes, it is possible to validate the configurations without modifications by using the ``dry-run`` option along with server-side validation. 
The command is as follows:: + + tutor k8s apply --dry-run=server --validate=true + +- ``--dry-run=server``: This option simulates the application of configurations on the server-side, providing insights into how Kubernetes would interpret the changes. +- ``--validate=true``: This option validates the manifests against Kubernetes API standards, which aids in catching potential errors prior to application. + +Utilizing the dry-run feature ensures a more robust deployment process for the Open edX platform on Kubernetes. + +Executing commands inside service pods +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The Tutor and plugin documentation usually include instructions to execute some ``tutor local run ...`` commands. These commands are only valid when running Tutor locally with docker compose, and will not work on Kubernetes. Instead, you should run ``tutor k8s exec ...`` commands. Arguments and options should be identical. + +For instance, to run a Python shell in the lms container, run:: + + tutor k8s exec lms ./manage.py lms shell Running a custom "openedx" Docker image ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -112,8 +153,33 @@ Some Tutor plugins and customization procedures require that the "openedx" image Updating docker images ~~~~~~~~~~~~~~~~~~~~~~ -Kubernetes does not provide a single command for updating docker images out of the box. A `commonly used trick `_ is to modify an innocuous label on all resources:: +Kubernetes does not provide a single command for updating docker images out of the box. A `commonly used trick `__ is to modify an innocuous label on all resources:: kubectl patch -k "$(tutor config printroot)/env" --patch "{\"spec\": {\"template\": {\"metadata\": {\"labels\": {\"date\": \"`date +'%Y%m%d-%H%M%S'`\"}}}}}" +..
_customizing_kubernetes_sources: + +Customizing Kubernetes resources +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Plugins can customize any Kubernetes resource in Tutor by overriding the definition of the resource with a :patch:`k8s-override` patch. For example, to change the volume size for MongoDB from ``5Gi`` to ``10Gi``, add the following to the plugin: + +:: + + # myplugin/tutormyplugin/patches/k8s-override + + apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: mongodb + labels: + app.kubernetes.io/component: volume + app.kubernetes.io/name: mongodb + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + diff --git a/docs/local.rst b/docs/local.rst index eccc391909..7f3ebcb307 100644 --- a/docs/local.rst +++ b/docs/local.rst @@ -5,6 +5,11 @@ Local deployment This method is for deploying Open edX locally on a single server, where docker images are orchestrated with `docker-compose `_. +.. note:: + As of v16.0.0, Tutor now uses the ``docker compose`` subcommand instead of the separate ``docker-compose`` command. + +.. _tutor_root: + In the following, environment and data files will be generated in a user-specific project folder which will be referred to as the "**project root**". On Linux, the default project root is ``~/.local/share/tutor``. An alternative project root can be defined by passing the ``--root=...`` option to the ``tutor`` command, or defining the ``TUTOR_ROOT=...`` environment variable:: tutor --root=/path/to/tutorroot run ... @@ -12,24 +17,21 @@ In the following, environment and data files will be generated in a user-specifi export TUTOR_ROOT=/path/to/tutorroot tutor run ... -.. note:: - As of v10.0.0, a locally-running Open edX platform can no longer be accessed from http://localhost or http://studio.localhost. Instead, when running ``tutor local quickstart``, you must now decide whether you are running a platform that will be used in production. 
If not, the platform will be automatically be bound to http://local.overhang.io and http://studio.local.overhang.io, which are domain names that point to 127.0.0.1 (localhost). This change was made to facilitate internal communication between Docker containers. - Main commands ------------- All available commands can be listed by running:: - tutor local help + tutor local --help All-in-one command ~~~~~~~~~~~~~~~~~~ A fully-functional platform can be configured and run in one command:: - tutor local quickstart + tutor local launch -But you may want to run commands one at a time: it's faster when you need to run only part of the local deployment process, and it helps you understand how your platform works. In the following we decompose the ``quickstart`` command. +But you may want to run commands one at a time: it's faster when you need to run only part of the local deployment process, and it helps you understand how your platform works. In the following, we decompose the ``launch`` command. Configuration ~~~~~~~~~~~~~ @@ -38,7 +40,7 @@ Configuration tutor config save --interactive -This is the only non-automatic step in the installation process. You will be asked various questions about your Open edX platform and appropriate configuration files will be generated. If you would like to automate this step then you should run ``tutor config save --interactive`` once. After that, there will be a ``config.yml`` file at the root of the project folder: this file contains all the configuration values for your platform, such as randomly generated passwords, domain names, etc. +This is the only non-automatic step in the installation process. You will be asked various questions about your Open edX platform and appropriate configuration files will be generated. If you would like to automate this step then you should run ``tutor config save --interactive`` once. This will generate a ``config.yml`` file in the **project root**. 
This file contains all the configuration values for your platform, such as randomly generated passwords, domain names, etc. The location of the **project root** can be found by running ``tutor config printroot``. See :ref:`section above `. If you want to run a fully automated installation, upload the ``config.yml`` file to wherever you want to run Open edX. You can then entirely skip the configuration step. @@ -75,7 +77,7 @@ Service initialisation :: - tutor local init + tutor local do init This command should be run just once. It will initialise all applications in a running platform. In particular, this will create the required databases tables and apply database migrations for all applications. @@ -96,9 +98,17 @@ Finally, tracking logs that store `user events `_, run:: +After a fresh installation, your platform will not have a single course. To import the `Open edX demo course `_, run:: - tutor local importdemocourse + tutor local do importdemocourse .. _settheme: @@ -127,9 +137,7 @@ Setting a new theme The default Open edX theme is rather bland, so Tutor makes it easy to switch to a different theme:: - tutor local settheme mytheme $(tutor config printvalue LMS_HOST) $(tutor config printvalue CMS_HOST) - -Notice that we pass the hostnames of the LMS and the CMS to the ``settheme`` command: this is because in Open edX, themes are assigned per domain name. + tutor local do settheme mytheme Out of the box, only the default "open-edx" theme is available. We also developed `Indigo, a beautiful, customizable theme `__ which is easy to install with Tutor. @@ -152,138 +160,14 @@ After modifying Open edX settings, for instance when running ``tutor config save tutor local exec lms reload-uwsgi -.. _portainer: - -Docker container web UI with `Portainer `__ -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Portainer is a web UI for managing docker containers. It lets you view your entire Open edX platform at a glace. Try it! 
It's really cool:: - - docker run --rm \ - --volume=/var/run/docker.sock:/var/run/docker.sock \ - --volume=/tmp/portainer:/data \ - -p 9000:9000 \ - portainer/portainer:latest --bind=:9000 - -.. .. image:: https://portainer.io/images/screenshots/portainer.gif - ..:alt: Portainer demo - -You can then view the portainer UI at `http://localhost:9000 `_. You will be asked to define a password for the admin user. Then, select a "Local environment" to work on; hit "Connect" and select the "local" group to view all running containers. - -Among many other things, you'll be able to view the logs for each container, which is really useful. - -Guides ------- - -.. _web_proxy: - -Running Open edX behind a web proxy -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The containerized web server ([Caddy](caddyserver.com/)) needs to listen to ports 80 and 443 on the host. If there is already a webserver running on the host, such as Apache or Nginx, the caddy container will not be able to start. Tutor supports running behind a web proxy. To do so, add the following configuration:: - - tutor config save --set RUN_CADDY=false --set NGINX_HTTP_PORT=81 - -In this example, the nginx container port would be mapped to 81 instead of 80. You must then configure the web proxy on the host. As of v11.0.0, configuration files are no longer provided for automatic configuration of your web proxy. Basically, you should setup a reverse proxy to `localhost:NGINX_HTTP_PORT` from the following hosts: LMS_HOST, preview.LMS_HOST, CMS_HOST, as well as any additional host exposed by your plugins. - -.. warning:: - In this setup, the Nginx HTTP port will be exposed to the world. Make sure to configure your server firewall to block unwanted connections to your server's `NGINX_HTTP_PORT`. 
Alternatively, you can configure the Nginx container to accept only local connections:: - - tutor config save --set NGINX_HTTP_PORT=127.0.0.1:81 - -Running multiple Open edX platforms on a single server -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -With Tutor, it is easy to run multiple Open edX instances on a single server. To do so, the following configuration parameters must be different for all platforms: - -- ``TUTOR_ROOT``: so that configuration, environment and data are not mixed up between platforms. -- ``LOCAL_PROJECT_NAME``: the various docker-compose projects cannot share the same name. -- ``NGINX_HTTP_PORT``: ports cannot be shared by two different containers. -- ``LMS_HOST``, ``CMS_HOST``: the different platforms must be accessible from different domain (or subdomain) names. - -In addition, a web proxy must be setup on the host, as described :ref:`above `. - -As an example, here is how to launch two different platforms, with nginx running as a web proxy:: - - # platform 1 - export TUTOR_ROOT=~/openedx/site1 - tutor config save --interactive --set RUN_CADDY=false --set LOCAL_PROJECT_NAME=tutor_site1 --set NGINX_HTTP_PORT=81 - tutor local quickstart - sudo ln -s "$(tutor config printroot)/env/local/proxy/nginx/openedx.conf" /etc/nginx/sites-enabled/site1.conf - - # platform 2 - export TUTOR_ROOT=~/openedx/site2 - tutor config save --interactive --set RUN_CADDY=false --set LOCAL_PROJECT_NAME=tutor_site2 --set NGINX_HTTP_PORT=82 - tutor local quickstart - sudo ln -s "$(tutor config printroot)/env/local/proxy/nginx/openedx.conf" /etc/nginx/sites-enabled/site2.conf - -You should then have two different platforms, completely isolated from one another, running on the same server. - -Loading different production settings for ``edx-platform`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The default settings module loaded by ``edx-platform`` is ``tutor.production``. 
The folders ``$(tutor config printroot)/env/apps/openedx/settings/lms`` and ``$(tutor config printroot)/env/apps/openedx/settings/cms`` are mounted as ``edx-platform/lms/envs/tutor`` and ``edx-platform/cms/envs/tutor`` inside the docker containers. Thus, to use your own settings, you must do two things: - -1. Copy your settings files for the lms and the cms to ``$(tutor config printroot)/env/apps/openedx/settings/lms/mysettings.py`` and ``$(tutor config printroot)/env/apps/openedx/settings/cms/mysettings.py``. -2. Load your settings by adding ``EDX_PLATFORM_SETTINGS=tutor.mysettings`` to ``$(tutor config printroot)/env/local/.env``. - -Of course, your settings should be compatible with the docker installation. You can get some inspiration from the ``production.py`` settings modules generated by Tutor, or just import it as a base by adding ``from .production import *`` at the top of ``mysettings.py``. - -Upgrading from earlier versions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Upgrading from v3+ -****************** - -Just upgrade Tutor using your :ref:`favorite installation method ` and run quickstart again:: - - tutor local quickstart - -Upgrading from v1 or v2 -*********************** - -Versions 1 and 2 of Tutor were organized differently: they relied on many different ``Makefile`` and ``make`` commands instead of a single ``tutor`` executable. To migrate from an earlier version, you should first stop your platform:: - - make stop - -Then, install Tutor using one of the :ref:`installation methods `. Then, create the Tutor project root and move your data:: - - mkdir -p "$(tutor config printroot)" - mv config.json data/ "$(tutor config printroot)" - -Finally, launch your platform with:: - - tutor local quickstart - -Backups/Migrating to a different server -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -With Tutor, all data are stored in a single folder. This means that it's extremely easy to migrate an existing platform to a different server. 
For instance, it's possible to configure a platform locally on a laptop, and then move this platform to a production server. - -1. Make sure `tutor` is installed on both servers with the same version. -2. Stop any running platform on server 1:: - - tutor local stop - -3. Transfer the configuration, environment and platform data from server 1 to server 2:: - - rsync -avr "$(tutor config printroot)/" username@server2:/tmp/tutor/ - -4. On server 2, move the data to the right location:: - - mv /tmp/tutor "$(tutor config printroot)" - -5. Start the instance with:: - tutor local start -d +Customizing the deployed services +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Making database dumps -~~~~~~~~~~~~~~~~~~~~~ +You might want to customise the docker-compose services listed in ``$(tutor config printroot)/env/local/docker-compose.yml``. To do so, you should create a ``docker-compose.override.yml`` file in that same folder:: -To dump all data from the MySQL and Mongodb databases used on the platform, run the following commands:: + vim $(tutor config printroot)/env/local/docker-compose.override.yml - tutor local exec -e MYSQL_ROOT_PASSWORD="$(tutor config printvalue MYSQL_ROOT_PASSWORD)" mysql \ - sh -c 'mysqldump --all-databases --password=$MYSQL_ROOT_PASSWORD > /var/lib/mysql/dump.sql' - tutor local exec mongodb mongodump --out=/data/db/dump.mongodb +The values in this file will override the values from ``docker-compose.yml`` and ``docker-compose.prod.yml``, as explained in the `docker-compose documentation `__. -The ``dump.sql`` and ``dump.mongodb`` files will be located in ``$(tutor config printroot)/data/mysql`` and ``$(tutor config printroot)/data/mongodb``. +Similarly, the job service configuration can be overridden by creating a ``docker-compose.jobs.override.yml`` file in that same folder. diff --git a/docs/plugins.rst b/docs/plugins.rst deleted file mode 100644 index c6c55e1d68..0000000000 --- a/docs/plugins.rst +++ /dev/null @@ -1,55 +0,0 @@ -.. 
_plugins: - -Plugins -======= - -Tutor comes with a plugin system that allows anyone to customise the deployment of an Open edX platform very easily. The vision behind this plugin system is that users should not have to fork the Tutor repository to customise their deployments. For instance, if you have created a new application that integrates with Open edX, you should not have to describe how to manually patch the platform settings, ``urls.py`` or ``*.env.json`` files. Instead, you can create a "tutor-myapp" plugin for Tutor. Then, users will start using your application in three simple steps:: - - # 1) Install the plugin - pip install tutor-myapp - # 2) Enable the plugin - tutor plugins enable myapp - # 3) Reconfigure and restart the platform - tutor local quickstart - -For simple changes, it may be extremely easy to create a Tutor plugin: even non-technical users may get started with :ref:`simple yaml plugins `. - -In the following we learn how to use and create Tutor plugins. - -Commands --------- - -List installed plugins:: - - tutor plugins list - -Enable/disable a plugin:: - - tutor plugins enable myplugin - tutor plugins disable myplugin - -After enabling or disabling a plugin, the environment should be re-generated with:: - - tutor config save - -.. _existing_plugins: - -Existing plugins ----------------- - -- `Course discovery `__: Deploy an API for interacting with your course catalog -- `Ecommerce `__: Sell courses and products on your Open edX platform -- `Figures `__: Visualize daily stats about course engagement -- `MinIO `__: S3 emulator for object storage and scalable Open edX deployment. -- `Notes `__: Allows students to annotate portions of the courseware. -- `Xqueue `__: for external grading - -Plugin development ------------------- - -.. 
toctree:: - :maxdepth: 2 - - plugins/api - plugins/gettingstarted - plugins/examples \ No newline at end of file diff --git a/docs/plugins/examples.rst b/docs/plugins/examples.rst index beab509450..2b1d763d7f 100644 --- a/docs/plugins/examples.rst +++ b/docs/plugins/examples.rst @@ -1,62 +1,92 @@ .. _plugins_examples: -Examples of Tutor plugins -========================= +======== +Examples +======== The following are simple examples of :ref:`Tutor plugins ` that can be used to modify the behaviour of Open edX. Skip email validation for new users ------------------------------------ +=================================== :: - name: skipemailvalidation - version: 0.1.0 - patches: - common-env-features: | - "SKIP_EMAIL_VALIDATION": true + from tutor import hooks + + hooks.Filters.ENV_PATCHES.add_item( + ( + "common-env-features", + """ + "SKIP_EMAIL_VALIDATION": true + """ + ) + ) Enable bulk enrollment view in the LMS --------------------------------------- +====================================== :: - name: enablebulkenrollmentview - version: 0.1.0 - patches: - lms-env-features: | - "ENABLE_BULK_ENROLLMENT_VIEW": true + from tutor import hooks + + hooks.Filters.ENV_PATCHES.add_item( + ( + "lms-env-features", + """ + "ENABLE_BULK_ENROLLMENT_VIEW": true + """ + ) + ) Enable Google Analytics ------------------------ +======================= :: - name: googleanalytics - version: 0.1.0 - patches: - openedx-common-settings: | - # googleanalytics special settings - GOOGLE_ANALYTICS_ACCOUNT = "UA-your-account" - GOOGLE_ANALYTICS_TRACKING_ID = "UA-your-tracking-id" + from tutor import hooks -Enable SAML authentication --------------------------- + hooks.Filters.ENV_PATCHES.add_items([ + ( + "openedx-common-settings", + "GOOGLE_ANALYTICS_4_ID = 'MY-MEASUREMENT-ID'" + ), + ( + "mfe-lms-common-settings", + "MFE_CONFIG['GOOGLE_ANALYTICS_4_ID'] = 'MY-MEASUREMENT-ID'" + ), + ]) -:: +.. 
note:: + Please be aware that as of May 2023 Google Analytics support has been upgraded from Google Universal Analytics to Google Analytics 4 and you may need to update your configuration as mentioned in the `Open edX docs `__. - name: saml - version: 0.1.0 - patches: - common-env-features: | - "ENABLE_THIRD_PARTY_AUTH" : true - openedx-lms-common-settings: | - # saml special settings - THIRD_PARTY_AUTH_BACKENDS = "third_party_auth.saml.SAMLAuthBackend" - openedx-auth: | - "SOCIAL_AUTH_SAML_SP_PRIVATE_KEY": "yoursecretkey", - "SOCIAL_AUTH_SAML_SP_PUBLIC_CERT": "yourpubliccert" +Enable SAML authentication +========================== + +:: + + from tutor import hooks + + hooks.Filters.ENV_PATCHES.add_items([ + ( + "common-env-features", + '"ENABLE_THIRD_PARTY_AUTH": true', + ), + ( + "openedx-lms-common-settings", + """ + # saml special settings + AUTHENTICATION_BACKENDS += ["common.djangoapps.third_party_auth.saml.SAMLAuthBackend", "django.contrib.auth.backends.ModelBackend"] + """ + ), + ( + "openedx-auth", + """ + "SOCIAL_AUTH_SAML_SP_PRIVATE_KEY": "yoursecretkey", + "SOCIAL_AUTH_SAML_SP_PUBLIC_CERT": "yourpubliccert" + """ + ), + ]) Do not forget to replace "yoursecretkey" and "yourpubliccert" with your own values. diff --git a/docs/plugins/index.rst b/docs/plugins/index.rst new file mode 100644 index 0000000000..399f9568bc --- /dev/null +++ b/docs/plugins/index.rst @@ -0,0 +1,11 @@ + +======= +Plugins +======= + +.. toctree:: + :maxdepth: 2 + + intro + examples + v0/index diff --git a/docs/plugins/intro.rst b/docs/plugins/intro.rst new file mode 100644 index 0000000000..890125ca6b --- /dev/null +++ b/docs/plugins/intro.rst @@ -0,0 +1,63 @@ +.. _plugins: + +============ +Introduction +============ + +Tutor comes with a plugin system that allows anyone to customise the deployment of an Open edX platform very easily. The vision behind this plugin system is that users should not have to fork the Tutor repository to customise their deployments. 
For instance, if you have created a new application that integrates with Open edX, you should not have to describe how to manually patch the platform settings, ``urls.py`` or ``*.env.yml`` files. Instead, you can create a "tutor-myapp" plugin for Tutor. This plugin will be in charge of making changes to the platform settings. Then, users will be able to use your application in three simple steps:: + + # 1) Install the plugin + pip install tutor-myapp + # 2) Enable the plugin + tutor plugins enable myapp + # 3) Reconfigure and restart the platform + tutor local launch + +For simple changes, it may be extremely easy to create a Tutor plugin: even non-technical users may get started with our :ref:`plugin_development_tutorial` tutorial. We also provide a list of :ref:`simple example plugins `. + +To learn about the different ways in which plugins can extend Tutor, check out the :ref:`hooks catalog `. + +Plugin commands cheatsheet +========================== + +List installed plugins:: + + tutor plugins list + +Enable/disable a plugin:: + + tutor plugins enable myplugin + tutor plugins disable myplugin + +The full plugins CLI is described in the :ref:`reference documentation `. + +.. _existing_plugins: + +Existing plugins +================ + +Many plugins are available from plugin indexes. These indexes are lists of plugins, similar to the `pypi `__ or `npm `__ indexes. By default, Tutor comes with the "main" plugin index. You can check available plugins from this index by running:: + + tutor plugins update + tutor plugins search + +More plugins can be downloaded from the "contrib" index:: + + tutor plugins index add contrib + tutor plugins search + +The "main" and "contrib" indexes include a curated list of plugins that are well maintained and introduce useful features to Open edX. These indexes are maintained by `Edly `__. For more information about these indexes, refer to the official `overhangio/tpi `__ repository. 
+ +Thanks to these indexes, it is very easy to download and upgrade plugins. For instance, to install the `notes plugin `__:: + + tutor plugins install notes + +Upgrade all your plugins with:: + + tutor plugins upgrade all + +To list indexes that you are downloading plugins from, run:: + + tutor plugins index list + +For more information about these indexes, check the `official Tutor plugin indexes (TPI) `__ repository. diff --git a/docs/plugins/api.rst b/docs/plugins/v0/api.rst similarity index 60% rename from docs/plugins/api.rst rename to docs/plugins/v0/api.rst index 9ffd3e533e..37c01eea8d 100644 --- a/docs/plugins/api.rst +++ b/docs/plugins/v0/api.rst @@ -1,20 +1,28 @@ Plugin API ========== -Plugins can affect the behaviour of Tutor at multiple levels. First, plugins can define new services with their Docker images, settings and the right initialisation commands. To do so you will have to define custom :ref:`config `, :ref:`patches `, :ref:`hooks ` and :ref:`templates `. Then, plugins can also extend the CLI by defining their own :ref:`commands `. +.. include:: legacy.rst -.. _plugin_config: +Plugins can affect the behaviour of Tutor at multiple levels. They can: + +* Add new settings or modify existing ones in the Tutor configuration (see :ref:`config `). +* Add new templates to the Tutor project environment or modify existing ones (see :ref:`patches `, :ref:`templates ` and :ref:`hooks `). +* Add custom commands to the Tutor CLI (see :ref:`command `). + +There exist two different APIs to create Tutor plugins: either with YAML files or Python packages. YAML files are more simple to create but are limited to just configuration and template patches. + +.. _v0_plugin_config: config ~~~~~~ The ``config`` attribute is used to modify existing and add new configuration parameters: -* ``config["add"]`` are key/values that should be added to the user-specific ``config.yml`` configuration. 
Add there passwords, secret keys and other values that do not have a default value. -* ``config["defaults"]`` are default key/values for this plugin. These values will not be added to the ``config.yml`` user file unless users override them manually with ``tutor config save --set ...``. -* ``config["set"]`` are existing key/values that should be modified. Be very careful what you add there! Plugins may define conflicting values for some parameters. +* ``config["add"]`` are key/values that should be added to the user-specific ``config.yml`` configuration. Add there the passwords, secret keys, and other values that do not have a reasonable default value for all users. +* ``config["defaults"]`` are default key/values for this plugin. These values can be accessed even though they are not added to the ``config.yml`` user file. Users can override them manually with ``tutor config save --set ...``. +* ``config["set"]`` are existing key/values that should be modified. Be very careful what you add there! Different plugins may define conflicting values for some parameters. - "set" and "default" key names will be automatically prefixed with the plugin name, in upper case. + "add" and "defaults" key names will be automatically prefixed with the plugin name, in upper case. Example:: @@ -36,22 +44,15 @@ This configuration from the "myplugin" plugin will set the following values: - ``MYPLUGIN_DOCKER_IMAGE``: this value will by default not be stored in ``config.yml``, but ``tutor config printvalue MYPLUGIN_DOCKER_IMAGE`` will print ``username/imagename:latest``. - ``MASTER_PASSWORD`` will be set to ``h4cked``. Needless to say, plugin developers should avoid doing this. -.. _plugin_patches: +.. _v0_plugin_patches: patches ~~~~~~~ -Plugin patches affect the rendered environment templates. In many places the Tutor templates include calls to ``{{ patch("patchname") }}``. This grants plugin developers the possibility to modify the content of rendered templates. 
Plugins can add content in these places by adding values to the ``patches`` attribute. +Plugin patches affect the rendered environment templates. In many places the Tutor templates include calls to ``{{ patch("patchname") }}``. This grants plugin developers the possibility to modify the content of rendered templates. Plugins can add content in these places by adding values to the ``patches`` attribute. See :ref:`patches` for the complete list of available patches. -.. note:: - The list of existing patches can be found by searching for `{{ patch(` strings in the Tutor source code:: - - git grep "{{ patch" - - The list of patches can also be browsed online `on Github `__. - Example:: - + patches = { "local-docker-compose-services": """redis: image: redis:latest""" @@ -60,10 +61,11 @@ Example:: This will add a Redis instance to the services run with ``tutor local`` commands. .. note:: - The ``patches`` attribute can be a callable function instead of a static dict value. + In Python plugins, remember that ``patches`` can be a callable function instead of a static dict value. + One can use this to dynamically load a list of patch files from a folder. -.. _plugin_hooks: +.. _v0_plugin_hooks: hooks ~~~~~ @@ -76,16 +78,16 @@ Hooks are actions that are run during the lifetime of the platform. For instance The services that will be run during initialisation should be added to the ``init`` hook, for instance for database creation and migrations. Example:: - + hooks = { "init": ["myservice1", "myservice2"] } - + During initialisation, "myservice1" and "myservice2" will be run in sequence with the commands defined in the templates ``myplugin/hooks/myservice1/init`` and ``myplugin/hooks/myservice2/init``. -To initialise a "foo" service, Tutor runs the "foo-job" service that is found in the ``env/local/docker-compose.jobs.yml`` file. By default, Tutor comes with a few services in this file: mysql-job, lms-job, cms-job, forum-job. 
If your plugin requires running custom services during initialisation, you will need to add them to the ``docker-compose.jobs.yml`` template. To do so, just use the "local-docker-compose-jobs-services" patch. +To initialise a "foo" service, Tutor runs the "foo-job" service that is found in the ``env/local/docker-compose.jobs.yml`` file. By default, Tutor comes with a few services in this file: mysql-job, lms-job, cms-job. If your plugin requires running custom services during initialisation, you will need to add them to the ``docker-compose.jobs.yml`` template. To do so, just use the "local-docker-compose-jobs-services" patch. -In Kubernetes, the approach is the same, except that jobs are implemented as actual job objects in the ``k8s/jobs.yml`` template. To add your own services there, your plugin should implement the "k8s-jobs" patch. +In Kubernetes, the approach is the same, except that jobs are implemented as actual job objects in the ``k8s/jobs.yml`` template. To add your services there, your plugin should implement the "k8s-jobs" patch. ``pre-init`` ++++++++++++ @@ -102,13 +104,13 @@ Example:: hooks = { "build-image": {"myimage": "myimage:latest"} } - + With this hook, users will be able to build the ``myimage:latest`` docker image by running:: - + tutor images build myimage or:: - + tutor images build all This assumes that there is a ``Dockerfile`` file in the ``myplugin/build/myimage`` subfolder of the plugin templates directory. @@ -119,43 +121,47 @@ This assumes that there is a ``Dockerfile`` file in the ``myplugin/build/myimage This hook allows pulling/pushing images from/to a docker registry. Example:: - + hooks = { "remote-image": {"myimage": "myimage:latest"}, } With this hook, users will be able to pull and push the ``myimage:latest`` docker image by running:: - + tutor images pull myimage tutor images push myimage or:: - + tutor images pull all tutor images push all -.. _plugin_templates: +.. 
_v0_plugin_templates: templates ~~~~~~~~~ -In order to define plugin-specific hooks, a plugin should also have a template directory that includes the plugin hooks. The ``templates`` attribute should point to that directory. +To define plugin-specific hooks, a plugin should also have a template directory that includes the plugin hooks. The ``templates`` attribute should point to that directory. Example:: - + import os templates = os.path.join(os.path.abspath(os.path.dirname(__file__)), "templates") With the above declaration, you can store plugin-specific templates in the ``templates/myplugin`` folder next to the ``plugin.py`` file. -In Tutor, templates are `Jinja2 `__-formatted files that will be rendered in the Tutor environment (the ``$(tutor config printroot)/env`` folder) when running ``tutor config save``. The environment files are overwritten every time the environment is saved. Plugin developers can create templates that make use of the built-in `Jinja2 API `__. In addition, a couple additional filters are added by Tutor: - +In Tutor, templates are `Jinja2 `__-formatted files that will be rendered in the Tutor environment (the ``$(tutor config printroot)/env`` folder) when running ``tutor config save``. The environment files are overwritten every time the environment is saved. Plugin developers can create templates that make use of the built-in `Jinja2 API `__. In addition, a couple of additional filters are added by Tutor: + * ``common_domain``: Return the longest common name between two domain names. Example: ``{{ "studio.demo.myopenedx.com"|common_domain("lms.demo.myopenedx.com") }}`` is equal to "demo.myopenedx.com". * ``encrypt``: Encrypt an arbitrary string. The encryption process is compatible with `htpasswd `__ verification. * ``list_if``: In a list of ``(value, condition)`` tuples, return the list of ``value`` for which the ``condition`` is true. -* ``patch``: See :ref:`patches `. +* ``long_to_base64``: Base-64 encode a long integer. 
+* ``iter_values_named``: Yield the values of the configuration settings that match a certain pattern. Example: ``{% for value in iter_values_named(prefix="KEY", suffix="SUFFIX")%}...{% endfor %}``. By default, only non-empty values are yielded. To iterate also on empty values, pass the ``allow_empty=True`` argument. +* ``patch``: See :ref:`patches `. * ``random_string``: Return a random string of the given length composed of ASCII letters and digits. Example: ``{{ 8|random_string }}``. * ``reverse_host``: Reverse a domain name (see `reference `__). Example: ``{{ "demo.myopenedx.com"|reverse_host }}`` is equal to "com.myopenedx.demo". +* ``rsa_import_key``: Import a PEM-formatted RSA key and return the corresponding object. +* ``rsa_private_key``: Export an RSA private key in PEM format. * ``walk_templates``: Iterate recursively over the templates of the given folder. For instance:: {% for file in "apps/myplugin"|walk_templates %} @@ -167,41 +173,61 @@ When saving the environment, template files that are stored in a template root w * Binary files with the following extensions: .ico, .jpg, .png, .ttf * Files that are stored in a folder named "partials", or one of its subfolders. -.. _plugin_command: +.. _v0_plugin_command: command ~~~~~~~ -A plugin can provide custom command line commands. Commands are assumed to be `click.Command `__ objects. +Python plugins can provide a custom command line interface. +The ``command`` attribute is assumed to be a `click.Command `__ object, +and you typically implement them using the `click.command `__ decorator. + +You may also use the `click.pass_obj `__ decorator to pass the CLI `context `__, such as when you want to access Tutor configuration settings from your command. 
Example:: - + import click - + from tutor import config as tutor_config + @click.command(help="I'm a plugin command") - def command(): + @click.pass_obj + def command(context): + config = tutor_config.load(context.root) + lms_host = config["LMS_HOST"] click.echo("Hello from myplugin!") + click.echo(f"My LMS host is {lms_host}") Any user who installs the ``myplugin`` plugin can then run:: - + $ tutor myplugin Hello from myplugin! + My LMS host is learn.myserver.com + +You can even define subcommands by creating `command groups `__:: -You can even define subcommands by creating `command groups `__:: - import click - + @click.group(help="I'm a plugin command group") def command(): pass - - @click.command(help="I'm a plugin subcommand") + + @command.command(help="I'm a plugin subcommand") def dosomething(): click.echo("This subcommand is awesome") -This would allow any user to run:: +This would allow any user to see your sub-commands:: + + $ tutor myplugin + Usage: tutor myplugin [OPTIONS] COMMAND [ARGS]... + + I'm a plugin command group + + Commands: + dosomething I'm a plugin subcommand + +and then run them:: $ tutor myplugin dosomething This subcommand is awesome - -See the official `click documentation `__ for more information. + +See the official `click documentation `__ for more information. diff --git a/docs/plugins/gettingstarted.rst b/docs/plugins/v0/gettingstarted.rst similarity index 71% rename from docs/plugins/gettingstarted.rst rename to docs/plugins/v0/gettingstarted.rst index 6421a898bb..1f597e1946 100644 --- a/docs/plugins/gettingstarted.rst +++ b/docs/plugins/v0/gettingstarted.rst @@ -1,6 +1,8 @@ Getting started with plugin development ======================================= +.. include:: legacy.rst + Plugins can be created in two different ways: either as plain YAML files or installable Python packages. YAML files are great when you need to make minor changes to the default platform, such as modifying settings. 
For creating more complex applications, it is recommended to create python packages. .. _plugins_yaml: @@ -9,15 +11,15 @@ YAML file ~~~~~~~~~ YAML files that are stored in the tutor plugins root folder will be automatically considered as plugins. The location of the plugin root can be found by running:: - + tutor plugins printroot On Linux, this points to ``~/.local/share/tutor-plugins``. The location of the plugin root folder can be modified by setting the ``TUTOR_PLUGINS_ROOT`` environment variable. -YAML plugins need to define two extra keys: "name" and "version". Custom CLI commands are not supported by YAML plugins. +YAML plugins must define two special top-level keys: ``name`` and ``version``. Then, YAML plugins may use two more top-level keys to customise Tutor's behaviour: ``config`` and ``patches``. Custom CLI commands, templates, and hooks are not supported by YAML plugins. Let's create a simple plugin that adds your own `Google Analytics `__ tracking code to your Open edX platform. We need to add the ``GOOGLE_ANALYTICS_ACCOUNT`` and ``GOOGLE_ANALYTICS_TRACKING_ID`` settings to both the LMS and the CMS settings. To do so, we will only have to create the ``openedx-common-settings`` patch, which is shared by the development and the production settings both for the LMS and the CMS. First, create the plugin directory:: - + mkdir "$(tutor plugins printroot)" Then add the following content to the plugin file located at ``$(tutor plugins printroot)/myplugin.yml``:: @@ -31,44 +33,57 @@ Then add the following content to the plugin file located at ``$(tutor plugins p GOOGLE_ANALYTICS_TRACKING_ID = "UA-654321-1" Of course, you should replace your Google Analytics tracking code with your own. 
You can verify that your plugin is correctly installed, but not enabled yet:: - + $ tutor plugins list googleanalytics@0.1.0 (disabled) - + You can then enable your newly-created plugin:: - - tutor plugins enable googleanalytics -Update your environment to apply changes from your plugin:: - - tutor config save + tutor plugins enable googleanalytics You should be able to view your changes in every LMS and CMS settings file:: grep -r googleanalytics "$(tutor config printroot)/env/apps/openedx/settings/" Now just restart your platform to start sending tracking events to Google Analytics:: - - tutor local quickstart + + tutor local launch That's it! And it's very easy to share your plugins. Just upload them to your Github repo and share the url with other users. They will be able to install your plugin by running:: - + tutor plugins install https://raw.githubusercontent.com/username/yourrepo/master/googleanalytics.yml Python package ~~~~~~~~~~~~~~ -Creating a plugin as a Python package allows you to define more complex logic and to store your patches in a more structured way. Python Tutor plugins are regular Python packages that define a specific entrypoint: ``tutor.plugin.v0``. +Creating a plugin as a Python package allows you to define more complex logic and store your patches in a more structured way. Python Tutor plugins are regular Python packages that define an entry point within the ``tutor.plugin.v0`` group: Example:: - + from setuptools import setup setup( ... - entry_points={"tutor.plugin.v0": ["myplugin = myplugin.plugin"]}, + entry_points={ + "tutor.plugin.v0": ["myplugin = myplugin.plugin"] + }, ) -The ``myplugin.plugin`` python module should then declare the ``config``, ``hooks``, etc. attributes that will define its behaviour. +The ``myplugin/plugin.py`` Python module can then define the attributes ``config``, ``patches``, ``hooks``, and ``templates`` to specify the plugin's behaviour. 
The attributes may be defined either as dictionaries or as zero-argument callables returning dictionaries; in the latter case, the callable will be evaluated upon plugin load. Finally, the ``command`` attribute can be defined as an instance of ``click.Command`` to define the plugin's command line interface. + +Example:: + + import click + + templates = pkg_resources.resource_filename(...) + config = {...} + hooks = {...} + + def patches(): + ... + + @click.command(...) + def command(): + ... To get started on the right foot, it is strongly recommended to create your first plugin with the `tutor plugin cookiecutter `__:: diff --git a/docs/plugins/v0/index.rst b/docs/plugins/v0/index.rst new file mode 100644 index 0000000000..72bcff9bcf --- /dev/null +++ b/docs/plugins/v0/index.rst @@ -0,0 +1,11 @@ +============= +Legacy v0 API +============= + +.. include:: legacy.rst + +.. toctree:: + :maxdepth: 2 + + api + gettingstarted diff --git a/docs/plugins/v0/legacy.rst b/docs/plugins/v0/legacy.rst new file mode 100644 index 0000000000..c68fee46db --- /dev/null +++ b/docs/plugins/v0/legacy.rst @@ -0,0 +1 @@ +.. warning:: The v0 plugin API is no longer the recommended way of developing new plugins for Tutor, starting from Tutor v13.2.0. See our :ref:`plugin creation tutorial ` to learn more about the v1 plugin API. Existing v0 plugins will remain supported for some time but developers are encouraged to start migrating their plugins as soon as possible to make use of the new API. Please read the `upgrade instructions `__ to upgrade v0 plugins generated with the v0 plugin cookiecutter. diff --git a/docs/podman.rst b/docs/podman.rst deleted file mode 100644 index 353475f42a..0000000000 --- a/docs/podman.rst +++ /dev/null @@ -1,52 +0,0 @@ -Running Tutor with Podman -------------------------- - -You have the option of running Tutor with `Podman `__, instead of the native Docker tools. 
This has some practical advantages: it does not require a running Docker daemon, and it enables you to run and build Docker images without depending on any system component running ``root``. As such, it is particularly useful for building Tutor images from CI pipelines. - -The ``podman`` CLI aims to be fully compatible with the ``docker`` CLI, and ``podman-compose`` is meant to be a fully-compatible alias of ``docker-compose``. This means that you should be able to use together with Tutor, without making any changes to Tutor itself. - -.. warning:: - Since this was written, it was discovered that there are major compatibility issues between ``podman-compose`` and ``docker-compose``. Thus, podman cannot be considered a drop-in replacement of Docker in the context of Tutor -- at least for running Open edX locally. - -.. warning:: - You should not attempt to run Tutor with Podman on a system that already has native ``docker`` installed. If you want to switch to ``podman`` using the aliases described here, you should uninstall (or at least stop) the native Docker daemon first. - - -Enabling Podman -~~~~~~~~~~~~~~~ - -Podman is supported on a variety of development platforms, see the `installation instructions `_ for details. - -Once you have installed Podman and its dependencies on the platform of your choice, you'll need to make sure that its ``podman`` binary, usually installed as ``/usr/bin/podman``, is aliased to ``docker``, and is included as such in your system ``$PATH``. On some CentOS and Fedora releases you can install a package named ``podman-docker`` to do this for you, but on other platforms you'll need to take of this yourself. 
- -- If ``$HOME/bin`` is in your ``$PATH``, you can create a symbolic link there:: - - ln -s $(which podman) $HOME/bin/docker - -- If you want to instead make ``docker`` a system-wide alias for ``podman``, you can create your symlink in ``/usr/local/bin``, an action that normally requires ``root`` privileges:: - - sudo ln -s $(which podman) /usr/local/bin/docker - - -Enabling podman-compose -~~~~~~~~~~~~~~~~~~~~~~~ - -``podman-compose`` is available as a package from PyPI, and can thus be installed with ``pip``. See `its README `_ for installation instructions. Note that if you have installed Tutor in its own virtualenv, you'll need to run ``pip install podman-compose`` in that same virtualenv. - -Once installed, you'll again need to create a symbolic link that aliases ``docker-compose`` to ``podman-compose``. - -- If you run Tutor and ``podman-compose`` in a virtualenv, create the symlink in that virtualenv's ``bin`` directory: activate the virtualenv, then run:: - - ln -s $(which podman-compose) $(dirname $(which podman-compose))/docker-compose - -- If you do not, create the symlink in ``/usr/local/bin``, using ``root`` privileges:: - - sudo ln -s $(which podman-compose) /usr/local/bin/docker-compose - - -Verifying your environment -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Once you have configured your symbolic links as described, you should be able to run ``docker version`` and ``docker-compose --help`` and their output should agree, respectively, with ``podman version`` and ``podman-compose --help``. - -After that, you should be able to use ``tutor local``, ``tutor build``, and other commands as if you had installed the native Docker tools. diff --git a/docs/quickstart.rst b/docs/quickstart.rst index 7d20341aad..0c774f8b4a 100644 --- a/docs/quickstart.rst +++ b/docs/quickstart.rst @@ -3,23 +3,27 @@ Quickstart (1-click install) ---------------------------- -1. `Download `_ the latest stable release of Tutor and place the ``tutor`` executable in your path. 
From the command line: +1. Install the latest stable release of Tutor from pip: -.. include:: cli_download.rst +.. include:: download/pip.rst -2. Run ``tutor local quickstart`` +Or `download `_ the pre-compiled binary and place the ``tutor`` executable in your path: + +.. include:: download/binary.rst + +2. Run ``tutor local launch`` 3. You're done! **That's it?** -Yes :) This is what happens when you run ``tutor local quickstart``: +Yes :) This is what happens when you run ``tutor local launch``: 1. You answer a few questions about the :ref:`configuration` of your Open edX platform. 2. Configuration files are generated from templates. 3. Docker images are downloaded. 4. Docker containers are provisioned. -5. A full, production-ready Open edX platform (`Koa `__ release) is run with docker-compose. +5. A full, production-ready Open edX platform (`Redwood `__ release) is run with docker-compose. -The whole procedure should require less than 10 minutes, on a server with a good bandwidth. Note that your host environment will not be affected in any way, since everything runs inside docker containers. Root access is not even necessary. +The whole procedure should require less than 10 minutes, on a server with good bandwidth. Note that your host environment will not be affected in any way, since everything runs inside docker containers. Root access is not even necessary. -There's a lot more to Tutor than that! To learn more about what you can do with Tutor and Open edX, check out the :ref:`whatnext` section. If the quickstart installation method above somehow didn't work for you, check out the :ref:`troubleshooting` guide. \ No newline at end of file +There's a lot more to Tutor than that! To learn more about what you can do with Tutor and Open edX, check out the :ref:`whatnext` section. If the launch installation method above somehow didn't work for you, check out the :ref:`troubleshooting` guide. 
diff --git a/docs/reference/api/hooks/actions.rst b/docs/reference/api/hooks/actions.rst new file mode 100644 index 0000000000..bf73ddbd4d --- /dev/null +++ b/docs/reference/api/hooks/actions.rst @@ -0,0 +1,14 @@ +.. _actions: + +======= +Actions +======= + +Actions are one of the two types of hooks (the other being :ref:`filters`) that can be used to extend Tutor. Each action represents an event that can occur during the application life cycle. Each action has a name, and callback functions can be attached to it. When an action is triggered, these callback functions are called in sequence. Each callback function can trigger side effects, independently from one another. + +.. autoclass:: tutor.core.hooks.Action + :members: + +.. The following are only to ensure that the docs build without warnings +.. class:: tutor.core.hooks.actions.T +.. class:: tutor.types.Config diff --git a/docs/reference/api/hooks/catalog.rst b/docs/reference/api/hooks/catalog.rst new file mode 100644 index 0000000000..cddd3c63b3 --- /dev/null +++ b/docs/reference/api/hooks/catalog.rst @@ -0,0 +1,18 @@ +.. _hooks_catalog: + +============= +Hooks catalog +============= + +Tutor can be extended by making use of "hooks". Hooks are either "actions" or "filters". Here, we list all instances of actions and filters that are used across Tutor. Plugin developers can leverage these hooks to modify the behaviour of Tutor. + +The underlying Python hook classes and API are documented :ref:`here `. + +.. autoclass:: tutor.hooks.Actions + :members: + +.. autoclass:: tutor.hooks.Filters + :members: + +.. autoclass:: tutor.hooks.Contexts + :members: diff --git a/docs/reference/api/hooks/contexts.rst b/docs/reference/api/hooks/contexts.rst new file mode 100644 index 0000000000..808de983d3 --- /dev/null +++ b/docs/reference/api/hooks/contexts.rst @@ -0,0 +1,10 @@ +.. 
_contexts: + +======== +Contexts +======== + +Contexts are a feature of the hook-based extension system in Tutor, which allows us to keep track of which components of the code created which callbacks. Contexts are very much an internal concept that most plugin developers should not have to worry about. + +.. autoclass:: tutor.core.hooks.Context +.. autofunction:: tutor.core.hooks.contexts::enter diff --git a/docs/reference/api/hooks/filters.rst b/docs/reference/api/hooks/filters.rst new file mode 100644 index 0000000000..81ec443374 --- /dev/null +++ b/docs/reference/api/hooks/filters.rst @@ -0,0 +1,15 @@ +.. _filters: + +======= +Filters +======= + +Filters are one of the two types of hooks (the other being :ref:`actions`) that can be used to extend Tutor. Filters allow one to modify the application behavior by transforming data. Each filter has a name, and callback functions can be attached to it. When a filter is applied, these callback functions are called in sequence; the result of each callback function is passed as the first argument to the next callback function. The result of the final callback function is returned to the application as the filter's output. + +.. autoclass:: tutor.core.hooks.Filter + :members: + +.. The following are only to ensure that the docs build without warnings +.. class:: tutor.core.hooks.filters.T1 +.. class:: tutor.core.hooks.filters.T2 +.. class:: tutor.core.hooks.filters.L diff --git a/docs/reference/api/hooks/index.rst b/docs/reference/api/hooks/index.rst new file mode 100644 index 0000000000..f6f130ebb2 --- /dev/null +++ b/docs/reference/api/hooks/index.rst @@ -0,0 +1,32 @@ +.. _hooks_api: + +========= +Hooks API +========= + +Types +===== + +This is the Python documentation of the two types of hooks (actions and filters) as well as the contexts system which is used to instrument them. Understanding how Tutor hooks work is useful to create plugins that modify the behaviour of Tutor. 
However, plugin developers should almost certainly not import these hook types directly. Instead, use the reference :ref:`hooks catalog `. + +.. toctree:: + :maxdepth: 1 + + actions + filters + contexts + +Utilities +========= + +Functions +--------- + +.. autofunction:: tutor.core.hooks::clear_all +.. autofunction:: tutor.hooks::lru_cache + +Priorities +---------- + +.. automodule:: tutor.core.hooks.priorities + :members: HIGH, DEFAULT, LOW diff --git a/docs/reference/cli/config.rst b/docs/reference/cli/config.rst new file mode 100644 index 0000000000..d97b2f1d10 --- /dev/null +++ b/docs/reference/cli/config.rst @@ -0,0 +1,3 @@ +.. click:: tutor.commands.config:config_command + :prog: tutor config + :nested: full diff --git a/docs/reference/cli/dev.rst b/docs/reference/cli/dev.rst new file mode 100644 index 0000000000..a72b495112 --- /dev/null +++ b/docs/reference/cli/dev.rst @@ -0,0 +1,3 @@ +.. click:: tutor.commands.dev:dev + :prog: tutor dev + :nested: full diff --git a/docs/reference/cli/images.rst b/docs/reference/cli/images.rst new file mode 100644 index 0000000000..7966bae1b2 --- /dev/null +++ b/docs/reference/cli/images.rst @@ -0,0 +1,3 @@ +.. click:: tutor.commands.images:images_command + :prog: tutor images + :nested: full diff --git a/docs/reference/cli/index.rst b/docs/reference/cli/index.rst new file mode 100644 index 0000000000..6995faf8f1 --- /dev/null +++ b/docs/reference/cli/index.rst @@ -0,0 +1,13 @@ +Command line interface (CLI) +============================ + +.. toctree:: + :maxdepth: 2 + + tutor + config + dev + images + k8s + local + plugins diff --git a/docs/reference/cli/k8s.rst b/docs/reference/cli/k8s.rst new file mode 100644 index 0000000000..9334459124 --- /dev/null +++ b/docs/reference/cli/k8s.rst @@ -0,0 +1,3 @@ +.. 
click:: tutor.commands.k8s:k8s + :prog: tutor k8s + :nested: full diff --git a/docs/reference/cli/local.rst b/docs/reference/cli/local.rst new file mode 100644 index 0000000000..91294d6af2 --- /dev/null +++ b/docs/reference/cli/local.rst @@ -0,0 +1,3 @@ +.. click:: tutor.commands.local:local + :prog: tutor local + :nested: full diff --git a/docs/reference/cli/plugins.rst b/docs/reference/cli/plugins.rst new file mode 100644 index 0000000000..7bf378e71a --- /dev/null +++ b/docs/reference/cli/plugins.rst @@ -0,0 +1,5 @@ +.. _cli_plugins: + +.. click:: tutor.commands.plugins:plugins_command + :prog: tutor plugins + :nested: full diff --git a/docs/reference/cli/tutor.rst b/docs/reference/cli/tutor.rst new file mode 100644 index 0000000000..233bec5841 --- /dev/null +++ b/docs/reference/cli/tutor.rst @@ -0,0 +1,3 @@ +.. click:: tutor.commands.cli:cli + :prog: tutor + :nested: full diff --git a/docs/reference/index.rst b/docs/reference/index.rst new file mode 100644 index 0000000000..3281a9cf70 --- /dev/null +++ b/docs/reference/index.rst @@ -0,0 +1,11 @@ +Reference +========= + +.. toctree:: + :maxdepth: 2 + + api/hooks/index + api/hooks/catalog + patches + cli/index + indexes diff --git a/docs/reference/indexes.rst b/docs/reference/indexes.rst new file mode 100644 index 0000000000..45fff4b3c0 --- /dev/null +++ b/docs/reference/indexes.rst @@ -0,0 +1,158 @@ +============== +Plugin indexes +============== + +Plugin indexes are a great way to have your plugins discovered by other users. Plugin indexes make it easy for other Tutor users to install and upgrade plugins from other developers. Examples include the official indexes, which can be found in the `overhangio/tpi `__ repository. + +Index file paths +================ + +A plugin index is a yaml-formatted file. It can be stored on the web or on your computer. In both cases, the index file location must end with "/plugins.yml". 
For instance, the following are valid index locations if you run the Open edX "Redwood" release: + +- https://overhang.io/tutor/main/redwood/plugins.yml +- ``/path/to/your/local/index/redwood/plugins.yml`` + +To add either of these indexes, run the ``tutor plugins index add`` command without the suffix. For instance:: + + tutor plugins index add https://overhang.io/tutor/main + tutor plugins index add /path/to/your/local/index/ + +Your plugin cache should be updated immediately. You can also update the cache at any point by running:: + + tutor plugins update + +To view current indexes, run:: + + tutor plugins index list + +To remove an index, run:: + + tutor plugins index remove + +Plugin entry syntax +=================== + +A "plugins.yml" file is a yaml-formatted list of plugin entries. Each plugin entry has two required fields: "name" and "src". For instance, here is a minimal plugin entry:: + + - name: mfe + src: tutor-mfe + +"name" (required) +----------------- + +A plugin name is how it will be referenced when we run ``tutor plugins install `` or ``tutor plugins enable ``. It should be concise and easily identifiable, just like a Python or apt package name. + +Plugins with duplicate names will be overridden, depending on the index in which they are declared: indexes further down ``tutor plugins index list`` (which have been added later) will have higher priority. + +.. _plugin_index_src: + +"src" (required) +---------------- + +A plugin source can be either: + +1. A pip requirement file format specifier (see `reference `__). +2. The path to a Python file on your computer. +3. The URL of a Python file on the web. + +In the first case, the plugin will be installed as a Python package. In the other two cases, the plugin will be installed as a single-file plugin. 
+ +The following "src" attributes are all valid:: + + # Pypi package + src: tutor-mfe + + # Pypi package with version specification + src: tutor-mfe>=42.0.0,<43.0.0 + + # Python package from a private index + src: | + --index-url=https://pip.mymirror.org + my-plugin>=10.0 + + # Remote git repository + src: -e git+https://github.com/myusername/tutor-contrib-myplugin@v27.0.0#egg=tutor-contrib-myplugin + + # Local editable package + src: -e /path/to/my/plugin + +"url" (optional) +---------------- + +Link to the plugin project, where users can learn more about it and ask for support. + +"author" (optional) +------------------- + +Original author of the plugin. Feel free to include your company name and email address here. For instance: "Leather Face ". + +"maintainer" (optional) +----------------------- + +Current maintainer of the plugin. Same format as "author". + +"description" (optional) +------------------------ + +Multi-line string that should contain extensive information about your plugin. The full description will be displayed with ``tutor plugins show ``. It will also be parsed for a match by ``tutor plugins search ``. Only the first line will be displayed in the output of ``tutor plugins search``. Make sure to keep the first line below 128 characters. + + +Examples +======== + +Manage plugins in development +----------------------------- + +Plugin developers and maintainers often want to install local versions of their plugins. They usually achieve this with ``pip install -e /path/to/tutor-plugin``. 
We can improve that workflow by creating an index for local plugins:: + + # Create the plugin index directory + mkdir -p ~/localindex/redwood/ + # Edit the index + vim ~/localindex/redwood/plugins.yml + +Add the following to the index:: + + - name: myplugin1 + src: -e /path/to/tutor-myplugin1 + - name: myplugin2 + src: -e /path/to/tutor-myplugin2 + +Then add the index:: + + tutor plugins index add ~/localindex/ + +Install the plugins:: + + tutor plugins install myplugin1 myplugin2 + +Re-install all plugins:: + + tutor plugins upgrade all + +The latter commands will install from the local index, and not from the remote indexes, because indexes that are added last have higher priority when plugins with the same names are found. + +Install plugins from a private index +------------------------------------ + +Plugin authors might want to share plugins with a limited number of users. This is for instance the case when a plugin is for internal use only. + +First, users should have access to the ``plugins.yml`` file. There are different ways to achieve that: + +- Make the index public: after all, it's mostly the plugins which are private. +- Grant access to the index from behind a VPN. +- Hide the index behind a basic HTTP auth URL. The index can then be added with ``tutor plugins index add http://user:password@mycompany.com/index/``. +- Download the index to disk, and then add it from the local path: ``tutor plugins index add ../path/to/index``. + +Second, users should be able to install the plugins that are listed in the index. We recommend that the plugins are uploaded to a pip-compatible self-hosted mirror, such as `devpi `__. Alternatively, packages can be installed from a private Git repository. 
For instance:: + + # Install from private pip index + - name: myprivateplugin1 + src: | + --index-url=https://my-pip-index.mycompany.com/ + tutor-contrib-myprivateplugin + + # Install from private git repository + - name: myprivateplugin2 + src: -e git+https://git.mycompany.com/tutor-contrib-myplugin2.git + +Both examples work because the :ref:`"src" ` field supports just any syntax that could also be included in a requirements file installed with ``pip install -r requirements.txt``. diff --git a/docs/reference/patches.rst b/docs/reference/patches.rst new file mode 100644 index 0000000000..5ef0aece60 --- /dev/null +++ b/docs/reference/patches.rst @@ -0,0 +1,402 @@ +.. _patches: + +====================== +Template patch catalog +====================== + +This is the list of all patches used across Tutor (outside of any plugin). Alternatively, you can search for patches in Tutor templates by grepping the source code:: + + git clone https://github.com/overhangio/tutor + cd tutor + git grep "{{ patch" -- tutor/templates + +Or you can list all available patches with the following command:: + + tutor config patches list + +See also `this GitHub search `__. + +.. patch:: caddyfile + +``caddyfile`` +============= + +File: ``apps/caddy/Caddyfile`` + +Add here Caddy directives to redirect traffic from the outside to your service containers. You should make use of the "proxy" snippet that simplifies configuration and automatically configures logging. Also, make sure to use the ``$default_site_port`` environment variable to make sure that your service will be accessible both when HTTPS is enabled or disabled. For instance:: + + {{ MYPLUGIN_HOST }}{$default_site_port} { + import proxy "myservice:8000" + } + +See the `Caddy reference documentation `__ for more information. + +.. patch:: caddyfile-cms + +``caddyfile-cms`` +================= + +File: ``apps/caddy/Caddyfile`` + +.. 
patch:: caddyfile-global + +``caddyfile-global`` +==================== + +File: ``apps/caddy/Caddyfile`` + +.. patch:: caddyfile-lms + +``caddyfile-lms`` +================= + +File: ``apps/caddy/Caddyfile`` + +.. patch:: caddyfile-proxy + +``caddyfile-proxy`` +=========================== + +File: ``apps/caddy/Caddyfile`` + +.. patch:: cms-env + +``cms-env`` +=========== + +File: ``apps/openedx/config/cms.env.yml`` + +.. patch:: cms-env-features + +``cms-env-features`` +==================== + +File: ``apps/openedx/config/cms.env.yml`` + +.. patch:: common-env-features + +``common-env-features`` +======================= + +Files: ``apps/openedx/config/cms.env.yml``, ``apps/openedx/config/lms.env.yml`` + +.. patch:: dev-docker-compose-jobs-services + +``dev-docker-compose-jobs-services`` +==================================== + +File: ``dev/docker-compose.jobs.yml`` + +.. patch:: k8s-deployments + +``k8s-deployments`` +=================== + +File: ``k8s/deployments.yml`` + +.. patch:: k8s-jobs + +``k8s-jobs`` +============ + +File: ``k8s/jobs.yml`` + +.. patch:: k8s-override + +``k8s-override`` +================ + +File: ``k8s/override.yml`` + +Any Kubernetes resource definition in this patch will override the resource defined by Tutor, provided that their names match. See :ref:`Customizing Kubernetes resources ` for an example. + +.. patch:: k8s-services + +``k8s-services`` +================ + +File: ``k8s/services.yml`` + +.. patch:: k8s-volumes + +``k8s-volumes`` +=============== + +File: ``k8s/volumes.yml`` + +.. patch:: kustomization + +``kustomization`` +================= + +File: ``kustomization.yml`` + +.. patch:: kustomization-commonlabels + +``kustomization-commonlabels`` +============================== + +File: ``kustomization.yml`` + +.. patch:: kustomization-configmapgenerator + +``kustomization-configmapgenerator`` +==================================== + +File: ``kustomization.yml`` + +.. 
patch:: kustomization-patches-strategic-merge + +``kustomization-patches-strategic-merge`` +========================================= + +File: ``kustomization.yml`` + +This can be used to add more Kustomization patches that make use of the `strategic merge mechanism `__. + +.. patch:: kustomization-resources + +``kustomization-resources`` +=========================== + +File: ``kustomization.yml`` + +.. patch:: lms-env + +``lms-env`` +=========== + +File: ``apps/openedx/config/lms.env.yml`` + +.. patch:: lms-env-features + +``lms-env-features`` +==================== + +File: ``apps/openedx/config/lms.env.yml`` + +.. patch:: local-docker-compose-caddy-aliases + +``local-docker-compose-caddy-aliases`` +====================================== + +File: ``local/docker-compose.prod.yml`` + +.. patch:: local-docker-compose-cms-dependencies + +``local-docker-compose-cms-dependencies`` +========================================= + +File: ``local/docker-compose.yml`` + +.. patch:: local-docker-compose-dev-services + +``local-docker-compose-dev-services`` +===================================== + +File: ``dev/docker-compose.yml`` + +.. patch:: local-docker-compose-jobs-services + +``local-docker-compose-jobs-services`` +====================================== + +File: ``local/docker-compose.jobs.yml`` + +.. patch:: local-docker-compose-lms-dependencies + +``local-docker-compose-lms-dependencies`` +========================================= + +File: ``local/docker-compose.yml`` + +.. patch:: local-docker-compose-permissions-command + +``local-docker-compose-permissions-command`` +============================================ + +File: ``apps/permissions/setowners.sh`` + +Add commands to this script to set ownership of bind-mounted docker-compose volumes at runtime. See :patch:`local-docker-compose-permissions-volumes`. + + +.. 
patch:: local-docker-compose-permissions-volumes + +``local-docker-compose-permissions-volumes`` +============================================ + +File: ``local/docker-compose.yml`` + +Add bind-mounted volumes to this patch to set their owners properly. See :patch:`local-docker-compose-permissions-command`. + +.. patch:: local-docker-compose-prod-services + +``local-docker-compose-prod-services`` +====================================== + +File: ``local/docker-compose.prod.yml`` + +.. patch:: local-docker-compose-services + +``local-docker-compose-services`` +================================= + +File: ``local/docker-compose.yml`` + +.. patch:: openedx-auth + +``openedx-auth`` +================ + +File: ``apps/openedx/config/partials/auth.yml`` + +.. patch:: openedx-cms-common-settings + +``openedx-cms-common-settings`` +=============================== + +File: ``apps/openedx/settings/partials/common_cms.py`` + +.. patch:: openedx-cms-development-settings + +``openedx-cms-development-settings`` +==================================== + +File: ``apps/openedx/settings/cms/development.py`` + +.. patch:: openedx-cms-production-settings + +``openedx-cms-production-settings`` +=================================== + +File: ``apps/openedx/settings/cms/production.py`` + +.. patch:: openedx-common-assets-settings + +``openedx-common-assets-settings`` +================================== + +File: ``build/openedx/settings/partials/assets.py`` + + +.. patch:: openedx-common-i18n-settings + +``openedx-common-i18n-settings`` +================================ + +File: ``build/openedx/settings/partials/i18n.py`` + +.. patch:: openedx-common-settings + +``openedx-common-settings`` +=========================== + +File: ``apps/openedx/settings/partials/common_all.py`` + +.. patch:: openedx-dev-dockerfile-post-python-requirements + +``openedx-dev-dockerfile-post-python-requirements`` +=================================================== + +File: ``build/openedx/Dockerfile`` + +.. 
patch:: openedx-development-settings + +``openedx-development-settings`` +================================ + +Files: ``apps/openedx/settings/cms/development.py``, ``apps/openedx/settings/lms/development.py`` + +.. patch:: openedx-dockerfile + +``openedx-dockerfile`` +====================== + +File: ``build/openedx/Dockerfile`` + +.. patch:: openedx-dockerfile-final + +``openedx-dockerfile-final`` +============================ + +File: ``build/openedx/Dockerfile`` + +.. patch:: openedx-dockerfile-git-patches-default + +``openedx-dockerfile-git-patches-default`` +========================================== + +File: ``build/openedx/Dockerfile`` + +.. patch:: openedx-dockerfile-minimal + +``openedx-dockerfile-minimal`` +============================== + +File: ``build/openedx/Dockerfile`` + +.. patch:: openedx-dockerfile-post-git-checkout + +``openedx-dockerfile-post-git-checkout`` +======================================== + +File: ``build/openedx/Dockerfile`` + +.. patch:: openedx-dockerfile-post-python-requirements + +``openedx-dockerfile-post-python-requirements`` +=============================================== + +File: ``build/openedx/Dockerfile`` + +.. patch:: openedx-dockerfile-pre-assets + +``openedx-dockerfile-pre-assets`` +================================= + +File: ``build/openedx/Dockerfile`` + +.. patch:: openedx-lms-common-settings + +``openedx-lms-common-settings`` +=============================== + +File: ``apps/openedx/settings/partials/common_lms.py`` + +Python-formatted LMS settings used both in production and development. + +.. patch:: openedx-lms-development-settings + +``openedx-lms-development-settings`` +==================================== + +File: ``apps/openedx/settings/lms/development.py`` + +Python-formatted LMS settings in development. Values defined here override the values from :patch:`openedx-lms-common-settings` or :patch:`openedx-lms-production-settings`. + +.. 
patch:: openedx-lms-production-settings + +``openedx-lms-production-settings`` +=================================== + +File: ``apps/openedx/settings/lms/production.py`` + +Python-formatted LMS settings in production. Values defined here override the values from :patch:`openedx-lms-common-settings`. + +``redis-conf`` +============== + +File: ``apps/redis/redis.conf`` + +Implement this patch to override hard-coded Redis configuration values. See the `Redis configuration reference `__`. + +``uwsgi-config`` +================ + +File: ``apps/openedx/settings/uwsgi.ini`` + +A .INI formatted file used to extend or override the uWSGI configuration. + +Check the uWSGI documentation for more details about the `.INI format `__ and the `list of available options `__. + +.. patch:: uwsgi-config diff --git a/docs/run.rst b/docs/run.rst index ed8a059b81..a4114f6649 100644 --- a/docs/run.rst +++ b/docs/run.rst @@ -5,7 +5,7 @@ Running Open edX .. toctree:: :maxdepth: 2 - + local k8s - dev \ No newline at end of file + dev diff --git a/docs/troubleshooting.rst b/docs/troubleshooting.rst index 7cbe8d9ed0..ef3ac7d99b 100644 --- a/docs/troubleshooting.rst +++ b/docs/troubleshooting.rst @@ -6,18 +6,17 @@ Troubleshooting What should you do if you have a problem? .. warning:: - **Do not** create a Github issue! + **Do not** create a GitHub issue! 1. Read the error logs that appear in the console. When running a single server platform as daemon, you can view the logs with the ``tutor local logs`` command. (see :ref:`logging` below) 2. Check if your problem already has a solution right here in the :ref:`troubleshooting` section. -3. Search for your problem in the `open and closed Github issues `_. -4. Search for your problem in the `community forums `__. -5. If, despite all your efforts, you can't solve the problem by yourself, you should discuss it in the `community forums `__. Please give as much details about your problem as possible! 
As a rule of thumb, **people will not dedicate more time to solving your problem than you took to write your question**. -6. If you are *absolutely* positive that you are facing a technical issue with Tutor, and not with Open edX, not with your server, not your custom configuration, then, and only then, should you open an issue on `Github `__. You *must* follow the instructions from the issue template!!! If you do not follow this procedure, your Github issues will be mercilessly closed 🀯. +3. Search for your problem in the `open and closed GitHub issues `_. +4. Search for your problem in the (now legacy) `Tutor community forums `__. +5. Search for your problem in the `Open edX community forum `__. +6. If despite all your efforts, you can't solve the problem by yourself, you should discuss it in the `Open edX community forum `__. Please give as many details about your problem as possible! As a rule of thumb, **people will not dedicate more time to solving your problem than you took to write your question**. You should tag your topic with "tutor" or the corresponding Tutor plugin name ("tutor-discovery", etc.) in order to notify the maintainers. +7. If you are *absolutely* positive that you are facing a technical issue with Tutor, and not with Open edX, not with your server, not your custom configuration, then, and only then, should you open an issue on `GitHub `__. You *must* follow the instructions from the issue template!!! If you do not follow this procedure, your GitHub issues will be mercilessly closed 🀯. -6. If you have a technical background, you may try to decide if the issue is related to Open edX or if it's specific to Tutor. In the latter case, you are most welcome to open an `issue on Github `_. **Please follow the instructions from the issue template!!!** Your issue will be examined in all cases, but you can make our life much easier by giving us as much background information as possible. 
- -Do you need professional assistance with your tutor-managed Open edX platform? Overhang.IO offers online support as part of its `Long Term Support (LTS) offering `__. +Do you need professional assistance with your Open edX platform? `Edly `__ provides online support as part of its `Open edX installation service `__. .. _logging: @@ -25,7 +24,7 @@ Logging ------- .. note:: - Logs are of paramount importance for debugging Tutor. When asking for help on the `Tutor forums `__, **you should always include the unedited logs of your app**. You can get those with:: + Logs are of paramount importance for debugging Tutor. When asking for help on the `Open edX forum `__, **always include the unedited logs of your app**. Logs are obtained with:: tutor local logs --tail=100 -f @@ -33,22 +32,22 @@ To view the logs from all containers use the ``tutor local logs`` command, which tutor local logs --follow -To view the logs from just one container, for instance the web server:: +To view the logs from just one container, for instance, the webserver:: - tutor local logs --follow nginx + tutor local logs --follow caddy -The last commands produce the logs since the creation of the containers, which can be a lot. Similar to a ``tail -f``, you can run:: +The last commands produce the logs since the creation of the containers, which may be a lot. Similar to a ``tail -f``, past logs can be omitted with:: - tutor local logs --tail=0 -f + tutor local logs --tail=0 --follow -If you'd rather use a graphical user interface for viewing logs, you are encouraged to try out :ref:`Portainer `. +Users who are more comfortable with a graphical user interface for viewing logs are encouraged to try out :ref:`Portainer `. .. 
_webserver: -"Cannot start service nginx: driver failed programming external connectivity" +"Cannot start service caddy: driver failed programming external connectivity" ----------------------------------------------------------------------------- -The containerized Nginx needs to listen to ports 80 and 443 on the host. If there is already a webserver, such as Apache or Nginx, running on the host, the nginx container will not be able to start. To solve this issue, check the section on :ref:`how to setup a web proxy `. +The containerized Caddy needs to listen to ports 80 and 443 on the host. If there is already a webserver, such as Apache, Caddy, or Nginx, running on the host, the caddy container will not be able to start. To solve this issue, check the section on :ref:`how to setup a web proxy `. "Couldn't connect to docker daemon" ----------------------------------- @@ -57,31 +56,90 @@ This is not an error with Tutor, but with your Docker installation. This is freq docker run --rm hello-world -If the above command does not work, you should fix your Docker installation. Some people will suggest to run Docker as root, or with ``sudo``; **do not do this**. Instead, what you should probably do is to add your user to the "docker" group. For more information, check out the `official Docker installation instructions `__. +If the above command does not work, you should fix your Docker installation. Some people will suggest running Docker as root, or with ``sudo``; **do not do this**. Instead, what you should probably do is add your user to the "docker" group. For more information, check out the `official Docker installation instructions `__. .. _migrations_killed: "Running migrations... Killed!" / "Command failed with status 137: docker-compose" ---------------------------------------------------------------------------------- -Open edX requires at least 4 GB RAM, in particular to run the SQL migrations. 
If the ``tutor local quickstart`` command dies after displaying "Running migrations", you most probably need to buy more memory or add swap to your machine. On Docker for Mac OS, by default, containers are allocated at most 2 GB of RAM. You should follow `these instructions from the official Docker documentation `__ to allocate at least 4-5 Gb to the Docker daemon. +Open edX requires at least 4 GB RAM, in particular, to run the SQL migrations. If the ``tutor local launch`` command dies after displaying "Running migrations", you most probably need to buy more memory or add swap to your machine. + +On macOS, by default, Docker allocates at most 2 GB of RAM to containers. ``launch`` tries to check the current allocation and outputs a warning if it can't find a value of at least 4 GB. Follow `these instructions from the official Docker documentation `__ to allocate at least 4-5 GB to the Docker daemon. -If migrations were killed halfway, there is a good chance that the MySQL database is in a state that is hard to recover from. The easiest way to recover is simply to delete all the MySQL data and restart the quickstart process. After you have allocated more memory to the Docker daemon, run:: +If migrations were killed halfway, there is a good chance that the MySQL database is in a state that is hard to recover from. The easiest way to recover is to delete all the MySQL data and restart the launch process. After more memory has been allocated to the Docker daemon, run:: tutor local stop sudo rm -rf "$(tutor config printroot)/data/mysql" - tutor local quickstart + tutor local launch .. warning:: THIS WILL ERASE ALL YOUR DATA! Do not run this on a production instance. This solution is only viable for new Open edX installations. 
+"Can't connect to MySQL server on 'mysql:3306' (111)" +----------------------------------------------------- + +The most common reason this happens is that two different instances of Tutor are running simultaneously, causing a port conflict between MySQL containers. Tutor will try to prevent this situation from happening (for example, it will stop ``local`` containers when running ``tutor dev`` commands, and vice versa), but it cannot prevent all edge cases. So, as a first step, stop all possible Tutor platform variants:: + + tutor dev stop + tutor local stop + tutor k8s stop + +And then run the command(s) again, ensuring the correct Tutor variant is consistently used (``tutor dev``, ``tutor local``, or ``tutor k8s``). + +If that does not work, then check if there are any other Docker containers running that may be using port 3306:: + + docker ps -a + +For example, if you have ever used :ref:`Tutor Nightly `, check whether there are still ``tutor_nightly_`` containers running. Conversely, if trying to run Tutor Nightly now, check whether there are non-Nightly ``tutor_`` containers running. If so, switch to that other version of Tutor, run ``tutor (dev|local|k8s) stop``, and then switch back to the preferred version of Tutor. + +Alternatively, if there are any other non-Tutor containers using port 3306, then stop and remove them:: + + docker stop + docker rm + +Finally, if no container or other programs are making use of port 3306, check the logs of the MySQL container itself:: + + tutor (dev|local|k8s) logs mysql + +Check whether the MySQL container is crashing upon startup, and if so, what is causing it to crash. + + Help! 
The Docker containers are eating all my RAM/CPU/CHEESE ------------------------------------------------------------ -You can identify which containers are consuming most resources by running:: +Containers that are consuming most resources are identified by running:: docker stats +In idle mode, the "mysql" container should use ~200MB memory; ~200-300MB for the "lms" and "cms" containers. + +On some operating systems, such as RedHat, Arch Linux or Fedora, a very high limit of the number of open files (``nofile``) per container may cause the "mysql", "lms" and "cms" containers to use a lot of memory: up to 8-16GB. To check whether a platform is impacted, run:: + + cat /proc/$(pgrep dockerd)/limits | grep "Max open files" + +If the output is 1073741816 or higher, then it is likely that the OS is affected by `this MySQL issue `__. To learn more about the root cause, read `this containerd issue comment `__. Basically, the OS is hard-coding a very high limit for the allowed number of open files, and this is causing some containers to fail. To resolve the problem, configure the Docker daemon to enforce a lower value, as described `here `__. Edit ``/etc/docker/daemon.json`` and add the following contents:: + + { + "default-ulimits": { + "nofile": { + "Name": "nofile", + "Hard": 1048576, + "Soft": 1048576 + } + } + } + +Check that the configuration is valid with:: + + dockerd --validate + +Then restart the Docker service:: + + sudo systemctl restart docker.service + +Launch the Open edX platform again with ``tutor local launch``. We should observe normal memory usage. "Build failed running pavelib.servers.lms: Subprocess return code: 1" ----------------------------------------------------------------------- @@ -91,31 +149,70 @@ You can identify which containers are consuming most resources by running:: ... Build failed running pavelib.servers.lms: Subprocess return code: 1`" -This might occur when you run a ``paver`` command. 
``/dev/null`` eats the actual error, so you will have to run the command manually. Run ``tutor dev shell lms`` (or ``tutor dev shell cms``) to open a bash session and then:: +This might occur when running a ``paver`` command. ``/dev/null`` eats the actual error, so we have to run the command manually to figure out the actual error. Run ``tutor dev shell lms`` (or ``tutor dev shell cms``) to open a bash session and then:: python manage.py lms print_setting STATIC_ROOT -The error produced should help you better understand what is happening. - -"ValueError: Unable to configure handler 'local'" ---------------------------------------------------- - -:: - - ValueError: Unable to configure handler 'local': [Errno 2] No such file or directory - -This will occur if you try to run a development environment without patching the LOGGING configuration, as indicated in the `development_` section. Maybe you correctly patched the development settings, but they are not taken into account? For instance, you might have correctly defined the ``EDX_PLATFORM_SETTINGS`` environment variable, but ``paver`` uses the ``devstack`` settings (which does not patch the ``LOGGING`` variable). This is because calling ``paver lms --settings=development`` or ``paver cms --settings=development`` ignores the ``--settings`` argument. Yes, it might be considered an edx-platform bug... Instead, you should run the ``update_assets`` and ``runserver`` commands, as explained above. +The error produced should help better understand what is happening. The chosen default language does not display properly ----------------------------------------------------- -By default, Open edX comes with a `limited set ` of translation/localization files. To complement these languages, we add locales from the `openedx-i18n project `_. But not all supported locales are downloaded. In some cases, the chosen default language will not display properly because if was not packaged in either edx-platform or openedx-i18n. 
If you feel like your language should be packaged, please `open an issue on the openedx-i18n project `_. +By default, Open edX comes with a `limited set ` of translation/localization files. + +Refer to the :ref:`i18n` section for more information about using your own translations. When I make changes to a course in the CMS, they are not taken into account by the LMS -------------------------------------------------------------------------------------- -This issue should only happen in development mode. Long story short, it can be solved by creating a Waffle switch with the following command:: +This issue should only happen in development mode. Long story short, it is solved by creating a Waffle switch with the following command:: tutor dev run lms ./manage.py lms waffle_switch block_structure.invalidate_cache_on_publish on --create -If you'd like to learn more, please take a look at `this Github issue `__. \ No newline at end of file +To learn more, check out `this GitHub issue `__. + +.. _high_resource_consumption: + +High resource consumption by Docker on ``tutor images build`` +------------------------------------------------------------- + +Some Docker images include many independent layers which are built in parallel by BuildKit. As a consequence, building these images will use up a lot of resources, sometimes even crashing the Docker daemon. To bypass this issue, we should explicitly limit the `maximum parallelism of BuildKit `__. Create a ``buildkit.toml`` configuration file with the following contents:: + + [worker.oci] + max-parallelism = 2 + +This configuration file limits the number of layers built concurrently to 2, but we should select a value that is appropriate for our machine. 
+ +Then, create a builder named "max2cpu" that uses this configuration, and start using it right away:: + + # don't forget to specify the correct path to the buildkit.toml configuration file + docker buildx create --use --name=max2cpu --driver=docker-container --config=/path/to/buildkit.toml + +Now build again:: + + tutor images build all + +All build commands should now make use of the newly configured builder. To later revert to the default builder, run ``docker buildx use default``. + +.. note:: + Setting a too low value for maximum parallelism will result in longer build times. + +fatal: the remote end hung up unexpectedly / fatal: early EOF / fatal: index-pack failed when running ``tutor images build ...`` +-------------------------------------------------------------------------------------------------------------------------------- + +This issue can occur due to problems with the network connection while cloning edx-platform which is a fairly large repository. + +First, try to run the same command once again to see if it works, as the network connection can sometimes drop during the build process. + +If that does not work, follow the tutorial above for :ref:`High resource consumption ` to limit the number of concurrent build steps so that the network connection is not being shared between multiple layers at once. + +Can't override styles using Indigo Theme for MFEs +------------------------------------------------- + +The solution can be found in `tutor-indigo `__ documentation. + + +NPM Dependency Conflict When overriding ``@edx/frontend-component-header`` or ``@edx/frontend-component-footer`` +---------------------------------------------------------------------------------------------------------------- + +The detailed steps are mentioned in `tutor-mfe `__ documentation. 
diff --git a/docs/tutor.rst b/docs/tutor.rst index 4ed7984612..4435ac911d 100644 --- a/docs/tutor.rst +++ b/docs/tutor.rst @@ -3,20 +3,23 @@ Tutor development ================= +Setting up your development environment +--------------------------------------- + Start by cloning the Tutor repository:: git clone https://github.com/overhangio/tutor.git cd tutor/ Install requirements --------------------- +~~~~~~~~~~~~~~~~~~~~ :: pip install -r requirements/dev.txt Run tests ---------- +~~~~~~~~~ :: @@ -25,7 +28,7 @@ Run tests Yes, there are very few unit tests for now, but this is probably going to change. Code formatting ---------------- +~~~~~~~~~~~~~~~ Tutor code formatting is enforced by `black `_. To check whether your code changes conform to formatting standards, run:: @@ -39,15 +42,18 @@ Static error detection is performed by `pylint `__. + +2. Update version and compile changelogs: + +- Now bump the ``__version__`` value in ``tutor/__about__.py``. (see :ref:`versioning` below). +- Collect changelog entries with ``make changelog``(or ``scriv collect``) command - It will delete all previous changelog entries from changelog.d folder and will add records of those entries to CHANGELOG.md file. +- Create a commit with the version changelog e.g. this `commit `__. +- Run tests with ``make test``. +- Push your changes to the upstream repository. + +.. _versioning: + +Versioning +---------- + +The versioning format used in Tutor is the following:: + + RELEASE.MAJOR.MINOR(-BRANCH) -- Bump the ``__version__`` value in ``tutor/__about__.py``. -- Replace "Unreleased" by the version name and date in CHANGELOG.md. -- Create a commit with the version changelog. -- ``make release`` (this assumes that there are two remotes named "origin" and "overhangio") +When making a new Tutor release, increment the: -After a regular push to ``master``, run ``make nightly`` to update the "nightly" tag. +- RELEASE version when a new Open edX release comes out. 
The new value should match the ordinal value of the first letter of the release name: Aspen πŸ‘’ 1, Birch πŸ‘’ 2, ... Zebra πŸ‘’ 26. +- MAJOR version when making a backward-incompatible change (prefixed by "πŸ’₯" in the changelog, as explained below). +- MINOR version when making a backward-compatible change. + +An optional BRANCH suffix may be appended to the release name to indicate that extra changes were added on top of the latest release. For instance, "x.y.z-nightly" corresponds to release x.y.z on top of which extra changes were added to make it compatible with the Open edX master branches (see the :ref:`tutorial on running Tutor Nightly `). + +`Officially-supported plugins `__ follow the same versioning pattern. As a third-party plugin developer, you are encouraged to use the same pattern to make it immediately clear to your end-users which Open edX versions are supported. + +In Tutor and its officially-supported plugins, certain features, API endpoints, and older dependency versions are periodically deprecated. Generally, warnings are added to the Changelogs and/or the command-line interface one major release before support for any behavior is removed. In order to keep track of pending removals in the source code, comments containing the string ``REMOVE-AFTER-VXX`` should be used, where ``XX`` is the last major version that must support the behavior. For example:: + + # This has been replaced with SOME_NEW_HOOK (REMOVE-AFTER-V25). + SOME_OLD_HOOK = Filter() + +indicates that this filter definition can be removed as of Tutor v26.0.0. .. _contributing: Contributing to Tutor --------------------- -Third-party contributions to Tutor and its plugins are more than welcome! Just make sure to follow these guidelines: +Contributions to Tutor and its plugins are highly encouraged. Please adhere to the following guidelines: -- Outside of obvious bugs, contributions should be discussed first in the `official Tutor forums `__. 
-- Once we agree on a high-level solution, you should open a pull request on the `Tutor repository `__ or the corresponding plugin. -- Write a good Git commit title and message: explain why you are making this change, what problem you are solving and which solution you adopted. Link to the relevant conversation topics in the forums and describe your use case. We *love* long, verbose descriptions :) -- Make sure that all tests pass by running ``make test`` (see above). -- If your PR is in the Tutor core repository, add an item to the CHANGELOG file, in the "Unreleased" section. Use the same format as the other items:: +- **General Discussion**: Before addressing anything other than clear-cut bugs, start a discussion on the `official Open edX forum `__. This facilitates reaching a consensus on a high-level solution. +- **Pull Requests**: For changes to Tutor core or plugin-specific modifications, open a pull request on the `Tutor repository `__ or the corresponding plugin repository. +- **Running Tests and Code Formatting**: + - Ensure all tests pass by running ``make test``. This is mandatory for both Tutor core and plugin contributions. + - If formatting tests fail, correct your code format using ``make format``. +- **Changelog Entry**: Create a changelog entry for significant changes (excluding reformatting or documentation) by running ``make changelog-entry``. Edit the newly created file following the given formatting instructions. This applies to both Tutor core and plugin changes. +- **Commit Messages**: Write clear Git commit titles and messages. Detail the rationale for your changes, the issue being addressed, and your solution. Include links to relevant forum discussions and describe your use case. Detailed explanations are valuable. For commit titles, follow `conventional commits `__ guidelines. Additionally, if your pull request addresses an existing GitHub issue, include 'Close #XXX' in your commit message, where XXX is the issue number. 
- - [TYPE] DESCRIPTION +Releasing a new version +----------------------- + +When releasing a new version: -Where "TYPE" is either "Bugfix", "Improvement", "Feature" or "Security". You should add an explosion emoji ("πŸ’₯") before "[TYPE]" if you are making a breaking change. +- **Version Number**: Update the version number in `__about__.py`. For detailed guidelines on version numbering, refer to the versioning guidelines (:ref:`versioning`). +- **Changelog Compilation**: Compile all changelog entries using ``make changelog``. +- **Git Commit for Release**: Use the format ``git commit -a -m "vX.Y.Z"`` to indicate the new version in the git commit title. Happy hacking! ☘️ + +.. _maintainers: + +Joining the team of Tutor Maintainers +------------------------------------- + +We have an open team of volunteers who help support the project. You can read all about it `here `__ -- and we hope that you'll consider joining us πŸ˜‰ diff --git a/docs/tutorials/arm64.rst b/docs/tutorials/arm64.rst new file mode 100644 index 0000000000..2d04f7ae35 --- /dev/null +++ b/docs/tutorials/arm64.rst @@ -0,0 +1,12 @@ +.. _arm64: + +Running Tutor on ARM-based systems +================================== + +Tutor can be used on ARM64 systems, and official ARM64 docker images are available starting from Tutor v16. + +For older versions of Tutor (v14 or v15), there are several options: + +* Use emulation (via qemu or Rosetta 2) to run x86_64 images. Just make sure your installation of Docker supports emulation and use Tutor as normal. This may be 20%-100% slower than native images, depending on the emulation method. +* Use the `unofficial community-maintained ARM64 plugin `_ which will set the required settings for you and which includes unofficial docker images. +* Build your own ARM64 images, e.g. using ``tutor images build openedx permissions`` and/or ``tutor images build openedx-dev`` before launching the LMS. 
diff --git a/docs/tutorials/datamigration.rst b/docs/tutorials/datamigration.rst new file mode 100644 index 0000000000..e7923a6463 --- /dev/null +++ b/docs/tutorials/datamigration.rst @@ -0,0 +1,36 @@ +.. _backup_tutorial: + +Making backups and migrating data +--------------------------------- + +With Tutor, all data are stored in a single folder. This means that it's extremely easy to migrate an existing platform to a different server. For instance, it's possible to configure a platform locally on a laptop, and then move this platform to a production server. + +1. Make sure `tutor` is installed on both servers with the same version. +2. Stop any running platform on server 1:: + + tutor local stop + +3. Transfer the configuration, environment, and platform data from server 1 to server 2:: + + sudo rsync -avr "$(tutor config printroot)/" username@server2:/tmp/tutor/ + +4. On server 2, move the data to the right location:: + + mv /tmp/tutor "$(tutor config printroot)" + +5. Start the instance with:: + + tutor local start -d + +Making database dumps +--------------------- + +To dump all data from the MySQL and Mongodb databases used on the platform, run the following commands:: + + tutor local exec \ + -e USERNAME="$(tutor config printvalue MYSQL_ROOT_USERNAME)" \ + -e PASSWORD="$(tutor config printvalue MYSQL_ROOT_PASSWORD)" \ + mysql sh -c 'mysqldump --all-databases --user=$USERNAME --password=$PASSWORD > /var/lib/mysql/dump.sql' + tutor local exec mongodb mongodump --out=/data/db/dump.mongodb + +The ``dump.sql`` and ``dump.mongodb`` files will be located in ``$(tutor config printroot)/data/mysql`` and ``$(tutor config printroot)/data/mongodb``. 
diff --git a/docs/tutorials/edx-platform-settings.rst b/docs/tutorials/edx-platform-settings.rst new file mode 100644 index 0000000000..e3c5cf2b22 --- /dev/null +++ b/docs/tutorials/edx-platform-settings.rst @@ -0,0 +1,4 @@ +Modifying ``edx-platform`` settings +----------------------------------- + +The default settings module loaded by ``edx-platform`` is ``tutor.production`` in production and ``tutor.development`` in development. The folders ``$(tutor config printroot)/env/apps/openedx/settings/lms`` and ``$(tutor config printroot)/env/apps/openedx/settings/cms`` are mounted as ``edx-platform/lms/envs/tutor`` and ``edx-platform/cms/envs/tutor`` inside the docker containers. To modify these settings you must create a plugin that implements one or more of the patch statements in those setting files. See the :ref:`plugin_development_tutorial` tutorial for more information on how to create a plugin. diff --git a/docs/tutorials/edx-platform.rst b/docs/tutorials/edx-platform.rst new file mode 100644 index 0000000000..125a0cb324 --- /dev/null +++ b/docs/tutorials/edx-platform.rst @@ -0,0 +1,178 @@ +.. _edx_platform: + +Working on edx-platform as a developer +====================================== + +Tutor supports running in development with ``tutor dev`` commands. Developers frequently need to work on a fork of some repository. The question then becomes: how to make their changes available within the "openedx" Docker container? + +For instance, when troubleshooting an issue in `edx-platform `__, we would like to make some changes to a local fork of that repository, and then apply these changes immediately in the "lms" and the "cms" containers (but also "lms-worker", "cms-worker", etc.) + +Similarly, when developing a custom XBlock, we would like to hot-reload any change we make to the XBlock source code within the containers. + +Tutor provides a simple solution to these questions. In both cases, the solution takes the form of a ``tutor mounts add ...`` command. 
+ +Working on the "edx-platform" repository +---------------------------------------- + +Download the code from the upstream repository:: + + cd /my/workspace/edx-platform + git clone https://github.com/openedx/edx-platform . + +Check out the right version of the upstream repository. If you are working on the `current "zebulon" release `__ of Open edX, then you should checkout the corresponding branch:: + + # "zebulon" is an example. You should put the actual release name here. + # I.e: aspen, birch, cypress, etc. + git checkout open-release/zebulon.master + +On the other hand, if you are working on the Tutor :ref:`"nightly" ` branch then you should checkout the master branch:: + + git checkout master + +Then, mount the edx-platform repository with Tutor:: + + tutor mounts add /my/workspace/edx-platform + +This command does a few "magical" things πŸ§™ behind the scenes: + +1. Mount the edx-platform repository in the image at build-time. This means that when you run ``tutor images build openedx``, your custom repository will be used instead of the upstream. In particular, any change you've made to the installed requirements, static assets, etc. will be taken into account. +2. Mount the edx-platform repository at run time. Thus, when you run ``tutor dev start``, any change you make to the edx-platform repository will be hot-reloaded. + +You can get a glimpse of how these auto-mounts work by running ``tutor mounts list``. 
It should output something similar to the following:: + + $ tutor mounts list + - name: /home/data/regis/projets/overhang/repos/edx/edx-platform + build_mounts: + - image: openedx + context: edx-platform + - image: openedx-dev + context: edx-platform + compose_mounts: + - service: lms + container_path: /openedx/edx-platform + - service: cms + container_path: /openedx/edx-platform + - service: lms-worker + container_path: /openedx/edx-platform + - service: cms-worker + container_path: /openedx/edx-platform + - service: lms-job + container_path: /openedx/edx-platform + - service: cms-job + container_path: /openedx/edx-platform + +Working on edx-platform Python dependencies +------------------------------------------- + +Quite often, developers don't want to work on edx-platform directly, but on a dependency of edx-platform. For instance: an XBlock. This works the same way as above. Let's take the example of the `"edx-ora2" `__ package, for open response assessments. First, clone the Python package:: + + cd /my/workspace/edx-ora2 + git clone https://github.com/openedx/edx-ora2 . + +Then, check out the right version of the package. This is the version that is indicated in the `edx-platform/requirements/edx/base.txt `__. 
Be careful that the version that is currently in use in your version of edx-platform is **not necessarily the head of the master branch**:: + + git checkout + +Then, mount this repository:: + + tutor mounts add /my/workspace/edx-ora2 + +Verify that your repository is properly bind-mounted by running ``tutor mounts list``:: + + $ tutor mounts list + - name: /my/workspace/edx-ora2 + build_mounts: + - image: openedx + context: mnt-edx-ora2 + - image: openedx-dev + context: mnt-edx-ora2 + compose_mounts: + - service: lms + container_path: /mnt/edx-ora2 + - service: cms + container_path: /mnt/edx-ora2 + - service: lms-worker + container_path: /mnt/edx-ora2 + - service: cms-worker + container_path: /mnt/edx-ora2 + - service: lms-job + container_path: /mnt/edx-ora2 + - service: cms-job + container_path: /mnt/edx-ora2 + +(If the ``_mounts`` entries are empty, it didn't work automatically - see below.) + +You should then re-build the "openedx" Docker image to pick up your changes:: + + tutor images build openedx-dev + +Then, whenever you run ``tutor dev start``, the "lms" and "cms" containers should automatically hot-reload your changes. + +To push your changes in production, you should do the same with ``tutor local`` and the "openedx" image:: + + tutor images build openedx + tutor local start -d + +What if my edx-platform package is not automatically bind-mounted? +------------------------------------------------------------------ + +It is quite possible that your package is not automatically recognized and bind-mounted by Tutor. Out of the box, Tutor defines a set of regular expressions: if your package name matches this regular expression, it will be automatically bind-mounted. But if it does not, you have to tell Tutor about it. 
+ +To do so, you will need to create a :ref:`Tutor plugin ` that implements the :py:data:`tutor.hooks.Filters.MOUNTED_DIRECTORIES` filter:: + + from tutor import hooks + hooks.Filters.MOUNTED_DIRECTORIES.add_item(("openedx", "my-package")) + +After you implement and enable that plugin, ``tutor mounts list`` should display your directory among the bind-mounted directories. + +Debugging with breakpoints +-------------------------- + +To debug a local edx-platform repository, first, start development in detached mode (with ``-d``), add a `python breakpoint `__ with ``breakpoint()`` anywhere in the code. Then, attach to the applicable service's container by running ``start`` (without ``-d``) followed by the service's name:: + + # Start in detached mode: + tutor dev start -d + + # Debugging LMS: + tutor dev start lms + + # Or, debugging CMS: + tutor dev start cms + +Running edx-platform unit tests +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +It's possible to run the full set of unit tests that ship with `edx-platform `__. To do so, run a shell in the LMS development container:: + + tutor dev run lms bash + +Then, run unit tests with ``pytest`` commands:: + + # Run tests on common apps + unset DJANGO_SETTINGS_MODULE + unset SERVICE_VARIANT + pytest common + pytest openedx + pytest xmodule + + # Run tests on LMS + export DJANGO_SETTINGS_MODULE=lms.envs.tutor.test + pytest lms + + # Run tests on CMS + export DJANGO_SETTINGS_MODULE=cms.envs.tutor.test + pytest cms + +.. note:: + Getting all edx-platform unit tests to pass on Tutor is currently a work-in-progress. Some unit tests are still failing. If you manage to fix some of these, please report your findings in the `Open edX forum `__. + +Do I have to re-build the "openedx" Docker image after every change? +-------------------------------------------------------------------- + +No, you don't. 
Re-building the "openedx" Docker image may take a while, and you don't want to run this command every time you make a change to your local repositories. Because your host directory is bind-mounted in the containers at runtime, your changes will be automatically applied to the container. If you run ``tutor dev`` commands, then your changes will be automatically picked up. + +If you run ``tutor local`` commands (for instance: when debugging a production instance) then your changes will *not* be automatically picked up. In such a case you should manually restart the containers:: + + tutor local restart lms cms lms-worker cms-worker + +Re-building the "openedx" image should only be necessary when you want to push your changes to a Docker registry, then pull them on a remote server. diff --git a/docs/tutorials/google-smtp.rst b/docs/tutorials/google-smtp.rst new file mode 100644 index 0000000000..90a8ad3d65 --- /dev/null +++ b/docs/tutorials/google-smtp.rst @@ -0,0 +1,53 @@ +Using Google Mail as an SMTP server +=================================== + +By default, Tutor comes with a simple SMTP server for sending emails. Such a server has an important limitation: it does not implement mailing good practices, such as DKIM or SPF. As a consequence, the emails you send might be flagged as spam by their recipients. Thus, you might want to disable the SMTP server and run your own, for instance using your Google Mail account. + +.. warning:: + Google Mail SMTP servers come with their own set of limitations. For instance, you are limited to sending 500 emails a day. Reference: https://support.google.com/mail/answer/22839 + +Authorization for Third-Party Access: + +To enhance security, Google recommends the use of "Application-Specific Passwords" for third-party access to Google services. It's crucial to follow these steps to enable this feature: + +1. Activate 2-Step Verification for the Google Account. This is essential for setting up application-specific passwords. +2. 
Visit the Google Account Security page. +3. Under 'Signing in to Google,' select 'App passwords.' +4. It may be necessary to sign in again. After signing in, choose "Select app" and select "Other (Custom name)" from the dropdown menu. +5. Enter a name that describes the purpose of this password, such as 'Tutor SMTP'. +6. Click 'Generate' to receive your 16-character app-specific password. Make sure to record this password securely. + +Reference: https://support.google.com/mail/answer/185833 + +Then, check that you can reach the Google Mail SMTP service from your own server:: + + $ telnet smtp.gmail.com 587 + +If you get ``Connected to smtp.gmail.com.`` then it means that you can successfully reach the Google Mail SMTP servers. If not, you will have to reconfigure your firewall. + +To exit the ``telnet`` shell, type ``ctrl+]``, then ``ctrl+d``. + +Then, disable the SMTP server that comes with Tutor:: + + $ tutor config save --set RUN_SMTP=false + +Configure credentials to access your SMTP server:: + + $ tutor config save \ + --set SMTP_HOST=smtp.gmail.com \ + --set SMTP_PORT=587 \ + --set SMTP_USE_SSL=false \ + --set SMTP_USE_TLS=true \ + --set SMTP_USERNAME=YOURUSERNAME@gmail.com \ + --set SMTP_PASSWORD='YOURPASSWORD' + +Don't forget to replace your email address and password in the prompt above. If your email password contains special characters, you might have to escape them. + +Then, restart your platform:: + + $ tutor local launch + +That's it! You can send a test email with the following command:: + + $ tutor local run --no-deps lms ./manage.py lms shell -c \ + "from django.core.mail import send_mail; send_mail('test subject', 'test message', 'YOURUSERNAME@gmail.com', ['YOURRECIPIENT@domain.com'])" diff --git a/docs/tutorials/index.rst b/docs/tutorials/index.rst new file mode 100644 index 0000000000..cef5c070e5 --- /dev/null +++ b/docs/tutorials/index.rst @@ -0,0 +1,33 @@ +Tutorials +========= + +Open edX customization +---------------------- + +.. 
toctree:: + :maxdepth: 1 + + plugin + theming + edx-platform + edx-platform-settings + google-smtp + nightly + +System administration +--------------------- + +.. toctree:: + :maxdepth: 1 + + scale + portainer + podman + proxy + datamigration + multiplatforms + oldreleases + arm64 + +.. Note: maybe we should create a dedicated tutorial category in the Open edX forum? +.. Other tutorials can be found in the official Tutor forums, `in the "Tutorials" category `__. diff --git a/docs/tutorials/multiplatforms.rst b/docs/tutorials/multiplatforms.rst new file mode 100644 index 0000000000..b6a4621806 --- /dev/null +++ b/docs/tutorials/multiplatforms.rst @@ -0,0 +1,11 @@ +Running multiple Open edX platforms on a single server +====================================================== + +With Tutor, it is easy to run multiple Open edX instances on a single server. To do so, the following configuration parameters must be different for all platforms: + +- ``TUTOR_ROOT``: so that configuration, environment, and data are not mixed up between platforms. +- ``LOCAL_PROJECT_NAME``: the various docker-compose projects cannot share the same name. +- ``CADDY_HTTP_PORT``: exposed ports cannot be shared by two different containers. +- ``LMS_HOST``, ``CMS_HOST``: the different platforms must be accessible from different domain (or subdomain) names. + +In addition, a web proxy must be set up on the host, as described :ref:`in the corresponding tutorial `. diff --git a/docs/tutorials/nightly.rst b/docs/tutorials/nightly.rst new file mode 100644 index 0000000000..6ee60f3de0 --- /dev/null +++ b/docs/tutorials/nightly.rst @@ -0,0 +1,64 @@ +.. _nightly: + +Running Open edX on the master branch ("nightly") +================================================= + +Tutor was designed to make it easy for everyone to run the latest release of Open edX. But sometimes, you want to run the latest, bleeding-edge version of Open edX. 
This is what we call "running master", as opposed to running the release branch. Running the master branch in production is strongly **not** recommended unless you are an Open edX expert and you really know what you are doing. But Open edX developers frequently need to run the master branch locally to implement and test new features. Thus, Tutor makes it easy to run Open edX on the master branch: this is called "Tutor Nightly". + +Installing Tutor Nightly +------------------------ + +Running Tutor Nightly requires more than setting a few configuration variables: because there are so many Open edX settings, version numbers, etc. which may change between the latest release and the current master branch, Tutor Nightly is actually maintained as a separate branch of the Tutor repository. To install Tutor Nightly, you should install Tutor from the "nightly" branch of the source repository. To do so, run:: + + git clone --branch=nightly https://github.com/overhangio/tutor.git + pip install -e "./tutor[full]" + +As usual, it is strongly recommended to run the command above in a `Python virtual environment `__. + +In addition to installing Tutor Nightly itself, this will install automatically the nightly versions of all official Tutor plugins (which are enumerated in `plugins.txt `_). Alternatively, if you wish to hack on an official plugin or install a custom plugin, you can clone that plugin's repository and install it. For instance:: + + git clone --branch=nightly https://github.com/myorganization/tutor-contrib-myplugin.git + pip install -e ./tutor-contrib-myplugin + +Once Tutor Nightly is installed, you can run the usual ``tutor`` commands:: + + tutor dev launch + tutor dev run lms bash + # ... 
and so on + +Upgrading to the latest version of Open edX +------------------------------------------- + +To pull the latest upstream changes, you should first upgrade Tutor Nightly:: + + cd ./tutor + git pull + +Then, you will have to generate a more recent version of the nightly Docker images. Images for running Tutor Nightly are published daily to docker.io (see `here `__). You can fetch the latest images with:: + + tutor images pull all + +Alternatively, you may want to build the images yourself. As usual, this is done with:: + + tutor images build all + +However, these images include the application master branch at the point in time when the image was built. The Docker layer caching mechanism might cause the ``git clone`` step from the build to be skipped. In such cases, you will have to bypass the caching mechanism with:: + + tutor images build --no-cache all + +Running Tutor Nightly alongside the latest release +-------------------------------------------------- + +When running Tutor Nightly, you usually do not want to override your existing Tutor installation. That's why a Tutor Nightly installation has the following differences from a regular release installation: + +- The default Tutor project root is different in Tutor Nightly. By default it is set to ``~/.local/share/tutor-nightly`` on Linux (instead of ``~/.local/share/tutor``). To modify this location check the :ref:`corresponding documentation `. +- The plugins root is set to ``~/.local/share/tutor-nightly-plugins`` on Linux (instead of ``~/.local/share/tutor-plugins``). This location may be modified by setting the ``TUTOR_PLUGINS_ROOT`` environment variable. +- The default docker-compose project name is set to ``tutor_nightly_local`` (instead of ``tutor_local``). This value may be modified by manually setting the ``LOCAL_PROJECT_NAME``. 
+ +Making changes to Tutor Nightly +------------------------------- + +In general pull requests should be open on the "master" branch of Tutor: the "master" branch is automatically merged on the "nightly" branch at every commit, such that changes made to Tutor releases find their way to Tutor Nightly as soon as they are merged. However, sometimes you want to make changes to Tutor Nightly exclusively, and not to the Tutor releases. This might be the case for instance when upgrading the running version of a third-party service (for instance: Elasticsearch, MySQL), or when the master branch requires specific changes. In that case, you should follow the instructions from the :ref:`contributing` section of the docs, with the following differences: + +- Open your pull request on top of the "nightly" branch instead of "master". +- Add a description of your changes by creating a changelog entry with `make changelog-entry`, as in the master branch. diff --git a/docs/tutorials/oldreleases.rst b/docs/tutorials/oldreleases.rst new file mode 100644 index 0000000000..b7344f2c8f --- /dev/null +++ b/docs/tutorials/oldreleases.rst @@ -0,0 +1,25 @@ +Upgrading from older releases +----------------------------- + +Upgrading from v3+ +~~~~~~~~~~~~~~~~~~ + +Just upgrade Tutor using your :ref:`favorite installation method ` and run launch again:: + + tutor local launch + +Upgrading from v1 or v2 +~~~~~~~~~~~~~~~~~~~~~~~ + +Versions 1 and 2 of Tutor were organized differently: they relied on many different ``Makefile`` and ``make`` commands instead of a single ``tutor`` executable. To migrate from an earlier version, you should first stop your platform:: + + make stop + +Then, install Tutor using one of the :ref:`installation methods `. 
Then, create the Tutor project root and move your data:: + + mkdir -p "$(tutor config printroot)" + mv config.json data/ "$(tutor config printroot)" + +Finally, launch your platform with:: + + tutor local launch diff --git a/docs/tutorials/plugin.rst b/docs/tutorials/plugin.rst new file mode 100644 index 0000000000..6469f49f66 --- /dev/null +++ b/docs/tutorials/plugin.rst @@ -0,0 +1,388 @@ +.. _plugin_development_tutorial: + +======================= +Creating a Tutor plugin +======================= + +Tutor plugins are the officially recommended way of customizing the behaviour of Tutor. If Tutor does not do things the way you want, then your first reaction should *not* be to fork Tutor, but instead to figure out whether you can create a plugin that will allow you to achieve what you want. + +You may be thinking that creating a plugin might be overkill for your use case. It's almost certainly not! The stable plugin API guarantees that your changes will keep working even after you upgrade from one major release to the next, with little to no extra work. Also, it allows you to distribute your changes to other users. + +A plugin can be created either as a simple, single Python module (a ``*.py`` file) or as a full-blown Python package. Single Python modules are easier to write, while Python packages can be distributed more easily with ``pip install ...``. We'll start by writing our plugin as a single Python module. + +Plugins work by making extensive use of the Tutor hooks API. The list of available hooks is available from the :ref:`hooks catalog `. Developers who want to understand how hooks work should check the :ref:`hooks API `. + +Writing a plugin as a single Python module +========================================== + +Getting started +--------------- + +In the following, we'll create a new plugin called "myplugin". 
We start by creating the plugins root folder:: + + $ mkdir -p "$(tutor plugins printroot)" + +Then, create an empty "myplugin.py" file in this folder:: + + $ touch "$(tutor plugins printroot)/myplugin.py" + +We can verify that the plugin is correctly detected by running:: + + $ tutor plugins list + ... + myplugin (disabled) /home/yourusername/.local/share/tutor-plugins/myplugin.py + ... + +Our plugin is disabled, for now. To enable it, we run:: + + $ tutor plugins enable myplugin + Plugin myplugin enabled + Configuration saved to /home/yourusername/.local/share/tutor/config.yml + Environment generated in /home/yourusername/.local/share/tutor/env + +At this point your environment was updated, but there would not be any change there... because the plugin does not do anything. So let's get started and make some changes. + +Modifying existing files with patches +------------------------------------- + +We'll start by modifying some of our Open edX settings files. It's a frequent requirement to modify the ``FEATURES`` setting from the LMS or the CMS in edx-platform. In the legacy native installation, this was done by modifying the ``lms.env.yml`` and ``cms.env.yml`` files. Here we'll modify the Python setting files that define the edx-platform configuration. To achieve that we'll make use of two concepts from the Tutor API: :ref:`patches` and :ref:`filters`. + +If you have not already read :ref:`how_does_tutor_work` now would be a good time ☺️ Tutor uses templates to generate various files, such as settings, Dockerfiles, etc. These templates include ``{{ patch("patch-name") }}`` statements that allow plugins to insert arbitrary content in there. These patches are located at strategic locations. See :ref:`patches` for more information. + +Let's say that we would like to limit access to our brand new Open edX platform. It is not ready for prime-time yet, so we want to prevent users from registering new accounts. 
There is a feature flag for that in the LMS: `FEATURES['ALLOW_PUBLIC_ACCOUNT_CREATION'] `__. By default this flag is set to a true value, enabling anyone to create an account. In the following we'll set it to false. + +Add the following content to the ``myplugin.py`` file that you created earlier:: + + from tutor import hooks + + hooks.Filters.ENV_PATCHES.add_item( + ( + "openedx-lms-common-settings", + "FEATURES['ALLOW_PUBLIC_ACCOUNT_CREATION'] = False" + ) + ) + +Let's go over these changes one by one:: + + from tutor import hooks + +This imports the ``hooks`` module from Tutor, which grants us access to ``hooks.Actions`` and ``hooks.Filters`` (among other things). + +:: + + hooks.Filters.ENV_PATCHES.add_item( + ( + , + + ) + ) + +This means "add ```` to the ``{{ patch("") }}`` statement, thanks to the :py:data:`tutor.hooks.Filters.ENV_PATCHES` filter". In our case, we want to modify the LMS settings, both in production and development. The right patch for that is :patch:`openedx-lms-common-settings`. We add one item, which is a single Python-formatted line of code:: + + "FEATURES['ALLOW_PUBLIC_ACCOUNT_CREATION'] = False" + +.. note:: Notice how "False" starts with a capital "F"? That's how booleans are created in Python. + +Now, re-render your environment with:: + + $ tutor config save + +You can check that the feature was added to your environment:: + + $ grep -r ALLOW_PUBLIC_ACCOUNT_CREATION "$(tutor config printroot)/env" + /home/yourusername/.local/share/tutor/env/apps/openedx/settings/lms/production.py:FEATURES['ALLOW_PUBLIC_ACCOUNT_CREATION'] = False + /home/yourusername/.local/share/tutor/env/apps/openedx/settings/lms/development.py:FEATURES['ALLOW_PUBLIC_ACCOUNT_CREATION'] = False + +Your new settings will be taken into account by restarting your platform:: + + $ tutor local restart + +Congratulations! You've created your first working plugin. As you can guess, you can add changes to other files by adding other similar patch statements to your plugin. 
+ +Modifying configuration +----------------------- + +In the previous section you've learned how to add custom content to the Tutor templates. Now we'll see how to modify the Tutor configuration. Configuration settings can be specified in three ways: + +1. "unique" settings that need to be generated or user-specified, and then preserved in config.yml: such settings do not have reasonable defaults for all users. Examples of such settings include passwords and secret keys, which should be different for every user. +2. "default" settings have static fallback values. They are only stored in config.yml when they are modified by users. Most settings belong in this category. +3. "override" settings modify configuration from Tutor core or from other plugins. These will be removed and restored to their default values when the plugin is disabled. + +It is very strongly recommended to prefix unique and default settings with the plugin name, in all-caps, such that different plugins with the same configuration do not conflict with one another. + +As an example, we'll make it possible to configure public account creation on the LMS via a Tutor setting. In the previous section we achieved that by creating a patch. Let's modify this patch:: + + hooks.Filters.ENV_PATCHES.add_item( + ( + "openedx-lms-common-settings", + "FEATURES['ALLOW_PUBLIC_ACCOUNT_CREATION'] = {% if MYPLUGIN_PLATFORM_IS_PUBLIC %}True{% else %}False{% endif %}", + ) + ) + +This new patch makes use of the ``MYPLUGIN_PLATFORM_IS_PUBLIC`` configuration setting, which we need to create. Since this setting is specific to our plugin and should be stored in config.yml only when it's modified, we create it as a "default" setting. 
We do that with the :py:data:`tutor.hooks.Filters.CONFIG_DEFAULTS` filter:: + + hooks.Filters.CONFIG_DEFAULTS.add_item( + ("MYPLUGIN_PLATFORM_IS_PUBLIC", False) + ) + +You can check that the new configuration setting was properly defined:: + + $ tutor config printvalue MYPLUGIN_PLATFORM_IS_PUBLIC + False + +Now you can quickly toggle the public account creation feature by modifying the new setting:: + + $ tutor config save --set MYPLUGIN_PLATFORM_IS_PUBLIC=True + $ tutor local restart + + +Adding new templates +-------------------- + +If you are adding an extra application to your Open edX platform, there is a good chance that you will create a new Docker image with a custom Dockerfile. This new application will have its own settings and build assets, for instance. This means that you need to add new templates to the Tutor environment. To do that, we will create a new subfolder in our plugins folder:: + + $ mkdir -p "$(tutor plugins printroot)/templates/myplugin" + +Then we tell Tutor about this new template root thanks to the :py:data:`tutor.hooks.Filters.ENV_TEMPLATE_ROOTS` filter:: + + import os + + template_folder = os.path.join(os.path.dirname(__file__), "templates") + hooks.Filters.ENV_TEMPLATE_ROOTS.add_item(template_folder) + +We create a "build" subfolder which will contain all assets to build our "myservice" image:: + + $ mkdir -p "$(tutor plugins printroot)/templates/myplugin/build/myservice" + +Create the following Dockerfile in ``$(tutor plugins printroot)/templates/myplugin/build/myservice/Dockerfile``:: + + FROM docker.io/debian:bullseye-slim + CMD echo "what an awesome plugin!" 
+ +Tell Tutor that the "build" folder should be recursively rendered to ``env/plugins/myplugin/build`` with the :py:data:`tutor.hooks.Filters.ENV_TEMPLATE_TARGETS`:: + + hooks.Filters.ENV_TEMPLATE_TARGETS.add_item( + ("myplugin/build", "plugins") + ) + +At this point you can verify that the Dockerfile template was properly rendered:: + + $ cat "$(tutor config printroot)/env/plugins/myplugin/build/myservice/Dockerfile" + FROM docker.io/debian:bullseye-slim + CMD echo "what an awesome plugin!" + +We would like to build this image by running ``tutor images build myservice``. For that, we use the :py:data:`tutor.hooks.Filters.IMAGES_BUILD` filter:: + + hooks.Filters.IMAGES_BUILD.add_item( + ( + "myservice", # same name that will be passed to the `build` command + ("plugins", "myplugin", "build", "myservice"), # path to the Dockerfile folder + "myservice:latest", # Docker image tag + (), # custom build arguments that will be passed to the `docker build` command + ) + ) + +You can now build your image:: + + $ tutor images build myservice + Building image myservice:latest + docker build -t myservice:latest /home/yourusername/.local/share/tutor/env/plugins/myplugin/build/myservice + ... 
+ Successfully tagged myservice:latest + +Similarly, to push/pull your image to/from a Docker registry, implement the :py:data:`tutor.hooks.Filters.IMAGES_PUSH` and :py:data:`tutor.hooks.Filters.IMAGES_PULL` filters:: + + hooks.Filters.IMAGES_PUSH.add_item(("myservice", "myservice:latest")) + hooks.Filters.IMAGES_PULL.add_item(("myservice", "myservice:latest")) + +You can now run:: + + $ tutor images push myservice + $ tutor images pull myservice + +The "myservice" container can be automatically run in local installations by implementing the :patch:`local-docker-compose-services` patch:: + + hooks.Filters.ENV_PATCHES.add_item( + ( + "local-docker-compose-services", + """ + myservice: + image: myservice:latest + """ + ) + ) + +You can now run the "myservice" container which will execute the ``CMD`` statement we wrote in the Dockerfile:: + + $ tutor config save && tutor local run myservice + ... + Creating tutor_local_myservice_run ... done + what an awesome plugin! + +Declaring initialisation tasks +------------------------------ + +Services often need to run specific tasks before they can be started. For instance, the LMS and the CMS need to apply database migrations. These commands are written in shell scripts that are executed whenever we run ``launch``. We call these scripts "init tasks". To add a new local initialisation task, we must first add the corresponding service to the ``docker-compose-jobs.yml`` file by implementing the :patch:`local-docker-compose-jobs-services` patch:: + + hooks.Filters.ENV_PATCHES.add_item( + ( + "local-docker-compose-jobs-services", + """ + myservice-job: + image: myservice:latest + """, + ) + ) + +The patch above defined the "myservice-job" container which will run our initialisation task. 
Make sure that it is applied by updating your environment:: + + $ tutor config save + +Next, we create an initialisation task by adding an item to the :py:data:`tutor.hooks.Filters.CLI_DO_INIT_TASKS` filter:: + + + hooks.Filters.CLI_DO_INIT_TASKS.add_item( + ( + "myservice", + """ + echo "++++++ initialising my plugin..." + echo "++++++ done!" + """ + ) + ) + +Run this initialisation task with:: + + $ tutor local do init --limit=myplugin + ... + Running init task: myplugin/tasks/init.sh + ... + Creating tutor_local_myservice-job_run ... done + ++++++ initialising my plugin... + ++++++ done! + All services initialised. + +Tailoring services for development +---------------------------------- + +When you add services via :patch:`local-docker-compose-services`, those services will be available both in local production mode (``tutor local start``) and local development mode (``tutor dev start``). Sometimes, you may wish to further customize a service in ways that would not be suitable for production, but could be helpful for developers. To add in such customizations, implement the :patch:`local-docker-compose-dev-services` patch. For example, we can enable breakpoint debugging on the "myservice" development container by enabling the ``stdin_open`` and ``tty`` options:: + + hooks.Filters.ENV_PATCHES.add_item( + ( + "local-docker-compose-dev-services", + """ + myservice: + stdin_open: true + tty: true + """, + ) + ) + +Final result +------------ + +Eventually, our plugin is composed of the following files, all stored within the folder indicated by ``tutor plugins printroot`` (on Linux: ``~/.local/share/tutor-plugins``). 
+ +``myplugin.py`` +~~~~~~~~~~~~~~~ + +:: + + import os + from tutor import hooks + + # Define extra folder to look for templates and render the content of the "build" folder + template_folder = os.path.join(os.path.dirname(__file__), "templates") + hooks.Filters.ENV_TEMPLATE_ROOTS.add_item(template_folder) + hooks.Filters.ENV_TEMPLATE_TARGETS.add_item( + ("myplugin/build", "plugins") + ) + + # Define patches + hooks.Filters.ENV_PATCHES.add_item( + ( + "openedx-lms-common-settings", + "FEATURES['ALLOW_PUBLIC_ACCOUNT_CREATION'] = False" + ) + ) + hooks.Filters.ENV_PATCHES.add_item( + ( + "openedx-lms-common-settings", + "FEATURES['ALLOW_PUBLIC_ACCOUNT_CREATION'] = {% if MYPLUGIN_PLATFORM_IS_PUBLIC %}True{% else %}False{% endif %}", + ) + ) + hooks.Filters.ENV_PATCHES.add_item( + ( + "local-docker-compose-services", + """ + myservice: + image: myservice:latest + """ + ) + ) + hooks.Filters.ENV_PATCHES.add_item( + ( + "local-docker-compose-jobs-services", + """ + myservice-job: + image: myservice:latest + """, + ) + ) + hooks.Filters.ENV_PATCHES.add_item( + ( + "local-docker-compose-dev-services", + """ + myservice: + stdin_open: true + tty: true + """, + ) + ) + + # Modify configuration + hooks.Filters.CONFIG_DEFAULTS.add_item( + ("MYPLUGIN_PLATFORM_IS_PUBLIC", False) + ) + + # Define tasks + hooks.Filters.IMAGES_BUILD.add_item( + ( + "myservice", + ("plugins", "myplugin", "build", "myservice"), + "myservice:latest", + (), + ) + ) + hooks.Filters.IMAGES_PUSH.add_item(("myservice", "myservice:latest")) + hooks.Filters.IMAGES_PULL.add_item(("myservice", "myservice:latest")) + hooks.Filters.CLI_DO_INIT_TASKS.add_item( + ( + "myservice", + """ + echo "++++++ initialising my plugin..." + echo "++++++ done!" + """ + ) + ) + +``templates/myplugin/build/myservice/Dockerfile`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + FROM docker.io/debian:bullseye-slim + CMD echo "what an awesome plugin!" 
+ +``templates/myplugin/tasks/init.sh`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + echo "initialising my plugin..." + echo "done!" + +Distributing a plugin as a Python package +========================================= + +Storing plugins as simple Python modules has the merit of simplicity, but it makes it more difficult to distribute them, either to other users or to remote servers. When your plugin grows more complex, it is recommended to migrate it to a Python package. You should create a package using the `plugin cookiecutter `__. Packages are automatically detected as plugins thanks to the "tutor.plugin.v1" `entry point `__. The modules indicated by this entry point will be automatically imported when the plugins are enabled. See the cookiecutter project `README `__ for more information. diff --git a/docs/tutorials/podman.rst b/docs/tutorials/podman.rst new file mode 100644 index 0000000000..3c50c6b355 --- /dev/null +++ b/docs/tutorials/podman.rst @@ -0,0 +1,95 @@ +Running Tutor with Podman +------------------------- + +`Podman `_ is a fully featured container engine that is daemonless. It provides a Docker CLI comparable command line that makes it pretty easy for people transitioning over from Docker. + +Simply put, this means that you can do something like: ``alias docker=podman`` and everything will run and behave pretty much as expected. + +As of podman v3.0.0, podman now officially supports ``docker-compose`` via a shim service. This means that you now have the option of running Tutor with Podman, instead of the native Docker tools. + +This has some practical advantages: it does not require a running Docker daemon, and it enables you to run and build Docker images without depending on any system component running as ``root``. + +.. warning:: + You should not attempt to run Tutor with Podman on a system that already has native ``docker`` installed. 
If you want to switch to ``podman`` using the aliases described here, you should uninstall (or at least stop) the native Docker daemon first.
+
+
+Enabling Podman
+~~~~~~~~~~~~~~~
+
+Podman is supported on a variety of development platforms, see the `installation instructions `_ for details.
+
+Once you have installed Podman and its dependencies on the platform of your choice, you'll need to make sure that the ``podman`` binary, usually installed as ``/usr/bin/podman``, is aliased to ``docker``.
+
+On some CentOS and Fedora releases, you can install a package named ``podman-docker`` to do this for you, but on other platforms, you'll need to take care of this yourself.
+
+- To alias ``podman`` to ``docker``, you can simply run this command::
+
+    $ alias docker=podman
+
+.. note::
+    Running this command only makes a temporary alias. For a more permanent alias, you should place that command in your ``bashrc`` or equivalent file.
+
+Getting docker-compose to work with Podman
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To allow ``podman`` to work with ``docker-compose``, you'll need to enable a podman socket which pretends to be ``docker``.
+
+For rootless containers, this requires you to start the ``podman.service`` as a regular user and set the ``DOCKER_HOST`` environment variable. This can be done as follows::
+
+    # To start the podman service
+    $ systemctl --user start podman.service
+
+    # To set the DOCKER_HOST environment variable
+    $ export DOCKER_HOST="unix://$XDG_RUNTIME_DIR/podman/podman.sock"
+
+If you are running in rootless mode, ``tutor local`` expects a web proxy to be running on port ``80`` or port ``443``. For instructions on how to configure a web proxy, view `this tutorial `_.
+
+.. note::
+    As with the previous ``alias`` command, if you'd like to make the ``DOCKER_HOST`` variable permanent, you should put the entire export command in your ``bashrc`` or equivalent file.
+
+Fixing SELinux Errors
+~~~~~~~~~~~~~~~~~~~~~
+
+..
warning::
+    Disabling ``SELinux`` or setting it to *permissive mode* on your system is **highly discouraged and will render your system vulnerable.**
+
+If your system has ``SELinux`` working in enforcing mode, chances are that the SELinux context of the tutor root directory won't be set correctly. This will cause read issues because containers will not be able to read files from volumes due to a context mismatch.
+
+Errors stemming from this will look as follows in the ``sealert`` program::
+
+    "SELinux is preventing caddy from read access on the file Caddyfile."
+    "SELinux is preventing celery from read access on the directory cms."
+    "SELinux is preventing mysqld from add_name access on the directory is_writable."
+
+You can verify the context mismatch by running::
+
+    $ ls -lZ $(tutor config printroot)
+
+You'll most likely see something that looks like this::
+
+    -rw-r--r--. 1 tutor tutor unconfined_u:object_r:data_home_t:s0 2145 Jan 6 20:13 config.yml
+    drwxr-xr-x. 2 tutor tutor unconfined_u:object_r:data_home_t:s0 6 Jan 6 20:14 data
+    drwxr-xr-x. 8 tutor tutor unconfined_u:object_r:data_home_t:s0 121 Jan 6 20:14 env
+
+We're interested in the ``unconfined_u:object_r:data_home_t:s0`` part of that output.
+
+Notice how the third part of that says ``data_home_t``?
+
+That's the context type. For tutor to work, we need that part to be set to ``container_file_t``.
+
+This can be done as follows::
+
+    # Set the SELinux type of the tutor root directory and all of its subdirectories to `container_file_t`
+    $ sudo semanage fcontext -a -t container_file_t "$(tutor config printroot)(/.*)?"
+
+    # Apply the newly set security context to the directories
+    $ sudo restorecon -RF $(tutor config printroot)
+
+Running these two commands in a sequence should fix the SELinux errors.
+
+Verifying your environment
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Once you've set everything up as described, you should be able to run ``docker version`` and ``docker-compose --help`` and get a valid output.
+
+After that, you should be able to use ``tutor local``, and other commands as if you had installed the native Docker tools.
diff --git a/docs/tutorials/portainer.rst b/docs/tutorials/portainer.rst
new file mode 100644
index 0000000000..c750b45b68
--- /dev/null
+++ b/docs/tutorials/portainer.rst
@@ -0,0 +1,19 @@
+.. _portainer:
+
+Running a Docker container web UI with `Portainer `__
+----------------------------------------------------------------------------
+
+Portainer is a web UI for managing docker containers. It lets you view your entire Open edX platform at a glance. Try it! It's really cool::
+
+    docker run --rm \
+        --volume=/var/run/docker.sock:/var/run/docker.sock \
+        --volume=/tmp/portainer:/data \
+        -p 9000:9000 \
+        portainer/portainer-ce:latest --bind=:9000
+
+You can then view the portainer UI at `http://localhost:9000 `_. You will be asked to define a password for the admin user. Then, select a "Local environment" to work on; hit "Connect" and select the "local" group to view all running containers.
+
+.. image:: ../img/portainer.png
+    :alt: Portainer demo
+
+Among many other things, you'll be able to view the logs for each container, which is really useful.
diff --git a/docs/tutorials/proxy.rst b/docs/tutorials/proxy.rst
new file mode 100644
index 0000000000..61122667b2
--- /dev/null
+++ b/docs/tutorials/proxy.rst
@@ -0,0 +1,32 @@
+.. _web_proxy:
+
+Running Open edX behind a web proxy
+===================================
+
+In a vanilla deployment of Open edX with Tutor, a web proxy is launched to process incoming web requests. This web proxy is an instance of `Caddy `__ running inside a Docker container. This Docker container listens to ports 80 and 443 on the host.
+ +Quite often, there is already a web proxy running on the host, and this web proxy also listens to ports 80 and 443. In such a configuration, the Caddy container will not be able to start out of the box. So you should make small changes to the Tutor configuration by running:: + + tutor config save --set ENABLE_WEB_PROXY=false --set CADDY_HTTP_PORT=81 + +With these changes, Tutor will no longer listen to ports 80 and 443 on the host. In this configuration, the Caddy container will only listen to port 81 on the host. Web requests will follow this path:: + + Client β†’ Web proxy (http(s)://yourhost) β†’ Caddy (0.0.0.0:81) β†’ uwsgi (LMS/CMS/...) + +.. warning:: + In this setup, the Caddy HTTP port (81) will be exposed to the world. Make sure to configure your server firewall to block unwanted connections to the Caddy container. Alternatively, you can configure the Caddy container to accept only local connections:: + + tutor config save --set ENABLE_WEB_PROXY=false --set CADDY_HTTP_PORT=127.0.0.1:81 + +It is then your responsibility to configure the web proxy on the host. There are too many use cases and proxy vendors, so Tutor does not provide configuration files that will work for everyone. You should configure your web proxy to: + +- Capture traffic for the following hostnames: LMS_HOST, PREVIEW_LMS_HOST, CMS_HOST, as well as any additional host exposed by your plugins (MFE_HOST, ECOMMERCE_HOST, etc.). See each plugin documentation for more information. +- If SSL/TLS is enabled: + - Perform SSL/TLS termination using your own certificates. + - Forward http traffic to https. +- Set the following headers appropriately: ``X-Forwarded-Proto``, ``X-Forwarded-Port``. +- Forward all traffic to ``localhost:81`` (or whatever port indicated by CADDY_HTTP_PORT, see above). +- If possible, add support for `HTTP/3 `__, which considerably improves performance for Open edX (see `this comment `__). + +.. 
note::
+    If you want to run Open edX at ``https://...`` urls (as you probably do in production) it is *crucial* that the ``ENABLE_HTTPS`` flag is set to ``true``. If not, the web services will be configured to run at ``http://...`` URLs, and all sorts of trouble will happen. Therefore, make sure to continue answering ``y`` ("yes") to the quickstart dialogue question "Activate SSL/TLS certificates for HTTPS access?".
diff --git a/docs/tutorials/scale.rst b/docs/tutorials/scale.rst
new file mode 100644
index 0000000000..4cecdb3ef2
--- /dev/null
+++ b/docs/tutorials/scale.rst
@@ -0,0 +1,85 @@
+.. _scale:
+
+Running Open edX at scale
+=========================
+
+Does Open edX scale? This is the $10⁶ question when it comes to Tutor and Open edX deployments. The short answer is "yes". The longer answer is also "yes", but the details will very much depend on what we mean by "scaling".
+
+Depending on the context, "scaling" can imply different things:
+
+1. `Vertical scaling `__: increasing platform capacity by allocating more resources to a single server.
+2. `Horizontal scaling `__: the ability to serve an infinitely increasing number of users with consistent performance and linear costs.
+3. `High availability (HA) `__: the ability of the platform to remain fully functional despite one or more components being unavailable.
+
+All of these can be achieved with Tutor and Open edX, but the method to attain either differs greatly. First of all, the range of available solutions will depend on which deployment target is used. Tutor supports installations of Open edX on a single server with the :ref:`"local" ` deployment target, where Docker containers are orchestrated by docker-compose. On a single server, by definition, the server is a single point of failure (`SPOF `__). Thus, high availability is out of the question with a single server.
But while docker-compose is a great tool for managing single-server deployments, it is simply inappropriate for deploying to a cluster. Tutor also supports deploying to a Kubernetes cluster (see :ref:`k8s`). This is the recommended solution to deploy Open edX "at scale". + +Scaling with a single server +---------------------------- + +Options are limited when it comes to scaling an Open edX platform deployed on a single-server. High availability is out of the question and the number of users that your platform can serve simultaneously will be limited by the server capacity. + +Fortunately, Open edX was designed to run at scale -- most notably at `edX.org `__, but also on large national education platforms. Thus, performance will not be limited by the backend software, but only by the hardware. + +Increasing web server capacity +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +As the server CPU and memory are increased, the request throughput can be increased by adjusting the number of uWSGI workers (see :ref:`configuration docs `). By default, the "lms" and "cms" containers each spawn 2 uWSGI workers. The number of workers should be increased if you observe an increase in the latency of user requests but CPU usage remains below 100%. To increase the number of workers for the LMS and the CMS, run for example:: + + tutor config save \ + --set OPENEDX_LMS_UWSGI_WORKERS=8 \ + --set OPENEDX_CMS_UWSGI_WORKERS=4 + tutor local restart lms cms + +The right values will very much depend on your server's available memory and CPU performance, as well as the maximum number of simultaneous users who use your platform. As an example data point, it was reported that a large Open edX platform can serve up to 500k unique users per week on a virtual server with 8 vCPU and 16 GB memory. + +Offloading data storage +~~~~~~~~~~~~~~~~~~~~~~~ + +Aside from web workers, the most resource-intensive services are in the data persistence layer. 
They are, by decreasing resource usage: + +- `Elasticsearch `__: indexing of course contents and forum topics, mostly for search. Elasticsearch is never a source of truth in Open edX, and the data can thus be trashed and re-created safely. +- `MySQL `__: structured, consistent data storage which is the default destination of all data. +- `MongoDB `__: structured storage of course data. +- `Redis `__: caching and asynchronous task management. +- `MinIO `__: S3-like object storage for user-uploaded files, which is enabled by the `tutor-minio `__ plugin. It is possible to replace MinIO by direct filesystem storage (the default), but scaling will then become much more difficult down the road. + +When attempting to scale a single-server deployment, we recommend starting by offloading some of these stateful data storage components, in the same order of priority. There are multiple benefits: + +1. It will free up some resources both for the web workers and the data storage components. +2. It is the first step towards horizontal scaling of the web workers. +3. It becomes possible to either install every component as a separate service or rely on 3rd-party SaaS with high availability. + +Moving each of the data storage components is a fairly straightforward process, although details vary for every component. For instance, for the MySQL database, start by disabling the locally running MySQL instance:: + + tutor config save --set RUN_MYSQL=false + +Then, migrate the data located at ``$(tutor config printroot)/data/mysql`` to the new MySQL instance. Configure the Open edX platform to point at the new database:: + + tutor config save \ + --set MYSQL_HOST=yourdb.com \ + --set MYSQL_PORT=3306 \ + --set MYSQL_ROOT_USERNAME=root \ + --set MYSQL_ROOT_PASSWORD=p4ssw0rd + +The changes will be taken into account the next time the platform is restarted. + +Beware that moving the data components to dedicated servers has the potential of creating new single points of failure (`SPOF `__). 
To avoid this situation, each component should be installed as a highly available service (or as a highly available SaaS). + +Scaling with multiple servers +----------------------------- + +Horizontally scaling web services +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +As the number of users of a web platform increases, they put increased pressure on the web workers that respond to their requests. Thus, in most cases, web worker performance is the first bottleneck that system administrators have to face when their service becomes more popular. Initially, any given Kubernetes-based Tutor platform ships with one replica for each deployment. To increase (or reduce) the number of replicas for any given service, run ``tutor k8s scale ``. Behind the scenes, this command will trigger a ``kubectl scale --replicas=...`` command that will seamlessly increase the number of pods for that deployment. + +In Open edX multiple web services are exposed to the outside world. The ones that usually receive the most traffic are, in decreasing order, the LMS, the CMS, and the forum (assuming the `tutor-forum `__ plugin was enabled). As an example, all three deployment replicas can be scaled by running:: + + tutor k8s scale lms 8 + tutor k8s scale cms 4 + tutor k8s scale forum 2 + +Highly-available architecture, autoscaling, ... +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +There is only so much that Tutor can do for you, and scaling some components falls beyond the scope of Tutor. For instance, it is your responsibility to make sure that your Kubernetes cluster has a `highly available control plane `__ and `topology `__. Also, it is possible to achieve `autoscaling `__; but it is your responsibility to setup latency metrics collection and to configure the scaling policies. diff --git a/docs/tutorials/theming.rst b/docs/tutorials/theming.rst new file mode 100644 index 0000000000..6c98384612 --- /dev/null +++ b/docs/tutorials/theming.rst @@ -0,0 +1,57 @@ +.. 
_theming: + +Changing the appearance of Open edX +=================================== + +Installing a custom theme +------------------------- + +Comprehensive theming is enabled by default, but only the default theme is compiled. `Indigo `__ is a better, ready-to-run theme that you can start using today. + +To compile your own theme, add it to the ``env/build/openedx/themes/`` folder:: + + git clone https://github.com/me/myopenedxtheme.git \ + "$(tutor config printroot)/env/build/openedx/themes/myopenedxtheme" + +The ``themes`` folder should have the following structure:: + + openedx/themes/ + mycustomtheme1/ + cms/ + ... + lms/ + ... + mycustomtheme2/ + ... + +Then you must rebuild the openedx Docker image:: + + tutor images build openedx + +Finally, you should enable your theme with the :ref:`settheme command `. + +.. _theme_development: + +Developing a new theme +---------------------- + +With Tutor, it's pretty easy to develop your own themes. Start by placing your files inside the ``env/build/openedx/themes`` directory. For instance, you could start from the ``edx.org`` theme present inside the ``edx-platform`` repository:: + + cp -r /path/to/edx-platform/themes/edx.org "$(tutor config printroot)/env/build/openedx/themes/" + +.. warning:: + You should not create a soft link here. If you do, it will trigger a ``Theme not found in any of the themes dirs`` error. This is because soft links are not properly resolved from inside docker containers. + +Then, run a local webserver:: + + tutor dev start lms + +The LMS can then be accessed at http://local.edly.io:8000. You will then have to :ref:`enable that theme `:: + + tutor dev do settheme mythemename + +Watch the themes folders for changes (in a different terminal):: + + tutor dev run watchthemes + +Make changes to some of the files inside the theme directory: the theme assets should be automatically recompiled and visible at http://local.edly.io:8000. 
diff --git a/docs/whatnext.rst b/docs/whatnext.rst index 648450ace4..2d77da5b62 100644 --- a/docs/whatnext.rst +++ b/docs/whatnext.rst @@ -3,27 +3,27 @@ What next? ========== -You have gone through the :ref:`Quickstart installation `: at this point you should have a running Open edX platform. If you don't, please follow the instructions from the :ref:`Troubleshooting ` section. +You have gone through the :ref:`Quickstart installation `: at this point, you should have a running Open edX platform. If you don't, please follow the instructions from the :ref:`Troubleshooting ` section. Logging-in as administrator --------------------------- -Out of the box, Tutor does not create any user for you. You will want to create a user yourself with staff and administrator privileges in order to access the studio. There is a :ref:`simple command for that `. +Out of the box, Tutor does not create any user for you. You will want to create a user yourself with staff and administrator privileges to access the studio. There is a :ref:`simple command for that `. Importing a demo course ----------------------- -To get a glimpse of the possibilities of Open edX, we recommend you import the `official demo test course `__. Tutor provides a :ref:`simple command for that `. +To get a glimpse of the possibilities of Open edX, we recommend you import the `official demo test course `__. Tutor provides a :ref:`simple command for that `. Making Open edX look better --------------------------- -Tutor makes it easy to :ref:`develop ` and :ref:`install ` your own themes. We also provide `Indigo `__: a free, customizable theme that you can install today. +Tutor makes it easy to :ref:`install ` and :ref:`develop ` your own themes. We also provide `Indigo `__: a free, customizable theme that you can install today. Adding features --------------- -Check out the Tutor :ref:`plugins `, :ref:`extra features ` and :ref:`configuration/customization options `. 
+Check out the Tutor :ref:`plugins ` and :ref:`configuration/customization options `. Hacking into Open edX --------------------- @@ -35,7 +35,12 @@ Deploying to Kubernetes Yes, Tutor comes with Kubernetes deployment support :ref:`out of the box `. +Gathering insights and analytics about Open edX +----------------------------------------------- + +Check out `Cairn `__, the next-generation analytics solution for Open edX. + Meeting the community --------------------- -Ask your questions and chat with the Tutor community on the official community forums: https://discuss.overhang.io \ No newline at end of file +Ask your questions and chat with the Tutor community on the official Open edX community forum: https://discuss.openedx.org diff --git a/github-release.py b/github-release.py deleted file mode 100755 index 0f4585d578..0000000000 --- a/github-release.py +++ /dev/null @@ -1,83 +0,0 @@ -#! /usr/bin/env python3 - -""" -This is a quick-and-dirty script to upload release assets to Github via the API. -Currently, we do not use this script and rely instead on the github-release binary. This -is just in case the github-release binary stops working (as it has before). 
- -Run script with: - - GITHUB_TOKEN=s3cr3t ./github-release.py v3.00.00 ./dist/tutor-openedx-3.00.00.tar.gz "tutor-$(uname -s)_$(uname -m)" -""" - -import argparse -import os -from urllib.parse import urlencode - -import requests - -HEADERS = { - "Accept": "application/vnd.github.v3+json", - "Authorization": "token {}".format(os.environ["GITHUB_TOKEN"]), -} -RELEASES_URL = "https://api.github.com/repos/overhangio/tutor/releases" - - -def main(): - parser = argparse.ArgumentParser( - description="Create github releases and upload assets" - ) - parser.add_argument("tag") - parser.add_argument("asset") - parser.add_argument("asset_name") - args = parser.parse_args() - release = get_or_create_release(args.tag) - overwrite_asset(args.asset, args.asset_name, release) - - -def get_or_create_release(tag): - # https://developer.github.com/v3/repos/releases/#get-a-release-by-tag-name - url = "{}/tags/{}".format(RELEASES_URL, tag) - response = requests.get(url) - if response.status_code == 200: - print("Release {} already exists".format(tag)) - return response.json() - - print("Creating release {}".format(tag)) - description = open( - os.path.join(os.path.dirname("__file__"), "docs", "_release_description.md") - ).read() - params = {"tag_name": tag, "name": tag, "body": description} - # https://developer.github.com/v3/repos/releases/#create-a-release - return requests.post(RELEASES_URL, json=params, headers=HEADERS,).json() - - -def overwrite_asset(asset, asset_name, release): - # https://developer.github.com/v3/repos/releases/#list-assets-for-a-release - url = "{}/{}/assets".format(RELEASES_URL, release["id"]) - for existing_asset in requests.get(url).json(): - if existing_asset["name"] == asset_name: - print("Deleting existing asset") - # https://developer.github.com/v3/repos/releases/#delete-a-release-asset - delete_url = "{}/assets/{}".format(RELEASES_URL, existing_asset["id"]) - response = requests.delete(delete_url, headers=HEADERS) - if response.status_code != 204: - 
print(response, response.content) - raise ValueError("Could not delete asset") - print("Uploading asset") - upload_asset(asset, asset_name, release) - - -def upload_asset(asset, asset_name, release): - upload_url = release["upload_url"].replace( - "{?name,label}", "?" + urlencode({"name": asset_name}) - ) - files = {"file": (asset_name, open(asset, "rb"))} - response = requests.post(upload_url, files=files, headers=HEADERS) - if response.status_code > 299: - print(response, response.content) - raise ValueError("Could not upload asset to release") - - -if __name__ == "__main__": - main() diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..d1e6ae6e56 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,2 @@ +[build-system] +requires = ["setuptools", "wheel"] diff --git a/requirements/base.in b/requirements/base.in index 63afad5d52..f03129f9ca 100644 --- a/requirements/base.in +++ b/requirements/base.in @@ -1,10 +1,11 @@ appdirs -click>=7.0 -click_repl -pycryptodome -jinja2>=2.9 +click>=8.0 +jinja2>=2.10 kubernetes -pyyaml>=4.2b1 - -# Installing urllib3==1.26.0 causes compatibility errors with requests==2.24.0 -urllib3<1.26.0 \ No newline at end of file +mypy +packaging +pycryptodome>=3.17.0 +pyyaml>=6.0 +typing-extensions>=4.4.0 +importlib-metadata>=7.0.1 +importlib-resources>=6.1.1 diff --git a/requirements/base.txt b/requirements/base.txt index 452c9d0199..17237d2a0f 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -1,34 +1,86 @@ # -# This file is autogenerated by pip-compile -# To update, run: +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: # # pip-compile requirements/base.in # -appdirs==1.4.4 # via -r requirements/base.in -cachetools==4.1.1 # via google-auth -certifi==2020.11.8 # via kubernetes, requests -chardet==3.0.4 # via requests -click-repl==0.1.6 # via -r requirements/base.in -click==7.1.2 # via -r requirements/base.in, click-repl -google-auth==1.23.0 # via kubernetes 
-idna==2.10 # via requests -jinja2==2.11.2 # via -r requirements/base.in -kubernetes==12.0.0 # via -r requirements/base.in -markupsafe==1.1.1 # via jinja2 -oauthlib==3.1.0 # via requests-oauthlib -prompt-toolkit==3.0.8 # via click-repl -pyasn1-modules==0.2.8 # via google-auth -pyasn1==0.4.8 # via pyasn1-modules, rsa -pycryptodome==3.9.9 # via -r requirements/base.in -python-dateutil==2.8.1 # via kubernetes -pyyaml==5.3.1 # via -r requirements/base.in, kubernetes -requests-oauthlib==1.3.0 # via kubernetes -requests==2.24.0 # via kubernetes, requests-oauthlib -rsa==4.6 # via google-auth -six==1.15.0 # via click-repl, google-auth, kubernetes, python-dateutil, websocket-client -urllib3==1.25.11 # via -r requirements/base.in, kubernetes, requests -wcwidth==0.2.5 # via prompt-toolkit -websocket-client==0.57.0 # via kubernetes - -# The following packages are considered to be unsafe in a requirements file: -# setuptools +appdirs==1.4.4 + # via -r requirements/base.in +cachetools==5.5.0 + # via google-auth +certifi==2024.8.30 + # via + # kubernetes + # requests +charset-normalizer==3.4.0 + # via requests +click==8.1.7 + # via -r requirements/base.in +durationpy==0.9 + # via kubernetes +google-auth==2.36.0 + # via kubernetes +idna==3.10 + # via requests +importlib-metadata==8.5.0 + # via -r requirements/base.in +importlib-resources==6.4.5 + # via -r requirements/base.in +jinja2==3.1.4 + # via -r requirements/base.in +kubernetes==31.0.0 + # via -r requirements/base.in +markupsafe==3.0.2 + # via jinja2 +mypy==1.13.0 + # via -r requirements/base.in +mypy-extensions==1.0.0 + # via mypy +oauthlib==3.2.2 + # via + # kubernetes + # requests-oauthlib +packaging==24.2 + # via -r requirements/base.in +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth +pycryptodome==3.21.0 + # via -r requirements/base.in +python-dateutil==2.9.0.post0 + # via kubernetes +pyyaml==6.0.2 + # via + # -r requirements/base.in + # kubernetes +requests==2.32.3 + # via + 
# kubernetes + # requests-oauthlib +requests-oauthlib==2.0.0 + # via kubernetes +rsa==4.9 + # via google-auth +six==1.16.0 + # via + # kubernetes + # python-dateutil +tomli==2.1.0 + # via mypy +typing-extensions==4.12.2 + # via + # -r requirements/base.in + # mypy +urllib3==2.2.3 + # via + # kubernetes + # requests +websocket-client==1.8.0 + # via kubernetes +zipp==3.21.0 + # via + # importlib-metadata + # importlib-resources diff --git a/requirements/dev.in b/requirements/dev.in index ce25d148f3..c53771c419 100644 --- a/requirements/dev.in +++ b/requirements/dev.in @@ -1,6 +1,13 @@ -r base.txt black +coverage pip-tools pylint pyinstaller -twine \ No newline at end of file +scriv +twine + +# Types packages +types-docutils +types-PyYAML +types-setuptools diff --git a/requirements/dev.txt b/requirements/dev.txt index 1f913353bd..a42a3c5ed0 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -1,72 +1,255 @@ # -# This file is autogenerated by pip-compile -# To update, run: +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: # # pip-compile requirements/dev.in # -altgraph==0.17 # via pyinstaller -appdirs==1.4.4 # via -r requirements/base.txt, black -astroid==2.4.2 # via pylint -black==20.8b1 # via -r requirements/dev.in -bleach==3.2.1 # via readme-renderer -cachetools==4.1.1 # via -r requirements/base.txt, google-auth -certifi==2020.11.8 # via -r requirements/base.txt, kubernetes, requests -cffi==1.14.3 # via cryptography -chardet==3.0.4 # via -r requirements/base.txt, requests -click-repl==0.1.6 # via -r requirements/base.txt -click==7.1.2 # via -r requirements/base.txt, black, click-repl, pip-tools -colorama==0.4.4 # via twine -cryptography==3.2.1 # via secretstorage -docutils==0.16 # via readme-renderer -google-auth==1.23.0 # via -r requirements/base.txt, kubernetes -idna==2.10 # via -r requirements/base.txt, requests -importlib-metadata==2.0.0 # via keyring, twine -isort==5.6.4 # via pylint -jeepney==0.5.0 # via 
keyring, secretstorage -jinja2==2.11.2 # via -r requirements/base.txt -keyring==21.5.0 # via twine -kubernetes==12.0.0 # via -r requirements/base.txt -lazy-object-proxy==1.4.3 # via astroid -markupsafe==1.1.1 # via -r requirements/base.txt, jinja2 -mccabe==0.6.1 # via pylint -mypy-extensions==0.4.3 # via black -oauthlib==3.1.0 # via -r requirements/base.txt, requests-oauthlib -packaging==20.4 # via bleach -pathspec==0.8.1 # via black -pip-tools==5.3.1 # via -r requirements/dev.in -pkginfo==1.6.1 # via twine -prompt-toolkit==3.0.8 # via -r requirements/base.txt, click-repl -pyasn1-modules==0.2.8 # via -r requirements/base.txt, google-auth -pyasn1==0.4.8 # via -r requirements/base.txt, pyasn1-modules, rsa -pycparser==2.20 # via cffi -pycryptodome==3.9.9 # via -r requirements/base.txt -pygments==2.7.2 # via readme-renderer -pyinstaller-hooks-contrib==2020.10 # via pyinstaller -pyinstaller==4.0 # via -r requirements/dev.in -pylint==2.6.0 # via -r requirements/dev.in -pyparsing==2.4.7 # via packaging -python-dateutil==2.8.1 # via -r requirements/base.txt, kubernetes -pyyaml==5.3.1 # via -r requirements/base.txt, kubernetes -readme-renderer==28.0 # via twine -regex==2020.10.28 # via black -requests-oauthlib==1.3.0 # via -r requirements/base.txt, kubernetes -requests-toolbelt==0.9.1 # via twine -requests==2.24.0 # via -r requirements/base.txt, kubernetes, requests-oauthlib, requests-toolbelt, twine -rfc3986==1.4.0 # via twine -rsa==4.6 # via -r requirements/base.txt, google-auth -secretstorage==3.2.0 # via keyring -six==1.15.0 # via -r requirements/base.txt, astroid, bleach, click-repl, cryptography, google-auth, kubernetes, packaging, pip-tools, python-dateutil, readme-renderer, websocket-client -toml==0.10.2 # via black, pylint -tqdm==4.51.0 # via twine -twine==3.2.0 # via -r requirements/dev.in -typed-ast==1.4.1 # via astroid, black -typing-extensions==3.7.4.3 # via black -urllib3==1.25.11 # via -r requirements/base.txt, kubernetes, requests -wcwidth==0.2.5 # via -r 
requirements/base.txt, prompt-toolkit -webencodings==0.5.1 # via bleach -websocket-client==0.57.0 # via -r requirements/base.txt, kubernetes -wrapt==1.12.1 # via astroid -zipp==3.4.0 # via importlib-metadata +altgraph==0.17.4 + # via pyinstaller +appdirs==1.4.4 + # via -r requirements/base.txt +astroid==3.3.5 + # via pylint +attrs==24.2.0 + # via scriv +backports-tarfile==1.2.0 + # via jaraco-context +black==24.10.0 + # via -r requirements/dev.in +build==1.2.2.post1 + # via pip-tools +cachetools==5.5.0 + # via + # -r requirements/base.txt + # google-auth +certifi==2024.8.30 + # via + # -r requirements/base.txt + # kubernetes + # requests +cffi==1.17.1 + # via cryptography +charset-normalizer==3.4.0 + # via + # -r requirements/base.txt + # requests +click==8.1.7 + # via + # -r requirements/base.txt + # black + # click-log + # pip-tools + # scriv +click-log==0.4.0 + # via scriv +coverage==7.6.7 + # via -r requirements/dev.in +cryptography==43.0.3 + # via secretstorage +dill==0.3.9 + # via pylint +docutils==0.21.2 + # via readme-renderer +durationpy==0.9 + # via + # -r requirements/base.txt + # kubernetes +google-auth==2.36.0 + # via + # -r requirements/base.txt + # kubernetes +idna==3.10 + # via + # -r requirements/base.txt + # requests +importlib-metadata==8.5.0 + # via + # -r requirements/base.txt + # build + # keyring + # pyinstaller + # pyinstaller-hooks-contrib + # twine +importlib-resources==6.4.5 + # via -r requirements/base.txt +isort==5.13.2 + # via pylint +jaraco-classes==3.4.0 + # via keyring +jaraco-context==6.0.1 + # via keyring +jaraco-functools==4.1.0 + # via keyring +jeepney==0.8.0 + # via + # keyring + # secretstorage +jinja2==3.1.4 + # via + # -r requirements/base.txt + # scriv +keyring==25.5.0 + # via twine +kubernetes==31.0.0 + # via -r requirements/base.txt +markdown-it-py==3.0.0 + # via + # rich + # scriv +markupsafe==3.0.2 + # via + # -r requirements/base.txt + # jinja2 +mccabe==0.7.0 + # via pylint +mdurl==0.1.2 + # via markdown-it-py 
+more-itertools==10.5.0 + # via + # jaraco-classes + # jaraco-functools +mypy==1.13.0 + # via -r requirements/base.txt +mypy-extensions==1.0.0 + # via + # -r requirements/base.txt + # black + # mypy +nh3==0.2.18 + # via readme-renderer +oauthlib==3.2.2 + # via + # -r requirements/base.txt + # kubernetes + # requests-oauthlib +packaging==24.2 + # via + # -r requirements/base.txt + # black + # build + # pyinstaller + # pyinstaller-hooks-contrib +pathspec==0.12.1 + # via black +pip-tools==7.4.1 + # via -r requirements/dev.in +pkginfo==1.10.0 + # via twine +platformdirs==4.3.6 + # via + # black + # pylint +pyasn1==0.6.1 + # via + # -r requirements/base.txt + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via + # -r requirements/base.txt + # google-auth +pycparser==2.22 + # via cffi +pycryptodome==3.21.0 + # via -r requirements/base.txt +pygments==2.18.0 + # via + # readme-renderer + # rich +pyinstaller==6.11.1 + # via -r requirements/dev.in +pyinstaller-hooks-contrib==2024.10 + # via pyinstaller +pylint==3.3.1 + # via -r requirements/dev.in +pyproject-hooks==1.2.0 + # via + # build + # pip-tools +python-dateutil==2.9.0.post0 + # via + # -r requirements/base.txt + # kubernetes +pyyaml==6.0.2 + # via + # -r requirements/base.txt + # kubernetes +readme-renderer==44.0 + # via twine +requests==2.32.3 + # via + # -r requirements/base.txt + # kubernetes + # requests-oauthlib + # requests-toolbelt + # scriv + # twine +requests-oauthlib==2.0.0 + # via + # -r requirements/base.txt + # kubernetes +requests-toolbelt==1.0.0 + # via twine +rfc3986==2.0.0 + # via twine +rich==13.9.4 + # via twine +rsa==4.9 + # via + # -r requirements/base.txt + # google-auth +scriv==1.5.1 + # via -r requirements/dev.in +secretstorage==3.3.3 + # via keyring +six==1.16.0 + # via + # -r requirements/base.txt + # kubernetes + # python-dateutil +tomli==2.1.0 + # via + # -r requirements/base.txt + # black + # build + # mypy + # pip-tools + # pylint +tomlkit==0.13.2 + # via pylint +twine==5.1.1 + # via 
-r requirements/dev.in +types-docutils==0.21.0.20241005 + # via -r requirements/dev.in +types-pyyaml==6.0.12.20240917 + # via -r requirements/dev.in +types-setuptools==75.5.0.20241122 + # via -r requirements/dev.in +typing-extensions==4.12.2 + # via + # -r requirements/base.txt + # astroid + # black + # mypy + # pylint + # rich +urllib3==2.2.3 + # via + # -r requirements/base.txt + # kubernetes + # requests + # twine +websocket-client==1.8.0 + # via + # -r requirements/base.txt + # kubernetes +wheel==0.45.0 + # via pip-tools +zipp==3.21.0 + # via + # -r requirements/base.txt + # importlib-metadata + # importlib-resources # The following packages are considered to be unsafe in a requirements file: # pip diff --git a/requirements/docs.in b/requirements/docs.in index b3b56217f5..79c02e20e3 100644 --- a/requirements/docs.in +++ b/requirements/docs.in @@ -1,3 +1,4 @@ -r base.txt sphinx sphinx-rtd-theme +sphinx-click diff --git a/requirements/docs.txt b/requirements/docs.txt index 2bbcd12454..98f106a5de 100644 --- a/requirements/docs.txt +++ b/requirements/docs.txt @@ -1,51 +1,168 @@ # -# This file is autogenerated by pip-compile -# To update, run: +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: # # pip-compile requirements/docs.in # -alabaster==0.7.12 # via sphinx -appdirs==1.4.4 # via -r requirements/base.txt -babel==2.8.0 # via sphinx -cachetools==4.1.1 # via -r requirements/base.txt, google-auth -certifi==2020.11.8 # via -r requirements/base.txt, kubernetes, requests -chardet==3.0.4 # via -r requirements/base.txt, requests -click-repl==0.1.6 # via -r requirements/base.txt -click==7.1.2 # via -r requirements/base.txt, click-repl -docutils==0.16 # via sphinx -google-auth==1.23.0 # via -r requirements/base.txt, kubernetes -idna==2.10 # via -r requirements/base.txt, requests -imagesize==1.2.0 # via sphinx -jinja2==2.11.2 # via -r requirements/base.txt, sphinx -kubernetes==12.0.0 # via -r requirements/base.txt -markupsafe==1.1.1 # 
via -r requirements/base.txt, jinja2 -oauthlib==3.1.0 # via -r requirements/base.txt, requests-oauthlib -packaging==20.4 # via sphinx -prompt-toolkit==3.0.8 # via -r requirements/base.txt, click-repl -pyasn1-modules==0.2.8 # via -r requirements/base.txt, google-auth -pyasn1==0.4.8 # via -r requirements/base.txt, pyasn1-modules, rsa -pycryptodome==3.9.9 # via -r requirements/base.txt -pygments==2.7.2 # via sphinx -pyparsing==2.4.7 # via packaging -python-dateutil==2.8.1 # via -r requirements/base.txt, kubernetes -pytz==2020.4 # via babel -pyyaml==5.3.1 # via -r requirements/base.txt, kubernetes -requests-oauthlib==1.3.0 # via -r requirements/base.txt, kubernetes -requests==2.24.0 # via -r requirements/base.txt, kubernetes, requests-oauthlib, sphinx -rsa==4.6 # via -r requirements/base.txt, google-auth -six==1.15.0 # via -r requirements/base.txt, click-repl, google-auth, kubernetes, packaging, python-dateutil, websocket-client -snowballstemmer==2.0.0 # via sphinx -sphinx-rtd-theme==0.5.0 # via -r requirements/docs.in -sphinx==3.3.0 # via -r requirements/docs.in, sphinx-rtd-theme -sphinxcontrib-applehelp==1.0.2 # via sphinx -sphinxcontrib-devhelp==1.0.2 # via sphinx -sphinxcontrib-htmlhelp==1.0.3 # via sphinx -sphinxcontrib-jsmath==1.0.1 # via sphinx -sphinxcontrib-qthelp==1.0.3 # via sphinx -sphinxcontrib-serializinghtml==1.1.4 # via sphinx -urllib3==1.25.11 # via -r requirements/base.txt, kubernetes, requests -wcwidth==0.2.5 # via -r requirements/base.txt, prompt-toolkit -websocket-client==0.57.0 # via -r requirements/base.txt, kubernetes - -# The following packages are considered to be unsafe in a requirements file: -# setuptools +alabaster==0.7.16 + # via sphinx +appdirs==1.4.4 + # via -r requirements/base.txt +babel==2.16.0 + # via sphinx +cachetools==5.5.0 + # via + # -r requirements/base.txt + # google-auth +certifi==2024.8.30 + # via + # -r requirements/base.txt + # kubernetes + # requests +charset-normalizer==3.4.0 + # via + # -r requirements/base.txt + # 
requests +click==8.1.7 + # via + # -r requirements/base.txt + # sphinx-click +docutils==0.21.2 + # via + # sphinx + # sphinx-click + # sphinx-rtd-theme +durationpy==0.9 + # via + # -r requirements/base.txt + # kubernetes +google-auth==2.36.0 + # via + # -r requirements/base.txt + # kubernetes +idna==3.10 + # via + # -r requirements/base.txt + # requests +imagesize==1.4.1 + # via sphinx +importlib-metadata==8.5.0 + # via + # -r requirements/base.txt + # sphinx +importlib-resources==6.4.5 + # via -r requirements/base.txt +jinja2==3.1.4 + # via + # -r requirements/base.txt + # sphinx +kubernetes==31.0.0 + # via -r requirements/base.txt +markupsafe==3.0.2 + # via + # -r requirements/base.txt + # jinja2 +mypy==1.13.0 + # via -r requirements/base.txt +mypy-extensions==1.0.0 + # via + # -r requirements/base.txt + # mypy +oauthlib==3.2.2 + # via + # -r requirements/base.txt + # kubernetes + # requests-oauthlib +packaging==24.2 + # via + # -r requirements/base.txt + # sphinx +pyasn1==0.6.1 + # via + # -r requirements/base.txt + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via + # -r requirements/base.txt + # google-auth +pycryptodome==3.21.0 + # via -r requirements/base.txt +pygments==2.18.0 + # via sphinx +python-dateutil==2.9.0.post0 + # via + # -r requirements/base.txt + # kubernetes +pyyaml==6.0.2 + # via + # -r requirements/base.txt + # kubernetes +requests==2.32.3 + # via + # -r requirements/base.txt + # kubernetes + # requests-oauthlib + # sphinx +requests-oauthlib==2.0.0 + # via + # -r requirements/base.txt + # kubernetes +rsa==4.9 + # via + # -r requirements/base.txt + # google-auth +six==1.16.0 + # via + # -r requirements/base.txt + # kubernetes + # python-dateutil +snowballstemmer==2.2.0 + # via sphinx +sphinx==7.4.7 + # via + # -r requirements/docs.in + # sphinx-click + # sphinx-rtd-theme + # sphinxcontrib-jquery +sphinx-click==6.0.0 + # via -r requirements/docs.in +sphinx-rtd-theme==3.0.2 + # via -r requirements/docs.in +sphinxcontrib-applehelp==2.0.0 + 
# via sphinx +sphinxcontrib-devhelp==2.0.0 + # via sphinx +sphinxcontrib-htmlhelp==2.1.0 + # via sphinx +sphinxcontrib-jquery==4.1 + # via sphinx-rtd-theme +sphinxcontrib-jsmath==1.0.1 + # via sphinx +sphinxcontrib-qthelp==2.0.0 + # via sphinx +sphinxcontrib-serializinghtml==2.0.0 + # via sphinx +tomli==2.1.0 + # via + # -r requirements/base.txt + # mypy + # sphinx +typing-extensions==4.12.2 + # via + # -r requirements/base.txt + # mypy +urllib3==2.2.3 + # via + # -r requirements/base.txt + # kubernetes + # requests +websocket-client==1.8.0 + # via + # -r requirements/base.txt + # kubernetes +zipp==3.21.0 + # via + # -r requirements/base.txt + # importlib-metadata + # importlib-resources diff --git a/requirements/plugins.txt b/requirements/plugins.txt index cb282d810c..2c4be2fe12 100644 --- a/requirements/plugins.txt +++ b/requirements/plugins.txt @@ -1,7 +1,14 @@ -tutor-discovery -tutor-ecommerce -#tutor-figures -tutor-license -tutor-minio -tutor-notes -tutor-xqueue \ No newline at end of file +# change version ranges when upgrading from redwood +tutor-android>=18.0.0,<19.0.0 +tutor-cairn>=18.0.0,<19.0.0 +tutor-credentials>=18.0.0,<19.0.0 +tutor-discovery>=18.0.0,<19.0.0 +tutor-ecommerce>=18.0.0,<19.0.0 +tutor-forum>=18.0.0,<19.0.0 +tutor-indigo>=18.0.0,<19.0.0 +tutor-jupyter>=18.0.0,<19.0.0 +tutor-mfe>=18.0.0,<19.0.0 +tutor-minio>=18.0.0,<19.0.0 +tutor-notes>=18.0.0,<19.0.0 +tutor-webui>=18.0.0,<19.0.0 +tutor-xqueue>=18.0.0,<19.0.0 diff --git a/setup.py b/setup.py index 073d3c9bef..50a182bf59 100644 --- a/setup.py +++ b/setup.py @@ -1,17 +1,23 @@ import io import os +from typing import Dict, List + from setuptools import find_packages, setup HERE = os.path.abspath(os.path.dirname(__file__)) -def load_readme(): +def load_readme() -> str: with io.open(os.path.join(HERE, "README.rst"), "rt", encoding="utf8") as f: - return f.read() + readme = f.read() + # Replace img src for publication on pypi + return readme.replace( + "./docs/img/", 
"https://github.com/overhangio/tutor/raw/master/docs/img/" + ) -def load_about(): - about = {} +def load_about() -> Dict[str, str]: + about: Dict[str, str] = {} with io.open( os.path.join(HERE, "tutor", "__about__.py"), "rt", encoding="utf-8" ) as f: @@ -19,39 +25,43 @@ def load_about(): return about -def load_requirements(): +def load_requirements(filename: str) -> List[str]: with io.open( - os.path.join(HERE, "requirements", "base.in"), "rt", encoding="utf-8" + os.path.join(HERE, "requirements", filename), "rt", encoding="utf-8" ) as f: return [line.strip() for line in f if is_requirement(line)] -def is_requirement(line): +def is_requirement(line: str) -> bool: return not (line.strip() == "" or line.startswith("#")) ABOUT = load_about() setup( - name="tutor-openedx", - version=ABOUT["__version__"], - url="https://docs.tutor.overhang.io/", + name="tutor", + version=ABOUT["__package_version__"], + url="https://docs.tutor.edly.io/", project_urls={ - "Documentation": "https://docs.tutor.overhang.io/", + "Documentation": "https://docs.tutor.edly.io/", "Code": "https://github.com/overhangio/tutor", "Issue tracker": "https://github.com/overhangio/tutor/issues", - "Community": "https://discuss.overhang.io", + "Community": "https://discuss.openedx.org/tag/tutor", }, license="AGPLv3", - author="Overhang.io", - author_email="contact@overhang.io", - description="The docker-based Open edX distribution designed for peace of mind", + author="Edly", + author_email="hello@edly.io", + description="The Docker-based Open edX distribution designed for peace of mind", long_description=load_readme(), long_description_content_type="text/x-rst", packages=find_packages(exclude=["tests*"]), include_package_data=True, - python_requires=">=3.5", - install_requires=load_requirements(), + python_requires=">=3.9", + install_requires=load_requirements("base.in"), + extras_require={ + "dev": load_requirements("dev.txt"), + "full": load_requirements("plugins.txt"), + }, 
entry_points={"console_scripts": ["tutor=tutor.commands.cli:main"]}, classifiers=[ "Development Status :: 5 - Production/Stable", @@ -59,11 +69,10 @@ def is_requirement(line): "License :: OSI Approved :: GNU Affero General Public License v3", "Operating System :: OS Independent", "Programming Language :: Python", - "Programming Language :: Python :: 3.5", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", ], + test_suite="tests", ) diff --git a/tests/commands/__init__.py b/tests/commands/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/commands/base.py b/tests/commands/base.py new file mode 100644 index 0000000000..868e00d75b --- /dev/null +++ b/tests/commands/base.py @@ -0,0 +1,40 @@ +from __future__ import annotations + +import click.testing + +from tests.helpers import TestContext, temporary_root +from tutor.commands.cli import cli + + +class TestCommandMixin: + """ + Run CLI tests in an isolated test root. + """ + + @staticmethod + def invoke(args: list[str]) -> click.testing.Result: + with temporary_root() as root: + return TestCommandMixin.invoke_in_root(root, args) + + @staticmethod + def invoke_in_root( + root: str, args: list[str], catch_exceptions: bool = True + ) -> click.testing.Result: + """ + Use this method for commands that all need to run in the same root: + + with temporary_root() as root: + result1 = self.invoke_in_root(root, ...) + result2 = self.invoke_in_root(root, ...) 
+ """ + runner = click.testing.CliRunner( + env={ + "TUTOR_ROOT": root, + "TUTOR_IGNORE_ENTRYPOINT_PLUGINS": "1", + "TUTOR_IGNORE_DICT_PLUGINS": "1", + }, + mix_stderr=False, + ) + return runner.invoke( + cli, args, obj=TestContext(root), catch_exceptions=catch_exceptions + ) diff --git a/tests/commands/test_cli.py b/tests/commands/test_cli.py new file mode 100644 index 0000000000..65e6525455 --- /dev/null +++ b/tests/commands/test_cli.py @@ -0,0 +1,23 @@ +import unittest + +from tutor.__about__ import __version__ + +from .base import TestCommandMixin + + +class CliTests(unittest.TestCase, TestCommandMixin): + def test_help(self) -> None: + result = self.invoke(["help"]) + self.assertEqual(0, result.exit_code) + self.assertIsNone(result.exception) + + def test_cli_help(self) -> None: + result = self.invoke(["--help"]) + self.assertEqual(0, result.exit_code) + self.assertIsNone(result.exception) + + def test_cli_version(self) -> None: + result = self.invoke(["--version"]) + self.assertEqual(0, result.exit_code) + self.assertIsNone(result.exception) + self.assertRegex(result.output, rf"cli, version {__version__}\n") diff --git a/tests/commands/test_config.py b/tests/commands/test_config.py new file mode 100644 index 0000000000..4a1ab796a7 --- /dev/null +++ b/tests/commands/test_config.py @@ -0,0 +1,118 @@ +import unittest + +from tests.helpers import temporary_root +from tutor import config as tutor_config + +from .base import TestCommandMixin + + +class ConfigTests(unittest.TestCase, TestCommandMixin): + def test_config_help(self) -> None: + result = self.invoke(["config", "--help"]) + self.assertEqual(0, result.exit_code) + self.assertFalse(result.exception) + + def test_config_save(self) -> None: + result = self.invoke(["config", "save"]) + self.assertFalse(result.exception) + self.assertEqual(0, result.exit_code) + + def test_config_save_cleanup_env_dir(self) -> None: + result = self.invoke(["config", "save", "-c"]) + self.assertFalse(result.exception) + 
self.assertEqual(0, result.exit_code) + + def test_config_save_interactive(self) -> None: + result = self.invoke(["config", "save", "-i"]) + self.assertFalse(result.exception) + self.assertEqual(0, result.exit_code) + + def test_config_save_skip_update(self) -> None: + result = self.invoke(["config", "save", "-e"]) + self.assertFalse(result.exception) + self.assertEqual(0, result.exit_code) + + def test_config_save_set_value(self) -> None: + with temporary_root() as root: + result1 = self.invoke_in_root(root, ["config", "save", "-s", "key=value"]) + result2 = self.invoke_in_root(root, ["config", "printvalue", "key"]) + self.assertFalse(result1.exception) + self.assertEqual(0, result1.exit_code) + self.assertIn("value", result2.output) + + def test_config_save_unset_value(self) -> None: + with temporary_root() as root: + result1 = self.invoke_in_root(root, ["config", "save", "-U", "key"]) + result2 = self.invoke_in_root(root, ["config", "printvalue", "key"]) + self.assertFalse(result1.exception) + self.assertEqual(0, result1.exit_code) + self.assertEqual(1, result2.exit_code) + + def test_config_printroot(self) -> None: + with temporary_root() as root: + result = self.invoke_in_root(root, ["config", "printroot"]) + self.assertFalse(result.exception) + self.assertEqual(0, result.exit_code) + self.assertIn(root, result.output) + + def test_config_printvalue(self) -> None: + with temporary_root() as root: + self.invoke_in_root(root, ["config", "save"]) + result = self.invoke_in_root( + root, ["config", "printvalue", "MYSQL_ROOT_PASSWORD"] + ) + self.assertFalse(result.exception) + self.assertEqual(0, result.exit_code) + self.assertTrue(result.output) + + def test_config_append(self) -> None: + with temporary_root() as root: + self.invoke_in_root( + root, ["config", "save", "--append=TEST=value"], catch_exceptions=False + ) + config1 = tutor_config.load(root) + self.invoke_in_root( + root, ["config", "save", "--append=TEST=value"], catch_exceptions=False + ) + config2 = 
tutor_config.load(root) + self.invoke_in_root( + root, ["config", "save", "--remove=TEST=value"], catch_exceptions=False + ) + config3 = tutor_config.load(root) + # Value is appended + self.assertEqual(["value"], config1["TEST"]) + # Value is not appended a second time + self.assertEqual(["value"], config2["TEST"]) + # Value is removed + self.assertEqual([], config3["TEST"]) + + def test_config_append_with_existing_default(self) -> None: + with temporary_root() as root: + self.invoke_in_root( + root, + [ + "config", + "save", + "--append=OPENEDX_EXTRA_PIP_REQUIREMENTS=my-package==1.0.0", + ], + catch_exceptions=False, + ) + config = tutor_config.load(root) + assert isinstance(config["OPENEDX_EXTRA_PIP_REQUIREMENTS"], list) + self.assertEqual( + ["my-package==1.0.0"], config["OPENEDX_EXTRA_PIP_REQUIREMENTS"] + ) + + +class PatchesTests(unittest.TestCase, TestCommandMixin): + def test_config_patches_list(self) -> None: + with temporary_root() as root: + self.invoke_in_root(root, ["config", "save"]) + result = self.invoke_in_root(root, ["config", "patches", "list"]) + self.assertFalse(result.exception) + self.assertEqual(0, result.exit_code) + + def test_config_patches_show(self) -> None: + result = self.invoke(["config", "patches", "show", "mypatch"]) + self.assertEqual(0, result.exit_code) + self.assertEqual("", result.stdout) diff --git a/tests/commands/test_context.py b/tests/commands/test_context.py new file mode 100644 index 0000000000..cf2a1f0172 --- /dev/null +++ b/tests/commands/test_context.py @@ -0,0 +1,18 @@ +import os +import unittest + +from tests.helpers import TestContext, TestTaskRunner, temporary_root +from tutor import config as tutor_config + + +class TestContextTests(unittest.TestCase): + def test_create_testcontext(self) -> None: + with temporary_root() as root: + context = TestContext(root) + config = tutor_config.load_full(root) + runner = context.job_runner(config) + self.assertTrue(os.path.exists(context.root)) + self.assertFalse( + 
os.path.exists(os.path.join(context.root, tutor_config.CONFIG_FILENAME)) + ) + self.assertTrue(isinstance(runner, TestTaskRunner)) diff --git a/tests/commands/test_dev.py b/tests/commands/test_dev.py new file mode 100644 index 0000000000..0b962d723a --- /dev/null +++ b/tests/commands/test_dev.py @@ -0,0 +1,10 @@ +import unittest + +from .base import TestCommandMixin + + +class DevTests(unittest.TestCase, TestCommandMixin): + def test_dev_help(self) -> None: + result = self.invoke(["dev", "--help"]) + self.assertEqual(0, result.exit_code) + self.assertIsNone(result.exception) diff --git a/tests/commands/test_images.py b/tests/commands/test_images.py new file mode 100644 index 0000000000..22a9072c5e --- /dev/null +++ b/tests/commands/test_images.py @@ -0,0 +1,157 @@ +from unittest.mock import Mock, patch + +from tests.helpers import PluginsTestCase, temporary_root +from tutor import images, plugins, utils +from tutor.__about__ import __version__ +from tutor.commands.images import ImageNotFoundError + +from .base import TestCommandMixin + + +class ImagesTests(PluginsTestCase, TestCommandMixin): + def test_images_help(self) -> None: + result = self.invoke(["images", "--help"]) + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code) + + def test_images_pull_image(self) -> None: + result = self.invoke(["images", "pull"]) + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code) + + def test_images_pull_plugin_invalid_plugin_should_throw_error(self) -> None: + result = self.invoke(["images", "pull", "plugin"]) + self.assertEqual(1, result.exit_code) + self.assertEqual(ImageNotFoundError, type(result.exception)) + + @patch.object(images, "pull", return_value=None) + def test_images_pull_plugin(self, image_pull: Mock) -> None: + plugins.v0.DictPlugin( + { + "name": "plugin1", + "hooks": { + "remote-image": { + "service1": "service1:1.0.0", + "service2": "service2:2.0.0", + } + }, + } + ) + plugins.load("plugin1") + result = 
self.invoke(["images", "pull", "service1"]) + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code) + image_pull.assert_called_once_with("service1:1.0.0") + + @patch.object(images, "pull", return_value=None) + def test_images_pull_all_vendor_images(self, image_pull: Mock) -> None: + result = self.invoke(["images", "pull", "mysql"]) + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code) + # Note: we should update this tag whenever the mysql image is updated + image_pull.assert_called_once_with("docker.io/mysql:8.4.0") + + def test_images_printtag_image(self) -> None: + result = self.invoke(["images", "printtag", "openedx"]) + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code) + self.assertRegex( + result.output, rf"docker.io/overhangio/openedx:{__version__}\n" + ) + + def test_images_printtag_plugin(self) -> None: + plugins.v0.DictPlugin( + { + "name": "plugin1", + "hooks": { + "build-image": { + "service1": "service1:1.0.0", + "service2": "service2:2.0.0", + } + }, + } + ) + plugins.load("plugin1") + result = self.invoke(["images", "printtag", "service1"]) + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code, result) + self.assertEqual(result.output, "service1:1.0.0\n") + + @patch.object(images, "build", return_value=None) + def test_images_build_plugin(self, mock_image_build: Mock) -> None: + plugins.v0.DictPlugin( + { + "name": "plugin1", + "hooks": { + "build-image": { + "service1": "service1:1.0.0", + "service2": "service2:2.0.0", + } + }, + } + ) + plugins.load("plugin1") + with temporary_root() as root: + self.invoke_in_root(root, ["config", "save"]) + result = self.invoke_in_root(root, ["images", "build", "service1"]) + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code) + mock_image_build.assert_called() + self.assertIn("service1:1.0.0", mock_image_build.call_args[0]) + + @patch.object(images, "build", return_value=None) + def 
test_images_build_plugin_with_args(self, image_build: Mock) -> None: + plugins.v0.DictPlugin( + { + "name": "plugin1", + "hooks": { + "build-image": { + "service1": "service1:1.0.0", + "service2": "service2:2.0.0", + } + }, + } + ) + plugins.load("plugin1") + build_args = [ + "images", + "build", + "--no-cache", + "-a", + "myarg=value", + "--add-host", + "host", + "--target", + "target", + "-d", + "docker_args", + "service1", + ] + with temporary_root() as root: + self.invoke_in_root(root, ["config", "save"]) + result = self.invoke_in_root(root, build_args) + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code) + image_build.assert_called() + self.assertIn("service1:1.0.0", image_build.call_args[0]) + self.assertEqual( + [ + "service1:1.0.0", + "--no-cache", + "--build-arg", + "myarg=value", + "--add-host", + "host", + "--target", + "target", + "--output=type=docker", + "docker_args", + "--cache-from=type=registry,ref=service1:1.0.0-cache", + ], + list(image_build.call_args[0][1:]), + ) + + def test_images_push(self) -> None: + result = self.invoke(["images", "push"]) + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code) diff --git a/tests/commands/test_jobs.py b/tests/commands/test_jobs.py new file mode 100644 index 0000000000..2ab388773b --- /dev/null +++ b/tests/commands/test_jobs.py @@ -0,0 +1,92 @@ +from unittest.mock import patch + +from tests.helpers import PluginsTestCase, temporary_root +from tutor.commands import jobs + +from .base import TestCommandMixin + + +class JobsTests(PluginsTestCase, TestCommandMixin): + def test_initialise(self) -> None: + with temporary_root() as root: + self.invoke_in_root(root, ["config", "save"]) + result = self.invoke_in_root(root, ["local", "do", "init"]) + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code) + self.assertIn("All services initialised.", result.output) + + def test_create_user_template_without_staff(self) -> None: + command = 
jobs.create_user_template( + "superuser", False, "username", "email", "p4ssw0rd" + ) + self.assertNotIn("--staff", command) + self.assertIn("set_password", command) + + def test_create_user_template_with_staff(self) -> None: + command = jobs.create_user_template( + "superuser", True, "username", "email", "p4ssw0rd" + ) + self.assertIn("--staff", command) + + def test_import_demo_course(self) -> None: + with temporary_root() as root: + self.invoke_in_root(root, ["config", "save"]) + with patch("tutor.utils.docker_compose") as mock_docker_compose: + result = self.invoke_in_root(root, ["local", "do", "importdemocourse"]) + dc_args, _dc_kwargs = mock_docker_compose.call_args + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code) + self.assertIn("cms-job", dc_args) + self.assertIn( + "git clone https://github.com/openedx/openedx-demo-course", dc_args[-1] + ) + + def test_import_demo_libraries(self) -> None: + with temporary_root() as root: + self.invoke_in_root(root, ["config", "save"]) + with patch("tutor.utils.docker_compose") as mock_docker_compose: + result = self.invoke_in_root( + root, + [ + "local", + "do", + "importdemolibraries", + "admin", + ], + ) + dc_args, _dc_kwargs = mock_docker_compose.call_args + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code) + self.assertIn("cms-job", dc_args) + self.assertIn( + "git clone https://github.com/openedx/openedx-demo-course", dc_args[-1] + ) + self.assertIn( + "./manage.py cms import_content_library /tmp/library.tar.gz admin", + dc_args[-1], + ) + + def test_set_theme(self) -> None: + with temporary_root() as root: + self.invoke_in_root(root, ["config", "save"]) + with patch("tutor.utils.docker_compose") as mock_docker_compose: + result = self.invoke_in_root( + root, + [ + "local", + "do", + "settheme", + "--domain", + "domain1", + "--domain", + "domain2", + "beautiful", + ], + ) + dc_args, _dc_kwargs = mock_docker_compose.call_args + + self.assertIsNone(result.exception) 
+ self.assertEqual(0, result.exit_code) + self.assertIn("lms-job", dc_args) + self.assertIn("assign_theme('beautiful', 'domain1')", dc_args[-1]) + self.assertIn("assign_theme('beautiful', 'domain2')", dc_args[-1]) diff --git a/tests/commands/test_k8s.py b/tests/commands/test_k8s.py new file mode 100644 index 0000000000..f8513fd49e --- /dev/null +++ b/tests/commands/test_k8s.py @@ -0,0 +1,10 @@ +import unittest + +from .base import TestCommandMixin + + +class K8sTests(unittest.TestCase, TestCommandMixin): + def test_k8s_help(self) -> None: + result = self.invoke(["k8s", "--help"]) + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code) diff --git a/tests/commands/test_local.py b/tests/commands/test_local.py new file mode 100644 index 0000000000..06e89817a9 --- /dev/null +++ b/tests/commands/test_local.py @@ -0,0 +1,71 @@ +import os +import pathlib +import tempfile +import unittest +from unittest.mock import patch + +from tests.helpers import temporary_root + +from .base import TestCommandMixin + + +class LocalTests(unittest.TestCase, TestCommandMixin): + def test_local_help(self) -> None: + result = self.invoke(["local", "--help"]) + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code) + + def test_local_launch_help(self) -> None: + result = self.invoke(["local", "launch", "--help"]) + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code) + + def test_local_upgrade_help(self) -> None: + result = self.invoke(["local", "upgrade", "--help"]) + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code) + + def test_copyfrom(self) -> None: + with temporary_root() as root: + with tempfile.TemporaryDirectory() as directory: + # resolve actual path, just like click.Path does it. 
+ directory = os.fsdecode(pathlib.Path(directory).resolve()) + with patch("tutor.utils.docker_compose") as mock_docker_compose: + self.invoke_in_root(root, ["config", "save"]) + + # Copy to existing directory + result = self.invoke_in_root( + root, ["local", "copyfrom", "lms", "/openedx/venv", directory] + ) + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code) + self.assertIn( + f"--volume={directory}:/tmp/mount", + mock_docker_compose.call_args[0], + ) + self.assertIn( + "cp --recursive --preserve /openedx/venv /tmp/mount", + mock_docker_compose.call_args[0], + ) + + # Copy to non-existing directory + result = self.invoke_in_root( + root, + [ + "local", + "copyfrom", + "lms", + "/openedx/venv", + os.path.join(directory, "venv2"), + ], + ) + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code) + self.assertIn( + f"--volume={directory}:/tmp/mount", + mock_docker_compose.call_args[0], + ) + self.assertIn( + "cp --recursive --preserve /openedx/venv /tmp/mount/venv2", + mock_docker_compose.call_args[0], + ) diff --git a/tests/commands/test_plugins.py b/tests/commands/test_plugins.py new file mode 100644 index 0000000000..bcec79b8fe --- /dev/null +++ b/tests/commands/test_plugins.py @@ -0,0 +1,57 @@ +import unittest +from unittest.mock import Mock, patch + +from tutor import plugins +from tutor.commands import plugins as plugins_commands + +from .base import TestCommandMixin + + +class PluginsTests(unittest.TestCase, TestCommandMixin): + def test_plugins_help(self) -> None: + result = self.invoke(["plugins", "--help"]) + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code) + + def test_plugins_printroot(self) -> None: + result = self.invoke(["plugins", "printroot"]) + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code) + self.assertTrue(result.output) + + @patch.object(plugins, "iter_info", return_value=[]) + def test_plugins_list(self, _iter_info: Mock) -> None: + result = 
self.invoke(["plugins", "list"]) + self.assertIsNone(result.exception) + self.assertEqual(0, result.exit_code) + self.assertEqual("NAME\tSTATUS\tVERSION\n", result.output) + _iter_info.assert_called() + + def test_plugins_install_not_found_plugin(self) -> None: + result = self.invoke(["plugins", "install", "notFound"]) + self.assertEqual(1, result.exit_code) + self.assertTrue(result.exception) + + def test_plugins_enable_not_installed_plugin(self) -> None: + result = self.invoke(["plugins", "enable", "notFound"]) + self.assertEqual(1, result.exit_code) + self.assertTrue(result.exception) + + def test_plugins_disable_not_installed_plugin(self) -> None: + result = self.invoke(["plugins", "disable", "notFound"]) + self.assertEqual(0, result.exit_code) + self.assertFalse(result.exception) + + @patch.object( + plugins, + "iter_info", + return_value=[("aacd", None), ("abcd", None), ("abef", None), ("alba", None)], + ) + def test_plugins_name_auto_complete(self, _iter_info: Mock) -> None: + self.assertEqual([], plugins_commands.PluginName().get_names("z")) + self.assertEqual( + ["abcd", "abef"], plugins_commands.PluginName().get_names("ab") + ) + self.assertEqual( + ["all", "alba"], plugins_commands.PluginName(allow_all=True).get_names("al") + ) diff --git a/tests/core/__init__.py b/tests/core/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/hooks/__init__.py b/tests/core/hooks/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/hooks/test_actions.py b/tests/core/hooks/test_actions.py new file mode 100644 index 0000000000..e0c884b9a3 --- /dev/null +++ b/tests/core/hooks/test_actions.py @@ -0,0 +1,59 @@ +import typing as t +import unittest + +from tutor.core.hooks import actions, contexts + + +class PluginActionsTests(unittest.TestCase): + def setUp(self) -> None: + self.side_effect_int = 0 + + def run(self, result: t.Any = None) -> t.Any: + with contexts.enter("tests"): + return 
super().run(result=result) + + def test_do(self) -> None: + action: actions.Action[int] = actions.Action() + + @action.add() + def _test_action_1(increment: int) -> None: + self.side_effect_int += increment + + @action.add() + def _test_action_2(increment: int) -> None: + self.side_effect_int += increment * 2 + + action.do(1) + self.assertEqual(3, self.side_effect_int) + + def test_priority(self) -> None: + action: actions.Action[[]] = actions.Action() + + @action.add(priority=2) + def _test_action_1() -> None: + self.side_effect_int += 4 + + @action.add(priority=1) + def _test_action_2() -> None: + self.side_effect_int = self.side_effect_int // 2 + + # Action 2 must be performed before action 1 + self.side_effect_int = 4 + action.do() + self.assertEqual(6, self.side_effect_int) + + def test_equal_priority(self) -> None: + action: actions.Action[[]] = actions.Action() + + @action.add(priority=2) + def _test_action_1() -> None: + self.side_effect_int += 4 + + @action.add(priority=2) + def _test_action_2() -> None: + self.side_effect_int = self.side_effect_int // 2 + + # Action 2 must be performed after action 1 + self.side_effect_int = 4 + action.do() + self.assertEqual(4, self.side_effect_int) diff --git a/tests/core/hooks/test_filters.py b/tests/core/hooks/test_filters.py new file mode 100644 index 0000000000..3443e9158d --- /dev/null +++ b/tests/core/hooks/test_filters.py @@ -0,0 +1,61 @@ +from __future__ import annotations + +import typing as t +import unittest + +from tutor.core.hooks import contexts, filters + + +class PluginFiltersTests(unittest.TestCase): + def run(self, result: t.Any = None) -> t.Any: + with contexts.enter("tests"): + return super().run(result=result) + + def test_add(self) -> None: + filtre: filters.Filter[int, []] = filters.Filter() + + @filtre.add() + def filter1(value: int) -> int: + return value + 1 + + value = filtre.apply(0) + self.assertEqual(1, value) + + def test_add_items(self) -> None: + filtre: filters.Filter[list[int], []] = 
filters.Filter() + + @filtre.add() + def filter1(sheeps: list[int]) -> list[int]: + return sheeps + [0] + + filtre.add_item(1) + filtre.add_item(2) + filtre.add_items([3, 4]) + + sheeps: list[int] = filtre.apply([]) + self.assertEqual([0, 1, 2, 3, 4], sheeps) + + def test_filter_callbacks(self) -> None: + callback = filters.FilterCallback(lambda _: 1) + self.assertTrue(callback.is_in_context(None)) + self.assertFalse(callback.is_in_context("customcontext")) + self.assertEqual(1, callback.apply(0)) + + def test_filter_context(self) -> None: + filtre: filters.Filter[list[int], []] = filters.Filter() + with contexts.enter("testcontext"): + filtre.add_item(1) + filtre.add_item(2) + + self.assertEqual([1, 2], filtre.apply([])) + self.assertEqual([1], filtre.apply_from_context("testcontext", [])) + + def test_clear_context(self) -> None: + filtre: filters.Filter[list[int], []] = filters.Filter() + with contexts.enter("testcontext"): + filtre.add_item(1) + filtre.add_item(2) + + self.assertEqual([1, 2], filtre.apply([])) + filtre.clear(context="testcontext") + self.assertEqual([2], filtre.apply([])) diff --git a/tests/helpers.py b/tests/helpers.py new file mode 100644 index 0000000000..3ec9029872 --- /dev/null +++ b/tests/helpers.py @@ -0,0 +1,78 @@ +import os +import tempfile +import typing as t +import unittest +import unittest.result + +from tutor import hooks +from tutor.commands.context import BaseTaskContext +from tutor.core.hooks.contexts import enter as enter_context +from tutor.tasks import BaseTaskRunner +from tutor.types import Config + + +class TestTaskRunner(BaseTaskRunner): + """ + Mock job runner for unit testing. + + This runner does nothing except print the service name and command, + separated by dashes. 
+ """ + + def run_task(self, service: str, command: str) -> int: + print(os.linesep.join([f"Service: {service}", "-----", command, "----- "])) + return 0 + + +def temporary_root() -> "tempfile.TemporaryDirectory[str]": + """ + Context manager to handle temporary test root. + + This function can be used as follows: + + with temporary_root() as root: + config = tutor_config.load_full(root) + ... + """ + return tempfile.TemporaryDirectory(prefix="tutor-test-root-") + + +class TestContext(BaseTaskContext): + """ + Click context that will use only test job runners. + """ + + def job_runner(self, config: Config) -> TestTaskRunner: + return TestTaskRunner(self.root, config) + + +class PluginsTestCase(unittest.TestCase): + """ + This test case class clears the hooks created during tests. It also makes sure that + we don't accidentally load entrypoint/dict plugins from the user. + """ + + def setUp(self) -> None: + self.clean() + self.addCleanup(self.clean) + super().setUp() + + def clean(self) -> None: + # We clear hooks created in some contexts, such that user plugins are never loaded. + for context in [ + hooks.Contexts.PLUGINS.name, + hooks.Contexts.PLUGINS_V0_ENTRYPOINT.name, + hooks.Contexts.PLUGINS_V0_YAML.name, + "unittests", + ]: + hooks.clear_all(context=context) + + def run( + self, result: t.Optional[unittest.result.TestResult] = None + ) -> t.Optional[unittest.result.TestResult]: + """ + Run all actions and filters with a test context, such that they can be cleared + from one run to the next. 
+ """ + with enter_context("unittests"): + return super().run(result=result) diff --git a/tests/openedx-lms-common-settings b/tests/openedx-lms-common-settings deleted file mode 100644 index c1253bb04a..0000000000 --- a/tests/openedx-lms-common-settings +++ /dev/null @@ -1 +0,0 @@ -ORA2_FILEUPLOAD_BACKEND = "s3" diff --git a/tests/test_bindmount.py b/tests/test_bindmount.py new file mode 100644 index 0000000000..cd73512892 --- /dev/null +++ b/tests/test_bindmount.py @@ -0,0 +1,58 @@ +from __future__ import annotations + +import unittest + +from tutor import bindmount + + +class BindmountTests(unittest.TestCase): + def test_parse_explicit(self) -> None: + self.assertEqual( + [("lms", "/path/to/edx-platform", "/openedx/edx-platform")], + bindmount.parse_explicit_mount( + "lms:/path/to/edx-platform:/openedx/edx-platform" + ), + ) + self.assertEqual( + [ + ("lms", "/path/to/edx-platform", "/openedx/edx-platform"), + ("cms", "/path/to/edx-platform", "/openedx/edx-platform"), + ], + bindmount.parse_explicit_mount( + "lms,cms:/path/to/edx-platform:/openedx/edx-platform" + ), + ) + self.assertEqual( + [ + ("lms", "/path/to/edx-platform", "/openedx/edx-platform"), + ("cms", "/path/to/edx-platform", "/openedx/edx-platform"), + ], + bindmount.parse_explicit_mount( + "lms, cms:/path/to/edx-platform:/openedx/edx-platform" + ), + ) + self.assertEqual( + [ + ("lms", "/path/to/edx-platform", "/openedx/edx-platform"), + ("lms-worker", "/path/to/edx-platform", "/openedx/edx-platform"), + ], + bindmount.parse_explicit_mount( + "lms,lms-worker:/path/to/edx-platform:/openedx/edx-platform" + ), + ) + self.assertEqual( + [("lms", "/path/to/edx-platform", "/openedx/edx-platform")], + bindmount.parse_explicit_mount( + "lms,:/path/to/edx-platform:/openedx/edx-platform" + ), + ) + + def test_parse_implicit(self) -> None: + # Import module to make sure filter is created + # pylint: disable=import-outside-toplevel,unused-import + import tutor.commands.compose + + self.assertEqual( + 
[("openedx", "/path/to/edx-platform", "/openedx/edx-platform")], + bindmount.parse_implicit_mount("/path/to/edx-platform"), + ) diff --git a/tests/test_config.py b/tests/test_config.py index bdd3f41544..ed1971bcb7 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -1,80 +1,106 @@ +import json +import os import unittest -import unittest.mock -import tempfile +from unittest.mock import Mock, patch +import click + +from tests.helpers import PluginsTestCase, temporary_root from tutor import config as tutor_config -from tutor import env -from tutor import interactive +from tutor import fmt, hooks, interactive, utils +from tutor.types import Config, get_typed class ConfigTests(unittest.TestCase): - def setUp(self): - # This is necessary to avoid cached mocks - env.Renderer.reset() - - def test_version(self): - defaults = tutor_config.load_defaults() + def test_version(self) -> None: + defaults = tutor_config.get_defaults() self.assertNotIn("TUTOR_VERSION", defaults) - def test_merge(self): - config1 = {"x": "y"} - config2 = {"x": "z"} + def test_merge(self) -> None: + config1: Config = {"x": "y"} + config2: Config = {"x": "z"} tutor_config.merge(config1, config2) self.assertEqual({"x": "y"}, config1) - def test_merge_render(self): - config = {} - defaults = tutor_config.load_defaults() - with unittest.mock.patch.object( - tutor_config.utils, "random_string", return_value="abcd" - ): - tutor_config.merge(config, defaults) + def test_merge_not_render(self) -> None: + config: Config = {} + base = tutor_config.get_base() + with patch.object(utils, "random_string", return_value="abcd"): + tutor_config.merge(config, base) - self.assertEqual("abcd", config["MYSQL_ROOT_PASSWORD"]) + # Check that merge does not perform a rendering + self.assertNotEqual("abcd", config["MYSQL_ROOT_PASSWORD"]) - @unittest.mock.patch.object(tutor_config.fmt, "echo") - def test_update_twice(self, _): - with tempfile.TemporaryDirectory() as root: - tutor_config.update(root) - config1 = 
tutor_config.load_user(root) - - tutor_config.update(root) - config2 = tutor_config.load_user(root) + @patch.object(fmt, "echo") + def test_update_twice_should_return_same_config(self, _: Mock) -> None: + with temporary_root() as root: + config1 = tutor_config.load_minimal(root) + tutor_config.save_config_file(root, config1) + config2 = tutor_config.load_minimal(root) self.assertEqual(config1, config2) - @unittest.mock.patch.object(tutor_config.fmt, "echo") - def test_removed_entry_is_added_on_save(self, _): - with tempfile.TemporaryDirectory() as root: - with unittest.mock.patch.object( - tutor_config.utils, "random_string" - ) as mock_random_string: - mock_random_string.return_value = "abcd" - config1, _ = tutor_config.load_all(root) - password1 = config1["MYSQL_ROOT_PASSWORD"] - - config1.pop("MYSQL_ROOT_PASSWORD") - tutor_config.save_config_file(root, config1) + def test_interactive(self) -> None: + def mock_prompt(*_args: None, **kwargs: str) -> str: + return kwargs["default"] - mock_random_string.return_value = "efgh" - config2, _ = tutor_config.load_all(root) - password2 = config2["MYSQL_ROOT_PASSWORD"] - - self.assertEqual("abcd", password1) - self.assertEqual("efgh", password2) - - def test_interactive_load_all(self): - with tempfile.TemporaryDirectory() as rootdir: - config, defaults = interactive.load_all(rootdir, interactive=False) + with temporary_root() as rootdir: + with patch.object(click, "prompt", new=mock_prompt): + with patch.object(click, "confirm", new=mock_prompt): + config = tutor_config.load_minimal(rootdir) + interactive.ask_questions(config) self.assertIn("MYSQL_ROOT_PASSWORD", config) - self.assertEqual(8, len(config["MYSQL_ROOT_PASSWORD"])) - self.assertNotIn("LMS_HOST", config) - self.assertEqual("www.myopenedx.com", defaults["LMS_HOST"]) - self.assertEqual("studio.{{ LMS_HOST }}", defaults["CMS_HOST"]) - - def test_is_service_activated(self): - config = {"RUN_SERVICE1": True, "RUN_SERVICE2": False} + self.assertEqual(8, 
len(get_typed(config, "MYSQL_ROOT_PASSWORD", str))) + self.assertEqual("www.myopenedx.com", config["LMS_HOST"]) + self.assertEqual("studio.www.myopenedx.com", config["CMS_HOST"]) + def test_is_service_activated(self) -> None: + config: Config = {"RUN_SERVICE1": True, "RUN_SERVICE2": False} self.assertTrue(tutor_config.is_service_activated(config, "service1")) self.assertFalse(tutor_config.is_service_activated(config, "service2")) + + @patch.object(fmt, "echo") + def test_json_config_is_overwritten_by_yaml(self, _: Mock) -> None: + with temporary_root() as root: + # Create config from scratch + config_yml_path = os.path.join(root, tutor_config.CONFIG_FILENAME) + config_json_path = os.path.join( + root, tutor_config.CONFIG_FILENAME.replace("yml", "json") + ) + config = tutor_config.load_full(root) + + # Save config to json + with open(config_json_path, "w", encoding="utf-8") as f: + json.dump(config, f, ensure_ascii=False, indent=4) + self.assertFalse(os.path.exists(config_yml_path)) + self.assertTrue(os.path.exists(config_json_path)) + + # Reload and compare + current = tutor_config.load_full(root) + self.assertTrue(os.path.exists(config_yml_path)) + self.assertFalse(os.path.exists(config_json_path)) + self.assertEqual(config, current) + + +class ConfigPluginTestCase(PluginsTestCase): + @patch.object(fmt, "echo") + def test_removed_entry_is_added_on_save(self, _: Mock) -> None: + with temporary_root() as root: + mock_random_string = Mock() + + hooks.Filters.ENV_TEMPLATE_FILTERS.add_item( + ("random_string", mock_random_string), + ) + mock_random_string.return_value = "abcd" + config1 = tutor_config.load_full(root) + password1 = config1.pop("MYSQL_ROOT_PASSWORD") + + tutor_config.save_config_file(root, config1) + + mock_random_string.return_value = "efgh" + config2 = tutor_config.load_full(root) + password2 = config2["MYSQL_ROOT_PASSWORD"] + + self.assertEqual("abcd", password1) + self.assertEqual("efgh", password2) diff --git a/tests/test_env.py b/tests/test_env.py 
index baa424d931..e2ba7bbe1d 100644 --- a/tests/test_env.py +++ b/tests/test_env.py @@ -1,50 +1,72 @@ import os import tempfile import unittest -import unittest.mock +from io import StringIO +from unittest.mock import Mock, patch +from tests.helpers import PluginsTestCase, temporary_root from tutor import config as tutor_config -from tutor import env -from tutor import fmt -from tutor import exceptions +from tutor import env, exceptions, fmt, plugins +from tutor.__about__ import __version__ +from tutor.plugins.v0 import DictPlugin +from tutor.types import Config -class EnvTests(unittest.TestCase): - def setUp(self): - env.Renderer.reset() - - def test_walk_templates(self): - renderer = env.Renderer({}, [env.TEMPLATES_ROOT]) +class EnvTests(PluginsTestCase): + def test_walk_templates(self) -> None: + renderer = env.Renderer() templates = list(renderer.walk_templates("local")) self.assertIn("local/docker-compose.yml", templates) - def test_walk_templates_partials_are_ignored(self): + def test_walk_templates_partials_are_ignored(self) -> None: template_name = "apps/openedx/settings/partials/common_all.py" - renderer = env.Renderer({}, [env.TEMPLATES_ROOT], ignore_folders=["partials"]) + renderer = env.Renderer() templates = list(renderer.walk_templates("apps")) self.assertIn(template_name, renderer.environment.loader.list_templates()) self.assertNotIn(template_name, templates) - def test_is_binary_file(self): + def test_files_are_rendered(self) -> None: + self.assertTrue(env.is_rendered("some/file")) + self.assertFalse(env.is_rendered(".git")) + self.assertFalse(env.is_rendered(".git/subdir")) + self.assertFalse(env.is_rendered("directory/.git")) + self.assertFalse(env.is_rendered("directory/.git/somefile")) + self.assertFalse(env.is_rendered("directory/somefile.pyc")) + self.assertTrue(env.is_rendered("directory/somedir.pyc/somefile")) + self.assertFalse(env.is_rendered("directory/__pycache__")) + self.assertFalse(env.is_rendered("directory/__pycache__/somefile")) + 
self.assertFalse(env.is_rendered("directory/partials/extra.scss")) + self.assertFalse(env.is_rendered("directory/partials")) + self.assertFalse(env.is_rendered("partials/somefile")) + + def test_is_binary_file(self) -> None: self.assertTrue(env.is_binary_file("/home/somefile.ico")) - def test_find_os_path(self): - renderer = env.Renderer({}, [env.TEMPLATES_ROOT]) - path = renderer.find_os_path("local/docker-compose.yml") + def test_find_os_path(self) -> None: + environment = env.JinjaEnvironment() + path = environment.find_os_path("local/docker-compose.yml") self.assertTrue(os.path.exists(path)) - def test_pathjoin(self): - self.assertEqual( - "/tmp/env/target/dummy", env.pathjoin("/tmp", "target", "dummy") - ) - self.assertEqual("/tmp/env/dummy", env.pathjoin("/tmp", "dummy")) + def test_pathjoin(self) -> None: + with temporary_root() as root: + self.assertEqual( + os.path.join(env.base_dir(root), "dummy"), env.pathjoin(root, "dummy") + ) - def test_render_str(self): + def test_render_str(self) -> None: self.assertEqual( "hello world", env.render_str({"name": "world"}, "hello {{ name }}") ) - def test_common_domain(self): + def test_render_unknown(self) -> None: + config: Config = { + "var1": "a", + } + self.assertEqual("ab", env.render_unknown(config, "{{ var1 }}b")) + self.assertEqual({"x": "ac"}, env.render_unknown(config, {"x": "{{ var1 }}c"})) + self.assertEqual(["x", "ac"], env.render_unknown(config, ["x", "{{ var1 }}c"])) + + def test_common_domain(self) -> None: self.assertEqual( "mydomain.com", env.render_str( @@ -53,130 +75,326 @@ def test_common_domain(self): ), ) - def test_render_str_missing_configuration(self): + def test_render_str_missing_configuration(self) -> None: self.assertRaises(exceptions.TutorError, env.render_str, {}, "hello {{ name }}") - def test_render_file(self): - config = {} - tutor_config.merge(config, tutor_config.load_defaults()) + def test_render_file(self) -> None: + config: Config = {} + tutor_config.update_with_base(config) + 
tutor_config.update_with_defaults(config) + tutor_config.render_full(config) + config["MYSQL_ROOT_PASSWORD"] = "testpassword" - rendered = env.render_file(config, "hooks", "mysql", "init") + rendered = env.render_file(config, "jobs", "init", "mysql.sh") self.assertIn("testpassword", rendered) - @unittest.mock.patch.object(tutor_config.fmt, "echo") - def test_render_file_missing_configuration(self, _): + @patch.object(fmt, "echo") + def test_render_file_missing_configuration(self, _: Mock) -> None: self.assertRaises( exceptions.TutorError, env.render_file, {}, "local", "docker-compose.yml" ) - def test_save_full(self): - defaults = tutor_config.load_defaults() - with tempfile.TemporaryDirectory() as root: - config = tutor_config.load_current(root, defaults) - tutor_config.merge(config, defaults) - with unittest.mock.patch.object(fmt, "STDOUT"): + def test_save_full(self) -> None: + with temporary_root() as root: + config = tutor_config.load_full(root) + with patch.object(fmt, "STDOUT"): env.save(root, config) self.assertTrue( - os.path.exists(os.path.join(root, "env", "local", "docker-compose.yml")) + os.path.exists( + os.path.join(env.base_dir(root), "local", "docker-compose.yml") + ) ) - def test_save_full_with_https(self): - defaults = tutor_config.load_defaults() - with tempfile.TemporaryDirectory() as root: - config = tutor_config.load_current(root, defaults) - tutor_config.merge(config, defaults) + def test_save_full_with_https(self) -> None: + with temporary_root() as root: + config = tutor_config.load_full(root) config["ENABLE_HTTPS"] = True - with unittest.mock.patch.object(fmt, "STDOUT"): + with patch.object(fmt, "STDOUT"): env.save(root, config) - with open(os.path.join(root, "env", "apps", "caddy", "Caddyfile")) as f: - self.assertIn("www.myopenedx.com {", f.read()) + with open( + os.path.join(env.base_dir(root), "apps", "caddy", "Caddyfile"), + encoding="utf-8", + ) as f: + self.assertIn("www.myopenedx.com{$default_site_port}", f.read()) - def 
test_patch(self): + def test_patch(self) -> None: patches = {"plugin1": "abcd", "plugin2": "efgh"} - with unittest.mock.patch.object( - env.plugins, "iter_patches", return_value=patches.items() + with patch.object( + plugins, "iter_patches", return_value=patches.values() ) as mock_iter_patches: rendered = env.render_str({}, '{{ patch("location") }}') - mock_iter_patches.assert_called_once_with({}, "location") + mock_iter_patches.assert_called_once_with("location") self.assertEqual("abcd\nefgh", rendered) - def test_patch_separator_suffix(self): + def test_patch_separator_suffix(self) -> None: patches = {"plugin1": "abcd", "plugin2": "efgh"} - with unittest.mock.patch.object( - env.plugins, "iter_patches", return_value=patches.items() - ): + with patch.object(plugins, "iter_patches", return_value=patches.values()): rendered = env.render_str( {}, '{{ patch("location", separator=",\n", suffix=",") }}' ) self.assertEqual("abcd,\nefgh,", rendered) - def test_plugin_templates(self): + def test_plugin_templates(self) -> None: with tempfile.TemporaryDirectory() as plugin_templates: - # Create plugin - plugin1 = env.plugins.DictPlugin( + DictPlugin( {"name": "plugin1", "version": "0", "templates": plugin_templates} ) - # Create two templates os.makedirs(os.path.join(plugin_templates, "plugin1", "apps")) with open( - os.path.join(plugin_templates, "plugin1", "unrendered.txt"), "w" + os.path.join(plugin_templates, "plugin1", "unrendered.txt"), + "w", + encoding="utf-8", ) as f: f.write("This file should not be rendered") with open( - os.path.join(plugin_templates, "plugin1", "apps", "rendered.txt"), "w" + os.path.join(plugin_templates, "plugin1", "apps", "rendered.txt"), + "w", + encoding="utf-8", ) as f: f.write("Hello my ID is {{ ID }}") - # Create configuration - config = {"ID": "abcd"} - # Render templates - with unittest.mock.patch.object( - env.plugins, - "iter_enabled", - return_value=[plugin1], - ): - with tempfile.TemporaryDirectory() as root: - # Render plugin 
templates - env.save_plugin_templates(plugin1, root, config) - - # Check that plugin template was rendered - dst_unrendered = os.path.join( - root, "env", "plugins", "plugin1", "unrendered.txt" - ) - dst_rendered = os.path.join( - root, "env", "plugins", "plugin1", "apps", "rendered.txt" - ) - self.assertFalse(os.path.exists(dst_unrendered)) - self.assertTrue(os.path.exists(dst_rendered)) - with open(dst_rendered) as f: - self.assertEqual("Hello my ID is abcd", f.read()) - - def test_renderer_is_reset_on_config_change(self): + with temporary_root() as root: + # Create configuration + config: Config = tutor_config.load_full(root) + config["ID"] = "Hector Rumblethorpe" + plugins.load("plugin1") + tutor_config.save_enabled_plugins(config) + + # Render environment + with patch.object(fmt, "STDOUT"): + env.save(root, config) + + # Check that plugin template was rendered + root_env = os.path.join(root, "env") + dst_unrendered = os.path.join( + root_env, "plugins", "plugin1", "unrendered.txt" + ) + dst_rendered = os.path.join( + root_env, "plugins", "plugin1", "apps", "rendered.txt" + ) + self.assertFalse(os.path.exists(dst_unrendered)) + self.assertTrue(os.path.exists(dst_rendered)) + with open(dst_rendered, encoding="utf-8") as f: + self.assertEqual("Hello my ID is Hector Rumblethorpe", f.read()) + + def test_renderer_is_reset_on_config_change(self) -> None: with tempfile.TemporaryDirectory() as plugin_templates: - plugin1 = env.plugins.DictPlugin( + plugin1 = DictPlugin( {"name": "plugin1", "version": "0", "templates": plugin_templates} ) + # Create one template os.makedirs(os.path.join(plugin_templates, plugin1.name)) with open( - os.path.join(plugin_templates, plugin1.name, "myplugin.txt"), "w" + os.path.join(plugin_templates, plugin1.name, "myplugin.txt"), + "w", + encoding="utf-8", ) as f: f.write("some content") # Load env once - config = {"PLUGINS": []} - env1 = env.Renderer.instance(config).environment - - with unittest.mock.patch.object( - env.plugins, - 
"iter_enabled", - return_value=[plugin1], - ): - # Load env a second time - config["PLUGINS"].append("myplugin") - env2 = env.Renderer.instance(config).environment + config: Config = {"PLUGINS": []} + env1 = env.Renderer(config).environment + + # Enable plugins + plugins.load("plugin1") + + # Load env a second time + config["PLUGINS"] = ["myplugin"] + env2 = env.Renderer(config).environment self.assertNotIn("plugin1/myplugin.txt", env1.loader.list_templates()) self.assertIn("plugin1/myplugin.txt", env2.loader.list_templates()) + + def test_iter_values_named(self) -> None: + config: Config = { + "something0_test_app": 0, + "something1_test_not_app": 1, + "notsomething_test_app": 2, + "something3_test_app": 3, + } + renderer = env.Renderer(config) + self.assertEqual([2, 3], list(renderer.iter_values_named(suffix="test_app"))) + self.assertEqual([1, 3], list(renderer.iter_values_named(prefix="something"))) + self.assertEqual( + [0, 3], + list( + renderer.iter_values_named( + prefix="something", suffix="test_app", allow_empty=True + ) + ), + ) + + +class CurrentVersionTests(unittest.TestCase): + def test_current_version_in_empty_env(self) -> None: + with temporary_root() as root: + self.assertIsNone(env.current_version(root)) + self.assertIsNone(env.get_env_release(root)) + self.assertIsNone(env.should_upgrade_from_release(root)) + self.assertTrue(env.is_up_to_date(root)) + + def test_current_version_in_lilac_env(self) -> None: + with temporary_root() as root: + os.makedirs(env.base_dir(root)) + with open( + os.path.join(env.base_dir(root), env.VERSION_FILENAME), + "w", + encoding="utf-8", + ) as f: + f.write("12.0.46") + self.assertEqual("12.0.46", env.current_version(root)) + self.assertEqual("lilac", env.get_env_release(root)) + self.assertEqual("lilac", env.should_upgrade_from_release(root)) + self.assertFalse(env.is_up_to_date(root)) + + def test_current_version_in_latest_env(self) -> None: + with temporary_root() as root: + os.makedirs(env.base_dir(root)) + with 
open( + os.path.join(env.base_dir(root), env.VERSION_FILENAME), + "w", + encoding="utf-8", + ) as f: + f.write(__version__) + self.assertEqual(__version__, env.current_version(root)) + self.assertEqual("redwood", env.get_env_release(root)) + self.assertIsNone(env.should_upgrade_from_release(root)) + self.assertTrue(env.is_up_to_date(root)) + + +class PatchRendererTests(unittest.TestCase): + def setUp(self) -> None: + self.render = env.PatchRenderer() + self.render.current_template = "current_template" + return super().setUp() + + @patch("tutor.env.Renderer.render_template") + def test_render_template(self, render_template_mock: Mock) -> None: + """Test that render_template changes the current template and + calls once render_template from Renderer with the current template.""" + self.render.render_template("new_template") + + self.assertEqual(self.render.current_template, "new_template") + render_template_mock.assert_called_once_with("new_template") + + @patch("tutor.env.Renderer.patch") + def test_patch_with_first_patch(self, patch_mock: Mock) -> None: + """Test that patch is called from Renderer and adds patches_locations + when we didn't have that patch.""" + self.render.patches_locations = {} + + self.render.patch("first_patch") + + patch_mock.assert_called_once_with("first_patch", separator="\n", suffix="") + self.assertEqual( + self.render.patches_locations, + {"first_patch": [self.render.current_template]}, + ) + + def test_patch_with_patch_multiple_locations(self) -> None: + """Test add more locations to a patch.""" + self.render.patches_locations = {"first_patch": ["template_1"]} + + self.render.patch("first_patch") + + self.assertEqual( + self.render.patches_locations, + {"first_patch": ["template_1", "current_template"]}, + ) + + @patch("tutor.env.plugins.iter_patches") + def test_patch_with_custom_patch_in_a_plugin_patch( + self, iter_patches_mock: Mock + ) -> None: + """Test the patch function with a plugin with a custom patch. 
+ Examples: + - When first_patch is in a plugin patches and has a 'custom_patch', + the patches_locations will reflect that 'custom_patch' is from + first_patch location. + - If in tutor-mfe/tutormfe/patches/caddyfile you add a custom patch + inside the caddyfile patch, the patches_locations will reflect that. + + Expected behavior: + - Process the first_patch and find the custom_patch in a plugin with + first_patch patch. + - Process the custom_patch and add "within patch: first_patch" in the + patches_locations.""" + iter_patches_mock.side_effect = [ + ["""{{ patch('custom_patch')|indent(4) }}"""], + [], + ] + self.render.patches_locations = {} + calls = [unittest.mock.call("first_patch"), unittest.mock.call("custom_patch")] + + self.render.patch("first_patch") + + iter_patches_mock.assert_has_calls(calls) + self.assertEqual( + self.render.patches_locations, + { + "first_patch": ["current_template"], + "custom_patch": ["within patch: first_patch"], + }, + ) + + @patch("tutor.env.plugins.iter_patches") + def test_patch_with_processed_patch_in_a_plugin_patch( + self, iter_patches_mock: Mock + ) -> None: + """Test the patch function with a plugin with a processed patch. + Example: + - When first_patch was processed and the second_patch is used in a + plugin and call the first_patch again. 
Then the patches_locations will + reflect that first_patch also have a location from second_patch.""" + iter_patches_mock.side_effect = [ + ["""{{ patch('first_patch')|indent(4) }}"""], + [], + ] + self.render.patches_locations = {"first_patch": ["current_template"]} + + self.render.patch("second_patch") + + self.assertEqual( + self.render.patches_locations, + { + "first_patch": ["current_template", "within patch: second_patch"], + "second_patch": ["current_template"], + }, + ) + + @patch("tutor.env.Renderer.iter_templates_in") + @patch("tutor.env.PatchRenderer.render_template") + def test_render_all( + self, render_template_mock: Mock, iter_templates_in_mock: Mock + ) -> None: + """Test render_template was called for templates in iter_templates_in.""" + iter_templates_in_mock.return_value = ["template_1", "template_2"] + calls = [unittest.mock.call("template_1"), unittest.mock.call("template_2")] + + self.render.render_all() + + iter_templates_in_mock.assert_called_once() + render_template_mock.assert_has_calls(calls) + + @patch("sys.stdout", new_callable=StringIO) + @patch("tutor.env.PatchRenderer.render_all") + def test_print_patches_locations( + self, render_all_mock: Mock, stdout_mock: Mock + ) -> None: + """Test render_all was called and the output of print_patches_locations.""" + self.render.patches_locations = {"first_patch": ["template_1", "template_2"]} + + self.render.print_patches_locations() + + render_all_mock.assert_called_once() + self.assertEqual( + """ +PATCH LOCATIONS +first_patch template_1 + template_2 +""".strip(), + stdout_mock.getvalue().strip(), + ) diff --git a/tests/test_images.py b/tests/test_images.py deleted file mode 100644 index 525dac9731..0000000000 --- a/tests/test_images.py +++ /dev/null @@ -1,12 +0,0 @@ -import unittest -from tutor import images - - -class ImagesTests(unittest.TestCase): - def test_get_tag(self): - config = { - "DOCKER_IMAGE_OPENEDX": "registry/openedx", - "DOCKER_IMAGE_OPENEDX_DEV": "registry/openedxdev", - } - 
self.assertEqual("registry/openedx", images.get_tag(config, "openedx")) - self.assertEqual("registry/openedxdev", images.get_tag(config, "openedx-dev")) diff --git a/tests/test_plugin_indexes.py b/tests/test_plugin_indexes.py new file mode 100644 index 0000000000..f4e536466e --- /dev/null +++ b/tests/test_plugin_indexes.py @@ -0,0 +1,81 @@ +import os +import unittest +from unittest.mock import patch + +from tutor.exceptions import TutorError +from tutor.plugins import indexes +from tutor.types import Config + + +class PluginIndexesTest(unittest.TestCase): + def test_named_index_url(self) -> None: + self.assertEqual( + f"https://myindex.com/tutor/{indexes.RELEASE}/plugins.yml", + indexes.named_index_url("https://myindex.com/tutor"), + ) + self.assertEqual( + f"https://myindex.com/tutor/{indexes.RELEASE}/plugins.yml", + indexes.named_index_url("https://myindex.com/tutor/"), + ) + + local_url = os.path.join("path", "to", "index", indexes.RELEASE) + self.assertEqual( + os.path.join(local_url, indexes.RELEASE, "plugins.yml"), + indexes.named_index_url(local_url), + ) + + def test_parse_index(self) -> None: + # Valid, empty index + self.assertEqual([], indexes.parse_index("[]")) + # Invalid index, list expected + with self.assertRaises(TutorError): + self.assertEqual([], indexes.parse_index("{}")) + # Invalid, empty index + with self.assertRaises(TutorError): + self.assertEqual([], indexes.parse_index("[")) + # Partially valid index + with patch.object(indexes.fmt, "echo"): + self.assertEqual( + [{"name": "valid1"}], + indexes.parse_index( + """ +- namE: invalid1 +- name: valid1 + """ + ), + ) + + def test_add(self) -> None: + config: Config = {} + self.assertTrue(indexes.add("https://myindex.com", config)) + self.assertFalse(indexes.add("https://myindex.com", config)) + self.assertEqual(["https://myindex.com"], config["PLUGIN_INDEXES"]) + + def test_add_by_alias(self) -> None: + config: Config = {} + self.assertTrue(indexes.add("main", config)) + 
self.assertEqual(["https://overhang.io/tutor/main"], config["PLUGIN_INDEXES"]) + self.assertTrue(indexes.remove("main", config)) + self.assertEqual([], config["PLUGIN_INDEXES"]) + + def test_deduplication(self) -> None: + plugins = [ + {"name": "plugin1", "description": "desc1"}, + {"name": "PLUGIN1", "description": "desc2"}, + ] + deduplicated = indexes.deduplicate_plugins(plugins) + self.assertEqual([{"name": "plugin1", "description": "desc2"}], deduplicated) + + def test_short_description(self) -> None: + entry = indexes.IndexEntry({"name": "plugin1"}) + self.assertEqual("", entry.short_description) + + def test_entry_match(self) -> None: + self.assertTrue(indexes.IndexEntry({"name": "ecommerce"}).match("ecomm")) + self.assertFalse(indexes.IndexEntry({"name": "ecommerce"}).match("ecom1")) + self.assertTrue(indexes.IndexEntry({"name": "ecommerce"}).match("Ecom")) + self.assertTrue( + indexes.IndexEntry( + {"name": "ecommerce", "description": "An awesome plugin"} + ).match("AWESOME") + ) diff --git a/tests/test_plugins.py b/tests/test_plugins.py index 19eb566053..aa6375eb33 100644 --- a/tests/test_plugins.py +++ b/tests/test_plugins.py @@ -1,233 +1,16 @@ -import unittest -from unittest.mock import patch +from __future__ import annotations -from tutor import config as tutor_config -from tutor import exceptions -from tutor import fmt -from tutor import plugins +from tests.helpers import PluginsTestCase +from tutor import hooks, plugins -class PluginsTests(unittest.TestCase): - def setUp(self): - plugins.Plugins.clear() +class PluginsTests(PluginsTestCase): + def test_env_patches_updated_on_new_plugin(self) -> None: + self.assertEqual([], list(plugins.iter_patches("mypatch"))) - @patch.object(plugins.DictPlugin, "iter_installed", return_value=[]) - def test_iter_installed(self, _dict_plugin_iter_installed): - with patch.object(plugins.pkg_resources, "iter_entry_points", return_value=[]): - self.assertEqual([], list(plugins.iter_installed())) + 
hooks.Filters.ENV_PATCHES.add_item(("mypatch", "hello!")) - def test_is_installed(self): - self.assertFalse(plugins.is_installed("dummy")) + # env patches cache should be cleared on new plugin + hooks.Actions.PLUGIN_LOADED.do("dummyplugin") - @patch.object(plugins.DictPlugin, "iter_installed", return_value=[]) - def test_official_plugins(self, _dict_plugin_iter_installed): - with patch.object(plugins.importlib, "import_module", return_value=42): - plugin1 = plugins.OfficialPlugin.load("plugin1") - with patch.object(plugins.importlib, "import_module", return_value=43): - plugin2 = plugins.OfficialPlugin.load("plugin2") - with patch.object( - plugins.EntrypointPlugin, - "iter_installed", - return_value=[plugin1], - ): - self.assertEqual( - [plugin1, plugin2], - list(plugins.iter_installed()), - ) - - def test_enable(self): - config = {plugins.CONFIG_KEY: []} - with patch.object(plugins, "is_installed", return_value=True): - plugins.enable(config, "plugin2") - plugins.enable(config, "plugin1") - self.assertEqual(["plugin1", "plugin2"], config[plugins.CONFIG_KEY]) - - def test_enable_twice(self): - config = {plugins.CONFIG_KEY: []} - with patch.object(plugins, "is_installed", return_value=True): - plugins.enable(config, "plugin1") - plugins.enable(config, "plugin1") - self.assertEqual(["plugin1"], config[plugins.CONFIG_KEY]) - - def test_enable_not_installed_plugin(self): - config = {"PLUGINS": []} - with patch.object(plugins, "is_installed", return_value=False): - self.assertRaises(exceptions.TutorError, plugins.enable, config, "plugin1") - - def test_disable(self): - config = {"PLUGINS": ["plugin1", "plugin2"]} - with patch.object(fmt, "STDOUT"): - plugins.disable(config, "plugin1") - self.assertEqual(["plugin2"], config["PLUGINS"]) - - def test_disable_removes_set_config(self): - with patch.object( - plugins.Plugins, - "iter_enabled", - return_value=[ - plugins.DictPlugin( - { - "name": "plugin1", - "version": "1.0.0", - "config": {"set": {"KEY": "value"}}, - } - ) 
- ], - ): - config = {"PLUGINS": ["plugin1"], "KEY": "value"} - with patch.object(fmt, "STDOUT"): - plugins.disable(config, "plugin1") - self.assertEqual([], config["PLUGINS"]) - self.assertNotIn("KEY", config) - - def test_patches(self): - class plugin1: - patches = {"patch1": "Hello {{ ID }}"} - - with patch.object( - plugins.Plugins, - "iter_enabled", - return_value=[plugins.BasePlugin("plugin1", plugin1)], - ): - patches = list(plugins.iter_patches({}, "patch1")) - self.assertEqual([("plugin1", "Hello {{ ID }}")], patches) - - def test_plugin_without_patches(self): - with patch.object( - plugins.Plugins, - "iter_enabled", - return_value=[plugins.BasePlugin("plugin1", None)], - ): - patches = list(plugins.iter_patches({}, "patch1")) - self.assertEqual([], patches) - - def test_configure(self): - config = {"ID": "id"} - defaults = {} - - class plugin1: - config = { - "add": {"PARAM1": "value1", "PARAM2": "value2"}, - "set": {"PARAM3": "value3"}, - "defaults": {"PARAM4": "value4"}, - } - - with patch.object( - plugins.Plugins, - "iter_enabled", - return_value=[plugins.BasePlugin("plugin1", plugin1)], - ): - tutor_config.load_plugins(config, defaults) - - self.assertEqual( - { - "ID": "id", - "PARAM3": "value3", - "PLUGIN1_PARAM1": "value1", - "PLUGIN1_PARAM2": "value2", - }, - config, - ) - self.assertEqual({"PLUGIN1_PARAM4": "value4"}, defaults) - - def test_configure_set_does_not_override(self): - config = {"ID": "oldid"} - - class plugin1: - config = {"set": {"ID": "newid"}} - - with patch.object( - plugins.Plugins, - "iter_enabled", - return_value=[plugins.BasePlugin("plugin1", plugin1)], - ): - tutor_config.load_plugins(config, {}) - - self.assertEqual({"ID": "oldid"}, config) - - def test_configure_set_random_string(self): - config = {} - - class plugin1: - config = {"set": {"PARAM1": "{{ 128|random_string }}"}} - - with patch.object( - plugins.Plugins, - "iter_enabled", - return_value=[plugins.BasePlugin("plugin1", plugin1)], - ): - 
tutor_config.load_plugins(config, {}) - self.assertEqual(128, len(config["PARAM1"])) - - def test_configure_default_value_with_previous_definition(self): - config = {} - defaults = {"PARAM1": "value"} - - class plugin1: - config = {"defaults": {"PARAM2": "{{ PARAM1 }}"}} - - with patch.object( - plugins.Plugins, - "iter_enabled", - return_value=[plugins.BasePlugin("plugin1", plugin1)], - ): - tutor_config.load_plugins(config, defaults) - self.assertEqual("{{ PARAM1 }}", defaults["PLUGIN1_PARAM2"]) - - def test_configure_add_twice(self): - config = {} - - class plugin1: - config = {"add": {"PARAM1": "{{ 10|random_string }}"}} - - with patch.object( - plugins.Plugins, - "iter_enabled", - return_value=[plugins.BasePlugin("plugin1", plugin1)], - ): - tutor_config.load_plugins(config, {}) - value1 = config["PLUGIN1_PARAM1"] - with patch.object( - plugins.Plugins, - "iter_enabled", - return_value=[plugins.BasePlugin("plugin1", plugin1)], - ): - tutor_config.load_plugins(config, {}) - value2 = config["PLUGIN1_PARAM1"] - - self.assertEqual(10, len(value1)) - self.assertEqual(10, len(value2)) - self.assertEqual(value1, value2) - - def test_hooks(self): - class plugin1: - hooks = {"init": ["myclient"]} - - with patch.object( - plugins.Plugins, - "iter_enabled", - return_value=[plugins.BasePlugin("plugin1", plugin1)], - ): - self.assertEqual( - [("plugin1", ["myclient"])], list(plugins.iter_hooks({}, "init")) - ) - - def test_plugins_are_updated_on_config_change(self): - config = {"PLUGINS": []} - plugins1 = plugins.Plugins(config) - self.assertEqual(0, len(list(plugins1.iter_enabled()))) - config["PLUGINS"].append("plugin1") - with patch.object( - plugins.Plugins, - "iter_installed", - return_value=[plugins.BasePlugin("plugin1", None)], - ): - plugins2 = plugins.Plugins(config) - self.assertEqual(1, len(list(plugins2.iter_enabled()))) - - def test_dict_plugin(self): - plugin = plugins.DictPlugin( - {"name": "myplugin", "config": {"set": {"KEY": "value"}}, "version": "0.1"} - 
) - self.assertEqual("myplugin", plugin.name) - self.assertEqual({"KEY": "value"}, plugin.config_set) + self.assertEqual(["hello!"], list(plugins.iter_patches("mypatch"))) diff --git a/tests/test_plugins_v0.py b/tests/test_plugins_v0.py new file mode 100644 index 0000000000..5d737d0748 --- /dev/null +++ b/tests/test_plugins_v0.py @@ -0,0 +1,228 @@ +from __future__ import annotations + +from unittest.mock import patch + +from tests.helpers import PluginsTestCase, temporary_root +from tutor import config as tutor_config +from tutor import exceptions, fmt, hooks, plugins +from tutor.plugins import v0 as plugins_v0 +from tutor.types import Config, get_typed + + +class PluginsV0Tests(PluginsTestCase): + def test_iter_installed(self) -> None: + self.assertEqual([], list(plugins.iter_installed())) + + def test_is_installed(self) -> None: + self.assertFalse(plugins.is_installed("dummy")) + + def test_official_plugins(self) -> None: + # Create 2 official plugins + plugins_v0.OfficialPlugin("plugin1") + plugins_v0.OfficialPlugin("plugin2") + self.assertEqual( + ["plugin1", "plugin2"], + list(plugins.iter_installed()), + ) + + def test_load(self) -> None: + config: Config = {tutor_config.PLUGINS_CONFIG_KEY: []} + plugins_v0.DictPlugin({"name": "plugin1"}) + plugins_v0.DictPlugin({"name": "plugin2"}) + plugins.load("plugin2") + plugins.load("plugin1") + tutor_config.save_enabled_plugins(config) + self.assertEqual( + ["plugin1", "plugin2"], config[tutor_config.PLUGINS_CONFIG_KEY] + ) + + def test_enable_twice(self) -> None: + plugins_v0.DictPlugin({"name": "plugin1"}) + plugins.load("plugin1") + plugins.load("plugin1") + config: Config = {tutor_config.PLUGINS_CONFIG_KEY: []} + tutor_config.save_enabled_plugins(config) + self.assertEqual(["plugin1"], config[tutor_config.PLUGINS_CONFIG_KEY]) + + def test_load_not_installed_plugin(self) -> None: + self.assertRaises(exceptions.TutorError, plugins.load, "plugin1") + + def test_disable(self) -> None: + plugins_v0.DictPlugin( + { + 
"name": "plugin1", + "version": "1.0.0", + "config": {"set": {"KEY": "value"}}, + } + ) + plugins_v0.DictPlugin( + { + "name": "plugin2", + "version": "1.0.0", + } + ) + config: Config = {"PLUGINS": ["plugin1", "plugin2"]} + tutor_config.enable_plugins(config) + with patch.object(fmt, "STDOUT"): + hooks.Actions.PLUGIN_UNLOADED.do("plugin1", "", config) + self.assertEqual(["plugin2"], config["PLUGINS"]) + + def test_disable_removes_set_config(self) -> None: + plugins_v0.DictPlugin( + { + "name": "plugin1", + "version": "1.0.0", + "config": {"set": {"KEY": "value"}}, + } + ) + config: Config = {"PLUGINS": ["plugin1"], "KEY": "value"} + tutor_config.enable_plugins(config) + with patch.object(fmt, "STDOUT"): + hooks.Actions.PLUGIN_UNLOADED.do("plugin1", "", config) + self.assertEqual([], config["PLUGINS"]) + self.assertNotIn("KEY", config) + + def test_patches(self) -> None: + plugins_v0.DictPlugin( + {"name": "plugin1", "patches": {"patch1": "Hello {{ ID }}"}} + ) + plugins.load_all(["plugin1"]) + patches = list(plugins.iter_patches("patch1")) + self.assertEqual(["Hello {{ ID }}"], patches) + + def test_plugin_without_patches(self) -> None: + plugins_v0.DictPlugin({"name": "plugin1"}) + plugins.load_all(["plugin1"]) + patches = list(plugins.iter_patches("patch1")) + self.assertEqual([], patches) + + def test_configure(self) -> None: + plugins_v0.DictPlugin( + { + "name": "plugin1", + "config": { + "add": {"PARAM1": "value1", "PARAM2": "value2"}, + "set": {"PARAM3": "value3"}, + "defaults": {"PARAM4": "value4"}, + }, + } + ) + plugins.load("plugin1") + + base = tutor_config.get_base() + defaults = tutor_config.get_defaults() + + self.assertEqual(base["PARAM3"], "value3") + self.assertEqual(base["PLUGIN1_PARAM1"], "value1") + self.assertEqual(base["PLUGIN1_PARAM2"], "value2") + self.assertEqual(defaults["PLUGIN1_PARAM4"], "value4") + + def test_configure_set_does_not_override(self) -> None: + config: Config = {"ID1": "oldid"} + + plugins_v0.DictPlugin( + {"name": 
"plugin1", "config": {"set": {"ID1": "newid", "ID2": "id2"}}} + ) + plugins.load("plugin1") + tutor_config.update_with_base(config) + + self.assertEqual("oldid", config["ID1"]) + self.assertEqual("id2", config["ID2"]) + + def test_configure_set_random_string(self) -> None: + plugins_v0.DictPlugin( + { + "name": "plugin1", + "config": {"set": {"PARAM1": "{{ 128|random_string }}"}}, + } + ) + plugins.load("plugin1") + config = tutor_config.get_base() + tutor_config.render_full(config) + + self.assertEqual(128, len(get_typed(config, "PARAM1", str))) + + def test_configure_default_value_with_previous_definition(self) -> None: + config: Config = {"PARAM1": "value"} + plugins_v0.DictPlugin( + {"name": "plugin1", "config": {"defaults": {"PARAM2": "{{ PARAM1 }}"}}} + ) + plugins.load("plugin1") + tutor_config.update_with_defaults(config) + self.assertEqual("{{ PARAM1 }}", config["PLUGIN1_PARAM2"]) + + def test_config_load_from_plugins(self) -> None: + config: Config = {} + + plugins_v0.DictPlugin( + {"name": "plugin1", "config": {"add": {"PARAM1": "{{ 10|random_string }}"}}} + ) + plugins.load("plugin1") + + tutor_config.update_with_base(config) + tutor_config.update_with_defaults(config) + tutor_config.render_full(config) + value1 = get_typed(config, "PLUGIN1_PARAM1", str) + + self.assertEqual(10, len(value1)) + + def test_init_tasks(self) -> None: + plugins_v0.DictPlugin({"name": "plugin1", "hooks": {"init": ["myclient"]}}) + with patch.object( + plugins_v0.env, "read_template_file", return_value="echo hello" + ) as mock_read_template: + plugins.load("plugin1") + mock_read_template.assert_called_once_with( + "plugin1", "hooks", "myclient", "init" + ) + + self.assertIn( + ("myclient", "echo hello"), + list(hooks.Filters.CLI_DO_INIT_TASKS.iterate()), + ) + + def test_plugins_are_updated_on_config_change(self) -> None: + config: Config = {} + plugins_v0.DictPlugin({"name": "plugin1"}) + tutor_config.enable_plugins(config) + plugins1 = list(plugins.iter_loaded()) + 
config["PLUGINS"] = ["plugin1"] + tutor_config.enable_plugins(config) + plugins2 = list(plugins.iter_loaded()) + + self.assertEqual([], plugins1) + self.assertEqual(1, len(plugins2)) + + def test_dict_plugin(self) -> None: + plugin = plugins_v0.DictPlugin( + {"name": "myplugin", "config": {"set": {"KEY": "value"}}, "version": "0.1"} + ) + plugins.load("myplugin") + overriden_items = hooks.Filters.CONFIG_OVERRIDES.apply([]) + versions = list(plugins.iter_info()) + self.assertEqual("myplugin", plugin.name) + self.assertEqual([("myplugin", "0.1")], versions) + self.assertEqual([("KEY", "value")], overriden_items) + + def test_config_disable_plugin(self) -> None: + plugins_v0.DictPlugin( + {"name": "plugin1", "config": {"set": {"KEY1": "value1"}}} + ) + plugins_v0.DictPlugin( + {"name": "plugin2", "config": {"set": {"KEY2": "value2"}}} + ) + plugins.load("plugin1") + plugins.load("plugin2") + + with temporary_root() as root: + config = tutor_config.load_minimal(root) + config_pre = config.copy() + with patch.object(fmt, "STDOUT"): + hooks.Actions.PLUGIN_UNLOADED.do("plugin1", "", config) + config_post = tutor_config.load_minimal(root) + + self.assertEqual("value1", config_pre["KEY1"]) + self.assertEqual("value2", config_pre["KEY2"]) + self.assertNotIn("KEY1", config) + self.assertNotIn("KEY1", config_post) + self.assertEqual("value2", config["KEY2"]) diff --git a/tests/test_serialize.py b/tests/test_serialize.py index 3a78d26268..0923b15e8c 100644 --- a/tests/test_serialize.py +++ b/tests/test_serialize.py @@ -4,32 +4,58 @@ class SerializeTests(unittest.TestCase): - def test_parse_str(self): + def test_parse_str(self) -> None: self.assertEqual("abcd", serialize.parse("abcd")) - def test_parse_int(self): + def test_parse_int(self) -> None: self.assertEqual(1, serialize.parse("1")) - def test_parse_bool(self): + def test_parse_bool(self) -> None: self.assertEqual(True, serialize.parse("true")) self.assertEqual(False, serialize.parse("false")) - def test_parse_null(self): 
+ def test_parse_null(self) -> None: self.assertIsNone(serialize.parse("null")) - def test_parse_invalid_format(self): + def test_parse_invalid_format(self) -> None: self.assertEqual('["abcd"', serialize.parse('["abcd"')) - def test_parse_list(self): + def test_parse_list(self) -> None: self.assertEqual(["abcd"], serialize.parse('["abcd"]')) - def test_parse_weird_chars(self): + def test_parse_weird_chars(self) -> None: self.assertEqual("*@google.com", serialize.parse("*@google.com")) - def test_parse_empty_string(self): + def test_parse_empty_string(self) -> None: self.assertEqual("", serialize.parse("''")) - # def test_dump_null(self):# # Unfortunately, this fails as the output is "null\n...\n" - - -# self.assertEqual("null", serialize.dumps(None)) + def test_parse_key_value(self) -> None: + self.assertEqual(("name", True), serialize.parse_key_value("name=true")) + self.assertEqual(("name", "abcd"), serialize.parse_key_value("name=abcd")) + self.assertEqual(("name", ""), serialize.parse_key_value("name=")) + self.assertIsNone(serialize.parse_key_value("name")) + self.assertEqual(("x", "a=bcd"), serialize.parse_key_value("x=a=bcd")) + self.assertEqual( + ("x", {"key1": {"subkey": "value"}, "key2": {"subkey": "value"}}), + serialize.parse_key_value( + "x=key1:\n subkey: value\nkey2:\n subkey: value" + ), + ) + self.assertEqual( + ("INDIGO_PRIMARY_COLOR", "#225522"), + serialize.parse_key_value("INDIGO_PRIMARY_COLOR=#225522"), + ) + + def test_str_format(self) -> None: + self.assertEqual("true", serialize.str_format(True)) + self.assertEqual("false", serialize.str_format(False)) + self.assertEqual("null", serialize.str_format(None)) + self.assertEqual("éü©", serialize.str_format("éü©")) + self.assertEqual("""[1, 'abcd']""", serialize.str_format([1, "abcd"])) + + def test_load_str_format(self) -> None: + self.assertEqual(True, serialize.load(serialize.str_format(True))) + self.assertEqual(False, serialize.load(serialize.str_format(False))) + self.assertEqual(None, 
serialize.load(serialize.str_format(None))) + self.assertEqual("éü©", serialize.load(serialize.str_format("éü©"))) + self.assertEqual([1, "abcd"], serialize.load(serialize.str_format([1, "abcd"]))) diff --git a/tests/test_utils.py b/tests/test_utils.py index c056b3df03..5d5a6428ee 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,10 +1,17 @@ +import base64 +import os +import subprocess +import tempfile import unittest +from io import StringIO +from typing import List, Tuple +from unittest.mock import MagicMock, mock_open, patch -from tutor import utils +from tutor import exceptions, utils class UtilsTests(unittest.TestCase): - def test_common_domain(self): + def test_common_domain(self) -> None: self.assertEqual( "domain.com", utils.common_domain("sub1.domain.com", "sub2.domain.com") ) @@ -17,16 +24,253 @@ def test_common_domain(self): "domain.com", utils.common_domain("sub.domain.com", "ub.domain.com") ) - def test_reverse_host(self): + def test_reverse_host(self) -> None: self.assertEqual("com.google.www", utils.reverse_host("www.google.com")) - def test_list_if(self): + def test_list_if(self) -> None: self.assertEqual('["cms"]', utils.list_if([("lms", False), ("cms", True)])) - def test_encrypt_decrypt(self): + def test_encrypt_success(self) -> None: password = "passw0rd" encrypted1 = utils.encrypt(password) encrypted2 = utils.encrypt(password) self.assertNotEqual(encrypted1, encrypted2) self.assertTrue(utils.verify_encrypted(encrypted1, password)) self.assertTrue(utils.verify_encrypted(encrypted2, password)) + + def test_encrypt_fail(self) -> None: + password = "passw0rd" + self.assertFalse(utils.verify_encrypted(password, password)) + + def test_ensure_file_directory_exists(self) -> None: + with tempfile.TemporaryDirectory() as root: + tempPath = os.path.join(root, "tempDir", "tempFile") + utils.ensure_file_directory_exists(tempPath) + self.assertTrue(os.path.exists(os.path.dirname(tempPath))) + + def 
test_ensure_file_directory_exists_dirExists(self) -> None: + with tempfile.TemporaryDirectory() as root: + tempPath = os.path.join(root, "tempDir") + os.makedirs(tempPath) + self.assertRaises( + exceptions.TutorError, utils.ensure_file_directory_exists, tempPath + ) + + def test_long_to_base64(self) -> None: + self.assertEqual( + b"\x00", base64.urlsafe_b64decode(utils.long_to_base64(0) + "==") + ) + + def test_rsa_key(self) -> None: + key = utils.rsa_private_key(1024) + imported = utils.rsa_import_key(key) + self.assertIsNotNone(imported.e) + self.assertIsNotNone(imported.d) + self.assertIsNotNone(imported.n) + self.assertIsNotNone(imported.p) + self.assertIsNotNone(imported.q) + + def test_is_root(self) -> None: + with patch("sys.platform", "win32"): + with patch.object(utils, "get_user_id", return_value=42): + self.assertFalse(utils.is_root()) + with patch.object(utils, "get_user_id", return_value=0): + self.assertFalse(utils.is_root()) + + with patch("sys.platform", "linux"): + with patch.object(utils, "get_user_id", return_value=42): + self.assertFalse(utils.is_root()) + with patch.object(utils, "get_user_id", return_value=0): + self.assertTrue(utils.is_root()) + + @patch("sys.platform", "win32") + def test_is_root_win32(self) -> None: + result = utils.is_root() + self.assertFalse(result) + + def test_get_user_id(self) -> None: + with patch("os.getuid", return_value=42): + self.assertEqual(42, utils.get_user_id()) + + with patch("sys.platform", new="win32"): + self.assertEqual(0, utils.get_user_id()) + + @patch("sys.platform", "win32") + def test_get_user_id_win32(self) -> None: + result = utils.get_user_id() + self.assertEqual(0, result) + + @patch("sys.stdout", new_callable=StringIO) + @patch("subprocess.Popen", autospec=True) + def test_execute_exit_without_error( + self, mock_popen: MagicMock, mock_stdout: StringIO + ) -> None: + process = mock_popen.return_value + mock_popen.return_value.__enter__.return_value = process + process.wait.return_value = 0 + 
process.communicate.return_value = ("output", "error") + + result = utils.execute("echo", "") + self.assertEqual(0, result) + self.assertEqual("echo ''\n", mock_stdout.getvalue()) + self.assertEqual(1, process.wait.call_count) + process.kill.assert_not_called() + + @patch("sys.stdout", new_callable=StringIO) + @patch("subprocess.Popen", autospec=True) + def test_execute_nested_command( + self, mock_popen: MagicMock, mock_stdout: StringIO + ) -> None: + process = mock_popen.return_value + mock_popen.return_value.__enter__.return_value = process + process.wait.return_value = 0 + process.communicate.return_value = ("output", "error") + + result = utils.execute("bash", "-c", "echo -n hi") + self.assertEqual(0, result) + self.assertEqual("bash -c 'echo -n hi'\n", mock_stdout.getvalue()) + self.assertEqual(1, process.wait.call_count) + process.kill.assert_not_called() + + @patch("sys.stdout", new_callable=StringIO) + @patch("subprocess.Popen", autospec=True) + def test_execute_exit_with_error( + self, mock_popen: MagicMock, mock_stdout: StringIO + ) -> None: + process = mock_popen.return_value + mock_popen.return_value.__enter__.return_value = process + process.wait.return_value = 1 + process.communicate.return_value = ("output", "error") + + self.assertRaises(exceptions.TutorError, utils.execute, "echo", "") + self.assertEqual("echo ''\n", mock_stdout.getvalue()) + self.assertEqual(1, process.wait.call_count) + process.kill.assert_not_called() + + @patch("sys.stdout", new_callable=StringIO) + @patch("subprocess.Popen", autospec=True) + def test_execute_throw_exception( + self, mock_popen: MagicMock, mock_stdout: StringIO + ) -> None: + process = mock_popen.return_value + mock_popen.return_value.__enter__.return_value = process + process.wait.side_effect = ZeroDivisionError("Exception occurred.") + + self.assertRaises(ZeroDivisionError, utils.execute, "echo", "") + self.assertEqual("echo ''\n", mock_stdout.getvalue()) + self.assertEqual(2, process.wait.call_count) + 
process.kill.assert_called_once() + + @patch("sys.stdout", new_callable=StringIO) + @patch("subprocess.Popen", autospec=True) + def test_execute_keyboard_interrupt( + self, mock_popen: MagicMock, mock_stdout: StringIO + ) -> None: + process = mock_popen.return_value + mock_popen.return_value.__enter__.return_value = process + process.wait.side_effect = KeyboardInterrupt() + + with self.assertRaises(KeyboardInterrupt): + utils.execute("echo", "") + output = mock_stdout.getvalue() + self.assertIn("echo", output) + self.assertEqual(2, process.wait.call_count) + process.kill.assert_called_once() + + @patch("sys.platform", "win32") + def test_check_macos_docker_memory_win32_should_skip(self) -> None: + utils.check_macos_docker_memory() + + @patch("sys.platform", "darwin") + def test_check_macos_docker_memory_darwin(self) -> None: + with patch("tutor.utils.open", mock_open(read_data='{"memoryMiB": 4096}')): + utils.check_macos_docker_memory() + + @patch("sys.platform", "darwin") + def test_check_macos_docker_memory_darwin_filenotfound(self) -> None: + with patch("tutor.utils.open", mock_open()) as mock_open_settings: + mock_open_settings.return_value.__enter__.side_effect = FileNotFoundError + with self.assertRaises(exceptions.TutorError) as e: + utils.check_macos_docker_memory() + self.assertIn("Error accessing Docker settings file", e.exception.args[0]) + + @patch("sys.platform", "darwin") + def test_check_macos_docker_memory_darwin_json_decode_error(self) -> None: + with patch("tutor.utils.open", mock_open(read_data="invalid")): + with self.assertRaises(exceptions.TutorError) as e: + utils.check_macos_docker_memory() + self.assertIn("invalid JSON", e.exception.args[0]) + + @patch("sys.platform", "darwin") + def test_check_macos_docker_memory_darwin_key_error(self) -> None: + with patch("tutor.utils.open", mock_open(read_data="{}")): + with self.assertRaises(exceptions.TutorError) as e: + utils.check_macos_docker_memory() + self.assertIn("key 'memoryMiB' not found", 
e.exception.args[0]) + + @patch("sys.platform", "darwin") + def test_check_macos_docker_memory_darwin_type_error(self) -> None: + with patch( + "tutor.utils.open", mock_open(read_data='{"memoryMiB": "invalidstring"}') + ): + with self.assertRaises(exceptions.TutorError) as e: + utils.check_macos_docker_memory() + self.assertIn("Unexpected JSON data", e.exception.args[0]) + + @patch("sys.platform", "darwin") + def test_check_macos_docker_memory_darwin_insufficient_memory(self) -> None: + with patch("tutor.utils.open", mock_open(read_data='{"memoryMiB": 4095}')): + with self.assertRaises(exceptions.TutorError) as e: + utils.check_macos_docker_memory() + self.assertEqual( + "Docker is configured to allocate 4095 MiB RAM, less than the recommended 4096 MiB", + e.exception.args[0], + ) + + @patch("sys.platform", "darwin") + def test_check_macos_docker_memory_darwin_encoding_error(self) -> None: + with patch("tutor.utils.open", mock_open()) as mock_open_settings: + mock_open_settings.return_value.__enter__.side_effect = TypeError + with self.assertRaises(exceptions.TutorError) as e: + utils.check_macos_docker_memory() + self.assertIn("Text encoding error", e.exception.args[0]) + + def test_is_http(self) -> None: + self.assertTrue(utils.is_http("http://overhang.io/tutor/main")) + self.assertTrue(utils.is_http("https://overhang.io/tutor/main")) + self.assertFalse(utils.is_http("/home/user/")) + self.assertFalse(utils.is_http("home/user/")) + self.assertFalse(utils.is_http("http-home/user/")) + + @patch("subprocess.run") + def test_is_docker_rootless(self, mock_run: MagicMock) -> None: + # Mock rootless `docker info` output + utils.is_docker_rootless.cache_clear() + mock_run.return_value.stdout = "some prefix\n rootless foo bar".encode("utf-8") + self.assertTrue(utils.is_docker_rootless()) + + # Mock regular `docker info` output + utils.is_docker_rootless.cache_clear() + mock_run.return_value.stdout = "some prefix, regular docker".encode("utf-8") + 
self.assertFalse(utils.is_docker_rootless()) + + @patch("subprocess.run") + def test_is_docker_rootless_podman(self, mock_run: MagicMock) -> None: + """Test the `is_docker_rootless` when podman is used or any other error with `docker info`""" + utils.is_docker_rootless.cache_clear() + mock_run.side_effect = subprocess.CalledProcessError(1, "docker info") + self.assertFalse(utils.is_docker_rootless()) + + def test_format_table(self) -> None: + rows: List[Tuple[str, ...]] = [ + ("a", "xyz", "value 1"), + ("abc", "x", "value 12345"), + ] + formatted = utils.format_table(rows, separator=" ") + self.assertEqual( + """ +a xyz value 1 +abc x value 12345 +""".strip(), + formatted, + ) diff --git a/tutor.spec b/tutor.spec index f089dab80a..035a564c68 100644 --- a/tutor.spec +++ b/tutor.spec @@ -1,28 +1,29 @@ # -*- mode: python -*- import importlib import os -import pkg_resources -import wcwidth +import importlib_metadata block_cipher = None datas = [("./tutor/templates", "./tutor/templates")] hidden_imports = [] -# Fix missing wcwidth/version.json file -datas.append((os.path.dirname(wcwidth.__file__), 'wcwidth')) - # Auto-discover plugins and include patches & templates folders -for entrypoint in pkg_resources.iter_entry_points("tutor.plugin.v0"): - plugin_name = entrypoint.name - plugin = entrypoint.load() - plugin_root = os.path.dirname(plugin.__file__) - plugin_root_module_name = os.path.basename(plugin_root) - hidden_imports.append(entrypoint.module_name) - for folder in ["patches", "templates"]: - path = os.path.join(plugin_root, folder) - if os.path.exists(path): - datas.append((path, os.path.join(plugin_root_module_name, folder))) +for entrypoint_version in ["tutor.plugin.v0", "tutor.plugin.v1"]: + for entrypoint in importlib_metadata.entry_points(group=entrypoint_version): + plugin_name = entrypoint.name + try: + plugin = importlib.import_module(entrypoint.value) + except Exception as e: + print(f"ERROR Failed to load plugin {plugin_name}: {e}") + continue + 
plugin_root = os.path.dirname(plugin.__file__) + plugin_root_module_name = os.path.basename(plugin_root) + hidden_imports.append(entrypoint.module) + for folder in ["patches", "templates"]: + path = os.path.join(plugin_root, folder) + if os.path.exists(path): + datas.append((path, os.path.join(plugin_root_module_name, folder))) # Fix license import: if we don't declare some modules, pyinstaller does not find them hidden_imports.append("tutorlts.__about__") hidden_imports.append("Crypto.Cipher.AES") diff --git a/tutor/__about__.py b/tutor/__about__.py index d0051a1567..fb9d827646 100644 --- a/tutor/__about__.py +++ b/tutor/__about__.py @@ -1 +1,28 @@ -__version__ = "11.0.4" +import os + +# Increment this version number to trigger a new release. See +# docs/tutor.html#versioning for information on the versioning scheme. +__version__ = "18.1.4" + +# The version suffix will be appended to the actual version, separated by a +# dash. Use this suffix to differentiate between the actual released version and +# the versions from other branches. For instance: set the suffix to "nightly" in +# the nightly branch. +# The suffix is cleanly separated from the __version__ in this module to avoid +# conflicts when merging branches. +__version_suffix__ = "" + +# The app name will be used to define the name of the default tutor root and +# plugin directory. To avoid conflicts between multiple locally-installed +# versions, if it is defined the version suffix will also be appended to the app +# name. +__app__ = os.environ.get("TUTOR_APP", "tutor") + +# Package version, as installed by pip, does not include the version suffix. +# Otherwise, nightly plugins will automatically install non-nightly Tutor +# version. 
+__package_version__ = __version__ + +if __version_suffix__: + __version__ += "-" + __version_suffix__ + __app__ += "-" + __version_suffix__ diff --git a/tutor/bindmount.py b/tutor/bindmount.py new file mode 100644 index 0000000000..e72521194a --- /dev/null +++ b/tutor/bindmount.py @@ -0,0 +1,76 @@ +from __future__ import annotations + +import os +import re +import typing as t +from functools import lru_cache + +from tutor import hooks, types + + +def get_mounts(config: types.Config) -> list[str]: + return types.get_typed(config, "MOUNTS", list) + + +def iter_mounts(user_mounts: list[str], *names: str) -> t.Iterable[str]: + """ + Iterate on the bind-mounts that are available to any given compose service. The list + of bind-mounts is parsed from `user_mounts` and we yield only those for service + `name`. + + Calling this function multiple times makes repeated calls to the parsing functions, + but that's OK because their result is cached. + """ + for user_mount in user_mounts: + for service, host_path, container_path in parse_mount(user_mount): + if service in names: + yield f"{host_path}:{container_path}" + + +def parse_mount(value: str) -> list[tuple[str, str, str]]: + """ + Parser for mount arguments of the form + "service1[,service2,...]:/host/path:/container/path" (explicit) or "/host/path". + + Returns a list of (service, host_path, container_path) tuples. + """ + mounts = parse_explicit_mount(value) or parse_implicit_mount(value) + return mounts + + +@lru_cache(maxsize=None) +def parse_explicit_mount(value: str) -> list[tuple[str, str, str]]: + """ + Argument is of the form "containers:/host/path:/container/path". 
+ """ + # Note that this syntax does not allow us to include colon ':' characters in paths + match = re.match( + r"(?P[a-zA-Z0-9-_, ]+):(?P[^:]+):(?P[^:]+)", + value, + ) + if not match: + return [] + + mounts: list[tuple[str, str, str]] = [] + services: list[str] = [service.strip() for service in match["services"].split(",")] + host_path = os.path.abspath(os.path.expanduser(match["host_path"])) + host_path = host_path.replace(os.path.sep, "/") + container_path = match["container_path"] + for service in services: + if service: + mounts.append((service, host_path, container_path)) + return mounts + + +@lru_cache(maxsize=None) +def parse_implicit_mount(value: str) -> list[tuple[str, str, str]]: + """ + Argument is of the form "/path/to/host/directory" + """ + mounts: list[tuple[str, str, str]] = [] + host_path = os.path.abspath(os.path.expanduser(value)) + for service, container_path in hooks.Filters.COMPOSE_MOUNTS.iterate( + os.path.basename(host_path) + ): + mounts.append((service, host_path, container_path)) + return mounts diff --git a/tutor/commands/android.py b/tutor/commands/android.py deleted file mode 100644 index a268665761..0000000000 --- a/tutor/commands/android.py +++ /dev/null @@ -1,51 +0,0 @@ -import click - -from .compose import ScriptRunner -from .local import LocalContext -from .. import config as tutor_config -from .. import env as tutor_env -from .. import fmt -from .. 
import utils - - -@click.group(help="Build an Android app for your Open edX platform [BETA FEATURE]") -def android(): - pass - - -@click.command(help="Build the application") -@click.argument("mode", type=click.Choice(["debug", "release"])) -@click.pass_obj -def build(context, mode): - config = tutor_config.load(context.root) - docker_run(context.root, build_command(config, mode)) - fmt.echo_info( - "The {} APK file is available in {}".format( - mode, tutor_env.data_path(context.root, "android") - ) - ) - - -def build_command(config, target): - gradle_target = { - "debug": "assembleProdDebuggable", - "release": "assembleProdRelease", - }[target] - apk_folder = {"debug": "debuggable", "release": "release"}[target] - - command = """ -sed -i "s/APPLICATION_ID = .*/APPLICATION_ID = \\"{{ LMS_HOST|reverse_host|replace("-", "_") }}\\"/g" constants.gradle -./gradlew {gradle_target} -cp OpenEdXMobile/build/outputs/apk/prod/{apk_folder}/*.apk /openedx/data/""" - command = tutor_env.render_str(config, command) - command = command.format(gradle_target=gradle_target, apk_folder=apk_folder) - return command - - -def docker_run(root, command): - config = tutor_config.load(root) - runner = ScriptRunner(root, config, LocalContext.docker_compose) - runner.run_job("android", command) - - -android.add_command(build) diff --git a/tutor/commands/cli.py b/tutor/commands/cli.py index d6a3bd371e..e434a05e7a 100755 --- a/tutor/commands/cli.py +++ b/tutor/commands/cli.py @@ -1,61 +1,106 @@ -#! /usr/bin/env python3 +from __future__ import annotations + import sys +import typing as t import appdirs import click -import click_repl - -from .android import android -from .config import config_command -from .context import Context -from .dev import dev -from .images import images_command -from .k8s import k8s -from .local import local -from .plugins import plugins_command, add_plugin_commands -from .ui import ui -from .webui import webui -from ..__about__ import __version__ -from .. 
import exceptions -from .. import fmt -from .. import utils - - -def main(): + +from tutor import exceptions, fmt, hooks, utils +from tutor.__about__ import __app__, __version__ +from tutor.commands.config import config_command +from tutor.commands.context import Context +from tutor.commands.dev import dev +from tutor.commands.images import images_command +from tutor.commands.k8s import k8s +from tutor.commands.local import local +from tutor.commands.mounts import mounts_command +from tutor.commands.plugins import plugins_command + + +def main() -> None: try: - click_repl.register_repl(cli, name="ui") - cli.add_command(images_command) - cli.add_command(config_command) - cli.add_command(local) - cli.add_command(dev) - cli.add_command(android) - cli.add_command(k8s) - cli.add_command(ui) - cli.add_command(webui) - cli.add_command(print_help) - cli.add_command(plugins_command) - add_plugin_commands(cli) + # Everyone on board + # Note that this action should not be triggered in the module scope, because it + # makes it difficult for tests to rollback changes. + hooks.Actions.CORE_READY.do() cli() # pylint: disable=no-value-for-parameter except KeyboardInterrupt: pass except exceptions.TutorError as e: - fmt.echo_error("Error: {}".format(e.args[0])) + fmt.echo_error(f"Error: {e.args[0]}") sys.exit(1) -@click.group(context_settings={"help_option_names": ["-h", "--help", "help"]}) +class TutorCli(click.Group): + """ + Dynamically load subcommands at runtime. + + This is necessary to load plugin subcommands, based on the list of enabled + plugins (and thus of config.yml). + Docs: https://click.palletsprojects.com/en/latest/commands/#custom-multi-commands + """ + + IS_ROOT_READY = False + + def get_command( + self, ctx: click.Context, cmd_name: str + ) -> t.Optional[click.Command]: + """ + This is run when passing a command from the CLI. E.g: tutor config ... 
+ """ + self.ensure_plugins_enabled(ctx) + return super().get_command(ctx, cmd_name=cmd_name) + + def list_commands(self, ctx: click.Context) -> list[str]: + """ + This is run in the following cases: + - shell autocompletion: tutor + - print help: tutor, tutor -h + """ + self.ensure_plugins_enabled(ctx) + return super().list_commands(ctx) + + def ensure_plugins_enabled(self, ctx: click.Context) -> None: + """ + We enable plugins as soon as possible to have access to commands. + """ + if not "root" in ctx.params: + # When generating docs, this function is called with empty args. + # That's ok, we just ignore it. + return + if not self.IS_ROOT_READY: + hooks.Actions.PROJECT_ROOT_READY.do(ctx.params["root"]) + self.IS_ROOT_READY = True + for cmd in hooks.Filters.CLI_COMMANDS.iterate(): + self.add_command(cmd) + + +@click.group( + cls=TutorCli, + invoke_without_command=True, + add_help_option=False, # Context is incorrectly loaded when help option is automatically added + help="Tutor is the Docker-based Open edX distribution designed for peace of mind.", +) @click.version_option(version=__version__) @click.option( "-r", "--root", envvar="TUTOR_ROOT", - default=appdirs.user_data_dir(appname="tutor"), + default=appdirs.user_data_dir(appname=__app__), show_default=True, type=click.Path(resolve_path=True), help="Root project directory (environment variable: TUTOR_ROOT)", ) +@click.option( + "-h", + "--help", + "show_help", + is_flag=True, + help="Print this help", +) @click.pass_context -def cli(context, root): +def cli(context: click.Context, root: str, show_help: bool) -> None: if utils.is_root(): fmt.echo_alert( "You are running Tutor as root. This is strongly not recommended. 
If you are doing this in order to access" @@ -63,12 +108,29 @@ def cli(context, root): "/install/linux/linux-postinstall/#manage-docker-as-a-non-root-user)" ) context.obj = Context(root) + context.help_option_names = ["-h", "--help"] + if context.invoked_subcommand is None or show_help: + click.echo(context.get_help()) @click.command(help="Print this help", name="help") -def print_help(): - with click.Context(cli) as context: - click.echo(cli.get_help(context)) +@click.pass_context +def help_command(context: click.Context) -> None: + context.invoke(cli, show_help=True) + + +hooks.Filters.CLI_COMMANDS.add_items( + [ + config_command, + dev, + help_command, + images_command, + k8s, + local, + mounts_command, + plugins_command, + ] +) if __name__ == "__main__": diff --git a/tutor/commands/compose.py b/tutor/commands/compose.py index 0e346f2db0..6c156ff210 100644 --- a/tutor/commands/compose.py +++ b/tutor/commands/compose.py @@ -1,95 +1,266 @@ +from __future__ import annotations + +import os +import typing as t + import click -from .. import config as tutor_config -from .. import env as tutor_env -from .. import fmt -from .. import scripts -from .. import serialize -from .. 
import utils +from tutor import bindmount +from tutor import config as tutor_config +from tutor import env as tutor_env +from tutor import fmt, hooks +from tutor import interactive as interactive_config +from tutor import utils +from tutor.commands import images, jobs +from tutor.commands.config import save as config_save_command +from tutor.commands.context import BaseTaskContext +from tutor.commands.upgrade import OPENEDX_RELEASE_NAMES +from tutor.commands.upgrade.compose import upgrade_from +from tutor.core.hooks import Filter # pylint: disable=unused-import +from tutor.exceptions import TutorError +from tutor.tasks import BaseComposeTaskRunner +from tutor.types import Config -class ScriptRunner(scripts.BaseRunner): - def __init__(self, root, config, docker_compose_func): +class ComposeTaskRunner(BaseComposeTaskRunner): + def __init__(self, root: str, config: Config): super().__init__(root, config) - self.docker_compose_func = docker_compose_func + self.project_name = "" + self.docker_compose_files: list[str] = [] + self.docker_compose_job_files: list[str] = [] - def run_job(self, service, command): + def docker_compose(self, *command: str) -> int: """ - Run the "{{ service }}-job" service from local/docker-compose.jobs.yml with the - specified command. For backward-compatibility reasons, if the corresponding - service does not exist, run the service from good old regular - docker-compose.yml. + Run docker-compose with the right yml files. """ - jobs_path = tutor_env.pathjoin(self.root, "local", "docker-compose.jobs.yml") - job_service_name = "{}-job".format(service) - opts = [] if utils.is_a_tty() else ["-T"] - if job_service_name in serialize.load(open(jobs_path).read())["services"]: - self.docker_compose_func( - self.root, - self.config, - "-f", - jobs_path, - "run", - *opts, - "--rm", - job_service_name, - "sh", - "-e", - "-c", - command, + if "start" in command or "up" in command or "restart" in command: + # Note that we don't trigger the action on "run". 
That's because we + # don't want to trigger the action for every initialization script. + hooks.Actions.COMPOSE_PROJECT_STARTED.do( + self.root, self.config, self.project_name ) - else: - fmt.echo_alert( - ( - "The '{job_service_name}' service does not exist in {jobs_path}. " - "This might be caused by an older plugin. Tutor switched to a job " - "runner model for running one-time commands, such as database" - " initialisation. For the record, this is the command that we are " - "running:\n" - "\n" - " {command}\n" - "\n" - "Old-style job running will be deprecated soon. Please inform " - "your plugin maintainer!" - ).format( - job_service_name=job_service_name, - jobs_path=jobs_path, - command=command.replace("\n", "\n "), - ) + args = [] + for docker_compose_path in self.docker_compose_files: + if os.path.exists(docker_compose_path): + args += ["-f", docker_compose_path] + return utils.docker_compose( + *args, "--project-name", self.project_name, *command + ) + + def run_task(self, service: str, command: str) -> int: + """ + Run the "{{ service }}-job" service from local/docker-compose.jobs.yml with the + specified command. 
+ """ + run_command = [] + for docker_compose_path in self.docker_compose_job_files: + path = tutor_env.pathjoin(self.root, docker_compose_path) + if os.path.exists(path): + run_command += ["-f", path] + run_command += ["run", "--rm"] + if not utils.is_a_tty(): + run_command += ["-T"] + job_service_name = f"{service}-job" + return self.docker_compose( + *run_command, + job_service_name, + "sh", + "-e", + "-c", + command, + ) + + +class BaseComposeContext(BaseTaskContext): + NAME: t.Literal["local", "dev"] + OPENEDX_SERVICES: list[str] = ["lms", "cms"] + + def job_runner(self, config: Config) -> ComposeTaskRunner: + raise NotImplementedError + + +@click.command(help="Configure and run Open edX from scratch") +@click.option("-I", "--non-interactive", is_flag=True, help="Run non-interactively") +@click.option("-p", "--pullimages", is_flag=True, help="Update docker images") +@click.option("--skip-build", is_flag=True, help="Skip building Docker images") +@click.pass_context +def launch( + context: click.Context, + non_interactive: bool, + pullimages: bool, + skip_build: bool, +) -> None: + context_name = context.obj.NAME + run_for_prod = False if context_name == "dev" else None + + utils.warn_macos_docker_memory() + + # Upgrade has to run before configuration + interactive_upgrade(context, not non_interactive, run_for_prod=run_for_prod) + interactive_configuration(context, not non_interactive, run_for_prod=run_for_prod) + + config = tutor_config.load(context.obj.root) + + if not skip_build: + click.echo(fmt.title("Building Docker images")) + images_to_build = hooks.Filters.IMAGES_BUILD_REQUIRED.apply([], context_name) + if not images_to_build: + fmt.echo_info("No image to build") + context.invoke(images.build, image_names=images_to_build) + + click.echo(fmt.title("Stopping any existing platform")) + context.invoke(stop) + + if pullimages: + click.echo(fmt.title("Docker image updates")) + context.invoke(dc_command, command="pull") + + click.echo(fmt.title("Starting the 
platform in detached mode")) + context.invoke(start, detach=True) + + click.echo(fmt.title("Database creation and migrations")) + context.invoke(do.commands["init"]) + + # Print the urls of the user-facing apps + public_app_hosts = "" + for host in hooks.Filters.APP_PUBLIC_HOSTS.iterate(context_name): + public_app_host = tutor_env.render_str( + config, "{% if ENABLE_HTTPS %}https{% else %}http{% endif %}://" + host + ) + public_app_hosts += f" {public_app_host}\n" + if public_app_hosts: + fmt.echo_info( + f"""The platform is now running and can be accessed at the following urls: + +{public_app_hosts}""" + ) + + +def interactive_upgrade( + context: click.Context, interactive: bool, run_for_prod: t.Optional[bool] +) -> None: + """ + Piece of code that is only used in launch. + """ + run_upgrade_from_release = tutor_env.should_upgrade_from_release(context.obj.root) + if run_upgrade_from_release is not None: + click.echo(fmt.title("Upgrading from an older release")) + if interactive: + to_release = tutor_env.get_current_open_edx_release_name() + question = f"""You are about to upgrade your Open edX platform from {run_upgrade_from_release.capitalize()} to {to_release.capitalize()} + +It is strongly recommended to make a backup before upgrading. To do so, run: + + tutor local stop # or 'tutor dev stop' in development + sudo rsync -avr "$(tutor config printroot)"/ /tmp/tutor-backup/ + +In case of problem, to restore your backup you will then have to run: sudo rsync -avr /tmp/tutor-backup/ "$(tutor config printroot)"/ + +Are you sure you want to continue?""" + click.confirm( + fmt.question(question), default=True, abort=True, prompt_suffix=" " ) - self.docker_compose_func( - self.root, - self.config, - "run", - *opts, - "--rm", - service, - "sh", - "-e", - "-c", - command, + context.invoke( + upgrade, + from_release=run_upgrade_from_release, + ) + + # Update env and configuration + # Don't run in interactive mode, otherwise users gets prompted twice. 
+ interactive_configuration(context, False, run_for_prod) + + # Post upgrade + if interactive: + question = f"""Your platform is being upgraded from {run_upgrade_from_release.capitalize()}. + +If you run custom Docker images, you must rebuild them now by running the following command in a different shell: + + tutor images build all # list your custom images here + +See the documentation for more information: + + https://docs.tutor.edly.io/install.html#upgrading-to-a-new-open-edx-release + +Press enter when you are ready to continue""" + click.confirm( + fmt.question(question), default=True, abort=True, prompt_suffix=" " ) -@click.command(help="Run all or a selection of configured Open edX services") +def interactive_configuration( + context: click.Context, + interactive: bool, + run_for_prod: t.Optional[bool] = None, +) -> None: + config = tutor_config.load_minimal(context.obj.root) + if interactive: + click.echo(fmt.title("Interactive platform configuration")) + interactive_config.ask_questions( + config, + run_for_prod=run_for_prod, + ) + tutor_config.save_config_file(context.obj.root, config) + config = tutor_config.load_full(context.obj.root) + tutor_env.save(context.obj.root, config) + + +@click.command( + short_help="Perform release-specific upgrade tasks", + help="Perform release-specific upgrade tasks. To perform a full upgrade remember to run `launch`.", +) +@click.option( + "--from", + "from_release", + type=click.Choice(OPENEDX_RELEASE_NAMES), +) +@click.pass_context +def upgrade(context: click.Context, from_release: t.Optional[str]) -> None: + fmt.echo_alert( + "This command only performs a partial upgrade of your Open edX platform. " + "To perform a full upgrade, you should run `tutor local launch` (or `tutor dev launch` " + "in development)." 
+ ) + if from_release is None: + from_release = tutor_env.get_env_release(context.obj.root) + if from_release is None: + fmt.echo_info("Your environment is already up-to-date") + else: + upgrade_from(context, from_release) + # We update the environment to update the version + context.invoke(config_save_command) + + +@click.command( + short_help="Run all or a selection of services.", + help="Run all or a selection of services. Docker images will be rebuilt where necessary.", +) +@click.option("--build", is_flag=True, help="Build images on start") @click.option("-d", "--detach", is_flag=True, help="Start in daemon mode") @click.argument("services", metavar="service", nargs=-1) @click.pass_obj -def start(context, detach, services): +def start( + context: BaseComposeContext, + build: bool, + detach: bool, + services: list[str], +) -> None: command = ["up", "--remove-orphans"] + if build: + command.append("--build") if detach: command.append("-d") + # Start services config = tutor_config.load(context.root) - context.docker_compose(context.root, config, *command, *services) + context.job_runner(config).docker_compose(*command, *services) @click.command(help="Stop a running platform") @click.argument("services", metavar="service", nargs=-1) @click.pass_obj -def stop(context, services): +def stop(context: BaseComposeContext, services: list[str]) -> None: config = tutor_config.load(context.root) - context.docker_compose(context.root, config, "stop", *services) + context.job_runner(config).docker_compose("stop", *services) @click.command( @@ -98,21 +269,22 @@ def stop(context, services): ) @click.option("-d", "--detach", is_flag=True, help="Start in daemon mode") @click.argument("services", metavar="service", nargs=-1) -def reboot(detach, services): - stop.callback(services) - start.callback(detach, services) +@click.pass_context +def reboot(context: click.Context, detach: bool, services: list[str]) -> None: + context.invoke(stop, services=services) + context.invoke(start, 
detach=detach, services=services) @click.command( short_help="Restart some components from a running platform.", help="""Specify 'openedx' to restart the lms, cms and workers, or 'all' to -restart all services. Note that this performs a 'docker-compose restart', so new images +restart all services. Note that this performs a 'docker compose restart', so new images may not be taken into account. It is useful for reloading settings, for instance. To fully stop the platform, use the 'reboot' command.""", ) @click.argument("services", metavar="service", nargs=-1) @click.pass_obj -def restart(context, services): +def restart(context: BaseComposeContext, services: list[str]) -> None: config = tutor_config.load(context.root) command = ["restart"] if "all" in services: @@ -120,103 +292,89 @@ def restart(context, services): else: for service in services: if service == "openedx": - if config["RUN_LMS"]: - command += ["lms", "lms-worker"] - if config["RUN_CMS"]: - command += ["cms", "cms-worker"] + command += context.OPENEDX_SERVICES else: command.append(service) - context.docker_compose(context.root, config, *command) + context.job_runner(config).docker_compose(*command) -@click.command(help="Initialise all applications") -@click.option("-l", "--limit", help="Limit initialisation to this service or plugin") -@click.pass_obj -def init(context, limit): - config = tutor_config.load(context.root) - runner = ScriptRunner(context.root, config, context.docker_compose) - scripts.initialise(runner, limit_to=limit) - - -@click.command(help="Create an Open edX user and interactively set their password") -@click.option("--superuser", is_flag=True, help="Make superuser") -@click.option("--staff", is_flag=True, help="Make staff user") -@click.option( - "-p", - "--password", - help="Specify password from the command line. 
If undefined, you will be prompted to input a password", -) -@click.argument("name") -@click.argument("email") -@click.pass_obj -def createuser(context, superuser, staff, password, name, email): - config = tutor_config.load(context.root) - runner = ScriptRunner(context.root, config, context.docker_compose) - command = scripts.create_user_command( - superuser, staff, name, email, password=password - ) - runner.run_job("lms", command) +@jobs.do_group +def do() -> None: + """ + Run a custom job in the right container(s). + """ @click.command( - help="Set a theme for a given domain name. To reset to the default theme , use 'default' as the theme name." -) -@click.argument("theme_name") -@click.argument("domain_names", metavar="domain_name", nargs=-1) -@click.pass_obj -def settheme(context, theme_name, domain_names): - config = tutor_config.load(context.root) - runner = ScriptRunner(context.root, config, context.docker_compose) - for domain_name in domain_names: - scripts.set_theme(theme_name, domain_name, runner) - - -@click.command(help="Import the demo course") -@click.pass_obj -def importdemocourse(context): - config = tutor_config.load(context.root) - runner = ScriptRunner(context.root, config, context.docker_compose) - fmt.echo_info("Importing demo course") - scripts.import_demo_course(runner) - - -@click.command( - short_help="Direct interface to docker-compose.", + short_help="Run a command in a new container", help=( - "Direct interface to docker-compose. This is a wrapper around `docker-compose`. All commands, options and" - " arguments passed to this command will be forwarded to docker-compose." + "Run a command in a new container. This is a wrapper around `docker compose run`. Any option or argument passed" + " to this command will be forwarded to docker compose. Thus, you may use `-v` or `-p` to mount volumes and" + " expose ports." 
), context_settings={"ignore_unknown_options": True}, - name="dc", ) @click.argument("args", nargs=-1, required=True) -@click.pass_obj -def dc_command(context, args): - config = tutor_config.load(context.root) - context.docker_compose(context.root, config, *args) +@click.pass_context +def run( + context: click.Context, + args: list[str], +) -> None: + extra_args = ["--rm"] + if not utils.is_a_tty(): + extra_args.append("-T") + context.invoke(dc_command, command="run", args=[*extra_args, *args]) @click.command( - short_help="Run a command in a new container", - help=( - "Run a command in a new container. This is a wrapper around `docker-compose run`. Any option or argument passed" - " to this command will be forwarded to docker-compose. Thus, you may use `-v` or `-p` to mount volumes and" - " expose ports." - ), - context_settings={"ignore_unknown_options": True}, + name="copyfrom", + help="Copy files/folders from a container directory to the local filesystem.", ) -@click.argument("args", nargs=-1, required=True) -def run(args): - command = ["run", "--rm"] - if not utils.is_a_tty(): - command.append("-T") - dc_command.callback([*command, *args]) +@click.argument("service") +@click.argument("container_path") +@click.argument( + "host_path", + type=click.Path(dir_okay=True, file_okay=False, resolve_path=True), +) +@click.pass_obj +def copyfrom( + context: BaseComposeContext, service: str, container_path: str, host_path: str +) -> None: + # Path management + container_root_path = "/tmp/mount" + container_dst_path = container_root_path + if not os.path.exists(host_path): + # Emulate cp semantics, where if the destination path does not exist + # then we copy to its parent and rename to the destination folder + container_dst_path += "/" + os.path.basename(host_path) + host_path = os.path.dirname(host_path) + if not os.path.exists(host_path): + raise TutorError( + f"Cannot create directory {host_path}. No such file or directory." 
+ ) + + # cp/mv commands + command = f"cp --recursive --preserve {container_path} {container_dst_path}" + config = tutor_config.load(context.root) + runner = context.job_runner(config) + runner.docker_compose( + "run", + "--rm", + "--no-deps", + "--user=0", + f"--volume={host_path}:{container_root_path}", + service, + "sh", + "-e", + "-c", + command, + ) @click.command( short_help="Run a command in a running container", help=( - "Run a command in a running container. This is a wrapper around `docker-compose exec`. Any option or argument" + "Run a command in a running container. This is a wrapper around `docker compose exec`. Any option or argument" " passed to this command will be forwarded to docker-compose. Thus, you may use `-e` to manually define" " environment variables." ), @@ -224,37 +382,74 @@ def run(args): name="exec", ) @click.argument("args", nargs=-1, required=True) -def execute(args): - dc_command.callback(["exec", *args]) +@click.pass_context +def execute(context: click.Context, args: list[str]) -> None: + context.invoke(dc_command, command="exec", args=args) @click.command( short_help="View output from containers", - help="View output from containers. This is a wrapper around `docker-compose logs`.", + help="View output from containers. 
This is a wrapper around `docker compose logs`.", ) @click.option("-f", "--follow", is_flag=True, help="Follow log output") @click.option("--tail", type=int, help="Number of lines to show from each container") @click.argument("service", nargs=-1) -def logs(follow, tail, service): - command = ["logs"] +@click.pass_context +def logs(context: click.Context, follow: bool, tail: bool, service: str) -> None: + args = [] if follow: - command += ["--follow"] + args.append("--follow") if tail is not None: - command += ["--tail", str(tail)] - command += service - dc_command.callback(command) + args += ["--tail", str(tail)] + args += service + context.invoke(dc_command, command="logs", args=args) + + +@click.command(help="Print status information for containers") +@click.pass_context +def status(context: click.Context) -> None: + context.invoke(dc_command, command="ps") + + +@click.command( + short_help="Direct interface to docker compose.", + help=( + "Direct interface to docker compose. This is a wrapper around `docker compose`. Most commands, options and" + " arguments passed to this command will be forwarded as-is to docker compose." 
+ ), + context_settings={"ignore_unknown_options": True}, + name="dc", +) +@click.argument("command") +@click.argument("args", nargs=-1) +@click.pass_obj +def dc_command( + context: BaseComposeContext, + command: str, + args: list[str], +) -> None: + config = tutor_config.load(context.root) + context.job_runner(config).docker_compose(command, *args) -def add_commands(command_group): +hooks.Filters.ENV_TEMPLATE_VARIABLES.add_item(("iter_mounts", bindmount.iter_mounts)) + + +def add_commands(command_group: click.Group) -> None: + command_group.add_command(launch) + command_group.add_command(upgrade) command_group.add_command(start) command_group.add_command(stop) command_group.add_command(restart) command_group.add_command(reboot) - command_group.add_command(init) - command_group.add_command(createuser) - command_group.add_command(importdemocourse) - command_group.add_command(settheme) command_group.add_command(dc_command) command_group.add_command(run) + command_group.add_command(copyfrom) command_group.add_command(execute) command_group.add_command(logs) + command_group.add_command(status) + + @hooks.Actions.PLUGINS_LOADED.add() + def _add_do_commands() -> None: + jobs.add_job_commands(do) + command_group.add_command(do) diff --git a/tutor/commands/config.py b/tutor/commands/config.py index 76bf2f23f2..068b8702a6 100644 --- a/tutor/commands/config.py +++ b/tutor/commands/config.py @@ -1,11 +1,18 @@ +from __future__ import annotations + +import json +import typing as t + import click +import click.shell_completion -from .. import config as tutor_config -from .. import env -from .. import exceptions -from .. import fmt -from .. import interactive as interactive_config -from .. 
import serialize +from tutor import config as tutor_config +from tutor import env, exceptions, fmt, hooks +from tutor import interactive as interactive_config +from tutor import serialize +from tutor.commands.context import Context +from tutor.commands.params import ConfigLoaderParam +from tutor.types import Config, ConfigValue @click.group( @@ -13,23 +20,80 @@ short_help="Configure Open edX", help="""Configure Open edX and store configuration values in $TUTOR_ROOT/config.yml""", ) -def config_command(): +def config_command() -> None: pass -class YamlParamType(click.ParamType): - name = "yaml" +class ConfigKeyParamType(ConfigLoaderParam): + name = "configkey" + + def shell_complete( + self, ctx: click.Context, param: click.Parameter, incomplete: str + ) -> list[click.shell_completion.CompletionItem]: + return [ + click.shell_completion.CompletionItem(key) + for key, _value in self._shell_complete_config_items(incomplete) + ] + + def _shell_complete_config_items( + self, incomplete: str + ) -> list[tuple[str, ConfigValue]]: + return [ + (key, value) + for key, value in self._candidate_config_items() + if key.startswith(incomplete) + ] + + def _candidate_config_items(self) -> t.Iterable[tuple[str, ConfigValue]]: + yield from self.config.items() + + +class ConfigKeyValParamType(ConfigKeyParamType): + """ + Parser for = command line arguments. + """ + + name = "configkeyval" + + def convert(self, value: str, param: t.Any, ctx: t.Any) -> tuple[str, t.Any]: + result = serialize.parse_key_value(value) + if result is None: + self.fail(f"'{value}' is not of the form 'key=value'.", param, ctx) + return result + + def shell_complete( + self, ctx: click.Context, param: click.Parameter, incomplete: str + ) -> list[click.shell_completion.CompletionItem]: + """ + Nice and friendly = auto-completion. + """ + if "=" not in incomplete: + # Auto-complete with '='. Note the single quotes which allow users to + # further auto-complete later. 
+ return [ + click.shell_completion.CompletionItem(f"'{key}='") + for key, value in self._shell_complete_config_items(incomplete) + ] + if incomplete.endswith("="): + # raise ValueError(f"incomplete: <{incomplete}>") + # Auto-complete with '=' + return [ + click.shell_completion.CompletionItem(f"{key}={json.dumps(value)}") + for key, value in self._shell_complete_config_items(incomplete[:-1]) + ] + # Else, don't bother + return [] + + +class ConfigListKeyValParamType(ConfigKeyValParamType): + """ + Same as the parent class, but for keys of type `list`. + """ - def convert(self, value, param, ctx): - try: - k, v = value.split("=") - except ValueError: - self.fail("'{}' is not of the form 'key=value'.".format(value), param, ctx) - if not v: - # Empty strings are incorrectly interpreted as null values, which is - # incorrect. - v = "''" - return k, serialize.parse(v) + def _candidate_config_items(self) -> t.Iterable[tuple[str, ConfigValue]]: + for key, val in self.config.items(): + if isinstance(val, list): + yield key, val @click.command(help="Create and save configuration interactively") @@ -38,77 +102,162 @@ def convert(self, value, param, ctx): "-s", "--set", "set_vars", - type=YamlParamType(), + type=ConfigKeyValParamType(), multiple=True, metavar="KEY=VAL", help="Set a configuration value (can be used multiple times)", ) +@click.option( + "-a", + "--append", + "append_vars", + type=ConfigListKeyValParamType(), + multiple=True, + metavar="KEY=VAL", + help="Append an item to a configuration value of type list. The value will only be added if it is not already present. 
(can be used multiple times)", +) +@click.option( + "-A", + "--remove", + "remove_vars", + type=ConfigListKeyValParamType(), + multiple=True, + metavar="KEY=VAL", + help="Remove an item from a configuration value of type list (can be used multiple times)", +) @click.option( "-U", "--unset", "unset_vars", multiple=True, + type=ConfigKeyParamType(), help="Remove a configuration value (can be used multiple times)", ) +@click.option( + "-e", "--env-only", "env_only", is_flag=True, help="Skip updating config.yml" +) +@click.option( + "-c", + "--clean", + "clean_env", + is_flag=True, + help="Remove everything in the env directory before save", +) @click.pass_obj -def save(context, interactive, set_vars, unset_vars): - config, defaults = interactive_config.load_all( - context.root, interactive=interactive - ) +def save( + context: Context, + interactive: bool, + set_vars: list[tuple[str, t.Any]], + append_vars: list[tuple[str, t.Any]], + remove_vars: list[tuple[str, t.Any]], + unset_vars: list[str], + env_only: bool, + clean_env: bool, +) -> None: + config = tutor_config.load_minimal(context.root) + + # Add question to interactive prompt, such that the environment is automatically + # deleted if necessary in interactive mode. 
+    @hooks.Actions.CONFIG_INTERACTIVE.add()
+    def _prompt_for_env_deletion(_config: Config) -> None:
+        if clean_env:
+            run_clean = click.confirm(
+                fmt.question("Remove existing Tutor environment directory?"),
+                prompt_suffix=" ",
+                default=True,
+            )
+            if run_clean:
+                env.delete_env_dir(context.root)
+
+    if interactive:
+        interactive_config.ask_questions(config)
+    elif clean_env:
+        env.delete_env_dir(context.root)
     if set_vars:
-        tutor_config.merge(config, dict(set_vars), force=True)
+        for key, value in set_vars:
+            config[key] = env.render_unknown(config, value)
+    if append_vars:
+        config_defaults = tutor_config.load_defaults()
+        for key, value in append_vars:
+            if key not in config:
+                config[key] = config.get(
+                    key, config_defaults.get(key, [])
+                )
+            values = config[key]
+            if not isinstance(values, list):
+                raise exceptions.TutorError(
+                    f"Could not append value to '{key}': current setting is of type '{values.__class__.__name__}', expected list."
+                )
+            if not isinstance(value, str):
+                raise exceptions.TutorError(
+                    f"Could not append value to '{key}': appended value is of type '{value.__class__.__name__}', expected str."
+                )
+            if value not in values:
+                values.append(value)
+    if remove_vars:
+        for key, value in remove_vars:
+            values = config.get(key, [])
+            if not isinstance(values, list):
+                raise exceptions.TutorError(
+                    f"Could not remove value from '{key}': current setting is of type '{values.__class__.__name__}', expected list."
+ ) + while value in values: + values.remove(value) for key in unset_vars: config.pop(key, None) - tutor_config.save_config_file(context.root, config) - tutor_config.merge(config, defaults) - env.save(context.root, config) - - -@click.command(help="Render a template folder with eventual extra configuration files") -@click.option( - "-x", - "--extra-config", - "extra_configs", - multiple=True, - type=click.Path(exists=True, resolve_path=True, dir_okay=False), - help="Load extra configuration file (can be used multiple times)", -) -@click.argument("src", type=click.Path(exists=True, resolve_path=True)) -@click.argument("dst") -@click.pass_obj -def render(context, extra_configs, src, dst): - config = tutor_config.load(context.root) - for extra_config in extra_configs: - tutor_config.merge( - config, tutor_config.load_config_file(extra_config), force=True - ) + if not env_only: + tutor_config.save_config_file(context.root, config) - renderer = env.Renderer(config, [src]) - renderer.render_all_to(dst) - fmt.echo_info("Templates rendered to {}".format(dst)) + # Reload configuration, without version checking + config = tutor_config.load_full(context.root) + env.save(context.root, config) @click.command(help="Print the project root") @click.pass_obj -def printroot(context): +def printroot(context: Context) -> None: click.echo(context.root) @click.command(help="Print a configuration value") -@click.argument("key") +@click.argument("key", type=ConfigKeyParamType()) @click.pass_obj -def printvalue(context, key): +def printvalue(context: Context, key: str) -> None: config = tutor_config.load(context.root) try: - # Note that this will incorrectly print None values - fmt.echo(config[key]) + value = config[key] except KeyError as e: - raise exceptions.TutorError( - "Missing configuration value: {}".format(key) - ) from e + raise exceptions.TutorError(f"Missing configuration value: {key}") from e + fmt.echo(serialize.str_format(value)) + + +@click.group(name="patches", 
help="Commands related to patches in configurations") +def patches_command() -> None: + pass + + +@click.command(name="list", help="Print all available patches") +@click.pass_obj +def patches_list(context: Context) -> None: + config = tutor_config.load(context.root) + renderer = env.PatchRenderer(config) + renderer.print_patches_locations() + + +@click.command(name="show", help="Print the rendered contents of a template patch") +@click.argument("name") +@click.pass_obj +def patches_show(context: Context, name: str) -> None: + config = tutor_config.load_full(context.root) + renderer = env.Renderer(config) + rendered = renderer.patch(name) + if rendered: + print(rendered) config_command.add_command(save) -config_command.add_command(render) config_command.add_command(printroot) config_command.add_command(printvalue) +patches_command.add_command(patches_list) +patches_command.add_command(patches_show) +config_command.add_command(patches_command) diff --git a/tutor/commands/context.py b/tutor/commands/context.py index dec153133e..abd4ea0352 100644 --- a/tutor/commands/context.py +++ b/tutor/commands/context.py @@ -1,4 +1,30 @@ -# pylint: disable=too-few-public-methods +from tutor.tasks import BaseTaskRunner +from tutor.types import Config + + class Context: - def __init__(self, root): + """ + Context object that is passed to all subcommands. + + The project `root` is passed to all subcommands of `tutor`; that's because + it is defined as an argument of the top-level command. For instance: + + $ tutor --root=... local run ... + """ + + def __init__(self, root: str) -> None: self.root = root + + +class BaseTaskContext(Context): + """ + Specialized context that subcommands may use. + + For instance `dev`, `local` and `k8s` define custom runners to run jobs. + """ + + def job_runner(self, config: Config) -> BaseTaskRunner: + """ + Return a runner capable of running docker-compose/kubectl commands. 
+ """ + raise NotImplementedError diff --git a/tutor/commands/dev.py b/tutor/commands/dev.py index 825f8c4ac7..659e303dab 100644 --- a/tutor/commands/dev.py +++ b/tutor/commands/dev.py @@ -1,66 +1,67 @@ -import os +from __future__ import annotations + +import typing as t import click -from . import compose -from .context import Context -from .. import config as tutor_config -from .. import env as tutor_env -from .. import fmt -from .. import utils - - -# pylint: disable=too-few-public-methods -class DevContext(Context): - @staticmethod - def docker_compose(root, config, *command): - args = [] - for folder in ["local", "dev"]: - # Add docker-compose.yml and docker-compose.override.yml (if it exists) - # from "local" and "dev" folders (but not docker-compose.prod.yml) - args += [ - "-f", - tutor_env.pathjoin(root, folder, "docker-compose.yml"), - ] - override_path = tutor_env.pathjoin( - root, folder, "docker-compose.override.yml" - ) - if os.path.exists(override_path): - args += ["-f", override_path] - return utils.docker_compose( - *args, - "--project-name", - config["DEV_PROJECT_NAME"], - *command, - ) +from tutor import env as tutor_env +from tutor import hooks +from tutor.commands import compose +from tutor.types import Config, get_typed + + +class DevTaskRunner(compose.ComposeTaskRunner): + def __init__(self, root: str, config: Config): + """ + Load docker-compose files from dev/ and local/ + """ + super().__init__(root, config) + self.project_name = get_typed(self.config, "DEV_PROJECT_NAME", str) + self.docker_compose_files += [ + tutor_env.pathjoin(self.root, "local", "docker-compose.yml"), + tutor_env.pathjoin(self.root, "dev", "docker-compose.yml"), + tutor_env.pathjoin(self.root, "local", "docker-compose.override.yml"), + tutor_env.pathjoin(self.root, "dev", "docker-compose.override.yml"), + ] + self.docker_compose_job_files += [ + tutor_env.pathjoin(self.root, "local", "docker-compose.jobs.yml"), + tutor_env.pathjoin(self.root, "dev", 
"docker-compose.jobs.yml"), + tutor_env.pathjoin(self.root, "local", "docker-compose.jobs.override.yml"), + tutor_env.pathjoin(self.root, "dev", "docker-compose.jobs.override.yml"), + ] + + +class DevContext(compose.BaseComposeContext): + NAME = "dev" + + def job_runner(self, config: Config) -> DevTaskRunner: + return DevTaskRunner(self.root, config) @click.group(help="Run Open edX locally with development settings") @click.pass_context -def dev(context): +def dev(context: click.Context) -> None: context.obj = DevContext(context.obj.root) -@click.command( - help="Run a development server", - context_settings={"ignore_unknown_options": True}, -) -@click.argument("options", nargs=-1, required=False) -@click.argument("service") -@click.pass_obj -def runserver(context, options, service): - config = tutor_config.load(context.root) - if service in ["lms", "cms"]: - port = 8000 if service == "lms" else 8001 - host = config["LMS_HOST"] if service == "lms" else config["CMS_HOST"] - fmt.echo_info( - "The {} service will be available at http://{}:{}".format( - service, host, port - ) - ) - args = ["--service-ports", *options, service] - compose.run.callback(args) - - -dev.add_command(runserver) +@hooks.Actions.COMPOSE_PROJECT_STARTED.add() +def _stop_on_local_start(root: str, config: Config, project_name: str) -> None: + """ + Stop the dev platform as soon as a platform with a different project name is + started. 
+ """ + runner = DevTaskRunner(root, config) + if project_name != runner.project_name: + runner.docker_compose("stop") + + +@hooks.Filters.IMAGES_BUILD_REQUIRED.add() +def _build_openedx_dev_on_launch( + image_names: list[str], context_name: t.Literal["local", "dev"] +) -> list[str]: + if context_name == "dev": + image_names.append("openedx-dev") + return image_names + + compose.add_commands(dev) diff --git a/tutor/commands/images.py b/tutor/commands/images.py index 63bbaf7aae..2a7f53255e 100644 --- a/tutor/commands/images.py +++ b/tutor/commands/images.py @@ -1,37 +1,163 @@ +from __future__ import annotations + +import os +import typing as t + import click -from .. import config as tutor_config -from .. import env as tutor_env -from .. import images -from .. import plugins -from .. import utils - -BASE_IMAGE_NAMES = ["openedx", "forum", "android"] -DEV_IMAGE_NAMES = ["openedx-dev"] -VENDOR_IMAGES = [ - "caddy", - "elasticsearch", - "mongodb", - "mysql", - "nginx", - "redis", - "smtp", +from tutor import bindmount +from tutor import config as tutor_config +from tutor import env as tutor_env +from tutor import exceptions, fmt, hooks, images, utils +from tutor.commands.context import Context +from tutor.commands.params import ConfigLoaderParam +from tutor.core.hooks import Filter +from tutor.types import Config + +BASE_IMAGE_NAMES = [ + ("openedx", "DOCKER_IMAGE_OPENEDX"), + ("permissions", "DOCKER_IMAGE_PERMISSIONS"), ] +@hooks.Filters.IMAGES_BUILD.add() +def _add_core_images_to_build( + build_images: list[tuple[str, t.Union[str, tuple[str, ...]], str, tuple[str, ...]]], + config: Config, +) -> list[tuple[str, t.Union[str, tuple[str, ...]], str, tuple[str, ...]]]: + """ + Add base images to the list of Docker images to build on `tutor build all`. 
+ """ + for image, tag in BASE_IMAGE_NAMES: + build_images.append( + ( + image, + os.path.join("build", image), + tutor_config.get_typed(config, tag, str), + (), + ) + ) + + # Build openedx-dev image + build_images.append( + ( + "openedx-dev", + os.path.join("build", "openedx"), + tutor_config.get_typed(config, "DOCKER_IMAGE_OPENEDX_DEV", str), + ( + "--target=development", + f"--build-arg=APP_USER_ID={utils.get_user_id() or 1000}", + ), + ) + ) + + return build_images + + +@hooks.Filters.IMAGES_PULL.add() +def _add_images_to_pull( + remote_images: list[tuple[str, str]], config: Config +) -> list[tuple[str, str]]: + """ + Add base and vendor images to the list of Docker images to pull on `tutor pull all`. + """ + vendor_images = [ + ("caddy", "DOCKER_IMAGE_CADDY"), + ("elasticsearch", "DOCKER_IMAGE_ELASTICSEARCH"), + ("mongodb", "DOCKER_IMAGE_MONGODB"), + ("mysql", "DOCKER_IMAGE_MYSQL"), + ("redis", "DOCKER_IMAGE_REDIS"), + ("smtp", "DOCKER_IMAGE_SMTP"), + ] + for image, tag_name in vendor_images: + if config.get(f"RUN_{image.upper()}", True): + remote_images.append((image, tutor_config.get_typed(config, tag_name, str))) + for image, tag in BASE_IMAGE_NAMES: + remote_images.append((image, tutor_config.get_typed(config, tag, str))) + return remote_images + + +@hooks.Filters.IMAGES_PUSH.add() +def _add_core_images_to_push( + remote_images: list[tuple[str, str]], config: Config +) -> list[tuple[str, str]]: + """ + Add base images to the list of Docker images to push on `tutor push all`. + """ + for image, tag in BASE_IMAGE_NAMES: + remote_images.append((image, tutor_config.get_typed(config, tag, str))) + return remote_images + + +class ImageNameParam(ConfigLoaderParam): + """ + Convenient auto-completion of image names. 
+ """ + + def shell_complete( + self, ctx: click.Context, param: click.Parameter, incomplete: str + ) -> list[click.shell_completion.CompletionItem]: + results = [] + for name in self.iter_image_names(): + if name.startswith(incomplete): + results.append(click.shell_completion.CompletionItem(name)) + return results + + def iter_image_names(self) -> t.Iterable["str"]: + raise NotImplementedError + + +class BuildImageNameParam(ImageNameParam): + def iter_image_names(self) -> t.Iterable["str"]: + for name, _path, _tag, _args in hooks.Filters.IMAGES_BUILD.iterate(self.config): + yield name + + +class PullImageNameParam(ImageNameParam): + def iter_image_names(self) -> t.Iterable["str"]: + for name, _tag in hooks.Filters.IMAGES_PULL.iterate(self.config): + yield name + + +class PushImageNameParam(ImageNameParam): + def iter_image_names(self) -> t.Iterable["str"]: + for name, _tag in hooks.Filters.IMAGES_PUSH.iterate(self.config): + yield name + + @click.group(name="images", short_help="Manage docker images") -def images_command(): +def images_command() -> None: pass -@click.command( - short_help="Build docker images", - help="Build the docker images necessary for an Open edX platform.", +@click.command() +@click.argument( + "image_names", + metavar="image", + nargs=-1, + type=BuildImageNameParam(), ) -@click.argument("image_names", metavar="image", nargs=-1) @click.option( "--no-cache", is_flag=True, help="Do not use cache when building the image" ) +@click.option( + "--no-registry-cache", + is_flag=True, + help="Do not use registry cache when building the image", +) +@click.option( + "--cache-to-registry", + is_flag=True, + help="Push the build cache to the remote registry. You should only enable this option if you have push rights to the remote registry.", +) +@click.option( + "--output", + "docker_output", + # Export image to docker. This is necessary to make the image available to docker-compose. + # The `--load` option is a shorthand for `--output=type=docker`. 
+ default="type=docker", + help="Same as `docker build --output=...`.", +) @click.option( "-a", "--build-arg", @@ -45,8 +171,36 @@ def images_command(): multiple=True, help="Set a custom host-to-IP mapping (host:ip).", ) +@click.option( + "--target", + help="Set the target build stage to build.", +) +@click.option( + "-d", + "--docker-arg", + "docker_args", + multiple=True, + help="Set extra options for docker build command.", +) @click.pass_obj -def build(context, image_names, no_cache, build_args, add_hosts): +def build( + context: Context, + image_names: list[str], + no_cache: bool, + no_registry_cache: bool, + cache_to_registry: bool, + docker_output: str, + build_args: list[str], + add_hosts: list[str], + target: str, + docker_args: list[str], +) -> None: + """ + Build docker images + + Build the docker images necessary for an Open edX platform. By default, the remote + registry cache will be used for better performance. + """ config = tutor_config.load(context.root) command_args = [] if no_cache: @@ -55,96 +209,140 @@ def build(context, image_names, no_cache, build_args, add_hosts): command_args += ["--build-arg", build_arg] for add_host in add_hosts: command_args += ["--add-host", add_host] + if target: + command_args += ["--target", target] + if docker_output: + command_args.append(f"--output={docker_output}") + if docker_args: + command_args += docker_args + # Build context mounts + build_contexts = get_image_build_contexts(config) + for image in image_names: - build_image(context.root, config, image, *command_args) + for name, path, tag, custom_args in find_images_to_build(config, image): + image_build_args = [*command_args, *custom_args] + + # Registry cache + if not no_registry_cache: + image_build_args.append(f"--cache-from=type=registry,ref={tag}-cache") + if cache_to_registry: + image_build_args.append( + f"--cache-to=type=registry,mode=max,ref={tag}-cache,image-manifest=true" + ) + + # Build contexts + for host_path, stage_name in 
build_contexts.get(name, []): + fmt.echo_info( + f"Adding {host_path} to the build context '{stage_name}' of image '{image}'" + ) + image_build_args.append(f"--build-context={stage_name}={host_path}") + + # Build + images.build( + tutor_env.pathjoin(context.root, path), + tag, + *image_build_args, + ) + + +def get_image_build_contexts(config: Config) -> dict[str, list[tuple[str, str]]]: + """ + Return all build contexts for all images. + + A build context is to bind-mount a host directory at build-time. This is useful, for + instance to build a Docker image with a local git checkout of a remote repo. + + Users configure bind-mounts with the `MOUNTS` config setting. Plugins can then + automatically add build contexts based on these values. + """ + build_contexts: dict[str, list[tuple[str, str]]] = {} + for user_mount in bindmount.get_mounts(config): + for image_name, stage_name in hooks.Filters.IMAGES_BUILD_MOUNTS.iterate( + user_mount + ): + if image_name not in build_contexts: + build_contexts[image_name] = [] + build_contexts[image_name].append((user_mount, stage_name)) + return build_contexts @click.command(short_help="Pull images from the Docker registry") -@click.argument("image_names", metavar="image", nargs=-1) +@click.argument("image_names", metavar="image", type=PullImageNameParam(), nargs=-1) @click.pass_obj -def pull(context, image_names): - config = tutor_config.load(context.root) +def pull(context: Context, image_names: list[str]) -> None: + config = tutor_config.load_full(context.root) for image in image_names: - pull_image(config, image) + for tag in find_remote_image_tags(config, hooks.Filters.IMAGES_PULL, image): + images.pull(tag) @click.command(short_help="Push images to the Docker registry") -@click.argument("image_names", metavar="image", nargs=-1) +@click.argument("image_names", metavar="image", type=PushImageNameParam(), nargs=-1) @click.pass_obj -def push(context, image_names): - config = tutor_config.load(context.root) +def push(context: 
Context, image_names: list[str]) -> None: + config = tutor_config.load_full(context.root) for image in image_names: - push_image(config, image) + for tag in find_remote_image_tags(config, hooks.Filters.IMAGES_PUSH, image): + images.push(tag) @click.command(short_help="Print tag associated to a Docker image") -@click.argument("image_names", metavar="image", nargs=-1) +@click.argument("image_names", metavar="image", type=BuildImageNameParam(), nargs=-1) @click.pass_obj -def printtag(context, image_names): - config = tutor_config.load(context.root) +def printtag(context: Context, image_names: list[str]) -> None: + config = tutor_config.load_full(context.root) for image in image_names: - for _img, tag in iter_images(config, image, BASE_IMAGE_NAMES): - print(tag) - for _img, tag in iter_plugin_images(config, image, "build-image"): + for _name, _path, tag, _args in find_images_to_build(config, image): print(tag) -def build_image(root, config, image, *args): - # Build base images - for img, tag in iter_images(config, image, BASE_IMAGE_NAMES): - images.build(tutor_env.pathjoin(root, "build", img), tag, *args) - - # Build plugin images - for plugin, img, tag in iter_plugin_images(config, image, "build-image"): - images.build( - tutor_env.pathjoin(root, "plugins", plugin, "build", img), tag, *args - ) - - # Build dev images with user id argument - dev_build_arg = ["--build-arg", "USERID={}".format(utils.get_user_id())] - for img, tag in iter_images(config, image, DEV_IMAGE_NAMES): - images.build(tutor_env.pathjoin(root, "build", img), tag, *dev_build_arg, *args) - - -def pull_image(config, image): - for _img, tag in iter_images(config, image, all_image_names(config)): - images.pull(tag) - for _plugin, _img, tag in iter_plugin_images(config, image, "remote-image"): - images.pull(tag) - +def find_images_to_build( + config: Config, image: str +) -> t.Iterator[tuple[str, str, str, tuple[str, ...]]]: + """ + Iterate over all images to build. 
-def push_image(config, image): - for _img, tag in iter_images(config, image, BASE_IMAGE_NAMES): - images.push(tag) - for _plugin, _img, tag in iter_plugin_images(config, image, "remote-image"): - images.push(tag) + If no corresponding image is found, raise exception. + Yield: (name, path, tag, build args) + """ + found = False + for name, path, tag, args in hooks.Filters.IMAGES_BUILD.iterate(config): + relative_path = path if isinstance(path, str) else os.path.join(*path) + if image in [name, "all"]: + found = True + tag = tutor_env.render_str(config, tag) + yield (name, relative_path, tag, args) -def iter_images(config, image, image_list): - for img in image_list: - if image in [img, "all"]: - tag = images.get_tag(config, img) - yield img, tag + if not found: + raise ImageNotFoundError(image) -def iter_plugin_images(config, image, hook_name): - for plugin, hook in plugins.iter_hooks(config, hook_name): - for img, tag in hook.items(): - if image in [img, "all"]: - tag = tutor_env.render_str(config, tag) - yield plugin, img, tag +def find_remote_image_tags( + config: Config, + filtre: Filter[list[tuple[str, str]], [Config]], + image: str, +) -> t.Iterator[str]: + """ + Iterate over all images to push or pull. + If no corresponding image is found, raise exception. 
-def all_image_names(config): - return BASE_IMAGE_NAMES + vendor_image_names(config) + Yield: tag + """ + all_remote_images = filtre.iterate(config) + found = False + for name, tag in all_remote_images: + if image in [name, "all"]: + found = True + yield tutor_env.render_str(config, tag) + if not found: + raise ImageNotFoundError(image) -def vendor_image_names(config): - vendor_images = VENDOR_IMAGES[:] - for image in VENDOR_IMAGES: - if not config.get("RUN_" + image.upper(), True): - vendor_images.remove(image) - return vendor_images +class ImageNotFoundError(exceptions.TutorError): + def __init__(self, image_name: str): + super().__init__(f"Image '{image_name}' could not be found") images_command.add_command(build) diff --git a/tutor/commands/jobs.py b/tutor/commands/jobs.py new file mode 100644 index 0000000000..7510a83b31 --- /dev/null +++ b/tutor/commands/jobs.py @@ -0,0 +1,401 @@ +""" +Common jobs that must be added both to local, dev and k8s commands. +""" + +from __future__ import annotations + +import functools +import shlex +import typing as t + +import click +from typing_extensions import ParamSpec + +from tutor import config as tutor_config +from tutor import env, fmt, hooks +from tutor.hooks import priorities + + +class DoGroup(click.Group): + """ + A Click group that prints subcommands under 'Jobs' instead of 'Commands' when we run + `.. do --help`. Hackish but it works. + """ + + def get_help(self, ctx: click.Context) -> str: + return super().get_help(ctx).replace("Commands:\n", "Jobs:\n") + + +# A convenient easy-to-use decorator for creating `do` commands. +do_group = click.group(cls=DoGroup, subcommand_metavar="JOB [ARGS]...") + + +@hooks.Actions.CORE_READY.add() +def _add_core_init_tasks() -> None: + """ + Declare core init scripts at runtime. + + The context is important, because it allows us to select the init scripts based on + the --limit argument. 
+ """ + with hooks.Contexts.app("mysql").enter(): + hooks.Filters.CLI_DO_INIT_TASKS.add_item( + ("mysql", env.read_core_template_file("jobs", "init", "mysql.sh")) + ) + with hooks.Contexts.app("lms").enter(): + hooks.Filters.CLI_DO_INIT_TASKS.add_item( + ( + "lms", + env.read_core_template_file("jobs", "init", "mounted-directories.sh"), + ), + # If edx-platform is mounted, then we may need to perform some setup + # before other initialization scripts can be run. + priority=priorities.HIGH, + ) + hooks.Filters.CLI_DO_INIT_TASKS.add_item( + ("lms", env.read_core_template_file("jobs", "init", "lms.sh")) + ) + with hooks.Contexts.app("cms").enter(): + hooks.Filters.CLI_DO_INIT_TASKS.add_item( + ("cms", env.read_core_template_file("jobs", "init", "cms.sh")) + ) + + +@click.command("init", help="Initialise all applications") +@click.option("-l", "--limit", help="Limit initialisation to this service or plugin") +def initialise(limit: t.Optional[str]) -> t.Iterator[tuple[str, str]]: + fmt.echo_info("Initialising all services...") + filter_context = hooks.Contexts.app(limit).name if limit else None + + for service, task in hooks.Filters.CLI_DO_INIT_TASKS.iterate_from_context( + filter_context + ): + fmt.echo_info(f"Running init task in {service}") + yield service, task + + fmt.echo_info("All services initialised.") + + +@click.command(help="Create an Open edX user and interactively set their password") +@click.option("--superuser", is_flag=True, help="Make superuser") +@click.option("--staff", is_flag=True, help="Make staff user") +@click.option( + "-p", + "--password", + help="Specify password from the command line. 
If undefined, you will be prompted to input a password", + prompt=True, + hide_input=True, +) +@click.argument("name") +@click.argument("email") +def createuser( + superuser: str, + staff: bool, + password: str, + name: str, + email: str, +) -> t.Iterable[tuple[str, str]]: + """ + Create an Open edX user + + Password can be passed as an option or will be set interactively. + """ + yield ("lms", create_user_template(superuser, staff, name, email, password)) + + +def create_user_template( + superuser: str, staff: bool, username: str, email: str, password: str +) -> str: + opts = "" + if superuser: + opts += " --superuser" + if staff: + opts += " --staff" + return f""" +./manage.py lms manage_user {opts} {username} {email} +./manage.py lms shell -c " +from django.contrib.auth import get_user_model +u = get_user_model().objects.get(username='{username}') +u.set_password('{password}') +u.save()" +""" + + +@click.command(help="Import the demo course") +@click.option( + "-r", + "--repo", + default="https://github.com/openedx/openedx-demo-course", + show_default=True, + help="Git repository that contains the course to be imported", +) +@click.option( + "-d", + "--repo-dir", + default="", + show_default=True, + help="Git relative subdirectory to import data from. If unspecified, will default to the directory containing course.xml", +) +@click.option( + "-v", + "--version", + help="Git branch, tag or sha1 identifier. If unspecified, will default to the value of the OPENEDX_COMMON_VERSION setting.", +) +def importdemocourse( + repo: str, repo_dir: str, version: t.Optional[str] +) -> t.Iterable[tuple[str, str]]: + version = version or "{{ OPENEDX_COMMON_VERSION }}" + template = f""" +# Clone the repo +git clone {repo} --branch {version} --depth 1 /tmp/course + +# Determine root directory for course import. If one is provided, use that. +# Otherwise, use the directory containing course.xml, failing if there isn't exactly one. 
+if [ -n "{repo_dir}" ] ; then + course_root=/tmp/course/{repo_dir} +else + course_xml_first="$(find /tmp/course -name course.xml | head -n 1)" + course_xml_extra="$(find /tmp/course -name course.xml | tail -n +2)" + echo "INFO: Found course.xml files(s): $course_xml_first $course_xml_extra" + if [ -z "$course_xml_first" ] ; then + echo "ERROR: Could not find course.xml. Are you sure this is the right repository?" + exit 1 + fi + if [ -n "$course_xml_extra" ] ; then + echo "ERROR: Found multiple course.xml files--course root is ambiguous!" + echo " Please specify a course root dir (relative to repo root) using --repo-dir." + exit 1 + fi + course_root="$(dirname "$course_xml_first")" +fi +echo "INFO: Will import course data at: $course_root" && echo + +# Import into CMS +python ./manage.py cms import ../data "$course_root" + +# Re-index courses +./manage.py cms reindex_course --all --setup""" + yield ("cms", template) + + +@click.command(help="Import the demo content libraries") +@click.argument("owner_username") +@click.option( + "-r", + "--repo", + default="https://github.com/openedx/openedx-demo-course", + show_default=True, + help="Git repository that contains the library/libraries to be imported", +) +@click.option( + "-v", + "--version", + help="Git branch, tag or sha1 identifier. If unspecified, will default to the value of the OPENEDX_COMMON_VERSION setting.", +) +def importdemolibraries( + owner_username: str, repo: str, version: t.Optional[str] +) -> t.Iterable[tuple[str, str]]: + version = version or "{{ OPENEDX_COMMON_VERSION }}" + template = f""" +# Clone the repo +git clone {repo} --branch {version} --depth 1 /tmp/library + +# Fail loudly if: +# * there no library.xml files, or +# * any library.xml is not within a directory named "library/" (upstream edx-platform expectation). +if ! find /tmp/library -name library.xml | grep -q "." ; then + echo "ERROR: No library.xml files found in repository. Are you sure this is the right repository and version?" 
+ exit 1 +fi + +# For every library.xml file, create a tar of its parent directory, and import into CMS. +for lib_root in $(find /tmp/library -name library.xml | xargs dirname) ; do + echo "INFO: Will import library at $lib_root" + if [ "$(basename "$lib_root")" != "library" ] ; then + echo "ERROR: can only import library.xml files that are within a directory named 'library'" + exit 1 + fi + rm -rf /tmp/library.tar.gz + ( cd "$(dirname "$lib_root")" && tar czvf /tmp/library.tar.gz library ) + yes | ./manage.py cms import_content_library /tmp/library.tar.gz {owner_username} +done""" + yield ("cms", template) + + +@click.command( + name="print-edx-platform-setting", + help="Print the value of an edx-platform Django setting.", +) +@click.argument("setting") +@click.option( + "-s", + "--service", + type=click.Choice(["lms", "cms"]), + default="lms", + show_default=True, + help="Service to fetch the setting from", +) +def print_edx_platform_setting( + setting: str, service: str +) -> t.Iterable[tuple[str, str]]: + command = f"./manage.py {service} shell -c 'from django.conf import settings; print(settings.{setting})'" + yield (service, command) + + +@click.command() +@click.option( + "-d", + "--domain", + "domains", + multiple=True, + help=( + "Limit the theme to these domain names. By default, the theme is " + "applied to the LMS and the CMS, both in development and production mode" + ), +) +@click.argument("theme_name") +def settheme(domains: list[str], theme_name: str) -> t.Iterable[tuple[str, str]]: + """ + Assign a theme to the LMS and the CMS. + + To reset to the default theme , use 'default' as the theme name. + """ + yield ("lms", set_theme_template(theme_name, domains)) + + +def set_theme_template(theme_name: str, domain_names: list[str]) -> str: + """ + For each domain, get or create a Site object and assign the selected theme. 
+ """ + # Note that there are no double quotes " in this piece of code + python_command = """ +import sys +from django.contrib.sites.models import Site +def assign_theme(name, domain): + print('Assigning theme', name, 'to', domain) + if len(domain) > 50: + sys.stderr.write( + 'Assigning a theme to a site with a long (> 50 characters) domain name.' + ' The displayed site name will be truncated to 50 characters.\\n' + ) + site, _ = Site.objects.get_or_create(domain=domain) + if not site.name: + name_max_length = Site._meta.get_field('name').max_length + site.name = domain[:name_max_length] + site.save() + site.themes.all().delete() + if name != 'default': + site.themes.create(theme_dir_name=name) +""" + domain_names = domain_names or [ + "{{ LMS_HOST }}", + "{{ LMS_HOST }}:8000", + "{{ CMS_HOST }}", + "{{ CMS_HOST }}:8001", + "{{ PREVIEW_LMS_HOST }}", + "{{ PREVIEW_LMS_HOST }}:8000", + ] + for domain_name in domain_names: + python_command += f"assign_theme('{theme_name}', '{domain_name}')\n" + return f'./manage.py lms shell -c "{python_command}"' + + +@click.command(context_settings={"ignore_unknown_options": True}) +@click.argument("args", nargs=-1) +def sqlshell(args: list[str]) -> t.Iterable[tuple[str, str]]: + """ + Open an SQL shell as root + + Extra arguments will be passed to the `mysql` command verbatim. For instance, to + show tables from the "openedx" database, run `do sqlshell openedx -e 'show tables'`. + """ + command = "mysql --user={{ MYSQL_ROOT_USERNAME }} --password={{ MYSQL_ROOT_PASSWORD }} --host={{ MYSQL_HOST }} --port={{ MYSQL_PORT }} --default-character-set=utf8mb4" + if args: + command += " " + shlex.join(args) # pylint: disable=protected-access + yield ("lms", command) + + +def add_job_commands(do_command_group: click.Group) -> None: + """ + This is meant to be called with the `local/dev/k8s do` group commands, to add the + different `do` subcommands. 
+ """ + for subcommand in hooks.Filters.CLI_DO_COMMANDS.iterate(): + assert isinstance(subcommand, click.Command) + do_command_group.add_command(subcommand) + + +@hooks.Actions.PLUGINS_LOADED.add() +def _patch_do_commands_callbacks() -> None: + """ + After plugins have been loaded, patch `do` subcommands such that their output is + forwarded to `do_callback`. + + This function is not called as part of add_job_commands because subcommands must be + patched just once. + """ + for subcommand in hooks.Filters.CLI_DO_COMMANDS.iterate(): + if not isinstance(subcommand, click.Command): + raise ValueError( + f"Command {subcommand} which was added to the CLI_DO_COMMANDS filter must be an instance of click.Command" + ) + # Modify the subcommand callback such that job results are processed by do_callback + if subcommand.callback is None: + raise ValueError("Cannot patch None callback") + if subcommand.name is None: + raise ValueError("Defined job with None name") + subcommand.callback = _patch_callback(subcommand.name, subcommand.callback) + + +P = ParamSpec("P") + + +def _patch_callback( + job_name: str, func: t.Callable[P, t.Iterable[tuple[str, str]]] +) -> t.Callable[P, None]: + """ + Modify a subcommand callback function such that its results are processed by `do_callback`. + """ + + def new_callback(*args: P.args, **kwargs: P.kwargs) -> None: + hooks.Actions.DO_JOB.do(job_name, *args, **kwargs) + do_callback(func(*args, **kwargs)) + + # Make the new callback behave like the old one + functools.update_wrapper(new_callback, func) + + return new_callback + + +def do_callback(service_commands: t.Iterable[tuple[str, str]]) -> None: + """ + This function must be added as a callback to all `do` subcommands. + + `do` subcommands don't actually run any task. They just yield tuples of (service + name, unrendered script string). This function is responsible for actually running + the scripts. 
It does the following: + + - Prefix the script with a base command + - Render the script string + - Run a job in the right container + + This callback is added to the "do" subcommands by the `add_job_commands` function. + """ + context = click.get_current_context().obj + config = tutor_config.load(context.root) + runner = context.job_runner(config) + for service, command in service_commands: + runner.run_task_from_str(service, command) + + +hooks.Filters.CLI_DO_COMMANDS.add_items( + [ + createuser, + importdemocourse, + importdemolibraries, + initialise, + print_edx_platform_setting, + settheme, + sqlshell, + ] +) diff --git a/tutor/commands/k8s.py b/tutor/commands/k8s.py index 6bca266810..5064366c0c 100644 --- a/tutor/commands/k8s.py +++ b/tutor/commands/k8s.py @@ -1,45 +1,259 @@ +import os from datetime import datetime from time import sleep +from typing import Any, Iterable, List, Optional, Type import click -from .. import config as tutor_config -from .. import env as tutor_env -from .. import exceptions -from .. import fmt -from .. import interactive as interactive_config -from .. import scripts -from .. import serialize -from .. 
import utils +from tutor import config as tutor_config +from tutor import env as tutor_env +from tutor import exceptions, fmt, hooks +from tutor import interactive as interactive_config +from tutor import serialize, utils +from tutor.commands import jobs +from tutor.commands.config import save as config_save_command +from tutor.commands.context import BaseTaskContext +from tutor.commands.upgrade import OPENEDX_RELEASE_NAMES +from tutor.commands.upgrade.k8s import upgrade_from +from tutor.tasks import BaseTaskRunner +from tutor.types import Config, get_typed + + +class K8sClients: + _instance = None + + def __init__(self) -> None: + # Loading the kubernetes module here to avoid import overhead + # pylint: disable=import-outside-toplevel + from kubernetes import client, config + + if os.path.exists( + os.path.expanduser(config.kube_config.KUBE_CONFIG_DEFAULT_LOCATION) + ): + # found the kubeconfig file, let's load it! + config.load_kube_config() + elif ( + config.incluster_config.SERVICE_HOST_ENV_NAME in os.environ + and config.incluster_config.SERVICE_PORT_ENV_NAME in os.environ + ): + # We are running inside a cluster, let's load the in-cluster configuration. + config.load_incluster_config() + else: + raise exceptions.TutorError( + f"there is no Kubernetes configuration file located in {config.kube_config.KUBE_CONFIG_DEFAULT_LOCATION}, and the service environment variables {config.incluster_config.SERVICE_HOST_ENV_NAME} and {config.incluster_config.SERVICE_PORT_ENV_NAME} have not been defined. To connect to a cluster, please configure your host appropriately." 
+ ) + + self._batch_api = None + self._core_api = None + self._client = client + + @classmethod + def instance(cls: Type["K8sClients"]) -> "K8sClients": + if cls._instance is None: + cls._instance = cls() + return cls._instance + + @property + def batch_api(self): # type: ignore + if self._batch_api is None: + self._batch_api = self._client.BatchV1Api() + return self._batch_api + + @property + def core_api(self): # type: ignore + if self._core_api is None: + self._core_api = self._client.CoreV1Api() + return self._core_api + + +class K8sTaskRunner(BaseTaskRunner): + """ + Run tasks (bash commands) in Kubernetes-managed services. + + Note: a single Tutor "task" correspond to a Kubernetes "job": + https://kubernetes.io/docs/concepts/workloads/controllers/job/ + A Tutor "job" is composed of multiple Tutor tasks run in different services. + + In Kubernetes, each task that is expected to run in a "myservice" container will + trigger the "myservice-job" Kubernetes job. This job definition must be present in + the "k8s/jobs.yml" template. + """ + + def run_task(self, service: str, command: str) -> int: + canonical_job_name = f"{service}-job" + all_jobs = list(self._load_jobs()) + job = self._find_job(canonical_job_name, all_jobs) + # Create a unique job name to make it deduplicate jobs and make it easier to + # find later. Logs of older jobs will remain available for some time. 
+ job_name = canonical_job_name + "-" + datetime.now().strftime("%Y%m%d%H%M%S") + + # Wait until all other jobs are completed + while True: + active_jobs = self.active_job_names() + if not active_jobs: + break + fmt.echo_info( + f"Waiting for active jobs to terminate: {' '.join(active_jobs)}" + ) + sleep(5) + + # Configure job + job["metadata"]["name"] = job_name + job["metadata"].setdefault("labels", {}) + job["metadata"]["labels"]["app.kubernetes.io/name"] = job_name + # Define k8s entrypoint/args + shell_command = ["sh", "-e", "-c"] + if job["spec"]["template"]["spec"]["containers"][0].get("command") == []: + # In some cases, we need to bypass the container entrypoint. + # Unfortunately, AFAIK, there is no way to do so in K8s manifests. So we mark + # some jobs with "command: []". For these jobs, the entrypoint becomes "sh -e -c". + # We do not do this for every job, because some (most) entrypoints are actually useful. + job["spec"]["template"]["spec"]["containers"][0]["command"] = shell_command + container_args = [command] + else: + container_args = shell_command + [command] + job["spec"]["template"]["spec"]["containers"][0]["args"] = container_args + job["spec"]["backoffLimit"] = 1 + job["spec"]["ttlSecondsAfterFinished"] = 3600 + + with open( + tutor_env.pathjoin(self.root, "k8s", "jobs.yml"), "w", encoding="utf-8" + ) as job_file: + serialize.dump_all(all_jobs, job_file) + + # We cannot use the k8s API to create the job: configMap and volume names need + # to be found with the right suffixes. + kubectl_apply( + self.root, + "--selector", + f"app.kubernetes.io/name={job_name}", + ) + + message = ( + "Job {job_name} is running. To view the logs from this job, run:\n\n" + """ kubectl logs --namespace={namespace} --follow $(kubectl get --namespace={namespace} pods """ + """--selector=job-name={job_name} -o=jsonpath="{{.items[0].metadata.name}}")\n\n""" + "Waiting for job completion..." 
+ ).format(job_name=job_name, namespace=k8s_namespace(self.config)) + fmt.echo_info(message) + + # Wait for completion + field_selector = f"metadata.name={job_name}" + while True: + namespaced_jobs = K8sClients.instance().batch_api.list_namespaced_job( + k8s_namespace(self.config), field_selector=field_selector + ) + if not namespaced_jobs.items: + continue + job = namespaced_jobs.items[0] + if not job.status.active: + if job.status.succeeded: + fmt.echo_info(f"Job {job_name} successful.") + break + if job.status.failed: + raise exceptions.TutorError( + f"Job {job_name} failed. View the job logs to debug this issue." + ) + sleep(5) + return 0 + + def load_job(self, name: str) -> Any: + """ + Find a given job definition in the rendered k8s/jobs.yml template. + """ + return self._find_job(name, self._load_jobs()) + + def _find_job(self, name: str, all_jobs: Iterable[Any]) -> Any: + """ + Find the matching job definition in the in the list of jobs provided. + + Returns the found job's manifest. + """ + for job in all_jobs: + job_name = job["metadata"]["name"] + if not isinstance(job_name, str): + raise exceptions.TutorError( + f"Invalid job name: '{job_name}'. Expected str." + ) + if job_name == name: + return job + raise exceptions.TutorError(f"Could not find job '{name}'") + + def _load_jobs(self) -> Iterable[Any]: + manifests = self.render("k8s", "jobs.yml") + for manifest in serialize.load_all(manifests): + if manifest["kind"] == "Job": + yield manifest + + def active_job_names(self) -> List[str]: + """ + Return a list of active job names + Docs: + https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#list-job-v1-batch + + This is necessary to make sure that we don't run the same job multiple times at + the same time. 
+ """ + api = K8sClients.instance().batch_api + return [ + job.metadata.name + for job in api.list_namespaced_job(k8s_namespace(self.config)).items + if job.status.active + ] + + +class K8sContext(BaseTaskContext): + def job_runner(self, config: Config) -> K8sTaskRunner: + return K8sTaskRunner(self.root, config) @click.group(help="Run Open edX on Kubernetes") -def k8s(): - pass +@click.pass_context +def k8s(context: click.Context) -> None: + context.obj = K8sContext(context.obj.root) @click.command(help="Configure and run Open edX from scratch") @click.option("-I", "--non-interactive", is_flag=True, help="Run non-interactively") -@click.pass_obj -def quickstart(context, non_interactive): - click.echo(fmt.title("Interactive platform configuration")) - config = interactive_config.update(context.root, interactive=(not non_interactive)) - if not config["RUN_CADDY"]: - fmt.echo_alert( - "Potentially invalid configuration: RUN_CADDY=false\n" - "This setting might have been defined because you previously set WEB_PROXY=true. This is no longer" - " necessary in order to get Tutor to work on Kubernetes. In Tutor v11+ a Caddy-based load balancer is" - " provided out of the box to handle SSL/TLS certificate generation at runtime. If you disable this" - " service, you will have to configure an Ingress resource and a certificate manager yourself to redirect" - " traffic to the nginx service. See the Kubernetes section in the Tutor documentation for more" - " information." 
+@click.pass_context +def launch(context: click.Context, non_interactive: bool) -> None: + run_upgrade_from_release = tutor_env.should_upgrade_from_release(context.obj.root) + if run_upgrade_from_release is not None: + click.echo(fmt.title("Upgrading from an older release")) + context.invoke( + upgrade, + from_release=tutor_env.get_env_release(context.obj.root), ) - click.echo(fmt.title("Updating the current environment")) - tutor_env.save(context.root, config) + + config = tutor_config.load_minimal(context.obj.root) + if not non_interactive: + click.echo(fmt.title("Interactive platform configuration")) + interactive_config.ask_questions(config, run_for_prod=True) + tutor_config.save_config_file(context.obj.root, config) + config = tutor_config.load_full(context.obj.root) + tutor_env.save(context.obj.root, config) + + if run_upgrade_from_release and not non_interactive: + question = f"""Your platform is being upgraded from {run_upgrade_from_release.capitalize()}. + +If you run custom Docker images, you must rebuild and push them to your private repository now by running the following +commands in a different shell: + + tutor images build all # add your custom images here + tutor images push all + +Press enter when you are ready to continue""" + click.confirm( + fmt.question(question), default=True, abort=True, prompt_suffix=" " + ) + click.echo(fmt.title("Starting the platform")) - start.callback() + context.invoke(start) + click.echo(fmt.title("Database creation and migrations")) - init.callback(limit=None) + context.invoke(init, limit=None) + + config = tutor_config.load(context.obj.root) fmt.echo_info( """Your Open edX platform is ready and can be accessed at the following urls: @@ -53,69 +267,97 @@ def quickstart(context, non_interactive): ) -@click.command(help="Run all configured Open edX services") +@click.command( + short_help="Run all configured Open edX resources", + help=( + "Run all configured Open edX resources. 
You may limit this command to " + "some resources by passing name arguments." + ), +) +@click.argument("names", metavar="name", nargs=-1) @click.pass_obj -def start(context): - # Create namespace - utils.kubectl( - "apply", - "--kustomize", - tutor_env.pathjoin(context.root), - "--wait", - "--selector", - "app.kubernetes.io/component=namespace", - ) - # Create volumes - utils.kubectl( - "apply", - "--kustomize", - tutor_env.pathjoin(context.root), - "--wait", - "--selector", - "app.kubernetes.io/component=volume", - ) - # Create everything else except jobs - utils.kubectl( - "apply", - "--kustomize", - tutor_env.pathjoin(context.root), - "--selector", - # Here use `notin (job, xxx)` when there are other components to ignore - "app.kubernetes.io/component!=job", - ) +def start(context: K8sContext, names: List[str]) -> None: + config = tutor_config.load(context.root) + # Create namespace, if necessary + # Note that this step should not be run for some users, in particular those + # who do not have permission to edit the namespace. + try: + utils.kubectl("get", "namespaces", k8s_namespace(config)) + fmt.echo_info("Namespace already exists: skipping creation.") + except exceptions.TutorError: + fmt.echo_info("Namespace does not exist: now creating it...") + kubectl_apply( + context.root, + "--wait", + "--selector", + "app.kubernetes.io/component=namespace", + ) + + names = names or ["all"] + for name in names: + if name == "all": + # Create everything except jobs + kubectl_apply( + context.root, + "--selector", + "app.kubernetes.io/component notin (job,namespace)", + ) + else: + kubectl_apply( + context.root, + "--selector", + f"app.kubernetes.io/name={name}", + ) -@click.command(help="Stop a running platform") +@click.command( + short_help="Stop a running platform", + help=( + "Stop a running platform by deleting all resources, except for volumes. " + "You may limit this command to some resources by passing name arguments." 
+ ), +) +@click.argument("names", metavar="name", nargs=-1) @click.pass_obj -def stop(context): +def stop(context: K8sContext, names: List[str]) -> None: config = tutor_config.load(context.root) + names = names or ["all"] + for name in names: + if name == "all": + delete_resources(config) + else: + delete_resources(config, name=name) + + +def delete_resources( + config: Config, resources: Optional[List[str]] = None, name: Optional[str] = None +) -> None: + """ + Delete resources by type and name. + + The load balancer is never deleted. + """ + resources = resources or ["deployments", "services", "configmaps", "jobs"] + not_lb_selector = "app.kubernetes.io/component!=loadbalancer" + name_selector = [f"app.kubernetes.io/name={name}"] if name else [] utils.kubectl( "delete", - *resource_selector(config), - "deployments,services,configmaps,jobs", + *resource_selector(config, not_lb_selector, *name_selector), + ",".join(resources), ) @click.command(help="Reboot an existing platform") -def reboot(): - stop.callback() - start.callback() - - -def resource_selector(config, *selectors): - """ - Convenient utility for filtering only the resources that belong to this project. - """ - selector = ",".join( - ["app.kubernetes.io/instance=openedx-" + config["ID"]] + list(selectors) - ) - return ["--namespace", config["K8S_NAMESPACE"], "--selector=" + selector] +@click.pass_context +def reboot(context: click.Context) -> None: + context.invoke(stop) + context.invoke(start) @click.command(help="Completely delete an existing platform") @click.option("-y", "--yes", is_flag=True, help="Do not ask for confirmation") @click.pass_obj -def delete(context, yes): +def delete(context: K8sContext, yes: bool) -> None: if not yes: click.confirm( "Are you sure you want to delete the platform? 
All data will be removed.", @@ -130,67 +372,65 @@ def delete(context, yes): ) -@click.command(help="Initialise all applications") -@click.option("-l", "--limit", help="Limit initialisation to this service or plugin") +@jobs.do_group @click.pass_obj -def init(context, limit): - config = tutor_config.load(context.root) - runner = K8sScriptRunner(context.root, config) - for service in ["mysql", "elasticsearch", "mongodb"]: - if tutor_config.is_service_activated(config, service): - wait_for_pod_ready(config, service) - scripts.initialise(runner, limit_to=limit) +def do(context: K8sContext) -> None: + """ + Run a custom job in the right container(s). + We make sure that some essential containers (databases, proxy) are up before we + launch the jobs. + """ -@click.command(help="Create an Open edX user and interactively set their password") -@click.option("--superuser", is_flag=True, help="Make superuser") -@click.option("--staff", is_flag=True, help="Make staff user") -@click.option( - "-p", - "--password", - help="Specify password from the command line. If undefined, you will be prompted to input a password", -) -@click.argument("name") -@click.argument("email") -@click.pass_obj -def createuser(context, superuser, staff, password, name, email): - config = tutor_config.load(context.root) - command = scripts.create_user_command( - superuser, staff, name, email, password=password - ) - # This needs to be interactive in case the user needs to type a password - kubectl_exec(config, "lms", command, attach=True) + @hooks.Actions.DO_JOB.add() + def _start_base_deployments(_job_name: str, *_args: Any, **_kwargs: Any) -> None: + """ + We add this logic to an action callback because we do not want to trigger it + whenever we run `tutor k8s do --help`. 
+ """ + config = tutor_config.load(context.root) + wait_for_deployment_ready(config, "caddy") + for name in ["elasticsearch", "mysql", "mongodb"]: + if tutor_config.is_service_activated(config, name): + wait_for_deployment_ready(config, name) -@click.command(help="Import the demo course") -@click.pass_obj -def importdemocourse(context): - fmt.echo_info("Importing demo course") - config = tutor_config.load(context.root) - runner = K8sScriptRunner(context.root, config) - scripts.import_demo_course(runner) +@click.command(help="Initialise all applications") +@click.option("-l", "--limit", help="Limit initialisation to this service or plugin") +@click.pass_context +def init(context: click.Context, limit: Optional[str]) -> None: + context.invoke(do.commands["init"], limit=limit) -@click.command( - help="Set a theme for a given domain name. To reset to the default theme , use 'default' as the theme name." -) -@click.argument("theme_name") -@click.argument("domain_names", metavar="domain_name", nargs=-1) +@click.command(help="Scale the number of replicas of a given deployment") +@click.argument("deployment") +@click.argument("replicas", type=int) @click.pass_obj -def settheme(context, theme_name, domain_names): +def scale(context: K8sContext, deployment: str, replicas: int) -> None: config = tutor_config.load(context.root) - runner = K8sScriptRunner(context.root, config) - for domain_name in domain_names: - scripts.set_theme(theme_name, domain_name, runner) + utils.kubectl( + "scale", + # Note that we don't use the full resource selector because selectors + # are not compatible with the deployment/ argument. 
+ *resource_namespace_selector( + config, + ), + f"--replicas={replicas}", + f"deployment/{deployment}", + ) -@click.command(name="exec", help="Execute a command in a pod of the given application") +@click.command( + name="exec", + help="Execute a command in a pod of the given application", + context_settings={"ignore_unknown_options": True}, +) @click.argument("service") -@click.argument("command") +@click.argument("args", nargs=-1, required=True) @click.pass_obj -def exec_command(context, service, command): +def exec_command(context: K8sContext, service: str, args: List[str]) -> None: config = tutor_config.load(context.root) - kubectl_exec(config, service, command, attach=True) + kubectl_exec(config, service, args) @click.command(help="View output from containers") @@ -199,7 +439,9 @@ def exec_command(context, service, command): @click.option("--tail", type=int, help="Number of lines to show from each container") @click.argument("service") @click.pass_obj -def logs(context, container, follow, tail, service): +def logs( + context: K8sContext, container: str, follow: bool, tail: bool, service: str +) -> None: config = tutor_config.load(context.root) command = ["logs"] @@ -219,273 +461,135 @@ def logs(context, container, follow, tail, service): @click.command(help="Wait for a pod to become ready") @click.argument("name") @click.pass_obj -def wait(context, name): +def wait(context: K8sContext, name: str) -> None: config = tutor_config.load(context.root) - wait_for_pod_ready(config, name) + wait_for_deployment_ready(config, name) -@click.command(help="Upgrade from a previous Open edX named release") +@click.command( + short_help="Perform release-specific upgrade tasks", + help="Perform release-specific upgrade tasks. 
To perform a full upgrade remember to run `launch`.", +) @click.option( - "--from", "from_version", default="ironwood", type=click.Choice(["ironwood"]) + "--from", + "from_release", + type=click.Choice(OPENEDX_RELEASE_NAMES), ) -@click.pass_obj -def upgrade(context, from_version): - config = tutor_config.load(context.root) - - running_version = from_version - if running_version == "ironwood": - upgrade_from_ironwood(config) - running_version = "juniper" - - if running_version == "juniper": - - running_version = "koa" - - -def upgrade_from_ironwood(config): - if not config["RUN_MONGODB"]: - fmt.echo_info( - "You are not running MongDB (RUN_MONGODB=false). It is your " - "responsibility to upgrade your MongoDb instance to v3.6. There is " - "nothing left to do to upgrade from Ironwood." - ) - return - message = """Automatic release upgrade is unsupported in Kubernetes. To upgrade from Ironwood, you should upgrade -your MongoDb cluster from v3.2 to v3.6. You should run something similar to: - - # Upgrade from v3.2 to v3.4 - tutor k8s stop - tutor config save --set DOCKER_IMAGE_MONGODB=mongo:3.4.24 - tutor k8s start - tutor k8s exec mongodb mongo --eval 'db.adminCommand({ setFeatureCompatibilityVersion: "3.4" })' - - # Upgrade from v3.4 to v3.6 - tutor k8s stop - tutor config save --set DOCKER_IMAGE_MONGODB=mongo:3.6.18 - tutor k8s start - tutor k8s exec mongodb mongo --eval 'db.adminCommand({ setFeatureCompatibilityVersion: "3.6" })' - - tutor config save --unset DOCKER_IMAGE_MONGODB""" - fmt.echo_info(message) - - -def upgrade_from_juniper(config): - if not config["RUN_MYSQL"]: - fmt.echo_info( - "You are not running MySQL (RUN_MYSQL=false). It is your " - "responsibility to upgrade your MySQL instance to v5.7. There is " - "nothing left to do to upgrade from Juniper." 
+@click.pass_context
+def upgrade(context: click.Context, from_release: Optional[str]) -> None:
+    if from_release is None:
+        from_release = tutor_env.get_env_release(context.obj.root)
+    if from_release is None:
+        fmt.echo_info("Your environment is already up-to-date")
+    else:
+        fmt.echo_alert(
+            "This command only performs a partial upgrade of your Open edX platform. "
+            "To perform a full upgrade, you should run `tutor k8s launch`."
         )
-        return
-
-    message = """Automatic release upgrade is unsupported in Kubernetes. To upgrade from Juniper, you should upgrade
-your MySQL database from v5.6 to v5.7. You should run something similar to:
+        upgrade_from(context, from_release)
+    # We update the environment to update the version
+    context.invoke(config_save_command)

-    tutor k8s start
-    tutor k8s exec mysql bash -e -c "mysql_upgrade \
-        -u $(tutor config printvalue MYSQL_ROOT_USERNAME) \
-        --password='$(tutor config printvalue MYSQL_ROOT_PASSWORD)'
-"""
-    fmt.echo_info(message)


+@click.command(
+    short_help="Direct interface to `kubectl apply`.",
+    help=(
+        "Direct interface to `kubectl apply`. This is a wrapper around `kubectl apply`. All options and"
+        " arguments passed to this command will be forwarded as-is to `kubectl apply`." 
+ ), + context_settings={"ignore_unknown_options": True}, + name="apply", +) +@click.argument("args", nargs=-1) +@click.pass_obj +def apply_command(context: K8sContext, args: List[str]) -> None: + kubectl_apply(context.root, *args) -class K8sClients: - _instance = None - - def __init__(self): - # Loading the kubernetes module here to avoid import overhead - from kubernetes import client, config # pylint: disable=import-outside-toplevel - - config.load_kube_config() - self._batch_api = None - self._core_api = None - self._client = client - - @classmethod - def instance(cls): - if cls._instance is None: - cls._instance = cls() - return cls._instance - - @property - def batch_api(self): - if self._batch_api is None: - self._batch_api = self._client.BatchV1Api() - return self._batch_api - - @property - def core_api(self): - if self._core_api is None: - self._core_api = self._client.CoreV1Api() - return self._core_api - - -class K8sScriptRunner(scripts.BaseRunner): - def load_job(self, name): - jobs = self.render("k8s", "jobs.yml") - for job in serialize.load_all(jobs): - if job["metadata"]["name"] == name: - return job - raise ValueError("Could not find job '{}'".format(name)) - - def active_job_names(self): - """ - Return a list of active job names - Docs: - https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#list-job-v1-batch - """ - api = K8sClients.instance().batch_api - return [ - job.metadata.name - for job in api.list_namespaced_job(self.config["K8S_NAMESPACE"]).items - if job.status.active - ] - - def run_job(self, service, command): - job_name = "{}-job".format(service) - try: - job = self.load_job(job_name) - except ValueError: - message = ( - "The '{job_name}' kubernetes job does not exist in the list of job " - "runners. This might be caused by an older plugin. Tutor switched to a" - " job runner model for running one-time commands, such as database" - " initialisation. 
For the record, this is the command that we are " - "running:\n" - "\n" - " {command}\n" - "\n" - "Old-style job running will be deprecated soon. Please inform " - "your plugin maintainer!" - ).format( - job_name=job_name, - command=command.replace("\n", "\n "), - ) - fmt.echo_alert(message) - wait_for_pod_ready(self.config, service) - kubectl_exec(self.config, service, command) - return - # Create a unique job name to make it deduplicate jobs and make it easier to - # find later. Logs of older jobs will remain available for some time. - job_name += "-" + datetime.now().strftime("%Y%m%d%H%M%S") - # Wait until all other jobs are completed - while True: - active_jobs = self.active_job_names() - if not active_jobs: - break - fmt.echo_info( - "Waiting for active jobs to terminate: {}".format(" ".join(active_jobs)) - ) - sleep(5) +def kubectl_apply(root: str, *args: str) -> None: + utils.kubectl("apply", "--kustomize", tutor_env.pathjoin(root), *args) - # Configure job - job["metadata"]["name"] = job_name - job["metadata"].setdefault("labels", {}) - job["metadata"]["labels"]["app.kubernetes.io/name"] = job_name - # Define k8s entrypoint/args - shell_command = ["sh", "-e", "-c"] - if job["spec"]["template"]["spec"]["containers"][0].get("command") == []: - # Empty "command" (aka: entrypoint) might not be taken into account by jobs, so we need to manually - # override the entrypoint. We do not do this for every job, because some entrypoints are actually useful. 
- job["spec"]["template"]["spec"]["containers"][0]["command"] = shell_command - container_args = [command] - else: - container_args = shell_command + [command] - job["spec"]["template"]["spec"]["containers"][0]["args"] = container_args - job["spec"]["backoffLimit"] = 1 - job["spec"]["ttlSecondsAfterFinished"] = 3600 - # Save patched job to "jobs.yml" file - with open(tutor_env.pathjoin(self.root, "k8s", "jobs.yml"), "w") as job_file: - serialize.dump(job, job_file) - # We cannot use the k8s API to create the job: configMap and volume names need - # to be found with the right suffixes. - utils.kubectl( - "apply", - "--kustomize", - tutor_env.pathjoin(self.root), - "--selector", - "app.kubernetes.io/name={}".format(job_name), - ) - message = ( - "Job {job_name} is running. To view the logs from this job, run:\n\n" - """ kubectl logs --namespace={namespace} --follow $(kubectl get --namespace={namespace} pods """ - """--selector=job-name={job_name} -o=jsonpath="{{.items[0].metadata.name}}")\n\n""" - "Waiting for job completion..." - ).format(job_name=job_name, namespace=self.config["K8S_NAMESPACE"]) - fmt.echo_info(message) - - # Wait for completion - field_selector = "metadata.name={}".format(job_name) - while True: - jobs = K8sClients.instance().batch_api.list_namespaced_job( - self.config["K8S_NAMESPACE"], field_selector=field_selector - ) - if not jobs.items: - continue - job = jobs.items[0] - if not job.status.active: - if job.status.succeeded: - fmt.echo_info("Job {} successful.".format(job_name)) - break - if job.status.failed: - raise exceptions.TutorError( - "Job {} failed. 
View the job logs to debug this issue.".format( - job_name - ) - ) - sleep(5) +@click.command(help="Print status information for all k8s resources") +@click.pass_obj +def status(context: K8sContext) -> int: + config = tutor_config.load(context.root) + return utils.kubectl("get", "all", *resource_namespace_selector(config)) -def kubectl_exec(config, service, command, attach=False): - selector = "app.kubernetes.io/name={}".format(service) +def kubectl_exec(config: Config, service: str, command: List[str]) -> int: + selector = f"app.kubernetes.io/name={service}" pods = K8sClients.instance().core_api.list_namespaced_pod( - namespace=config["K8S_NAMESPACE"], label_selector=selector + namespace=k8s_namespace(config), label_selector=selector ) if not pods.items: raise exceptions.TutorError( - "Could not find an active pod for the {} service".format(service) + f"Could not find an active pod for the {service} service" ) pod_name = pods.items[0].metadata.name # Run command - attach_opts = ["-i", "-t"] if attach else [] - utils.kubectl( + return utils.kubectl( "exec", - *attach_opts, + "--stdin", + "--tty", "--namespace", - config["K8S_NAMESPACE"], + k8s_namespace(config), pod_name, "--", - "sh", - "-e", - "-c", - command, + *command, ) -def wait_for_pod_ready(config, service): - fmt.echo_info("Waiting for a {} pod to be ready...".format(service)) +def wait_for_deployment_ready(config: Config, service: str) -> None: + fmt.echo_info(f"Waiting for a {service} deployment to be ready...") utils.kubectl( "wait", - *resource_selector(config, "app.kubernetes.io/name={}".format(service)), - "--for=condition=ContainersReady", + *resource_selector(config, f"app.kubernetes.io/name={service}"), + "--for=condition=Available=True", "--timeout=600s", - "pod", + "deployment", + ) + + +def resource_selector(config: Config, *selectors: str) -> List[str]: + """ + Convenient utility to filter the resources that belong to this project. 
+ """ + selector = ",".join( + ["app.kubernetes.io/instance=openedx-" + get_typed(config, "ID", str)] + + list(selectors) ) + return resource_namespace_selector(config) + ["--selector=" + selector] + + +def resource_namespace_selector(config: Config) -> List[str]: + """ + Convenient utility to filter the resources that belong to this project namespace. + """ + return ["--namespace", k8s_namespace(config)] -k8s.add_command(quickstart) +def k8s_namespace(config: Config) -> str: + return get_typed(config, "K8S_NAMESPACE", str) + + +k8s.add_command(launch) k8s.add_command(start) k8s.add_command(stop) k8s.add_command(reboot) k8s.add_command(delete) k8s.add_command(init) -k8s.add_command(createuser) -k8s.add_command(importdemocourse) -k8s.add_command(settheme) +k8s.add_command(scale) k8s.add_command(exec_command) k8s.add_command(logs) k8s.add_command(wait) k8s.add_command(upgrade) +k8s.add_command(apply_command) +k8s.add_command(status) + + +@hooks.Actions.PLUGINS_LOADED.add() +def _add_k8s_do_commands() -> None: + jobs.add_job_commands(do) + k8s.add_command(do) diff --git a/tutor/commands/local.py b/tutor/commands/local.py index 27b6b85b74..a9a6141524 100644 --- a/tutor/commands/local.py +++ b/tutor/commands/local.py @@ -1,196 +1,55 @@ -import os +from __future__ import annotations import click -from .. import config as tutor_config -from .. import env as tutor_env -from .. import fmt, utils -from . import compose -from .config import save as config_save_command -from .context import Context +from tutor import env as tutor_env +from tutor import hooks +from tutor.commands import compose +from tutor.types import Config, get_typed + + +class LocalTaskRunner(compose.ComposeTaskRunner): + def __init__(self, root: str, config: Config): + """ + Load docker-compose files from local/. 
+ """ + super().__init__(root, config) + self.project_name = get_typed(self.config, "LOCAL_PROJECT_NAME", str) + self.docker_compose_files += [ + tutor_env.pathjoin(self.root, "local", "docker-compose.yml"), + tutor_env.pathjoin(self.root, "local", "docker-compose.prod.yml"), + tutor_env.pathjoin(self.root, "local", "docker-compose.override.yml"), + ] + self.docker_compose_job_files += [ + tutor_env.pathjoin(self.root, "local", "docker-compose.jobs.yml"), + tutor_env.pathjoin(self.root, "local", "docker-compose.jobs.override.yml"), + ] # pylint: disable=too-few-public-methods -class LocalContext(Context): - @staticmethod - def docker_compose(root, config, *command): - args = [] - override_path = tutor_env.pathjoin(root, "local", "docker-compose.override.yml") - if os.path.exists(override_path): - args += ["-f", override_path] - return utils.docker_compose( - "-f", - tutor_env.pathjoin(root, "local", "docker-compose.yml"), - "-f", - tutor_env.pathjoin(root, "local", "docker-compose.prod.yml"), - *args, - "--project-name", - config["LOCAL_PROJECT_NAME"], - *command - ) +class LocalContext(compose.BaseComposeContext): + NAME = "local" + OPENEDX_SERVICES = ["lms", "cms", "lms-worker", "cms-worker"] + + def job_runner(self, config: Config) -> LocalTaskRunner: + return LocalTaskRunner(self.root, config) @click.group(help="Run Open edX locally with docker-compose") @click.pass_context -def local(context): +def local(context: click.Context) -> None: context.obj = LocalContext(context.obj.root) -@click.command(help="Configure and run Open edX from scratch") -@click.option("-I", "--non-interactive", is_flag=True, help="Run non-interactively") -@click.option( - "-p", "--pullimages", "pullimages_", is_flag=True, help="Update docker images" -) -@click.pass_obj -def quickstart(context, non_interactive, pullimages_): - if tutor_env.needs_major_upgrade(context.root): - click.echo(fmt.title("Upgrading from an older release")) - upgrade.callback( - 
from_version=tutor_env.current_release(context.root), - non_interactive=non_interactive, - ) - - click.echo(fmt.title("Interactive platform configuration")) - config_save_command.callback( - interactive=(not non_interactive), set_vars=[], unset_vars=[] - ) - click.echo(fmt.title("Stopping any existing platform")) - compose.stop.callback([]) - if pullimages_: - click.echo(fmt.title("Docker image updates")) - compose.dc_command.callback(["pull"]) - click.echo(fmt.title("Starting the platform in detached mode")) - compose.start.callback(True, []) - click.echo(fmt.title("Database creation and migrations")) - compose.init.callback(limit=None) - - config = tutor_config.load(context.root) - fmt.echo_info( - """The Open edX platform is now running in detached mode -Your Open edX platform is ready and can be accessed at the following urls: - - {http}://{lms_host} - {http}://{cms_host} - """.format( - http="https" if config["ENABLE_HTTPS"] else "http", - lms_host=config["LMS_HOST"], - cms_host=config["CMS_HOST"], - ) - ) - - -@click.command(help="Upgrade from a previous Open edX named release") -@click.option( - "--from", - "from_version", - default="juniper", - type=click.Choice(["ironwood", "juniper"]), -) -@click.option("-I", "--non-interactive", is_flag=True, help="Run non-interactively") -@click.pass_obj -def upgrade(context, from_version, non_interactive): - config = tutor_config.load_no_check(context.root) - - if not non_interactive: - question = """You are about to upgrade your Open edX platform. It is strongly recommended to make a backup before upgrading. 
To do so, run: - - tutor local stop - sudo rsync -avr "$(tutor config printroot)"/ /tmp/tutor-backup/ - -In case of problem, to restore your backup you will then have to run: sudo rsync -avr /tmp/tutor-backup/ "$(tutor config printroot)"/ - -Are you sure you want to continue?""" - click.confirm( - fmt.question(question), default=True, abort=True, prompt_suffix=" " - ) - - running_version = from_version - if running_version == "ironwood": - upgrade_from_ironwood(context, config) - running_version = "juniper" - - if running_version == "juniper": - upgrade_from_juniper(context, config) - running_version = "koa" - - -def upgrade_from_ironwood(context, config): - click.echo(fmt.title("Upgrading from Ironwood")) - tutor_env.save(context.root, config) - - click.echo(fmt.title("Stopping any existing platform")) - compose.stop.callback([]) - - if not config["RUN_MONGODB"]: - fmt.echo_info( - "You are not running MongDB (RUN_MONGODB=false). It is your " - "responsibility to upgrade your MongoDb instance to v3.6. There is " - "nothing left to do to upgrade from Ironwood." - ) - return - - # Note that the DOCKER_IMAGE_MONGODB value is never saved, because we only save the - # environment, not the configuration. 
- click.echo(fmt.title("Upgrading MongoDb from v3.2 to v3.4")) - config["DOCKER_IMAGE_MONGODB"] = "mongo:3.4.24" - tutor_env.save(context.root, config) - compose.start.callback(detach=True, services=["mongodb"]) - compose.execute.callback( - [ - "mongodb", - "mongo", - "--eval", - 'db.adminCommand({ setFeatureCompatibilityVersion: "3.4" })', - ] - ) - compose.stop.callback([]) - - click.echo(fmt.title("Upgrading MongoDb from v3.4 to v3.6")) - config["DOCKER_IMAGE_MONGODB"] = "mongo:3.6.18" - tutor_env.save(context.root, config) - compose.start.callback(detach=True, services=["mongodb"]) - compose.execute.callback( - [ - "mongodb", - "mongo", - "--eval", - 'db.adminCommand({ setFeatureCompatibilityVersion: "3.6" })', - ] - ) - compose.stop.callback([]) - - -def upgrade_from_juniper(context, config): - click.echo(fmt.title("Upgrading from Juniper")) - tutor_env.save(context.root, config) - - click.echo(fmt.title("Stopping any existing platform")) - compose.stop.callback([]) - - if not config["RUN_MYSQL"]: - fmt.echo_info( - "You are not running MySQL (RUN_MYSQL=false). It is your " - "responsibility to upgrade your MySQL instance to v5.7. There is " - "nothing left to do to upgrade from Juniper." - ) - return - - click.echo(fmt.title("Upgrading MySQL from v5.6 to v5.7")) - compose.start.callback(detach=True, services=["mysql"]) - compose.execute.callback( - [ - "mysql", - "bash", - "-e", - "-c", - "mysql_upgrade -u {} --password='{}'".format( - config["MYSQL_ROOT_USERNAME"], config["MYSQL_ROOT_PASSWORD"] - ), - ] - ) - compose.stop.callback([]) +@hooks.Actions.COMPOSE_PROJECT_STARTED.add() +def _stop_on_dev_start(root: str, config: Config, project_name: str) -> None: + """ + Stop the local platform as soon as a platform with a different project name is + started. 
+ """ + runner = LocalTaskRunner(root, config) + if project_name != runner.project_name: + runner.docker_compose("stop") -local.add_command(quickstart) -local.add_command(upgrade) compose.add_commands(local) diff --git a/tutor/commands/mounts.py b/tutor/commands/mounts.py new file mode 100644 index 0000000000..c97075521b --- /dev/null +++ b/tutor/commands/mounts.py @@ -0,0 +1,138 @@ +from __future__ import annotations + +import os + +import click +import yaml + +from tutor import bindmount +from tutor import config as tutor_config +from tutor import exceptions, fmt, hooks +from tutor.commands.config import save as config_save +from tutor.commands.context import Context +from tutor.commands.params import ConfigLoaderParam + + +class MountParamType(ConfigLoaderParam): + name = "mount" + + def shell_complete( + self, ctx: click.Context, param: click.Parameter, incomplete: str + ) -> list[click.shell_completion.CompletionItem]: + mounts = bindmount.get_mounts(self.config) + return [ + click.shell_completion.CompletionItem(mount) + for mount in mounts + if mount.startswith(incomplete) + ] + + +@click.group(name="mounts") +def mounts_command() -> None: + """ + Manage host bind-mounts + + Bind-mounted folders are used both in image building, development (`dev` commands) + and `local` deployments. + """ + + +@click.command(name="list") +@click.pass_obj +def mounts_list(context: Context) -> None: + """ + List bind-mounted folders + + Entries will be fetched from the `MOUNTS` project setting. 
+ """ + config = tutor_config.load(context.root) + mounts = [] + for mount_name in bindmount.get_mounts(config): + build_mounts = [ + {"image": image_name, "context": stage_name} + for image_name, stage_name in hooks.Filters.IMAGES_BUILD_MOUNTS.iterate( + mount_name + ) + ] + compose_mounts = [ + { + "service": service, + "container_path": container_path, + } + for service, _host_path, container_path in bindmount.parse_mount(mount_name) + ] + mounts.append( + { + "name": mount_name, + "build_mounts": build_mounts, + "compose_mounts": compose_mounts, + } + ) + fmt.echo(yaml.dump(mounts, default_flow_style=False, sort_keys=False)) + + +@click.command(name="add") +@click.argument("mounts", metavar="mount", type=click.Path(), nargs=-1) +@click.pass_context +def mounts_add(context: click.Context, mounts: list[str]) -> None: + """ + Add a bind-mounted folder + + The bind-mounted folder will be added to the project configuration, in the ``MOUNTS`` + setting. + + Values passed to this command can take one of two forms. The first is explicit:: + + tutor mounts add myservice:/host/path:/container/path + + The second is implicit:: + + tutor mounts add /host/path + + With the explicit form, the value means "bind-mount the host folder /host/path to + /container/path in the "myservice" container at run time". + + With the implicit form, plugins are in charge of automatically detecting in which + containers and locations the /host/path folder should be bind-mounted. In this case, + folders can be bind-mounted at build-time -- which cannot be achieved with the + explicit form. 
+ """ + new_mounts = [] + for mount in mounts: + if not bindmount.parse_explicit_mount(mount): + # Path is implicit: check that this path is valid + # (we don't try to validate explicit mounts) + mount = os.path.abspath(os.path.expanduser(mount)) + if not os.path.exists(mount): + raise exceptions.TutorError(f"Path {mount} does not exist on the host") + new_mounts.append(mount) + fmt.echo_info(f"Adding bind-mount: {mount}") + + context.invoke(config_save, append_vars=[("MOUNTS", mount) for mount in new_mounts]) + + +@click.command(name="remove") +@click.argument("mounts", metavar="mount", type=MountParamType(), nargs=-1) +@click.pass_context +def mounts_remove(context: click.Context, mounts: list[str]) -> None: + """ + Remove a bind-mounted folder + + The bind-mounted folder will be removed from the ``MOUNTS`` project setting. + """ + removed_mounts = [] + for mount in mounts: + if not bindmount.parse_explicit_mount(mount): + # Path is implicit: expand it + mount = os.path.abspath(os.path.expanduser(mount)) + removed_mounts.append(mount) + fmt.echo_info(f"Removing bind-mount: {mount}") + + context.invoke( + config_save, remove_vars=[("MOUNTS", mount) for mount in removed_mounts] + ) + + +mounts_command.add_command(mounts_list) +mounts_command.add_command(mounts_add) +mounts_command.add_command(mounts_remove) diff --git a/tutor/commands/params.py b/tutor/commands/params.py new file mode 100644 index 0000000000..a9e0e6be22 --- /dev/null +++ b/tutor/commands/params.py @@ -0,0 +1,29 @@ +import typing as t + +import click + +from tutor import config as tutor_config +from tutor import hooks +from tutor.types import Config + + +class ConfigLoaderParam(click.ParamType): + """ + Convenient param child class that automatically loads the user configuration on auto-complete. 
+ """ + + def __init__(self) -> None: + self.root = None + self._config: t.Optional[Config] = None + + @hooks.Actions.PROJECT_ROOT_READY.add() + def _on_root_ready(root: str) -> None: + self.root = root + + @property + def config(self) -> Config: + if self.root is None: + return {} + if self._config is None: + self._config = tutor_config.load_full(self.root) + return self._config diff --git a/tutor/commands/plugins.py b/tutor/commands/plugins.py index 4a356a7a9f..4908c93bf2 100644 --- a/tutor/commands/plugins.py +++ b/tutor/commands/plugins.py @@ -1,145 +1,420 @@ +from __future__ import annotations + import os -import shutil -import urllib.request +import tempfile +import typing as t import click +import click.shell_completion + +from tutor import config as tutor_config +from tutor import exceptions, fmt, hooks, plugins, utils +from tutor.commands.config import save as config_save_command +from tutor.plugins import indexes +from tutor.plugins.base import PLUGINS_ROOT, PLUGINS_ROOT_ENV_VAR_NAME +from tutor.types import Config + +from .context import Context + + +class PluginName(click.ParamType): + """ + Convenient param type that supports autocompletion of installed plugin names. + """ + + def __init__(self, allow_all: bool = False): + self.allow_all = allow_all + + def shell_complete( + self, ctx: click.Context, param: click.Parameter, incomplete: str + ) -> list[click.shell_completion.CompletionItem]: + return [ + click.shell_completion.CompletionItem(name) + for name in self.get_names(incomplete) + ] + + def get_names(self, incomplete: str) -> list[str]: + candidates = [] + if self.allow_all: + candidates.append("all") + candidates += [name for name, _ in plugins.iter_info()] + + return [name for name in candidates if name.startswith(incomplete)] + + +class IndexPluginName(click.ParamType): + """ + Param type for auto-completion of plugin names found in index cache. 
+ """ + + def shell_complete( + self, ctx: click.Context, param: click.Parameter, incomplete: str + ) -> t.List[click.shell_completion.CompletionItem]: + return [ + click.shell_completion.CompletionItem(entry.name) + for entry in indexes.iter_cache_entries() + if entry.name.startswith(incomplete.lower()) + ] -from .. import config as tutor_config -from .. import env as tutor_env -from .. import exceptions -from .. import fmt -from .. import plugins + +class IndexPluginNameOrLocation(IndexPluginName): + """ + Same as IndexPluginName but also auto-completes file location. + """ + + def shell_complete( + self, ctx: click.Context, param: click.Parameter, incomplete: str + ) -> t.List[click.shell_completion.CompletionItem]: + # Auto-complete plugin names + autocompleted = super().shell_complete(ctx, param, incomplete) + # Auto-complete local paths + autocompleted += click.Path().shell_complete(ctx, param, incomplete) + return autocompleted @click.group( name="plugins", short_help="Manage Tutor plugins", - help="Manage Tutor plugins to add new features and customize your Open edX platform", ) -def plugins_command(): +def plugins_command() -> None: """ - All plugin commands should work even if there is no existing config file. This is - because users might enable plugins prior to configuration or environment generation. + Manage Tutor plugins to add new features and customise your Open edX platform. + + Plugins can be downloaded from local and remote indexes. See the `tutor plugins + index` subcommand. + + After the plugin index cache has been updated, plugins can be searched with: + + tutor plugins search + + Plugins are installed with: + + tutor plugins install """ + # All plugin commands should work even if there is no existing config file. This is + # because users might enable or install plugins prior to configuration or + # environment generation. + # Thus, usage of `config.load_full` is prohibited. 
-@click.command(name="list", help="List installed plugins") -@click.pass_obj -def list_command(context): - config = tutor_config.load_user(context.root) - for plugin in plugins.iter_installed(): - status = "" if plugins.is_enabled(config, plugin.name) else " (disabled)" - print( - "{plugin}=={version}{status}".format( - plugin=plugin.name, status=status, version=plugin.version +@click.command( + short_help="Print the location of file-based plugins", + help=f"""Print the location of yaml-based plugins: both python v1 and yaml v0 plugins. This location can be manually +defined by setting the {PLUGINS_ROOT_ENV_VAR_NAME} environment variable""", +) +def printroot() -> None: + fmt.echo(PLUGINS_ROOT) + + +@click.command(name="list") +@click.option( + "-e", + "--enabled", + "show_enabled_only", + is_flag=True, + help="Display enabled plugins only", +) +def list_command(show_enabled_only: bool) -> None: + """ + List installed plugins. + """ + plugins_table: list[tuple[str, ...]] = [("NAME", "STATUS", "VERSION")] + for plugin, plugin_info in plugins.iter_info(): + is_enabled = plugins.is_loaded(plugin) + if is_enabled or not show_enabled_only: + plugins_table.append( + ( + plugin, + plugin_status(plugin), + (plugin_info or "").replace("\n", " "), + ) ) - ) + fmt.echo(utils.format_table(plugins_table)) @click.command(help="Enable a plugin") -@click.argument("plugin_names", metavar="plugin", nargs=-1) -@click.pass_obj -def enable(context, plugin_names): - config = tutor_config.load_user(context.root) +@click.argument("plugin_names", metavar="plugin", nargs=-1, type=PluginName()) +@click.pass_context +def enable(context: click.Context, plugin_names: list[str]) -> None: + config = tutor_config.load_minimal(context.obj.root) for plugin in plugin_names: - plugins.enable(config, plugin) - fmt.echo_info("Plugin {} enabled".format(plugin)) - tutor_config.save_config_file(context.root, config) - fmt.echo_info( - "You should now re-generate your environment with `tutor config save`." 
- ) + plugins.load(plugin) + fmt.echo_info(f"Plugin {plugin} enabled") + tutor_config.save_enabled_plugins(config) + tutor_config.save_config_file(context.obj.root, config) + context.invoke(config_save_command, env_only=True) @click.command( short_help="Disable a plugin", help="Disable one or more plugins. Specify 'all' to disable all enabled plugins at once.", ) -@click.argument("plugin_names", metavar="plugin", nargs=-1) +@click.argument( + "plugin_names", metavar="plugin", nargs=-1, type=PluginName(allow_all=True) +) +@click.pass_context +def disable(context: click.Context, plugin_names: list[str]) -> None: + config = tutor_config.load_minimal(context.obj.root) + disable_all = "all" in plugin_names + disabled: list[str] = [] + for plugin in tutor_config.get_enabled_plugins(config): + if disable_all or plugin in plugin_names: + fmt.echo_info(f"Disabling plugin {plugin}...") + hooks.Actions.PLUGIN_UNLOADED.do(plugin, context.obj.root, config) + disabled.append(plugin) + fmt.echo_info(f"Plugin {plugin} disabled") + if disabled: + tutor_config.save_config_file(context.obj.root, config) + context.invoke(config_save_command, env_only=True) + + +@click.command(name="update") @click.pass_obj -def disable(context, plugin_names): - config = tutor_config.load_user(context.root) - if "all" in plugin_names: - plugin_names = [plugin.name for plugin in plugins.iter_enabled(config)] - for plugin_name in plugin_names: - plugins.disable(config, plugin_name) - delete_plugin(context.root, plugin_name) - - tutor_config.save_config_file(context.root, config) - fmt.echo_info( - "You should now re-generate your environment with `tutor config save`." - ) +def update(context: Context) -> None: + """ + Update the list of available plugins. 
+ """ + config = tutor_config.load(context.root) + update_indexes(config) -def delete_plugin(root, name): - plugin_dir = tutor_env.pathjoin(root, "plugins", name) - if os.path.exists(plugin_dir): +def update_indexes(config: Config) -> None: + all_plugins = indexes.fetch(config) + cache_path = indexes.save_cache(all_plugins) + fmt.echo_info(f"Plugin index local cache: {cache_path}") + + +@click.command() +@click.argument("names", metavar="name", type=IndexPluginNameOrLocation(), nargs=-1) +def install(names: list[str]) -> None: + """ + Install one or more plugins. + + Each plugin name can be one of: + + 1. A plugin name from the plugin indexes (see `tutor plugins search`) + 2. A local file that will be copied to the plugins root + 3. An http(s) location that will be downloaded to the plugins root + + In cases 2. and 3., the plugin root corresponds to the path given by `tutor plugins + printroot`. + """ + find_and_install(names, []) + + +@click.command() +@click.argument("names", metavar="name", type=IndexPluginName(), nargs=-1) +def upgrade(names: list[str]) -> None: + """ + Upgrade one or more plugins. + + Specify "all" to upgrade all installed plugins. This command will only print a + warning for plugins which cannot be found. + """ + if "all" in names: + names = list(plugins.iter_installed()) + available_names = [] + for name in names: try: - shutil.rmtree(plugin_dir) - except PermissionError as e: - raise exceptions.TutorError( - "Could not delete file {} from plugin {} in folder {}".format( - e.filename, name, plugin_dir - ) + indexes.find_in_cache(name) + except exceptions.TutorError: + fmt.echo_error( + f"Failed to upgrade '{name}': plugin could not be found in indexes" ) + else: + available_names.append(name) + find_and_install(available_names, ["--upgrade"]) -@click.command( - short_help="Print the location of yaml-based plugins", - help="""Print the location of yaml-based plugins. 
This location can be manually -defined by setting the {} environment variable""".format( - plugins.DictPlugin.ROOT_ENV_VAR_NAME - ), -) -def printroot(): - fmt.echo(plugins.DictPlugin.ROOT) +def find_and_install(names: list[str], pip_install_opts: t.List[str]) -> None: + """ + Find and install a list of plugins, given by name. Single-file plugins are + downloaded/copied. Python packages are pip-installed. + """ + single_file_plugins = [] + pip_requirements = [] + for name in names: + if utils.is_url(name): + single_file_plugins.append(name) + else: + plugin = indexes.find_in_cache(name) + src = hooks.Filters.PLUGIN_INDEX_ENTRY_TO_INSTALL.apply(plugin.data)[ + "src" + ].strip() + if utils.is_url(src): + single_file_plugins.append(src) + else: + # Create requirements file where each plugin reqs is prefixed by a + # comment with its name + pip_requirements.append(f"# {name}\n{src}") -@click.command( - short_help="Install a plugin", - help="""Install a plugin, either from a local YAML file or a remote, web-hosted -location.
The plugin will be installed to {}.""".format( - plugins.DictPlugin.ROOT_ENV_VAR_NAME - ), -) -@click.argument("location") -def install(location): - basename = os.path.basename(location) - if not basename.endswith(".yml"): - basename += ".yml" - plugin_path = os.path.join(plugins.DictPlugin.ROOT, basename) - - if location.startswith("http"): - # Download file - response = urllib.request.urlopen(location) - content = response.read().decode() - elif os.path.isfile(location): - # Read file - with open(location) as f: - content = f.read() - else: - raise exceptions.TutorError("No plugin found at {}".format(location)) + for url in single_file_plugins: + install_single_file_plugin(url) + if pip_requirements: + # pip install -r reqs.txt + requirements_txt = "\n".join(pip_requirements) + with tempfile.NamedTemporaryFile( + prefix="tutor-reqs-", suffix=".txt", mode="w" + ) as tmp_reqs: + tmp_reqs.write(requirements_txt) + tmp_reqs.flush() + fmt.echo_info(f"Installing pip requirements:\n{requirements_txt}") + utils.execute( + "pip", "install", *pip_install_opts, "--requirement", tmp_reqs.name + ) + + +def install_single_file_plugin(location: str) -> None: + """ + Download or copy a single file to the plugins root. 
+ """ + plugin_path = os.path.join(PLUGINS_ROOT, os.path.basename(location)) + if not plugin_path.endswith(".yml") and not plugin_path.endswith(".py"): + plugin_path += ".py" + # Read url + fmt.echo_info(f"Downloading plugin from {location}...") + content = utils.read_url(location) # Save file - if not os.path.exists(plugins.DictPlugin.ROOT): - os.makedirs(plugins.DictPlugin.ROOT) - with open(plugin_path, "w", newline="\n") as f: + utils.ensure_file_directory_exists(plugin_path) + with open(plugin_path, "w", newline="\n", encoding="utf-8") as f: f.write(content) - fmt.echo_info("Plugin installed at {}".format(plugin_path)) + fmt.echo_info(f"Plugin installed at {plugin_path}") + + +@click.command() +@click.argument("pattern", default="") +def search(pattern: str) -> None: + """ + Search in plugin descriptions. + """ + results: list[tuple[str, ...]] = [("NAME", "STATUS", "DESCRIPTION")] + for plugin in indexes.iter_cache_entries(): + if plugin.match(pattern): + results.append( + ( + plugin.name, + plugin_status(plugin.name), + plugin.short_description, + ) + ) + print(utils.format_table(results)) + + +@click.command() +@click.argument("name", type=IndexPluginName()) +def show(name: str) -> None: + """ + Show plugin details from index. + """ + name = name.lower() + for plugin in indexes.iter_cache_entries(): + if plugin.name == name: + fmt.echo( + f"""Name: {plugin.name} +Source: {plugin.src} +Status: {plugin_status(name)} +Author: {plugin.author} +Maintainer: {plugin.maintainer} +Homepage: {plugin.url} +Index: {plugin.index} +Description: {plugin.description}""" + ) + return + raise exceptions.TutorError( + f"No information available for plugin: '{name}'. Plugin could not be found in indexes." + ) + + +def plugin_status(name: str) -> str: + """ + Return the status of a plugin. Either: "enabled", "installed" or "not installed". 
+ """ + if plugins.is_loaded(name): + return "βœ… enabled" + if plugins.is_installed(name): + return "installed" + return "not installed" + + +@click.group(name="index", short_help="Manage plugin indexes") +def index_command() -> None: + """ + Manage plugin indices. + + A plugin index is a list of Tutor plugins. An index can be public and shared with + the community, or private, for instance to share plugins with a select group of + users. Plugin indexes are a great way to share your plugins with other Tutor users. + By default, only the official plugin index is enabled. + + Plugin indexes are fetched by running: + + tutor plugins update + Plugin index cache is stored locally in the following subdirectory of the Tutor project environment: -def add_plugin_commands(command_group): + plugins/index/cache.yml """ - Add commands provided by all plugins to the given command group. Each command is - added with a name that is equal to the plugin name. + + +@click.command(name="list", help="List plugin indexes") +@click.pass_obj +def index_list(context: Context) -> None: + """ + Print plugin indexes. """ - for plugin in plugins.iter_installed(): - if isinstance(plugin.command, click.Command): - plugin.command.name = plugin.name - command_group.add_command(plugin.command) + config = tutor_config.load(context.root) + for index in indexes.get_all(config): + fmt.echo(index) +@click.command(name="add") +@click.argument("url", type=click.Path()) +@click.pass_obj +def index_add(context: Context, url: str) -> None: + """ + Add a plugin index. + + The index URL will be appended with '{version}/plugins.yml'. The index path can be + either an http(s) url or a local file path. + + For official indexes, there is no need to pass a full URL. Instead, use "main" or + "contrib". 
+ """ + config = tutor_config.load_minimal(context.root) + if indexes.add(url, config): + tutor_config.save_config_file(context.root, config) + update_indexes(config) + else: + fmt.echo_alert("Plugin index was already added") + + +@click.command(name="remove") +@click.argument("url") +@click.pass_obj +def index_remove(context: Context, url: str) -> None: + """ + Remove a plugin index. + """ + config = tutor_config.load_minimal(context.root) + if indexes.remove(url, config): + tutor_config.save_config_file(context.root, config) + update_indexes(config) + else: + fmt.echo_alert("Plugin index not present") + + +index_command.add_command(index_add) +index_command.add_command(index_list) +index_command.add_command(index_remove) +plugins_command.add_command(index_command) plugins_command.add_command(list_command) +plugins_command.add_command(printroot) plugins_command.add_command(enable) plugins_command.add_command(disable) -plugins_command.add_command(printroot) +plugins_command.add_command(update) +plugins_command.add_command(search) plugins_command.add_command(install) +plugins_command.add_command(upgrade) +plugins_command.add_command(show) diff --git a/tutor/commands/ui.py b/tutor/commands/ui.py deleted file mode 100644 index c6ce17ec93..0000000000 --- a/tutor/commands/ui.py +++ /dev/null @@ -1,21 +0,0 @@ -import click -import click_repl - - -@click.command( - short_help="Interactive shell", - help="Launch an interactive shell for launching Tutor commands", -) -def ui(): - click.echo( - """Welcome to the Tutor interactive shell UI! -Type "help" to view all available commands. -Type "local quickstart" to configure and launch a new platform from scratch. 
-Type to exit.""" - ) - while True: - try: - click_repl.repl(click.get_current_context()) - return # this happens on a ctrl+d - except Exception: # pylint: disable=broad-except - pass diff --git a/tutor/commands/upgrade/__init__.py b/tutor/commands/upgrade/__init__.py new file mode 100644 index 0000000000..44592344a5 --- /dev/null +++ b/tutor/commands/upgrade/__init__.py @@ -0,0 +1,13 @@ +# Note: don't forget to change this when we upgrade from redwood +OPENEDX_RELEASE_NAMES = [ + "ironwood", + "juniper", + "koa", + "lilac", + "maple", + "nutmeg", + "olive", + "palm", + "quince", + "redwood", +] diff --git a/tutor/commands/upgrade/common.py b/tutor/commands/upgrade/common.py new file mode 100644 index 0000000000..783be6435d --- /dev/null +++ b/tutor/commands/upgrade/common.py @@ -0,0 +1,99 @@ +from __future__ import annotations + +from typing import Optional + +import click +from packaging import version + +from tutor import config as tutor_config +from tutor import fmt, plugins +from tutor.types import Config, get_typed + + +def upgrade_from_lilac(config: Config) -> None: + if not plugins.is_installed("forum"): + fmt.echo_alert( + "The Open edX forum feature was moved to a separate plugin in Maple. To keep using this feature, " + "you must install and enable the tutor-forum plugin: https://github.com/overhangio/tutor-forum" + ) + elif not plugins.is_loaded("forum"): + fmt.echo_info( + "The Open edX forum feature was moved to a separate plugin in Maple. To keep using this feature, " + "we will now enable the 'forum' plugin. If you do not want to use this feature, you should disable the " + "plugin with: `tutor plugins disable forum`." + ) + plugins.load("forum") + tutor_config.save_enabled_plugins(config) + + if not plugins.is_installed("mfe"): + fmt.echo_alert( + "In Maple the legacy courseware is no longer supported. 
You need to install and enable the 'mfe' plugin " + "to make use of the new learning microfrontend: https://github.com/overhangio/tutor-mfe" + ) + elif not plugins.is_loaded("mfe"): + fmt.echo_info( + "In Maple the legacy courseware is no longer supported. To start using the new learning microfrontend, " + "we will now enable the 'mfe' plugin. If you do not want to use this feature, you should disable the " + "plugin with: `tutor plugins disable mfe`." + ) + plugins.load("mfe") + tutor_config.save_enabled_plugins(config) + + +def upgrade_from_nutmeg(context: click.Context, config: Config) -> None: + context.obj.job_runner(config).run_task( + "lms", "./manage.py lms compute_grades -v1 --all_courses" + ) + + +def get_mongo_upgrade_parameters( + docker_version: str, compatibility_version: str +) -> tuple[int, dict[str, int | str]]: + """ + Helper utility to get parameters required during mongo upgrade. + """ + mongo_version = int(docker_version.split(".")[0]) + admin_command: dict[str, int | str] = { + "setFeatureCompatibilityVersion": compatibility_version + } + if mongo_version == 7: + # Explicit confirmation is required to upgrade to 7 from 6 + # https://www.mongodb.com/docs/manual/reference/command/setFeatureCompatibilityVersion/#confirm + admin_command.update({"confirm": 1}) + return mongo_version, admin_command + + +def get_intermediate_mysql_upgrade(config: Config) -> Optional[str]: + """ + Checks if a MySQL upgrade is needed based on the Tutor version and MySQL setup. + + This method ensures that MySQL is running and determines if the upgrade + process should proceed based on the Tutor version. It is intended for upgrades + from Tutor version 15 to version 18 or later. Manual upgrade steps are not + required for versions 16 or 17. + + Returns: + Optional[str]: The docker image of MySQL to upgrade to or None if not applicable + """ + if not get_typed(config, "RUN_MYSQL", bool): + fmt.echo_info( + "You are not running MySQL (RUN_MYSQL=false). 
It is your " + "responsibility to upgrade your MySQL instance to v8.4. There is " + "nothing left to do to upgrade from Olive." + ) + return None + image_tag = get_typed(config, "DOCKER_IMAGE_MYSQL", str).split(":")[-1] + # If latest image, we assign a constant value to invalidate the condition + # as we know that the latest image will always be greater than 8.1.0 + target_version = ( + version.Version("8.1.1") if image_tag == "latest" else version.parse(image_tag) + ) + return "docker.io/mysql:8.1.0" if target_version > version.parse("8.1.0") else None + + +PALM_RENAME_ORA2_FOLDER_COMMAND = """ +if stat '/openedx/data/ora2/SET-ME-PLEASE (ex. bucket-name)' 2> /dev/null; then + echo "Renaming ora2 folder..." + mv '/openedx/data/ora2/SET-ME-PLEASE (ex. bucket-name)' /openedx/data/ora2/openedxuploads +fi +""" diff --git a/tutor/commands/upgrade/compose.py b/tutor/commands/upgrade/compose.py new file mode 100644 index 0000000000..4f8f7773d8 --- /dev/null +++ b/tutor/commands/upgrade/compose.py @@ -0,0 +1,261 @@ +from time import sleep + +import click + +from tutor import config as tutor_config +from tutor import env as tutor_env +from tutor import hooks +from tutor import fmt +from tutor.commands import compose, jobs +from tutor.types import Config + +from . 
import common as common_upgrade + + +def upgrade_from(context: click.Context, from_release: str) -> None: + # Make sure to bypass current version check + config = tutor_config.load_full(context.obj.root) + running_release = from_release + if running_release == "ironwood": + upgrade_from_ironwood(context, config) + running_release = "juniper" + + if running_release == "juniper": + upgrade_from_juniper(context, config) + running_release = "koa" + + if running_release == "koa": + upgrade_from_koa(context, config) + running_release = "lilac" + + if running_release == "lilac": + common_upgrade.upgrade_from_lilac(config) + running_release = "maple" + + if running_release == "maple": + upgrade_from_maple(context, config) + running_release = "nutmeg" + + if running_release == "nutmeg": + common_upgrade.upgrade_from_nutmeg(context, config) + running_release = "olive" + + if running_release == "olive": + upgrade_from_olive(context, config) + running_release = "palm" + + if running_release == "palm": + running_release = "quince" + + if running_release == "quince": + upgrade_from_quince(context, config) + running_release = "redwood" + + +def upgrade_from_ironwood(context: click.Context, config: Config) -> None: + click.echo(fmt.title("Upgrading from Ironwood")) + tutor_env.save(context.obj.root, config) + + click.echo(fmt.title("Stopping any existing platform")) + context.invoke(compose.stop) + + upgrade_mongodb(context, config, "3.4", "3.4") + upgrade_mongodb(context, config, "3.6", "3.6") + + +def upgrade_from_juniper(context: click.Context, config: Config) -> None: + click.echo(fmt.title("Upgrading from Juniper")) + tutor_env.save(context.obj.root, config) + + click.echo(fmt.title("Stopping any existing platform")) + context.invoke(compose.stop) + + if not config["RUN_MYSQL"]: + fmt.echo_info( + "You are not running MySQL (RUN_MYSQL=false). It is your " + "responsibility to upgrade your MySQL instance to v5.7. There is " + "nothing left to do to upgrade from Juniper." 
+ ) + return + + click.echo(fmt.title("Upgrading MySQL from v5.6 to v5.7")) + context.invoke(compose.start, detach=True, services=["mysql"]) + context.invoke( + compose.execute, + args=[ + "mysql", + "bash", + "-e", + "-c", + f"mysql_upgrade -u {config['MYSQL_ROOT_USERNAME']} --password='{config['MYSQL_ROOT_PASSWORD']}'", + ], + ) + context.invoke(compose.stop) + + +def upgrade_from_koa(context: click.Context, config: Config) -> None: + click.echo(fmt.title("Upgrading from Koa")) + if not config["RUN_MONGODB"]: + fmt.echo_info( + "You are not running MongoDB (RUN_MONGODB=false). It is your " + "responsibility to upgrade your MongoDb instance to v4.0. There is " + "nothing left to do to upgrade from Koa to Lilac." + ) + return + upgrade_mongodb(context, config, "4.0.25", "4.0") + + +def upgrade_from_maple(context: click.Context, config: Config) -> None: + click.echo(fmt.title("Upgrading from Maple")) + # The environment needs to be updated because the management commands are from Nutmeg + tutor_env.save(context.obj.root, config) + # Command backpopulate_user_tours + context.invoke( + compose.run, + args=["lms", "sh", "-e", "-c", "./manage.py lms migrate user_tours"], + ) + context.invoke( + compose.run, + args=["lms", "sh", "-e", "-c", "./manage.py lms backpopulate_user_tours"], + ) + # Command backfill_course_tabs + context.invoke( + compose.run, + args=["cms", "sh", "-e", "-c", "./manage.py cms migrate contentstore"], + ) + context.invoke( + compose.run, + args=[ + "cms", + "sh", + "-e", + "-c", + "./manage.py cms migrate split_modulestore_django", + ], + ) + context.invoke( + compose.run, + args=["cms", "sh", "-e", "-c", "./manage.py cms backfill_course_tabs"], + ) + # Command simulate_publish + context.invoke( + compose.run, + args=["cms", "sh", "-e", "-c", "./manage.py cms migrate course_overviews"], + ) + context.invoke( + compose.run, + args=["cms", "sh", "-e", "-c", "./manage.py cms simulate_publish"], + ) + + +def upgrade_from_olive(context: click.Context, 
config: Config) -> None: + # Note that we need to exec because the ora2 folder is not bind-mounted in the job + # services. + context.invoke(compose.start, detach=True, services=["lms"]) + context.invoke( + compose.execute, + args=["lms", "sh", "-e", "-c", common_upgrade.PALM_RENAME_ORA2_FOLDER_COMMAND], + ) + upgrade_mongodb(context, config, "4.2.17", "4.2") + upgrade_mongodb(context, config, "4.4.22", "4.4") + + intermediate_mysql_docker_image = common_upgrade.get_intermediate_mysql_upgrade( + config + ) + if not intermediate_mysql_docker_image: + return + + click.echo(fmt.title(f"Upgrading MySQL to {intermediate_mysql_docker_image}")) + + # We start up a mysql-8.1 container to build data dictionary to preserve + # the upgrade order of 5.7 -> 8.1 -> 8.4 + # Use the mysql-8.1 context so that we can clear these filters later on + with hooks.Contexts.app("mysql-8.1").enter(): + hooks.Filters.ENV_PATCHES.add_items( + [ + ( + "local-docker-compose-services", + """ +mysql-8.1: + extends: mysql + image: docker.io/mysql:8.1.0 + command: > + mysqld + --character-set-server=utf8mb3 + --collation-server=utf8mb3_general_ci + --binlog-expire-logs-seconds=259200 + """, + ), + ( + "local-docker-compose-jobs-services", + """ +mysql-8.1-job: + image: docker.io/mysql:8.1.0 + depends_on: {{ [("mysql-8.1", RUN_MYSQL)]|list_if }} + """, + ), + ] + ) + hooks.Filters.CONFIG_DEFAULTS.add_item(("MYSQL_HOST", "mysql-8.1")) + + hooks.Filters.CLI_DO_INIT_TASKS.add_item( + ("mysql-8.1", tutor_env.read_core_template_file("jobs", "init", "mysql.sh")) + ) + + tutor_env.save(context.obj.root, config) + + # Run the init command to make sure MySQL is ready for connections + context.invoke(jobs.initialise, limit="mysql-8.1") + context.invoke(compose.stop, services=["mysql-8.1"]) + + # Clear the filters added for mysql-8.1 as we don't need them anymore + hooks.clear_all(context="app:mysql-8.1") + + # Save environment and run init for mysql 8.4 to make sure MySQL is ready + 
tutor_env.save(context.obj.root, config) + context.invoke(jobs.initialise, limit="mysql") + context.invoke(compose.stop, services=["mysql"]) + + +def upgrade_from_quince(context: click.Context, config: Config) -> None: + click.echo(fmt.title("Upgrading from Quince")) + upgrade_mongodb(context, config, "5.0.26", "5.0") + upgrade_mongodb(context, config, "6.0.14", "6.0") + upgrade_mongodb(context, config, "7.0.7", "7.0") + + +def upgrade_mongodb( + context: click.Context, + config: Config, + to_docker_version: str, + to_compatibility_version: str, +) -> None: + if not config["RUN_MONGODB"]: + fmt.echo_info( + f"You are not running MongoDB (RUN_MONGODB=false). It is your " + f"responsibility to upgrade your MongoDb instance to {to_docker_version}." + ) + return + + mongo_version, admin_command = common_upgrade.get_mongo_upgrade_parameters( + to_docker_version, to_compatibility_version + ) + click.echo(fmt.title(f"Upgrading MongoDb to v{to_docker_version}")) + + # Note that the DOCKER_IMAGE_MONGODB value is never saved, because we only save the + # environment, not the configuration. + config["DOCKER_IMAGE_MONGODB"] = f"mongo:{to_docker_version}" + tutor_env.save(context.obj.root, config) + context.invoke(compose.start, detach=True, services=["mongodb"]) + fmt.echo_info("Waiting for mongodb to boot...") + sleep(10) + context.invoke( + compose.execute, + args=[ + "mongodb", + "mongosh" if mongo_version >= 6 else "mongo", + "--eval", + f"db.adminCommand({admin_command})", + ], + ) + context.invoke(compose.stop) diff --git a/tutor/commands/upgrade/k8s.py b/tutor/commands/upgrade/k8s.py new file mode 100644 index 0000000000..e547bf4b80 --- /dev/null +++ b/tutor/commands/upgrade/k8s.py @@ -0,0 +1,337 @@ +import click + +from tutor import config as tutor_config +from tutor import env as tutor_env +from tutor import fmt, hooks +from tutor.commands import k8s +from tutor.commands.context import Context +from tutor.types import Config + +from . 
import common as common_upgrade + + +def upgrade_from(context: click.Context, from_release: str) -> None: + config = tutor_config.load(context.obj.root) + + running_release = from_release + if running_release == "ironwood": + upgrade_from_ironwood(config) + running_release = "juniper" + + if running_release == "juniper": + upgrade_from_juniper(config) + running_release = "koa" + + if running_release == "koa": + upgrade_from_koa(config) + running_release = "lilac" + + if running_release == "lilac": + upgrade_from_lilac(config) + running_release = "maple" + + if running_release == "maple": + upgrade_from_maple(context.obj, config) + running_release = "nutmeg" + + if running_release == "nutmeg": + common_upgrade.upgrade_from_nutmeg(context, config) + running_release = "olive" + + if running_release == "olive": + upgrade_from_olive(context, config) + running_release = "palm" + + if running_release == "palm": + running_release = "quince" + + if running_release == "quince": + upgrade_from_quince(config) + running_release = "redwood" + + +def upgrade_from_ironwood(config: Config) -> None: + upgrade_mongodb(config, "3.4.24", "3.4") + upgrade_mongodb(config, "3.6.18", "3.6") + + +def upgrade_from_juniper(config: Config) -> None: + if not config["RUN_MYSQL"]: + fmt.echo_info( + "You are not running MySQL (RUN_MYSQL=false). It is your " + "responsibility to upgrade your MySQL instance to v5.7. There is " + "nothing left to do to upgrade from Juniper." + ) + return + + message = """Automatic release upgrade is unsupported in Kubernetes. To upgrade from Juniper, you should upgrade +your MySQL database from v5.6 to v5.7. 
You should run something similar to: + + tutor k8s start + tutor k8s exec mysql bash -e -c "mysql_upgrade \ + -u $(tutor config printvalue MYSQL_ROOT_USERNAME) \ + --password='$(tutor config printvalue MYSQL_ROOT_PASSWORD)' +""" + fmt.echo_info(message) + + +def upgrade_from_koa(config: Config) -> None: + upgrade_mongodb(config, "4.0.25", "4.0") + + +def upgrade_from_lilac(config: Config) -> None: + common_upgrade.upgrade_from_lilac(config) + fmt.echo_info( + "All Kubernetes services and deployments need to be deleted during " + "upgrade from Lilac to Maple" + ) + k8s.delete_resources(config, resources=["deployments", "services"]) + + +def upgrade_from_maple(context: Context, config: Config) -> None: + fmt.echo_info("Upgrading from Maple") + # The environment needs to be updated because the backpopulate/backfill commands are from Nutmeg + tutor_env.save(context.root, config) + + if config["RUN_MYSQL"]: + # Start mysql + k8s.kubectl_apply( + context.root, + "--selector", + "app.kubernetes.io/name=mysql", + ) + k8s.wait_for_deployment_ready(config, "mysql") + + # lms upgrade + k8s.kubectl_apply( + context.root, + "--selector", + "app.kubernetes.io/name=lms", + ) + k8s.wait_for_deployment_ready(config, "lms") + + # Command backpopulate_user_tours + k8s.kubectl_exec( + config, "lms", ["sh", "-e", "-c", "./manage.py lms migrate user_tours"] + ) + k8s.kubectl_exec( + config, "lms", ["sh", "-e", "-c", "./manage.py lms backpopulate_user_tours"] + ) + + # cms upgrade + k8s.kubectl_apply( + context.root, + "--selector", + "app.kubernetes.io/name=cms", + ) + k8s.wait_for_deployment_ready(config, "cms") + + # Command backfill_course_tabs + k8s.kubectl_exec( + config, "cms", ["sh", "-e", "-c", "./manage.py cms migrate contentstore"] + ) + k8s.kubectl_exec( + config, + "cms", + ["sh", "-e", "-c", "./manage.py cms migrate split_modulestore_django"], + ) + k8s.kubectl_exec( + config, "cms", ["sh", "-e", "-c", "./manage.py cms backfill_course_tabs"] + ) + + # Command 
simulate_publish + k8s.kubectl_exec( + config, "cms", ["sh", "-e", "-c", "./manage.py cms migrate course_overviews"] + ) + k8s.kubectl_exec( + config, "cms", ["sh", "-e", "-c", "./manage.py cms simulate_publish"] + ) + + +def upgrade_from_olive(context: click.Context, config: Config) -> None: + # Note that we need to exec because the ora2 folder is not bind-mounted in the job + # services. + k8s.kubectl_apply( + context.obj.root, + "--selector", + "app.kubernetes.io/name=lms", + ) + k8s.wait_for_deployment_ready(config, "lms") + k8s.kubectl_exec( + config, + "lms", + ["sh", "-e", "-c", common_upgrade.PALM_RENAME_ORA2_FOLDER_COMMAND], + ) + upgrade_mongodb(config, "4.2.17", "4.2") + upgrade_mongodb(config, "4.4.22", "4.4") + + intermediate_mysql_docker_image = common_upgrade.get_intermediate_mysql_upgrade( + config + ) + if not intermediate_mysql_docker_image: + return + + click.echo(fmt.title(f"Upgrading MySQL to {intermediate_mysql_docker_image}")) + + # We start up a mysql-8.1 container to build data dictionary to preserve + # the upgrade order of 5.7 -> 8.1 -> 8.4 + # Use the mysql-8.1 context so that we can clear these filters later on + with hooks.Contexts.app("mysql-8.1").enter(): + hooks.Filters.ENV_PATCHES.add_items( + [ + ( + "k8s-deployments", + """ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mysql-81 + labels: + app.kubernetes.io/name: mysql-81 +spec: + selector: + matchLabels: + app.kubernetes.io/name: mysql-81 + strategy: + type: Recreate + template: + metadata: + labels: + app.kubernetes.io/name: mysql-81 + spec: + securityContext: + runAsUser: 999 + runAsGroup: 999 + fsGroup: 999 + fsGroupChangePolicy: "OnRootMismatch" + containers: + - name: mysql-81 + image: docker.io/mysql:8.1.0 + args: + - "mysqld" + - "--character-set-server=utf8mb3" + - "--collation-server=utf8mb3_general_ci" + - "--binlog-expire-logs-seconds=259200" + env: + - name: MYSQL_ROOT_PASSWORD + value: "{{ MYSQL_ROOT_PASSWORD }}" + ports: + - containerPort: 3306 + 
volumeMounts: + - mountPath: /var/lib/mysql + name: data + securityContext: + allowPrivilegeEscalation: false + volumes: + - name: data + persistentVolumeClaim: + claimName: mysql + """, + ), + ( + "k8s-jobs", + """ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: mysql-81-job + labels: + app.kubernetes.io/component: job +spec: + template: + spec: + restartPolicy: Never + containers: + - name: mysql-81 + image: docker.io/mysql:8.1.0 + """, + ), + ] + ) + hooks.Filters.ENV_PATCHES.add_item( + ( + "k8s-services", + """ +--- +apiVersion: v1 +kind: Service +metadata: + name: mysql-81 + labels: + app.kubernetes.io/name: mysql-81 +spec: + type: ClusterIP + ports: + - port: 3306 + protocol: TCP + selector: + app.kubernetes.io/name: mysql-81 + """, + ) + ) + hooks.Filters.CONFIG_DEFAULTS.add_item(("MYSQL_HOST", "mysql-81")) + + hooks.Filters.CLI_DO_INIT_TASKS.add_item( + ("mysql-81", tutor_env.read_core_template_file("jobs", "init", "mysql.sh")) + ) + + tutor_env.save(context.obj.root, config) + + # Run the init command to make sure MySQL is ready for connections + k8s.kubectl_apply( + context.obj.root, + "--selector", + "app.kubernetes.io/name=mysql-81", + ) + k8s.wait_for_deployment_ready(config, "mysql-81") + context.invoke(k8s.do.commands["init"], limit="mysql-8.1") + context.invoke(k8s.stop, names=["mysql-81"]) + + # Clear the filters added for mysql-8.1 as we don't need them anymore + hooks.clear_all(context="app:mysql-8.1") + + # Save environment and run init for mysql 8.4 to make sure MySQL is ready + tutor_env.save(context.obj.root, config) + k8s.kubectl_apply( + context.obj.root, + "--selector", + "app.kubernetes.io/name=mysql", + ) + k8s.wait_for_deployment_ready(config, "mysql") + context.invoke(k8s.do.commands["init"], limit="mysql") + context.invoke(k8s.stop, names=["mysql"]) + + +def upgrade_from_quince(config: Config) -> None: + click.echo(fmt.title("Upgrading from Quince")) + upgrade_mongodb(config, "5.0.26", "5.0") + upgrade_mongodb(config, 
"6.0.14", "6.0") + upgrade_mongodb(config, "7.0.7", "7.0") + + +def upgrade_mongodb( + config: Config, to_docker_version: str, to_compatibility_version: str +) -> None: + if not config["RUN_MONGODB"]: + fmt.echo_info( + "You are not running MongoDB (RUN_MONGODB=false). It is your " + "responsibility to upgrade your MongoDb instance to {to_docker_version}." + ) + return + mongo_version, admin_command = common_upgrade.get_mongo_upgrade_parameters( + to_docker_version, to_compatibility_version + ) + mongo_binary = "mongosh" if mongo_version >= 6 else "mongo" + + message = f"""Automatic release upgrade is unsupported in Kubernetes. You should manually upgrade +your MongoDb cluster to {to_docker_version} by running something similar to: + + tutor k8s stop + tutor config save --set DOCKER_IMAGE_MONGODB=mongo:{to_docker_version} + tutor k8s start + tutor k8s exec mongodb {mongo_binary} --eval 'db.adminCommand({admin_command})' + tutor config save --unset DOCKER_IMAGE_MONGODB + """ + fmt.echo_info(message) diff --git a/tutor/commands/webui.py b/tutor/commands/webui.py deleted file mode 100644 index 896910eb2c..0000000000 --- a/tutor/commands/webui.py +++ /dev/null @@ -1,152 +0,0 @@ -import io -import os -import platform -import subprocess -import sys -import tarfile -from urllib.request import urlopen - -import click - -# Note: it is important that this module does not depend on config, such that -# the web ui can be launched even where there is no configuration. -from .. import fmt -from .. import env as tutor_env -from .. 
import serialize - - -@click.group( - short_help="Web user interface", help="""Run Tutor commands from a web terminal""" -) -def webui(): - pass - - -@click.command(help="Start the web UI") -@click.option( - "-p", - "--port", - default=3737, - type=int, - show_default=True, - help="Port number to listen", -) -@click.option( - "-h", "--host", default="0.0.0.0", show_default=True, help="Host address to listen" -) -@click.pass_obj -def start(context, port, host): - check_gotty_binary(context.root) - fmt.echo_info("Access the Tutor web UI at http://{}:{}".format(host, port)) - while True: - config = load_config(context.root) - user = config["user"] - password = config["password"] - command = [ - gotty_path(context.root), - "--permit-write", - "--address", - host, - "--port", - str(port), - "--title-format", - "Tutor web UI - {{ .Command }} ({{ .Hostname }})", - ] - if user and password: - credential = "{}:{}".format(user, password) - command += ["--credential", credential] - else: - fmt.echo_alert( - "Running web UI without user authentication. Run 'tutor webui configure' to setup authentication" - ) - command += [sys.argv[0], "ui"] - p = subprocess.Popen(command) - while True: - try: - p.wait(timeout=2) - except subprocess.TimeoutExpired: - new_config = load_config(context.root) - if new_config != config: - click.echo( - "WARNING configuration changed. Tutor web UI is now going to restart. Reload this page to continue." - ) - p.kill() - p.wait() - break - - -@click.command(help="Configure authentication") -@click.option("-u", "--user", prompt="User name", help="Authentication user name") -@click.option( - "-p", - "--password", - prompt=True, - hide_input=True, - confirmation_prompt=True, - help="Authentication password", -) -@click.pass_obj -def configure(context, user, password): - save_webui_config_file(context.root, {"user": user, "password": password}) - fmt.echo_info( - "The web UI configuration has been updated. 
" - "If at any point you wish to reset your username and password, " - "just delete the following file:\n\n {}".format(config_path(context.root)) - ) - - -def check_gotty_binary(root): - path = gotty_path(root) - if os.path.exists(path): - return - fmt.echo_info("Downloading gotty to {}...".format(path)) - - # Generate release url - # Note: I don't know how to handle arm - architecture = "amd64" if platform.architecture()[0] == "64bit" else "386" - url = "https://github.com/yudai/gotty/releases/download/v1.0.1/gotty_{system}_{architecture}.tar.gz".format( - system=platform.system().lower(), architecture=architecture - ) - - # Download - response = urlopen(url) - - # Decompress - dirname = os.path.dirname(path) - if not os.path.exists(dirname): - os.makedirs(dirname) - compressed = tarfile.open(fileobj=io.BytesIO(response.read())) - compressed.extract("./gotty", dirname) - - -def load_config(root): - path = config_path(root) - if not os.path.exists(path): - save_webui_config_file(root, {"user": None, "password": None}) - with open(config_path(root)) as f: - return serialize.load(f) - - -def save_webui_config_file(root, config): - path = config_path(root) - directory = os.path.dirname(path) - if not os.path.exists(directory): - os.makedirs(directory) - with open(path, "w") as of: - serialize.dump(config, of) - - -def gotty_path(root): - return get_path(root, "gotty") - - -def config_path(root): - return get_path(root, "config.yml") - - -def get_path(root, filename): - return tutor_env.pathjoin(root, "webui", filename) - - -webui.add_command(start) -webui.add_command(configure) diff --git a/tutor/config.py b/tutor/config.py index a0f98f0889..531056792c 100644 --- a/tutor/config.py +++ b/tutor/config.py @@ -1,143 +1,209 @@ +from __future__ import annotations + import os +import typing as t +from copy import deepcopy + +from tutor import env, exceptions, fmt, hooks, plugins, serialize, utils +from tutor.types import Config, ConfigValue, cast_config, get_typed -from . 
import exceptions -from . import env -from . import fmt -from . import plugins -from . import serialize -from . import utils +CONFIG_FILENAME = "config.yml" -def update(root): +def load(root: str) -> Config: """ - Load and save the configuration. + Load full configuration. + + This will raise an exception if there is no current configuration in the + project root. A warning will also be printed if the version from disk + differs from the package version. """ - config, defaults = load_all(root) - save_config_file(root, config) - merge(config, defaults) - return config + if not os.path.exists(config_path(root)): + raise exceptions.TutorError( + "Project root does not exist. Make sure to generate the initial " + "configuration with `tutor config save --interactive` or `tutor local " + "launch` prior to running other commands." + ) + env.check_is_up_to_date(root) + return load_full(root) -def load(root): +def load_defaults() -> Config: """ - Load full configuration. This will raise an exception if there is no current - configuration in the project root. + Load default configuration. """ - check_existing_config(root) - return load_no_check(root) + config: Config = {} + update_with_defaults(config) + return config -def load_no_check(root): - config, defaults = load_all(root) - merge(config, defaults) +def load_minimal(root: str) -> Config: + """ + Load a minimal configuration composed of the user and the base config. + + This configuration is not suitable for rendering templates, as it is incomplete. + """ + config = get_user(root) + update_with_base(config) + render_full(config) return config -def load_all(root): +def load_full(root: str) -> Config: """ + Load a full configuration, with user, base and defaults. 
+ Return: current (dict): params currently saved in config.yml defaults (dict): default values of params which might be missing from the current config """ - defaults = load_defaults() - current = load_current(root, defaults) - return current, defaults + config = get_user(root) + update_with_base(config) + update_with_defaults(config) + render_full(config) + hooks.Actions.CONFIG_LOADED.do(deepcopy(config)) + return config -def merge(config, defaults, force=False): +def update_with_base(config: Config) -> None: """ - Merge default values with user configuration and perform rendering of "{{...}}" - values. + Add base configuration to the config object. + + Note that configuration entries are unrendered at this point. """ - for key, value in defaults.items(): - if force or key not in config: - config[key] = env.render_unknown(config, value) + base = get_base() + merge(config, base) -def load_defaults(): - return serialize.load(env.read_template_file("config.yml")) +def update_with_defaults(config: Config) -> None: + """ + Add default configuration to the config object. + + Note that configuration entries are unrendered at this point. + """ + defaults = get_defaults() + merge(config, defaults) -def load_config_file(path): - with open(path) as f: - return serialize.load(f.read()) +def update_with_env(config: Config) -> None: + """ + Override config values from environment variables. + """ + overrides = {} + for k in config.keys(): + env_var = "TUTOR_" + k + if env_var in os.environ: + overrides[k] = serialize.parse(os.environ[env_var]) + config.update(overrides) -def load_current(root, defaults): +def get_user(root: str) -> Config: """ - Load the configuration currently stored on disk. - Note: this modifies the defaults with the plugin default values. + Get the user configuration from the tutor root. + + Overrides from environment variables are loaded as well. 
""" convert_json2yml(root) - config = load_user(root) - load_env(config, defaults) - load_required(config, defaults) - load_plugins(config, defaults) + path = config_path(root) + config = {} + if os.path.exists(path): + config = get_yaml_file(path) + upgrade_obsolete(config) + update_with_env(config) + + for name, value in hooks.Filters.CONFIG_USER.iterate(): + config[name] = value return config -def load_user(root): - path = config_path(root) - if not os.path.exists(path): - return {} +def get_base() -> Config: + """ + Load the base configuration. - config = load_config_file(path) - upgrade_obsolete(config) - return config + Entries in this configuration are unrendered. + """ + base = get_template("base.yml") + extra_config: list[tuple[str, ConfigValue]] = [] + extra_config = hooks.Filters.CONFIG_UNIQUE.apply(extra_config) + extra_config = hooks.Filters.CONFIG_OVERRIDES.apply(extra_config) + for name, value in extra_config: + if name in base: + fmt.echo_alert( + f"Found conflicting values for setting '{name}': '{value}' or '{base[name]}'" + ) + base[name] = value + return base -def load_env(config, defaults): - for k in defaults.keys(): - env_var = "TUTOR_" + k - if env_var in os.environ: - config[k] = serialize.parse(os.environ[env_var]) +def get_defaults() -> Config: + """ + Get default configuration, including from plugins. + + Entries in this configuration are unrendered. + """ + defaults = dict(hooks.Filters.CONFIG_DEFAULTS.iterate()) + update_with_env(defaults) + return defaults + +@hooks.Filters.CONFIG_DEFAULTS.add(priority=hooks.priorities.HIGH) +def _load_config_defaults_yml( + items: list[tuple[str, t.Any]] +) -> list[tuple[str, t.Any]]: + defaults = get_template("defaults.yml") + items += list(defaults.items()) + return items -def load_required(config, defaults): + +def get_template(filename: str) -> Config: """ - All these keys must be present in the user's config.yml. 
This includes all values - that are generated once and must be kept after that, such as passwords. + Get one of the configuration templates. + + Entries in this configuration are unrendered. """ - for key in [ - "OPENEDX_SECRET_KEY", - "MYSQL_ROOT_PASSWORD", - "OPENEDX_MYSQL_PASSWORD", - "ANDROID_OAUTH2_SECRET", - "ID", - "JWT_RSA_PRIVATE_KEY", - ]: - if key not in config: - config[key] = env.render_unknown(config, defaults[key]) + config = serialize.load(env.read_core_template_file("config", filename)) + return cast_config(config) -def load_plugins(config, defaults): +def get_yaml_file(path: str) -> Config: """ - Add, override and set new defaults from plugins. + Load config from yaml file. """ - for plugin in plugins.iter_enabled(config): - # Add new config key/values - for key, value in plugin.config_add.items(): - new_key = plugin.config_key(key) - if new_key not in config: - config[new_key] = env.render_unknown(config, value) + with open(path, encoding="utf-8") as f: + config = serialize.load(f.read()) + return cast_config(config) + + +def merge(config: Config, base: Config) -> None: + """ + Merge base values with user configuration. Values are only added if not + already present. + + Note that this function does not perform the rendering step of the + configuration entries. + """ + for key, value in base.items(): + if key not in config: + config[key] = value + - # Create new defaults - for key, value in plugin.config_defaults.items(): - defaults[plugin.config_key(key)] = value +def render_full(config: Config) -> None: + """ + Fill and render an existing configuration with defaults. 
- # Set existing config key/values: here, we do not override existing values - # This must come last, as overridden values may depend on plugin defaults - for key, value in plugin.config_set.items(): - if key not in config: - config[key] = env.render_unknown(config, value) + It is generally necessary to apply this function before rendering templates, + otherwise configuration entries may not be rendered. + """ + for key, value in config.items(): + config[key] = env.render_unknown(config, value) -def is_service_activated(config, service): - return config["RUN_" + service.upper()] +def is_service_activated(config: Config, service: str) -> bool: + return config["RUN_" + service.upper()] is not False -def upgrade_obsolete(config): +def upgrade_obsolete(config: Config) -> None: # Openedx-specific mysql passwords if "MYSQL_PASSWORD" in config: config["MYSQL_ROOT_PASSWORD"] = config["MYSQL_PASSWORD"] @@ -149,11 +215,13 @@ def upgrade_obsolete(config): config["OPENEDX_MYSQL_USERNAME"] = config.pop("MYSQL_USERNAME") if "RUN_NOTES" in config: if config["RUN_NOTES"]: - plugins.enable(config, "notes") + plugins.load("notes") + save_enabled_plugins(config) config.pop("RUN_NOTES") if "RUN_XQUEUE" in config: if config["RUN_XQUEUE"]: - plugins.enable(config, "xqueue") + plugins.load("xqueue") + save_enabled_plugins(config) config.pop("RUN_XQUEUE") if "SECRET_KEY" in config: config["OPENEDX_SECRET_KEY"] = config.pop("SECRET_KEY") @@ -167,7 +235,6 @@ def upgrade_obsolete(config): for name in [ "ACTIVATE_LMS", "ACTIVATE_CMS", - "ACTIVATE_FORUM", "ACTIVATE_ELASTICSEARCH", "ACTIVATE_MONGODB", "ACTIVATE_MYSQL", @@ -176,9 +243,14 @@ def upgrade_obsolete(config): ]: if name in config: config[name.replace("ACTIVATE_", "RUN_")] = config.pop(name) + # Replace nginx by caddy + if "RUN_CADDY" in config: + config["ENABLE_WEB_PROXY"] = config.pop("RUN_CADDY") + if "NGINX_HTTP_PORT" in config: + config["CADDY_HTTP_PORT"] = config.pop("NGINX_HTTP_PORT") -def convert_json2yml(root): +def 
convert_json2yml(root: str) -> None: """ Older versions of tutor used to have json config files. """ @@ -187,38 +259,103 @@ def convert_json2yml(root): return if os.path.exists(config_path(root)): raise exceptions.TutorError( - "Both config.json and config.yml exist in {}: only one of these files must exist to continue".format( - root - ) + f"Both config.json and {CONFIG_FILENAME} exist in {root}: only one of these files must exist to continue" ) - config = load_config_file(json_path) + config = get_yaml_file(json_path) save_config_file(root, config) os.remove(json_path) fmt.echo_info( - "File config.json detected in {} and converted to config.yml".format(root) + f"File config.json detected in {root} and converted to {CONFIG_FILENAME}" ) -def save_config_file(root, config): +def save_config_file(root: str, config: Config) -> None: path = config_path(root) utils.ensure_file_directory_exists(path) - with open(path, "w") as of: + with open(path, "w", encoding="utf-8") as of: serialize.dump(config, of) - fmt.echo_info("Configuration saved to {}".format(path)) + fmt.echo_info(f"Configuration saved to {path}") + + +def config_path(root: str) -> str: + return os.path.join(root, CONFIG_FILENAME) + +# Key name under which plugins are listed +PLUGINS_CONFIG_KEY = "PLUGINS" -def check_existing_config(root): + +def enable_plugins(config: Config) -> None: """ - Check there is a configuration on disk and the current environment is up-to-date. + Enable all plugins listed in the configuration. """ - if not os.path.exists(config_path(root)): - raise exceptions.TutorError( - "Project root does not exist. Make sure to generate the initial " - "configuration with `tutor config save --interactive` or `tutor local " - "quickstart` prior to running other commands." - ) - env.check_is_up_to_date(root) + plugins.load_all(get_enabled_plugins(config)) + + +def get_enabled_plugins(config: Config) -> list[str]: + """ + Return the list of plugins that are enabled, as per the configuration. 
Note that + this may differ from the list of loaded plugins. For instance when a plugin is + present in the configuration but it's not installed. + """ + return get_typed(config, PLUGINS_CONFIG_KEY, list, []) + + +def save_enabled_plugins(config: Config) -> None: + """ + Save the list of enabled plugins. + + Plugins are deduplicated by name. + """ + config[PLUGINS_CONFIG_KEY] = list(plugins.iter_loaded()) -def config_path(root): - return os.path.join(root, "config.yml") +@hooks.Actions.PROJECT_ROOT_READY.add() +def _enable_plugins(root: str) -> None: + """ + Enable plugins that are listed in the user configuration. + """ + config = load_minimal(root) + enable_plugins(config) + + +# This is run with a very high priority such that it is called before the plugin hooks +# are actually cleared. +@hooks.Actions.PLUGIN_UNLOADED.add(priority=hooks.priorities.HIGH - 1) +def _remove_plugin_config_overrides_on_unload( + plugin: str, _root: str, config: Config +) -> None: + # Find the configuration entries that were overridden by the plugin and + # remove them from the current config + for key, _value in hooks.Filters.CONFIG_OVERRIDES.iterate_from_context( + hooks.Contexts.app(plugin).name + ): + value = config.pop(key, None) + value = env.render_unknown(config, value) + fmt.echo_info(f" config - removing entry: {key}={value}") + + +@hooks.Actions.PLUGIN_UNLOADED.add(priority=hooks.priorities.LOW) +def _update_enabled_plugins_on_unload(_plugin: str, _root: str, config: Config) -> None: + """ + Update the list of enabled plugins. + + Note that this action must be performed after the plugin has been unloaded, hence the low priority. + """ + save_enabled_plugins(config) + + +@hooks.Actions.CONFIG_LOADED.add() +def _check_preview_lms_host(config: Config) -> None: + """ + This will check if the PREVIEW_LMS_HOST is a subdomain of LMS_HOST. + if not, prints a warning to notify the user. 
+ """ + + lms_host = get_typed(config, "LMS_HOST", str, "") + preview_lms_host = get_typed(config, "PREVIEW_LMS_HOST", str, "") + if not preview_lms_host.endswith("." + lms_host): + fmt.echo_alert( + f'Warning: PREVIEW_LMS_HOST="{preview_lms_host}" is not a subdomain of LMS_HOST="{lms_host}". ' + "This configuration is not typically recommended and may lead to unexpected behavior." + ) diff --git a/tutor/core/__init__.py b/tutor/core/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tutor/core/hooks/__init__.py b/tutor/core/hooks/__init__.py new file mode 100644 index 0000000000..fa1451a5bb --- /dev/null +++ b/tutor/core/hooks/__init__.py @@ -0,0 +1,13 @@ +import typing as t + +from .actions import Action +from .contexts import Context +from .filters import Filter + + +def clear_all(context: t.Optional[str] = None) -> None: + """ + Clear both actions and filters. + """ + Action.clear_all(context=context) + Filter.clear_all(context=context) diff --git a/tutor/core/hooks/actions.py b/tutor/core/hooks/actions.py new file mode 100644 index 0000000000..e081244c1d --- /dev/null +++ b/tutor/core/hooks/actions.py @@ -0,0 +1,171 @@ +from __future__ import annotations + +# The Tutor plugin system is licensed under the terms of the Apache 2.0 license. +__license__ = "Apache 2.0" + +import sys +import typing as t +from weakref import WeakSet + +from typing_extensions import ParamSpec + +from . import priorities +from .contexts import Contextualized + +#: Action generic signature. 
+T = ParamSpec("T") + +ActionCallbackFunc = t.Callable[T, None] + + +class ActionCallback(Contextualized, t.Generic[T]): + def __init__( + self, + func: ActionCallbackFunc[T], + priority: t.Optional[int] = None, + ): + super().__init__() + self.func = func + self.priority = priority or priorities.DEFAULT + + def do( + self, + *args: T.args, + **kwargs: T.kwargs, + ) -> None: + self.func(*args, **kwargs) + + +class Action(t.Generic[T]): + """ + Action hooks have callbacks that are triggered independently from one another. + + Several actions are defined across the codebase. Each action is given a unique name. + To each action are associated zero or more callbacks, sorted by priority. + + This is the typical action lifecycle: + + 1. Create an action with ``Action()``. + 2. Add callbacks with :py:meth:`add`. + 3. Call the action callbacks with :py:meth:`do`. + + The ``T`` type parameter of the Action class corresponds to the expected signature of + the action callbacks. For instance, ``Action[[str, int]]`` means that the action + callbacks are expected to take two arguments: one string and one integer. + + This strong typing makes it easier for plugin developers to quickly check whether + they are adding and calling action callbacks correctly. + """ + + # Keep a weak reference to all created filters. This allows us to clear them when + # necessary. + INSTANCES: WeakSet[Action[t.Any]] = WeakSet() + + def __init__(self) -> None: + self.callbacks: list[ActionCallback[T]] = [] + self.INSTANCES.add(self) + + def add( + self, priority: t.Optional[int] = None + ) -> t.Callable[[ActionCallbackFunc[T]], ActionCallbackFunc[T]]: + """ + Decorator to add a callback to an action. + + :param priority: optional order in which the action callbacks are performed. Higher + values mean that they will be performed later. The default value is + ``priorities.DEFAULT`` (10). Actions that should be performed last should have a + priority of 100. 
+ + Usage:: + + @my_action.add("my-action") + def do_stuff(my_arg): + ... + + The ``do_stuff`` callback function will be called on ``my_action.do(some_arg)``. + + The signature of each callback action function must match the signature of the + corresponding :py:meth:`do` method. Callback action functions are not supposed + to return any value. Returned values will be ignored. + """ + + def inner(func: ActionCallbackFunc[T]) -> ActionCallbackFunc[T]: + callback = ActionCallback(func, priority=priority) + priorities.insert_callback(callback, self.callbacks) + return func + + return inner + + def do( + self, + *args: T.args, + **kwargs: T.kwargs, + ) -> None: + """ + Run the action callbacks in sequence. + + :param name: name of the action for which callbacks will be run. + + Extra ``*args`` and ``*kwargs`` arguments will be passed as-is to + callback functions. + + Callbacks are executed in order of priority, then FIFO. There is no error + management here: a single exception will cause all following callbacks + not to be run and the exception will be bubbled up. + """ + self.do_from_context(None, *args, **kwargs) + + def do_from_context( + self, + context: t.Optional[str], + *args: T.args, + **kwargs: T.kwargs, + ) -> None: + """ + Same as :py:meth:`do` but only run the callbacks from a given context. + + :param name: name of the action for which callbacks will be run. + :param context: limit the set of callback actions to those that + were declared within a certain context (see + :py:func:`tutor.core.hooks.contexts.enter`). + """ + for callback in self.callbacks: + if callback.is_in_context(context): + try: + callback.do( + *args, + **kwargs, + ) + except: + sys.stderr.write( + f"Error applying action: func={callback.func} contexts={callback.contexts}'\n" + ) + raise + + def clear(self, context: t.Optional[str] = None) -> None: + """ + Clear all or part of the callbacks associated to an action + + :param name: name of the action callbacks to remove. 
+ :param context: when defined, will clear only the actions that were + created within that context. + + Actions will be removed from the list of callbacks and will no longer be + run in :py:meth:`do` calls. + + This function should almost certainly never be called by plugins. It is + mostly useful to disable some plugins at runtime or in unit tests. + """ + self.callbacks = [ + callback + for callback in self.callbacks + if not callback.is_in_context(context) + ] + + @classmethod + def clear_all(cls, context: t.Optional[str] = None) -> None: + """ + Clear any previously defined action with the given context. + """ + for action in cls.INSTANCES: + action.clear(context) diff --git a/tutor/core/hooks/contexts.py b/tutor/core/hooks/contexts.py new file mode 100644 index 0000000000..940d2bac6c --- /dev/null +++ b/tutor/core/hooks/contexts.py @@ -0,0 +1,92 @@ +from __future__ import annotations + +# The Tutor plugin system is licensed under the terms of the Apache 2.0 license. +__license__ = "Apache 2.0" + +import typing as t +from contextlib import contextmanager + + +class Context: + """ + Contexts are used to track in which parts of the code filters and actions have been + declared. Let's look at an example:: + + from tutor.core.hooks import contexts + + with contexts.enter("c1"): + @filters.add("f1") + def add_stuff_to_filter(...): + ... + + The fact that our custom filter was added in a certain context allows us to later + remove it. To do so, we write:: + + from tutor import hooks + filters.clear("f1", context="c1") + + For instance, contexts make it easy to disable side-effects by plugins, provided + they were created with a specific context. 
+ """ + + CURRENT: list[str] = [] + + def __init__(self, name: str): + self.name = name + + @contextmanager + def enter(self) -> t.Iterator[None]: + try: + Context.CURRENT.append(self.name) + yield + finally: + Context.CURRENT.pop() + + +class Contextualized: + """ + This is a simple class to store the current context in hooks. + + The current context is stored as a static variable. + """ + + def __init__(self) -> None: + self.contexts = Context.CURRENT[:] + + def is_in_context(self, context: t.Optional[str]) -> bool: + return context is None or context in self.contexts + + +def enter(name: str) -> t.ContextManager[None]: + """ + Identify created hooks with one or multiple context strings. + + :param name: name of the context that will be attached to hooks. + :rtype t.ContextManager[None]: + + Usage:: + + from tutor.core import hooks + + with hooks.contexts.enter("my-context"): + # declare new actions and filters + ... + + # Later on, actions and filters that were created within this context can be + # disabled with: + hooks.actions.clear_all(context="my-context") + hooks.filters.clear_all(context="my-context") + + This is a context manager that will attach a context name to all hook callbacks + created within its scope. The purpose of contexts is to solve an issue that + is inherent to pluggable hooks: it is difficult to track in which part of the + code each hook callback was created. This makes things hard to debug when a single + hook callback goes wrong. It also makes it impossible to disable some hook callbacks after + they have been created. + + We resolve this issue by storing the current contexts in a static list. + Whenever a hook is created, the list of current contexts is copied as a + ``contexts`` attribute. This attribute can be later examined, either for + removal or for limiting the set of hook callbacks that should be applied. 
+ """ + return Context(name).enter() diff --git a/tutor/core/hooks/filters.py b/tutor/core/hooks/filters.py new file mode 100644 index 0000000000..ef42325dbd --- /dev/null +++ b/tutor/core/hooks/filters.py @@ -0,0 +1,264 @@ +from __future__ import annotations + +# The Tutor plugin system is licensed under the terms of the Apache 2.0 license. +__license__ = "Apache 2.0" + +import sys +import typing as t +from weakref import WeakSet + +from typing_extensions import Concatenate, ParamSpec + +from . import contexts, priorities + +#: Filter generic return value, which is also the type of the first callback argument. +T1 = t.TypeVar("T1") +#: Filter generic signature for all arguments after the first one. +T2 = ParamSpec("T2") +#: Specialized typevar for list elements +L = t.TypeVar("L") + +FilterCallbackFunc = t.Callable[Concatenate[T1, T2], T1] + + +class FilterCallback(contexts.Contextualized, t.Generic[T1, T2]): + def __init__( + self, + func: FilterCallbackFunc[T1, T2], + priority: t.Optional[int] = None, + ): + super().__init__() + self.func = func + self.priority = priority or priorities.DEFAULT + + def apply(self, value: T1, *args: T2.args, **kwargs: T2.kwargs) -> T1: + return self.func(value, *args, **kwargs) + + +class Filter(t.Generic[T1, T2]): + """ + Filter hooks have callbacks that are triggered as a chain. + + Several filters are defined across the codebase. Each filters is given a unique + name. To each filter are associated zero or more callbacks, sorted by priority. + + This is the typical filter lifecycle: + + 1. Create a filter with ``Filter()``. + 2. Add callbacks with :py:meth:`add`. + 3. Call the filter callbacks with method :py:meth:`apply`. + + The result of each callback is passed as the first argument to the next one. Thus, + the type of the first argument must match the callback return type. + + The ``T1`` and ``T2`` type parameters of the Filter class correspond to the expected + signature of the filter callbacks. 
``T1`` is the type of the first argument (and thus + the return value type as well) and ``T2`` is the signature of the other arguments. + + For instance, `Filter[str, [int]]` means that the filter callbacks are expected to + take two arguments: one string and one integer. Each callback must then return a + string. + + This strong typing makes it easier for plugin developers to quickly check whether + they are adding and calling filter callbacks correctly. + """ + + # Keep a weak reference to all created filters. This allows us to clear them when + # necessary. + INSTANCES: WeakSet[Filter[t.Any, t.Any]] = WeakSet() + + def __init__(self) -> None: + self.callbacks: list[FilterCallback[T1, T2]] = [] + self.INSTANCES.add(self) + + def add( + self, priority: t.Optional[int] = None + ) -> t.Callable[[FilterCallbackFunc[T1, T2]], FilterCallbackFunc[T1, T2]]: + """ + Decorator to add a filter callback. + + Callbacks are added by increasing priority. Highest priority score are called + last. + + :param int priority: optional order in which the filter callbacks are called. Higher + values mean that they will be performed later. The default value is + ``priorities.DEFAULT`` (10). Filters that must be called last should have a + priority of 100. + + The return value of each filter function callback will be passed as the first argument to the next one. + + Usage:: + + @my_filter.add() + def my_func(value, some_other_arg): + # Do something with `value` + ... 
+ return value + + After filters have been created, the result of calling all filter callbacks is obtained by running: + + final_value = my_filter.apply(initial_value, some_other_argument_value) + """ + + def inner(func: FilterCallbackFunc[T1, T2]) -> FilterCallbackFunc[T1, T2]: + callback: FilterCallback[T1, T2] = FilterCallback(func, priority=priority) + priorities.insert_callback(callback, self.callbacks) + return func + + return inner + + def apply( + self, + value: T1, + *args: T2.args, + **kwargs: T2.kwargs, + ) -> T1: + """ + Apply all declared filters to a single value, passing along the additional arguments. + + The return value of every filter is passed as the first argument to the next callback. + + Usage:: + + results = filters.apply("my-filter", ["item0"]) + + :type value: object + :rtype: same as the type of ``value``. + """ + return self.apply_from_context(None, value, *args, **kwargs) + + def apply_from_context( + self, + context: t.Optional[str], + value: T1, + *args: T2.args, + **kwargs: T2.kwargs, + ) -> T1: + """ + Same as :py:meth:`apply` but only run the callbacks that were created in a given context. + + If ``context`` is None then it is ignored. + """ + for callback in self.callbacks: + if callback.is_in_context(context): + try: + value = callback.apply( + value, + *args, + **kwargs, + ) + except: + sys.stderr.write( + f"Error applying filter: func={callback.func} contexts={callback.contexts}'\n" + ) + raise + return value + + def clear(self, context: t.Optional[str] = None) -> None: + """ + Clear any previously defined filter with the given context. + """ + self.callbacks = [ + callback + for callback in self.callbacks + if not callback.is_in_context(context) + ] + + @classmethod + def clear_all(cls, context: t.Optional[str] = None) -> None: + """ + Clear any previously defined filter with the given context. 
+ """ + for filtre in cls.INSTANCES: + filtre.clear(context) + + # The methods below are specific to filters which take lists as first arguments + def add_item( + self: "Filter[list[L], T2]", item: L, priority: t.Optional[int] = None + ) -> None: + """ + Convenience decorator to add a single item to a filter that returns a list of items. + + This method is only valid for filters that return list of items. + + :param object item: item that will be appended to the resulting list. + :param int priority: see :py:data:`Filter.add`. + + Usage:: + + my_filter.add_item("item1") + my_filter.add_item("item2") + + assert ["item1", "item2"] == my_filter.apply([]) + """ + self.add_items([item], priority=priority) + + def add_items( + self: "Filter[list[L], T2]", items: list[L], priority: t.Optional[int] = None + ) -> None: + """ + Convenience function to add multiple items to a filter that returns a list of items. + + This method is only valid for filters that return list of items. + + This is a similar method to :py:data:`Filter.add_item` except that it can be + used to add multiple items at the same time. If you find yourself calling + ``add_item`` multiple times on the same filter, then you probably want to use a + single call to ``add_items`` instead. + + :param list[object] items: items that will be appended to the resulting list. + :param int priority: optional priority. 
+ + Usage:: + + my_filter.add_items(["item1", "item2"]) + my_filter.add_items(["item3", "item4"]) + + assert ["item1", "item2", "item3", "item4"] == my_filter.apply([]) + + The following are equivalent:: + + # Single call to add_items + my_filter.add_items(["item1", "item2"]) + + # Multiple calls to add_item + my_filter.add_item("item1") + my_filter.add_item("item2") + """ + + @self.add(priority=priority) + def callback( + values: list[L], /, *_args: T2.args, **_kwargs: T2.kwargs + ) -> list[L]: + return values + items + + def iterate( + self: "Filter[list[L], T2]", *args: T2.args, **kwargs: T2.kwargs + ) -> t.Iterator[L]: + """ + Convenient function to iterate over the results of a filter result list. + + This method is only valid for filters that return list of items. + + This pieces of code are equivalent:: + + for value in my_filter.apply([], *args, **kwargs): + ... + + for value in my_filter.iterate(*args, **kwargs): + ... + + :rtype iterator[T]: iterator over the list of items from the filter + """ + yield from self.iterate_from_context(None, *args, **kwargs) + + def iterate_from_context( + self: "Filter[list[L], T2]", + context: t.Optional[str], + *args: T2.args, + **kwargs: T2.kwargs, + ) -> t.Iterator[L]: + """ + Same as :py:func:`Filter.iterate` but apply only callbacks from a given context. + """ + yield from self.apply_from_context(context, [], *args, **kwargs) diff --git a/tutor/core/hooks/priorities.py b/tutor/core/hooks/priorities.py new file mode 100644 index 0000000000..ca2b6f5376 --- /dev/null +++ b/tutor/core/hooks/priorities.py @@ -0,0 +1,31 @@ +from __future__ import annotations + +import typing as t + +from typing_extensions import Protocol + +#: High priority callbacks are triggered first. +HIGH = 5 +#: By default, all callbacks have the same priority and are processed in the order they +#: were added. +DEFAULT = 10 +#: Low-priority callbacks are called last. Add callbacks with this priority to override previous callbacks. 
def insert_callback(callback: TPrioritized, callbacks: list[TPrioritized]) -> None:
    """
    Insert ``callback`` into ``callbacks``, keeping the list sorted by priority.

    Callbacks with equal priority preserve insertion order: the new callback is
    placed after existing ones of the same priority. (``bisect.insort_right``
    with a ``key=`` parameter would do this, but ``key=`` is unsupported in
    Python 3.9.)
    """
    for index, existing in enumerate(callbacks):
        if existing.priority > callback.priority:
            callbacks.insert(index, callback)
            return
    callbacks.append(callback)
+ """ + # Core template targets + hooks.Filters.ENV_TEMPLATE_TARGETS.add_items( + [ + ("apps/", ""), + ("build/", ""), + ("dev/", ""), + ("k8s/", ""), + ("local/", ""), + (VERSION_FILENAME, ""), + ("kustomization.yml", ""), + ], + ) + # Template filters + hooks.Filters.ENV_TEMPLATE_FILTERS.add_items( + [ + ("common_domain", utils.common_domain), + ("encrypt", utils.encrypt), + ("list_if", utils.list_if), + ("long_to_base64", utils.long_to_base64), + ("random_string", utils.random_string), + ("reverse_host", utils.reverse_host), + ("rsa_import_key", utils.rsa_import_key), + ("rsa_private_key", utils.rsa_private_key), + ], + ) + # Template variables + hooks.Filters.ENV_TEMPLATE_VARIABLES.add_items( + [ + ("HOST_USER_ID", utils.get_user_id()), + ("TUTOR_APP", __app__.replace("-", "_")), + ("TUTOR_VERSION", __version__), + ("is_docker_rootless", utils.is_docker_rootless), + ], + ) + + +_prepare_environment() + + +class JinjaEnvironment(jinja2.Environment): + loader: jinja2.FileSystemLoader + + def __init__(self) -> None: + template_roots = hooks.Filters.ENV_TEMPLATE_ROOTS.apply([TEMPLATES_ROOT]) + loader = jinja2.FileSystemLoader(template_roots) + super().__init__(loader=loader, undefined=jinja2.StrictUndefined) + + def read_str(self, template_name: str) -> str: + return self.read_bytes(template_name).decode() + + def read_bytes(self, template_name: str) -> bytes: + with open(self.find_os_path(template_name), "rb") as f: + return f.read() + + def find_os_path(self, template_name: str) -> str: + path = template_name.replace("/", os.sep) + for templates_root in self.loader.searchpath: + full_path = os.path.join(templates_root, path) + if os.path.exists(full_path): + return full_path + raise ValueError("Template path does not exist") class Renderer: - @classmethod - def instance(cls, config): - # Load template roots: these are required to be able to use - # {% include .. 
%} directives - template_roots = [TEMPLATES_ROOT] - for plugin in plugins.iter_enabled(config): - if plugin.templates_root: - template_roots.append(plugin.templates_root) - - return cls(config, template_roots, ignore_folders=["partials"]) - - @classmethod - def reset(cls): - cls.INSTANCE = None - - def __init__(self, config, template_roots, ignore_folders=None): - self.config = deepcopy(config) - self.template_roots = template_roots - self.ignore_folders = ignore_folders or [] - self.ignore_folders.append(".git") - - # Create environment - environment = jinja2.Environment( - loader=jinja2.FileSystemLoader(template_roots), - undefined=jinja2.StrictUndefined, - ) - environment.filters["common_domain"] = utils.common_domain - environment.filters["encrypt"] = utils.encrypt - environment.filters["list_if"] = utils.list_if - environment.filters["long_to_base64"] = utils.long_to_base64 - environment.filters["random_string"] = utils.random_string - environment.filters["reverse_host"] = utils.reverse_host - environment.filters["rsa_private_key"] = utils.rsa_private_key - environment.filters["walk_templates"] = self.walk_templates - environment.globals["patch"] = self.patch - environment.globals["rsa_import_key"] = utils.rsa_import_key - environment.globals["TUTOR_VERSION"] = __version__ - self.environment = environment - - def iter_templates_in(self, *prefix): + def __init__(self, config: t.Optional[Config] = None): + self.config = deepcopy(config or {}) + + # Create environment with extra filters and globals + self.environment = JinjaEnvironment() + + # Filters + plugin_filters = hooks.Filters.ENV_TEMPLATE_FILTERS.iterate() + for name, func in plugin_filters: + if name in self.environment.filters: + fmt.echo_alert(f"Found conflicting template filters named '{name}'") + self.environment.filters[name] = func + self.environment.filters["walk_templates"] = self.walk_templates + + # Globals + for name, value in hooks.Filters.ENV_TEMPLATE_VARIABLES.iterate(): + if name in 
self.environment.globals: + fmt.echo_alert(f"Found conflicting template variables named '{name}'") + self.environment.globals[name] = value + self.environment.globals["iter_values_named"] = self.iter_values_named + self.environment.globals["patch"] = self.patch + + def iter_templates_in(self, *prefix: str) -> t.Iterable[str]: """ The elements of `prefix` must contain only "/", and not os.sep. """ - prefix = "/".join(prefix) - for template in self.environment.loader.list_templates(): - if template.startswith(prefix) and self.is_part_of_env(template): - yield template + full_prefix = "/".join(prefix) + env_templates: list[str] = self.environment.loader.list_templates() + for template in env_templates: + if template.startswith(full_prefix): + # Exclude templates that match certain patterns + # Note that here we don't rely on the OS separator, as we are handling templates. + if is_rendered(template): + yield template + + def iter_values_named( + self, + prefix: t.Optional[str] = None, + suffix: t.Optional[str] = None, + allow_empty: bool = False, + ) -> t.Iterable[ConfigValue]: + """ + Iterate on all config values for which the name match the given pattern. - def walk_templates(self, subdir): + Note that here we only iterate on the values, not the key names. Empty + values (those that evaluate to boolean `false`) will not be yielded, unless + `allow_empty` is True. + """ + for var_name, value in self.config.items(): + if prefix is not None and not var_name.startswith(prefix): + continue + if suffix is not None and not var_name.endswith(suffix): + continue + if not allow_empty and not value: + continue + yield value + + def walk_templates(self, subdir: str) -> t.Iterable[str]: """ Iterate on the template files from `templates/`. 
Yield: path: template path relative to the template root """ - yield from self.iter_templates_in(subdir + "/") + yield from self.iter_templates_in(subdir) - def is_part_of_env(self, path): - """ - Determines whether a template should be rendered or not. Note that here we don't - rely on the OS separator, as we are handling templates - """ - parts = path.split("/") - basename = parts[-1] - is_excluded = False - is_excluded = ( - is_excluded or basename.startswith(".") or basename.endswith(".pyc") - ) - is_excluded = is_excluded or basename == "__pycache__" - for ignore_folder in self.ignore_folders: - is_excluded = is_excluded or ignore_folder in parts - return not is_excluded - - def find_os_path(self, template_name): - path = template_name.replace("/", os.sep) - for templates_root in self.template_roots: - full_path = os.path.join(templates_root, path) - if os.path.exists(full_path): - return full_path - raise ValueError("Template path does not exist") - - def patch(self, name, separator="\n", suffix=""): + def patch(self, name: str, separator: str = "\n", suffix: str = "") -> str: """ Render calls to {{ patch("...") }} in environment templates from plugin patches. 
""" patches = [] - for plugin, patch in plugins.iter_patches(self.config, name): - patch_template = self.environment.from_string(patch) + for patch in plugins.iter_patches(name): try: - patches.append(patch_template.render(**self.config)) - except jinja2.exceptions.UndefinedError as e: - raise exceptions.TutorError( - "Missing configuration value: {} in patch '{}' from plugin {}".format( - e.args[0], name, plugin - ) - ) + patches.append(self.render_str(patch)) + except exceptions.TutorError: + fmt.echo_error(f"Error rendering patch '{name}':\n{patch}") + raise rendered = separator.join(patches) if rendered: rendered += suffix return rendered - def render_str(self, text): - template = self.environment.from_string(text) + def render_str(self, text: str) -> str: + try: + template = self.environment.from_string(text) + except jinja2.exceptions.TemplateSyntaxError as e: + raise exceptions.TutorError(f"Template syntax error: {e.args[0]}") return self.__render(template) - def render_template(self, template_name): + def render_template(self, template_name: str) -> t.Union[str, bytes]: """ Render a template file. Return the corresponding string. If it's a binary file (as indicated by its path), return bytes. @@ -131,10 +194,8 @@ def render_template(self, template_name): The template_name *always* uses "/" separators, and is not os-dependent. Do not pass the result of os.path.join(...) to this function. 
""" - if is_binary_file(template_name): - # Don't try to render binary files - with open(self.find_os_path(template_name), "rb") as f: - return f.read() + if not hooks.Filters.IS_FILE_RENDERED.apply(True, template_name): + return self.environment.read_bytes(template_name) try: template = self.environment.get_template(template_name) @@ -151,128 +212,185 @@ def render_template(self, template_name): fmt.echo_error("Unknown error rendering template " + template_name) raise - def render_all_to(self, root, *prefix): + def render_all_to(self, dst: str, *prefix: str) -> None: """ `prefix` can be used to limit the templates to render. """ for template_name in self.iter_templates_in(*prefix): rendered = self.render_template(template_name) - dst = os.path.join(root, template_name.replace("/", os.sep)) - write_to(rendered, dst) + template_dst = os.path.join(dst, template_name.replace("/", os.sep)) + write_to(rendered, template_dst) - def __render(self, template): + def __render(self, template: jinja2.Template) -> str: try: return template.render(**self.config) except jinja2.exceptions.UndefinedError as e: - raise exceptions.TutorError( - "Missing configuration value: {}".format(e.args[0]) - ) + raise exceptions.TutorError(f"Missing configuration value: {e.args[0]}") + + +class PatchRenderer(Renderer): + """ + Render patches for print it. + """ + + def __init__(self, config: t.Optional[Config] = None): + self.patches_locations: t.Dict[str, t.List[str]] = {} + self.current_template: str = "" + super().__init__(config) + + def render_template(self, template_name: str) -> t.Union[str, bytes]: + """ + Set the current template and render template from Renderer. + """ + self.current_template = template_name + return super().render_template(self.current_template) + + def patch(self, name: str, separator: str = "\n", suffix: str = "") -> str: + """ + Set the patches locations and render calls to {{ patch("...") }} from Renderer. 
+ """ + if not self.patches_locations.get(name): + self.patches_locations.update({name: [self.current_template]}) + else: + if self.current_template not in self.patches_locations[name]: + self.patches_locations[name].append(self.current_template) + + # Store the template's name, and replace it with the name of this patch. + # This handles the case where patches themselves include patches. + original_template = self.current_template + self.current_template = f"within patch: {name}" + + rendered_patch = super().patch(name, separator=separator, suffix=suffix) + self.current_template = ( + original_template # Restore the template's name from before. + ) + return rendered_patch + + def render_all(self, *prefix: str) -> None: + """ + Render all templates. + """ + for template_name in self.iter_templates_in(*prefix): + self.render_template(template_name) + + def print_patches_locations(self) -> None: + """ + Print patches locations. + """ + plugins_table: list[tuple[str, ...]] = [("PATCH", "LOCATIONS")] + self.render_all() + for patch, locations in sorted(self.patches_locations.items()): + n_locations = 0 + for location in locations: + if n_locations < 1: + plugins_table.append((patch, location)) + n_locations += 1 + else: + plugins_table.append(("", location)) + + fmt.echo(utils.format_table(plugins_table)) + + +def is_rendered(path: str) -> bool: + """ + Return whether the template should be rendered or not. + + If the path matches an include pattern, it is rendered. If not and it matches an + ignore pattern, it is not rendered. By default, all files are rendered. 
+ """ + for include_pattern in hooks.Filters.ENV_PATTERNS_INCLUDE.iterate(): + if re.match(include_pattern, path): + return True + for ignore_pattern in hooks.Filters.ENV_PATTERNS_IGNORE.iterate(): + if re.match(ignore_pattern, path): + return False + return True + + +# Skip rendering some files that follow commonly-ignored patterns: +# +# .* +# *.pyc +# __pycache__ +# partials +hooks.Filters.ENV_PATTERNS_IGNORE.add_items( + [ + # Skip all hidden files + r"(.*/)?\.", + # Skip compiled python files + r"(.*/)?__pycache__(/.*)?$", + r".*\.pyc$", + # Skip files from "partials" folders + r"(.*/)?partials(/.*)?$", + ] +) -def save(root, config): +def save(root: str, config: Config) -> None: """ Save the full environment, including version information. """ root_env = pathjoin(root) - for prefix in [ - "android/", - "apps/", - "build/", - "dev/", - "k8s/", - "local/", - "webui/", - VERSION_FILENAME, - "kustomization.yml", - ]: - save_all_from(prefix, root_env, config) - - for plugin in plugins.iter_enabled(config): - if plugin.templates_root: - save_plugin_templates(plugin, root, config) + for src, dst in hooks.Filters.ENV_TEMPLATE_TARGETS.iterate(): + save_all_from(src, os.path.join(root_env, dst), config) upgrade_obsolete(root) - fmt.echo_info("Environment generated in {}".format(base_dir(root))) - + fmt.echo_info(f"Environment generated in {base_dir(root)}") -def upgrade_obsolete(root): - # tutor.conf was renamed to _tutor.conf in order to be the first config file loaded - # by nginx - nginx_tutor_conf = pathjoin(root, "apps", "nginx", "tutor.conf") - if os.path.exists(nginx_tutor_conf): - os.remove(nginx_tutor_conf) - -def save_plugin_templates(plugin, root, config): +def upgrade_obsolete(_root: str) -> None: """ - Save plugin templates to plugins//*. - Only the "apps" and "build" subfolders are rendered. + Add here ad-hoc commands to upgrade the environment. 
""" - plugins_root = pathjoin(root, "plugins") - for subdir in ["apps", "build"]: - subdir_path = os.path.join(plugin.name, subdir) - save_all_from(subdir_path, plugins_root, config) -def save_all_from(prefix, root, config): +def save_all_from(prefix: str, dst: str, config: Config) -> None: """ Render the templates that start with `prefix` and store them with the same - hierarchy at `root`. Here, `prefix` can be the result of os.path.join(...). + hierarchy at `dst`. Here, `prefix` can be the result of os.path.join(...). """ - renderer = Renderer.instance(config) - renderer.render_all_to(root, prefix.replace(os.sep, "/")) + renderer = Renderer(config) + renderer.render_all_to(dst, prefix.replace(os.sep, "/")) -def write_to(content, path): +def write_to(content: t.Union[str, bytes], path: str) -> None: """ Write some content to a path. Content can be either str or bytes. """ - open_kwargs = {"mode": "w"} + utils.ensure_file_directory_exists(path) if isinstance(content, bytes): - open_kwargs["mode"] += "b" + with open(path, mode="wb") as of_binary: + of_binary.write(content) else: - # Make files readable by Docker on Windows - open_kwargs["encoding"] = "utf8" - open_kwargs["newline"] = "\n" - utils.ensure_file_directory_exists(path) - with open(path, **open_kwargs) as of: - of.write(content) + with open(path, mode="w", encoding="utf8", newline="\n") as of_text: + of_text.write(content) -def render_file(config, *path): +def render_file(config: Config, *path: str) -> t.Union[str, bytes]: """ Return the rendered contents of a template. """ - renderer = Renderer.instance(config) + renderer = Renderer(config) template_name = "/".join(path) return renderer.render_template(template_name) -def render_dict(config): +def render_unknown(config: Config, value: t.Any) -> t.Any: """ - Render the values from the dict. This is useful for rendering the default - values from config.yml. + Render an unknown `value` object with the selected config. 
- Args: - config (dict) + If `value` is a dict, its values are also rendered. """ - rendered = {} - for key, value in config.items(): - if isinstance(value, str): - rendered[key] = render_str(config, value) - else: - rendered[key] = value - for k, v in rendered.items(): - config[k] = v - - -def render_unknown(config, value): if isinstance(value, str): return render_str(config, value) + if isinstance(value, dict): + return {k: render_unknown(config, v) for k, v in value.items()} + if isinstance(value, list): + return [render_unknown(config, v) for v in value] return value -def render_str(config, text): +def render_str(config: Config, text: str) -> str: """ Args: text (str) @@ -281,103 +399,190 @@ def render_str(config, text): Return: substituted (str) """ - return Renderer.instance(config).render_str(text) + return Renderer(config).render_str(text) -def check_is_up_to_date(root): +def check_is_up_to_date(root: str) -> None: if not is_up_to_date(root): - message = ( - "The current environment stored at {} is not up-to-date: it is at " - "v{} while the 'tutor' binary is at v{}. You should upgrade " - "the environment by running:\n" - "\n" - " tutor config save" - ) fmt.echo_alert( - message.format(base_dir(root), current_version(root), __version__) + f"The current environment stored at {base_dir(root)} is not up-to-date: it is at " + f"v{current_version(root)} while the 'tutor' binary is at v{__version__}. You should upgrade " + f"the environment by running:\n" + f"\n" + f" tutor config save" ) -def is_up_to_date(root): +def is_up_to_date(root: str) -> bool: """ Check if the currently rendered version is equal to the current tutor version. """ - return current_version(root) == __version__ + current = current_version(root) + return current is None or current == __version__ -def needs_major_upgrade(root): +def should_upgrade_from_release(root: str) -> t.Optional[str]: """ - Return the current version as a tuple of int. E.g: (1, 0, 2). 
def get_release(version: str) -> str:
    """
    Map a Tutor version string (e.g. "17.0.1") to its Open edX release name.

    Only the major version component is significant. Raises ``KeyError`` for
    unknown major versions.
    """
    major_to_release = {
        "0": "ironwood",
        "3": "ironwood",
        "10": "juniper",
        "11": "koa",
        "12": "lilac",
        "13": "maple",
        "14": "nutmeg",
        "15": "olive",
        "16": "palm",
        "17": "quince",
        "18": "redwood",
    }
    major = version.split(".", maxsplit=1)[0]
    return major_to_release[major]
@hooks.Actions.PLUGIN_UNLOADED.add()
def _delete_plugin_templates(plugin: str, root: str, _config: Config) -> None:
    """
    Delete plugin env files on unload.

    :param plugin: name of the plugin being unloaded.
    :param root: project root.
    :param _config: project configuration (unused).
    :raises TutorError: if a rendered folder cannot be deleted due to
        insufficient permissions.
    """
    targets = hooks.Filters.ENV_TEMPLATE_TARGETS.iterate_from_context(
        hooks.Contexts.app(plugin).name
    )
    for src, dst in targets:
        path = pathjoin(root, dst.replace("/", os.sep), src.replace("/", os.sep))
        if os.path.exists(path):
            fmt.echo_info(f" env - removing folder: {path}")
            try:
                shutil.rmtree(path)
            except PermissionError as e:
                # Chain the original exception (`from e`) so the root cause is
                # preserved, consistent with delete_env_dir() in this module.
                raise exceptions.TutorError(
                    f"Could not delete file {e.filename} from plugin {plugin} in folder {path}"
                ) from e
+ """ + if result and is_binary_file(path): + result = False + return result diff --git a/tutor/fmt.py b/tutor/fmt.py index cc21b0ad8e..8af0592071 100644 --- a/tutor/fmt.py +++ b/tutor/fmt.py @@ -1,9 +1,11 @@ +import os + import click STDOUT = None -def title(text): +def title(text: str) -> str: indent = 8 separator = "=" * (len(text) + 2 * indent) message = "{separator}\n{indent}{text}\n{separator}".format( @@ -12,37 +14,41 @@ def title(text): return click.style(message, fg="green") -def echo_info(text): +def echo_info(text: str) -> None: echo(info(text)) -def info(text): +def info(text: str) -> str: return click.style(text, fg="blue") -def error(text): +def error(text: str) -> str: return click.style(text, fg="red") -def echo_error(text): +def echo_error(text: str) -> None: echo(error(text), err=True) -def command(text): +def command(text: str) -> str: return click.style(text, fg="magenta") -def question(text): +def question(text: str) -> str: return click.style(text, fg="yellow") -def echo_alert(text): - echo(alert(text)) +def echo_alert(text: str) -> None: + echo_error(alert(text)) -def alert(text): +def alert(text: str) -> str: return click.style("⚠️ " + text, fg="yellow", bold=True) -def echo(text, err=False): +def echo(text: str, err: bool = False) -> None: + if os.environ.get("_TUTOR_COMPLETE"): + if os.environ.get("COMP_WORDS") or os.environ.get("COMP_CWORD"): + # Don't even attempt to log stuff when we are actually auto-completing shell commands. + return click.echo(text, file=STDOUT, err=err) diff --git a/tutor/hooks/__init__.py b/tutor/hooks/__init__.py new file mode 100644 index 0000000000..485b0c1eb8 --- /dev/null +++ b/tutor/hooks/__init__.py @@ -0,0 +1,36 @@ +# The Tutor plugin system is licensed under the terms of the Apache 2.0 license. 
+__license__ = "Apache 2.0" + +import functools +import typing as t + +from typing_extensions import ParamSpec + +# The imports that follow are the hooks API +from tutor.core.hooks import clear_all, priorities +from tutor.types import Config + +from .catalog import Actions, Contexts, Filters + + +def lru_cache(func: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]: + """ + LRU cache decorator similar to `functools.lru_cache + `__ that is + automatically cleared whenever plugins are updated. + + Use this to decorate functions that need to be called multiple times with a return + value that depends on which plugins are loaded. Typically: functions that depend on + the output of filters. + """ + decorated = functools.lru_cache(func) + + @Actions.PLUGIN_LOADED.add() + def _clear_func_cache_on_load(_plugin: str) -> None: + decorated.cache_clear() + + @Actions.PLUGIN_UNLOADED.add() + def _clear_func_cache_on_unload(_plugin: str, _root: str, _config: Config) -> None: + decorated.cache_clear() + + return decorated diff --git a/tutor/hooks/catalog.py b/tutor/hooks/catalog.py new file mode 100644 index 0000000000..acbb81e59f --- /dev/null +++ b/tutor/hooks/catalog.py @@ -0,0 +1,563 @@ +""" +List of all the action, filter and context names used across Tutor. This module is used +to generate part of the reference documentation. +""" + +from __future__ import annotations + +# The Tutor plugin system is licensed under the terms of the Apache 2.0 license. +__license__ = "Apache 2.0" + +from typing import Any, Callable, Iterable, Literal, Union + +import click + +from tutor.core.hooks import Action, Context, Filter +from tutor.types import Config + +__all__ = ["Actions", "Filters", "Contexts"] + + +class Actions: + """ + This class is a container for all actions used across Tutor (see + :py:class:`tutor.core.hooks.Action`). Actions are used to trigger callback functions at + specific moments in the Tutor life cycle. 
+ + To create a new callback for an existing action, start by importing the hooks + module:: + + from tutor import hooks + + Then create your callback function and decorate it with the :py:meth:`add ` method of the + action you're interested in:: + + @hooks.Actions.SOME_ACTION.add() + def your_action(): + # Do stuff here + + Your callback function should have the same signature as the original action. For + instance, to add a callback to the :py:data:`COMPOSE_PROJECT_STARTED` action:: + + @hooks.Actions.COMPOSE_PROJECT_STARTED.add(): + def run_this_on_start(root, config, name): + print(root, config["LMS_HOST", name]) + + Your callback function will then be called whenever the ``COMPOSE_PROJECT_STARTED.do`` method + is called, i.e: when ``tutor local start`` or ``tutor dev start`` is run. + + Note that action callbacks do not return anything. + + For more information about how actions work, check out the :py:class:`tutor.core.hooks.Action` API. + """ + + #: Triggered whenever a "docker compose start", "up" or "restart" command is executed. + #: + #: :parameter str root: project root. + #: :parameter dict config: project configuration. + #: :parameter str name: docker-compose project name. + COMPOSE_PROJECT_STARTED: Action[[str, Config, str]] = Action() + + #: Triggered after all interactive questions have been asked. + #: You should use this action if you want to add new questions. + #: + #: :parameter dict config: project configuration. + CONFIG_INTERACTIVE: Action[[Config]] = Action() + + #: This action is called at the end of the tutor.config.load_full function. + #: Modifying this object will not trigger changes in the configuration. + #: For all purposes, it should be considered read-only. + #: + #: :parameter dict config: project configuration. + CONFIG_LOADED: Action[[Config]] = Action() + + #: Called whenever the core project is ready to run. This action is called as soon + #: as possible. This is the right time to discover plugins, for instance. 
In + #: particular, we auto-discover the following plugins: + #: + #: - Python packages that declare a "tutor.plugin.v0" entrypoint. + #: - Python packages that declare a "tutor.plugin.v1" entrypoint. + #: - YAML and Python plugins stored in ~/.local/share/tutor-plugins (as indicated by ``tutor plugins printroot``) + #: - When running the binary version of Tutor, official plugins that ship with the binary are automatically discovered. + #: + #: Discovering a plugin is typically done by the Tutor plugin mechanism. Thus, plugin + #: developers probably don't have to implement this action themselves. + #: + #: This action does not have any parameter. + CORE_READY: Action[[]] = Action() + + #: Called just before triggering the job tasks of any ``... do `` command. + #: + #: :parameter str job: job name. + #: :parameter args: job positional arguments. + #: :parameter kwargs: job named arguments. + DO_JOB: Action[[str, Any]] = Action() + + #: Triggered when a single plugin needs to be loaded. Only plugins that have previously been + #: discovered can be loaded (see :py:data:`CORE_READY`). + #: + #: Plugins are typically loaded because they were enabled by the user; the list of + #: plugins to enable is found in the project root (see + #: :py:data:`PROJECT_ROOT_READY`). + #: + #: Most plugin developers will not have to implement this action themselves, unless + #: they want to perform a specific action at the moment the plugin is enabled. + #: + #: :parameter str plugin: plugin name. + PLUGIN_LOADED: Action[[str]] = Action() + + #: Triggered after all plugins have been loaded. At this point the list of loaded + #: plugins may be obtained from the :py:data:`Filters.PLUGINS_LOADED` filter. + #: + #: This action does not have any parameter. + PLUGINS_LOADED: Action[[]] = Action() + + #: Triggered when a single plugin is unloaded. Only plugins that have previously been + #: loaded can be unloaded (see :py:data:`PLUGIN_LOADED`). 
+ #: + #: Plugins are typically unloaded because they were disabled by the user. + #: + #: Most plugin developers will not have to implement this action themselves, unless + #: they want to perform a specific action at the moment the plugin is disabled. + #: + #: :parameter str plugin: plugin name. + #: :parameter str root: absolute path to the project root. + #: :parameter config: full project configuration + PLUGIN_UNLOADED: Action[str, str, Config] = Action() + + #: Called as soon as we have access to the Tutor project root. + #: + #: :parameter str root: absolute path to the project root. + PROJECT_ROOT_READY: Action[str] = Action() + + +class Filters: + """ + Here are the names of all filters used across Tutor. (see + :py:class:`tutor.core.hooks.Filter`) Filters are used to modify some data at + specific points during the Tutor life cycle. + + To add a callback to an existing filter, start by importing the hooks module:: + + from tutor import hooks + + Then create your callback function and decorate it with :py:meth:`add + ` method of the filter instance you need:: + + @hooks.Filters.SOME_FILTER.add() + def your_filter_callback(some_data): + # Do stuff here with the data + ... + # return the modified data + return some_data + + Note that your filter callback should have the same signature as the original + filter. The return value should also have the same type as the first argument of the + callback function. + + Many filters have a list of items as the first argument. Quite often, plugin + developers just want to add a new item at the end of that list. In such cases there + is no need for a callback function. Instead, you can use the ``add_item`` method. 
For + instance, you can add a "hello" to the init task of the lms container by modifying + the :py:data:`CLI_DO_INIT_TASKS` filter:: + + hooks.Filters.CLI_DO_INIT_TASKS.add_item(("lms", "echo hello")) + + To add multiple items at a time, use ``add_items``:: + + hooks.Filters.CLI_DO_INIT_TASKS.add_items( + ("lms", "echo 'hello from lms'"), + ("cms", "echo 'hello from cms'"), + ) + + The ``echo`` commands will then be run every time the "init" tasks are run, for + instance during ``tutor local launch``. + + For more information about how filters work, check out the + :py:class:`tutor.core.hooks.Filter` API. + """ + + #: Hostnames of user-facing applications. + #: + #: So far this filter is only used to inform the user of application urls after they have run ``launch``. + #: + #: :parameter list[str] hostnames: items from this list are templates that will be + #: rendered by the environment. + #: :parameter str context_name: either "local" or "dev", depending on the calling context. + APP_PUBLIC_HOSTS: Filter[list[str], [Literal["local", "dev"]]] = Filter() + + #: List of command line interface (CLI) commands. + #: + #: :parameter list commands: commands are instances of ``click.Command``. They will + #: all be added as subcommands of the main ``tutor`` command. + CLI_COMMANDS: Filter[list[click.Command], []] = Filter() + + #: List of ``do ...`` commands. + #: + #: :parameter list commands: see :py:data:`CLI_COMMANDS`. These commands will be + #: added as subcommands to the ``local/dev/k8s do`` commands. They must return a list of + #: ("service name", "service command") tuples. Each "service command" will be executed + #: in the "service" container, both in local, dev and k8s mode. + CLI_DO_COMMANDS: Filter[list[Callable[[Any], Iterable[tuple[str, str]]]], []] = ( + Filter() + ) + + #: List of initialization tasks (scripts) to be run in the ``init`` job. This job + #: includes all database migrations, setting up, etc. 
To run some tasks before or + #: after others, they should be assigned a different priority. + #: + #: :parameter list[tuple[str, str]] tasks: list of ``(service, task)`` tuples. Each + #: task is essentially a bash script to be run in the "service" container. Scripts + #: may contain Jinja markup, similar to templates. + CLI_DO_INIT_TASKS: Filter[list[tuple[str, str]], []] = Filter() + + #: List of folders to bind-mount in docker-compose containers, either in ``tutor local`` or ``tutor dev``. + #: + #: This filter is for processing values of the ``MOUNTS`` setting such as:: + #: + #: tutor mounts add /path/to/edx-platform + #: + #: In this example, this host folder would be bind-mounted in different containers + #: (lms, lms-worker, cms, cms-worker, lms-job, cms-job) at the + #: /openedx/edx-platform location. Plugin developers may implement this filter to + #: define custom behaviour when mounting folders that relate to their plugins. For + #: instance, the ecommerce plugin may process the ``/path/to/ecommerce`` value. + #: + #: To also bind-mount these folder at build time, implement also the + #: :py:data:`IMAGES_BUILD_MOUNTS` filter. + #: + #: :parameter list[tuple[str, str]] mounts: each item is a ``(service, path)`` + #: tuple, where ``service`` is the name of the docker-compose service and ``path`` is + #: the location in the container where the folder should be bind-mounted. Note: the + #: path must be slash-separated ("/"). Thus, do not use ``os.path.join`` to generate + #: the ``path`` because it will fail on Windows. + #: :parameter str name: basename of the host-mounted folder. In the example above, + #: this is "edx-platform". When implementing this filter you should check this name to + #: conditionally add mounts. + COMPOSE_MOUNTS: Filter[list[tuple[str, str]], [str]] = Filter() + + #: Declare new default configuration settings that don't necessarily have to be saved in the user + #: ``config.yml`` file. 
Default settings may be overridden with ``tutor config save --set=...``, in which + #: case they will automatically be added to ``config.yml``. + #: + #: :parameter list[tuple[str, ...]] items: list of (name, value) new settings. All + #: new entries must be prefixed with the plugin name in all-caps. + CONFIG_DEFAULTS: Filter[list[tuple[str, Any]], []] = Filter() + + #: Modify existing settings, either from Tutor core or from other plugins. Beware not to override any + #: important setting, such as passwords! Overridden setting values will be printed to stdout when the plugin + #: is disabled, such that users have a chance to back them up. + #: + #: :parameter list[tuple[str, ...]] items: list of (name, value) settings. + CONFIG_OVERRIDES: Filter[list[tuple[str, Any]], []] = Filter() + + #: Declare unique configuration settings that must be saved in the user ``config.yml`` file. This is where + #: you should declare passwords and randomly-generated values that are different from one environment to the next. + #: + #: :parameter list[tuple[str, ...]] items: list of (name, value) new settings. All + #: names must be prefixed with the plugin name in all-caps. + CONFIG_UNIQUE: Filter[list[tuple[str, Any]], []] = Filter() + + #: Used to declare unique key:value pairs in the ``config.yml`` file that will be overwritten on ``tutor config save``. + #: This is where you should declare passwords and other secrets that need to be fetched live from an external secrets + #: store. Most users will not need to use this filter but it will allow you to programmatically fetch and set secrets + #: from an external secrets store such as AWS Secrets Manager via boto3. + #: + #: Values passed in to this filter will overwrite existing values in the ``config.yml`` file. + #: + #: :parameter list[tuple[str, ...]] items: list of (name, value) new settings. All + #: names must be prefixed with the plugin name in all-caps. 
+ CONFIG_USER: Filter[list[tuple[str, Any]], []] = Filter() + + #: Use this filter to modify the ``docker build`` command. + #: + #: :parameter list[str] command: the full build command, including options and + #: arguments. Note that these arguments do not include the leading ``docker`` command. + DOCKER_BUILD_COMMAND: Filter[list[str], []] = Filter() + + #: List of patches that should be inserted in a given location of the templates. + #: + #: :parameter list[tuple[str, str]] patches: pairs of (name, content) tuples. Use this + #: filter to modify the Tutor templates. + ENV_PATCHES: Filter[list[tuple[str, str]], []] = Filter() + + #: List of template path patterns to be ignored when rendering templates to the project root. By default, we ignore: + #: + #: - hidden files (``.*``) + #: - ``__pycache__`` directories and ``*.pyc`` files + #: - "partials" directories. + #: + #: Ignored patterns are overridden by include patterns; see :py:data:`ENV_PATTERNS_INCLUDE`. + #: + #: :parameter list[str] patterns: list of regular expression patterns. E.g: ``r"(.*/)?ignored_file_name(/.*)?"``. + ENV_PATTERNS_IGNORE: Filter[list[str], []] = Filter() + + #: List of template path patterns to be included when rendering templates to the project root. + #: Patterns from this list will take priority over the patterns from :py:data:`ENV_PATTERNS_IGNORE`. + #: + #: :parameter list[str] patterns: list of regular expression patterns. See :py:data:`ENV_PATTERNS_IGNORE`. + ENV_PATTERNS_INCLUDE: Filter[list[str], []] = Filter() + + #: List of `Jinja2 filters `__ that will be + #: available in templates. Jinja2 filters are basically functions that can be used + #: as follows within templates:: + #: + #: {{ "somevalue"|my_filter }} + #: + #: Note that Jinja2 filters are a completely different thing than the Tutor hook + #: filters, although they share the same name. 
+ #: + #: Out of the box, Tutor comes with the following filters: + #: + #: - ``common_domain``: Return the longest common name between two domain names. Example: ``{{ "studio.demo.myopenedx.com"|common_domain("lms.demo.myopenedx.com") }}`` is equal to "demo.myopenedx.com". + #: - ``encrypt``: Encrypt an arbitrary string. The encryption process is compatible with `htpasswd `__ verification. + #: - ``list_if``: In a list of ``(value, condition)`` tuples, return the list of ``value`` for which the ``condition`` is true. + #: - ``long_to_base64``: Base-64 encode a long integer. + #: - ``iter_values_named``: Yield the values of the configuration settings that match a certain pattern. Example: ``{% for value in iter_values_named(prefix="KEY", suffix="SUFFIX")%}...{% endfor %}``. By default, only non-empty values are yielded. To iterate also on empty values, pass the ``allow_empty=True`` argument. + #: - ``patch``: See :ref:`patches `. + #: - ``random_string``: Return a random string of the given length composed of ASCII letters and digits. Example: ``{{ 8|random_string }}``. + #: - ``reverse_host``: Reverse a domain name (see `reference `__). Example: ``{{ "demo.myopenedx.com"|reverse_host }}`` is equal to "com.myopenedx.demo". + #: - ``rsa_import_key``: Import a PEM-formatted RSA key and return the corresponding object. + #: - ``rsa_private_key``: Export an RSA private key in PEM format. + #: - ``walk_templates``: Iterate recursively over the templates of the given folder. For instance:: + #: + #: {% for file in "apps/myplugin"|walk_templates %} + #: ... + #: {% endfor %} + #: + #: :parameter filters: list of (name, function) tuples. The function signature + #: should correspond to its usage in templates. + ENV_TEMPLATE_FILTERS: Filter[list[tuple[str, Callable[..., Any]]], []] = Filter() + + #: List of all template root folders. + #: + #: :parameter list[str] templates_root: absolute paths to folders which contain templates. 
+ #: The templates in these folders will then be accessible by the environment + #: renderer using paths that are relative to their template root. + ENV_TEMPLATE_ROOTS: Filter[list[str], []] = Filter() + + #: List of template source/destination targets. + #: + #: :parameter list[tuple[str, str]] targets: list of (source, destination) pairs. + #: Each source is a path relative to one of the template roots, and each destination + #: is a path relative to the environment root. For instance: adding ``("c/d", + #: "a/b")`` to the filter will cause all files from "c/d" to be rendered to the ``a/b/c/d`` + #: subfolder. + ENV_TEMPLATE_TARGETS: Filter[list[tuple[str, str]], []] = Filter() + + #: List of extra variables to be included in all templates. + #: + #: Out of the box, this filter will include all configuration settings, but also the following: + #: + #: - ``HOST_USER_ID``: the numerical ID of the user on the host. + #: - ``TUTOR_APP``: the app name ("tutor" by default), used to determine the dev/local project names. + #: - ``TUTOR_VERSION``: the current version of Tutor. + #: - ``iter_values_named``: a function to iterate on variables that start or end with a given string. + #: - ``iter_mounts``: a function that yields compose-compatible bind-mounts for any given service. + #: - ``iter_mounted_directories``: iterate on bind-mounted directory names. + #: - ``patch``: a function to incorporate extra content into a template. + #: + #: :parameter filters: list of (name, value) tuples. + ENV_TEMPLATE_VARIABLES: Filter[list[tuple[str, Any]], []] = Filter() + + #: List of images to be built when we run ``tutor images build ...``. + #: + #: :parameter list[tuple[str, tuple[str, ...], str, tuple[str, ...]]] tasks: list of ``(name, path, tag, args)`` tuples. + #: + #: - ``name`` is the name of the image, as in ``tutor images build myimage``. + #: - ``path`` is the relative path to the folder that contains the Dockerfile. This can be either a string or a tuple of strings. 
+ #: For instance ``("myplugin", "build", "myservice")`` indicates that the template will be read from + #: ``myplugin/build/myservice/Dockerfile``. This argument value would be equivalent to "myplugin/build/myservice". + #: - ``tag`` is the Docker tag that will be applied to the image. It will be + #: rendered at runtime with the user configuration. Thus, the image tag could + #: be ``"{{ DOCKER_REGISTRY }}/myimage:{{ TUTOR_VERSION }}"``. + #: - ``args`` is a list of arguments that will be passed to ``docker build ...``. + #: :parameter Config config: user configuration. + IMAGES_BUILD: Filter[ + list[tuple[str, Union[str, tuple[str, ...]], str, tuple[str, ...]]], [Config] + ] = Filter() + + #: List of image names which must be built prior to launching the platform. These + #: images will be built on launch, in "dev" and "local" mode (but not in Kubernetes). + #: + #: :parameter list[str] names: list of image names. + #: :parameter str context_name: either "local" or "dev", depending on the calling context. + IMAGES_BUILD_REQUIRED: Filter[list[str], [Literal["local", "dev"]]] = Filter() + + #: List of host directories to be automatically bind-mounted in Docker images at + #: build time. For instance, this is useful to build Docker images using a custom + #: repository on the host. + #: + #: This filter works similarly to the :py:data:`COMPOSE_MOUNTS` filter, with a few differences. + #: + #: :parameter list[tuple[str, str]] mounts: each item is a pair of ``(name, value)`` + #: used to generate a build context at build time. See the corresponding `Docker + #: documentation `__. + #: The following option will be added to the ``docker buildx build`` command: + #: ``--build-context={name}={value}``. If the Dockerfile contains a "name" stage, then + #: that stage will be replaced by the corresponding directory on the host. + #: :parameter str name: full path to the host-mounted folder. 
As opposed to + #: :py:data:`COMPOSE_MOUNTS`, this is not just the basename, but the full path. When + #: implementing this filter you should check this path (for instance: with + #: ``os.path.basename(path)``) to conditionally add mounts. + IMAGES_BUILD_MOUNTS: Filter[list[tuple[str, str]], [str]] = Filter() + + #: List of images to be pulled when we run ``tutor images pull ...``. + #: + #: :parameter list[tuple[str, str]] tasks: list of ``(name, tag)`` tuples. + #: + #: - ``name`` is the name of the image, as in ``tutor images pull myimage``. + #: - ``tag`` is the Docker tag that will be applied to the image. (see :py:data:`IMAGES_BUILD`). + #: :parameter Config config: user configuration. + IMAGES_PULL: Filter[list[tuple[str, str]], [Config]] = Filter() + + #: List of images to be pushed when we run ``tutor images push ...``. + #: Parameters are the same as for :py:data:`IMAGES_PULL`. + IMAGES_PUSH: Filter[list[tuple[str, str]], [Config]] = Filter() + + #: List of directories that will be automatically bind-mounted in an image (at + #: build-time) and a container (at run-time). + #: + #: Whenever a user runs: ``tutor mounts add /path/to/name``, "name" will be matched to + #: the regular expressions in this filter. If it matches, then the directory will be + #: automatically bind-mounted in the matching Docker image at build time and run + #: time. At build-time, they will be added to a layer named "mnt-{name}". At + #: run-time, they will be mounted in ``/mnt/``. + #: + #: In the case of edx-platform, ``pip install -e .`` will be run in this directory + #: at build-time. And the same host directory will be bind-mounted in that location + #: at run time. This allows users to transparently work on edx-platform + #: dependencies, such as Python packages. + #: + #: By default, xblocks and some common edx-platform packages are already present in + #: this filter, and associated to the "openedx" image. 
Add your own Python + #: dependencies to this filter to make it easier for users to work on the + #: dependencies of your app. + #: + #: See the list of all edx-platform base requirements here: + #: https://github.com/openedx/edx-platform/blob/master/requirements/edx/base.txt + #: + #: This filter was mostly designed for edx-platform, but it can be used by any + #: Python-based Docker image as well. The Dockerfile must declare mounted layers:: + #: + #: {% for name in iter_mounted_directories(MOUNTS, "yourimage") %} + #: FROM scratch AS mnt-{{ name }} + #: {% endfor %} + #: + #: Then, Python packages are installed with:: + #: + #: {% for name in iter_mounted_directories(MOUNTS, "yourimage") %} + #: COPY --from=mnt-{{ name }} --chown=app:app / /mnt/{{ name }} + #: RUN pip install -e "/mnt/{{ name }}" + #: {% endfor %} + #: + #: And the docker-compose service must include the following:: + #: + #: volumes: + #: {%- for mount in iter_mounts(MOUNTS, "yourimage") %} + #: - {{ mount }} + #: {%- endfor %} + #: + #: :parameter list[tuple[str, str]] name_regex: Each tuple is the name of an image and a + #: regular expression. For instance: ``("openedx", r".*xblock.*")``. + MOUNTED_DIRECTORIES: Filter[list[tuple[str, str]], []] = Filter() + + #: List of plugin indexes that are loaded when we run ``tutor plugins update``. By + #: default, the plugin indexes are stored in the user configuration. This filter makes + #: it possible to extend and modify this list with plugins. + #: + #: :parameter list[str] indexes: list of index URLs. Remember that entries further + #: in the list have priority. + PLUGIN_INDEXES: Filter[list[str], []] = Filter() + + #: Filter to modify the url of a plugin index url. This is convenient to alias + #: plugin indexes with a simple name, such as "main" or "contrib". + #: + #: :parameter str url: value passed to the ``index add/remove`` commands. 
+ PLUGIN_INDEX_URL: Filter[str, []] = Filter() + + #: When installing an entry from a plugin index, the plugin data from the index will + #: go through this filter before it is passed along to ``pip install``. Thus, this is a + #: good place to add custom authentication when you need to install from a private + #: index. + #: + #: :parameter dict[str, str] plugin: the dict entry from the plugin index. It + #: includes an additional "index" key which contains the plugin index URL. + PLUGIN_INDEX_ENTRY_TO_INSTALL: Filter[dict[str, str], []] = Filter() + + #: Information about each installed plugin, including its version. + #: Keep this information to a single line for easier parsing by 3rd-party scripts. + #: + #: :param list[tuple[str, str]] versions: each pair is a ``(plugin, info)`` tuple. + PLUGINS_INFO: Filter[list[tuple[str, str]], []] = Filter() + + #: List of installed plugins. In order to be added to this list, a plugin must first + #: be discovered (see :py:data:`Actions.CORE_READY`). + #: + #: :param list[str] plugins: plugin developers probably don't have to implement this + #: filter themselves, but they can apply it to check for the presence of other + #: plugins. + PLUGINS_INSTALLED: Filter[list[str], []] = Filter() + + #: List of loaded plugins. + #: + #: :param list[str] plugins: plugin developers probably don't have to modify this + #: filter themselves, but they can apply it to check whether other plugins are enabled. + PLUGINS_LOADED: Filter[list[str], []] = Filter() + + #: Use this filter to determine whether a file should be rendered. This can be useful in scenarios where + #: certain types of files need special handling, such as binary files, which should not be rendered as text. + #: + #: This filter expects a boolean return value that indicates whether the file should be rendered. + #: + #: :param bool should_render: Initial decision on rendering the file, typically set to True. + #: :param str file_path: The path to the file being checked. 
+ IS_FILE_RENDERED: Filter[bool, [str]] = Filter() + + #: List of parameters to use when starting the LMS Celery worker ``celery worker ...``. + #: + #: :param list[str] command: the list of parameters to use as the celery command. + LMS_WORKER_COMMAND: Filter[list[str], []] = Filter() + + #: List of parameters to use when starting the CMS Celery worker ``celery worker ...``. + #: + #: :param list[str] command: the list of parameters to use as the celery command. + CMS_WORKER_COMMAND: Filter[list[str], []] = Filter() + + class Contexts: + """ + Here we list all the :py:class:`contexts ` that are used across Tutor. It is not expected that + plugin developers will ever need to use contexts. But if you do, this is how it + should be done:: + + from tutor import hooks + + with hooks.Contexts.SOME_CONTEXT.enter(): + # do stuff and all created hooks will include SOME_CONTEXT + ... + + # Apply only the hook callbacks that were created within SOME_CONTEXT + hooks.Actions.MY_ACTION.do_from_context(str(hooks.Contexts.SOME_CONTEXT)) + hooks.Filters.MY_FILTER.apply_from_context(hooks.Contexts.SOME_CONTEXT.name) + """ + + #: Dictionary of name/contexts. Each value is a context that we enter whenever we + #: create hooks for a specific application or plugin. For instance, plugin + #: "myplugin" will be enabled within the "app:myplugin" context. + APP: dict[str, Context] = {} + + @classmethod + def app(cls, name: str) -> Context: + if name not in cls.APP: + cls.APP[name] = Context(f"app:{name}") + return cls.APP[name] + + #: Plugins will be installed and enabled within this context. + PLUGINS = Context("plugins") + + #: YAML-formatted v0 plugins will be installed within this context. + PLUGINS_V0_YAML = Context("plugins:v0:yaml") + + #: Python entrypoint plugins will be installed within this context. 
+ PLUGINS_V0_ENTRYPOINT = Context("plugins:v0:entrypoint") diff --git a/tutor/images.py b/tutor/images.py index fb412b9be1..ebab4ec505 100644 --- a/tutor/images.py +++ b/tutor/images.py @@ -1,21 +1,21 @@ -from . import fmt -from . import utils +from tutor import fmt, hooks, utils -def get_tag(config, name): - return config["DOCKER_IMAGE_" + name.upper().replace("-", "_")] +def build(path: str, tag: str, *args: str) -> None: + fmt.echo_info(f"Building image {tag}") + build_command = ["build", f"--tag={tag}", *args, path] + # `buildx` can be removed once Tutor requires Docker v23+. At that point, BuildKit will be + # enabled by default for all Docker users. + build_command.insert(0, "buildx") + command = hooks.Filters.DOCKER_BUILD_COMMAND.apply(build_command) + utils.docker(*command) -def build(path, tag, *args): - fmt.echo_info("Building image {}".format(tag)) - utils.docker("build", "-t", tag, *args, path) - - -def pull(tag): - fmt.echo_info("Pulling image {}".format(tag)) +def pull(tag: str) -> None: + fmt.echo_info(f"Pulling image {tag}") utils.docker("pull", tag) -def push(tag): - fmt.echo_info("Pushing image {}".format(tag)) +def push(tag: str) -> None: + fmt.echo_info(f"Pushing image {tag}") utils.docker("push", tag) diff --git a/tutor/interactive.py b/tutor/interactive.py index e9ce268f85..6aff45f3b1 100644 --- a/tutor/interactive.py +++ b/tutor/interactive.py @@ -1,45 +1,41 @@ +from typing import List, Optional + import click from . import config as tutor_config -from . import env -from . import exceptions -from . import fmt -from .__about__ import __version__ +from . import env, exceptions, fmt, hooks +from .types import Config, get_typed -def update(root, interactive=True): - """ - Load and save the configuration. 
+def ask_questions(config: Config, run_for_prod: Optional[bool] = None) -> None: """ - config, defaults = load_all(root, interactive=interactive) - tutor_config.save_config_file(root, config) - tutor_config.merge(config, defaults) - return config + Interactively ask questions to collect configuration values from the user. - -def load_all(root, interactive=True): - """ - Load configuration and interactively ask questions to collect param values from the user. + Arguments: + config: Existing (or minimal) configuration. Modified in-place. + run_for_prod: Whether platform should be configured for production. + If None, then ask the user. + Returns: + None """ - config, defaults = tutor_config.load_all(root) - if interactive: - ask_questions(config, defaults) - return config, defaults - - -def ask_questions(config, defaults): - run_for_prod = config.get("LMS_HOST") != "local.overhang.io" - run_for_prod = click.confirm( - fmt.question( - "Are you configuring a production platform? Type 'n' if you are just testing Tutor on your local computer" - ), - prompt_suffix=" ", - default=run_for_prod, - ) + defaults = tutor_config.get_defaults() + if run_for_prod is None: + run_for_prod = not config.get("LMS_HOST") in [ + "local.edly.io", + "local.overhang.io", + ] + run_for_prod = click.confirm( + fmt.question( + "Are you configuring a production platform? 
" + "Type 'n' if you are just testing Tutor on your local computer" + ), + prompt_suffix=" ", + default=run_for_prod, + ) if not run_for_prod: - dev_values = { - "LMS_HOST": "local.overhang.io", - "CMS_HOST": "studio.local.overhang.io", + dev_values: Config = { + "LMS_HOST": "local.edly.io", + "CMS_HOST": "studio.local.edly.io", "ENABLE_HTTPS": False, } fmt.echo_info( @@ -47,11 +43,11 @@ def ask_questions(config, defaults): ) for k, v in dev_values.items(): config[k] = v - fmt.echo_info(" {} = {}".format(k, v)) - - if run_for_prod: + fmt.echo_info(f" {k} = {v}") + else: ask("Your website domain name for students (LMS)", "LMS_HOST", config, defaults) - if "localhost" in config["LMS_HOST"]: + lms_host = get_typed(config, "LMS_HOST", str) + if "localhost" in lms_host: raise exceptions.TutorError( "You may not use 'localhost' as the LMS domain name. To run a local platform for testing purposes you should answer 'n' to the previous question." ) @@ -130,7 +126,7 @@ def ask_questions(config, defaults): "sq", "sr", "sv", - "sw", + "sw-ke", "ta", "te", "th", @@ -155,23 +151,34 @@ def ask_questions(config, defaults): defaults, ) + hooks.Actions.CONFIG_INTERACTIVE.do(config) + -def ask(question, key, config, defaults): - default = env.render_str(config, config.get(key, defaults[key])) +def ask(question: str, key: str, config: Config, defaults: Config) -> None: + default = get_typed(defaults, key, str) + default = get_typed(config, key, str, default=default) + default = env.render_str(config, default) config[key] = click.prompt( fmt.question(question), prompt_suffix=" ", default=default, show_default=True ) -def ask_bool(question, key, config, defaults): - default = config.get(key, defaults[key]) +def ask_bool(question: str, key: str, config: Config, defaults: Config) -> None: + default = get_typed(defaults, key, bool) + default = get_typed(config, key, bool, default=default) config[key] = click.confirm( fmt.question(question), prompt_suffix=" ", default=default ) -def 
ask_choice(question, key, config, defaults, choices): - default = config.get(key, defaults[key]) +def ask_choice( + question: str, + key: str, + config: Config, + defaults: Config, + choices: List[str], +) -> None: + default = str(config.get(key, defaults[key])) answer = click.prompt( fmt.question(question), type=click.Choice(choices), diff --git a/tutor/plugins.py b/tutor/plugins.py deleted file mode 100644 index 52e589431b..0000000000 --- a/tutor/plugins.py +++ /dev/null @@ -1,287 +0,0 @@ -from collections import namedtuple -from copy import deepcopy -from glob import glob -import importlib -import os -import pkg_resources - -import appdirs - -from . import exceptions -from . import fmt -from . import serialize - - -CONFIG_KEY = "PLUGINS" - - -class BasePlugin: - """ - Tutor plugins are defined by a name and an object that implements one or more of the - following properties: - - `config` (dict str->dict(str->str)): contains "add", "set", "default" keys. Entries - in these dicts will be added or override the global configuration. Keys in "add" and - "set" will be prefixed by the plugin name in uppercase. - - `patches` (dict str->str): entries in this dict will be used to patch the rendered - Tutor templates. For instance, to add "somecontent" to a template that includes '{{ - patch("mypatch") }}', set: `patches["mypatch"] = "somecontent"`. It is recommended - to store all patches in separate files, and to dynamically list patches by listing - the contents of a "patches" subdirectory. - - `templates` (str): path to a directory that includes new template files for the - plugin. It is recommended that all files in the template directory are stored in a - `myplugin` folder to avoid conflicts with other plugins. Plugin templates are useful - for content re-use, e.g: "{% include 'myplugin/mytemplate.html'}". - - `hooks` (dict str->list[str]): hooks are commands that will be run at various points - during the lifetime of the platform. 
For instance, to run `service1` and `service2` - in sequence during initialization, you should define: - - hooks["init"] = ["service1", "service2"] - - It is then assumed that there are `myplugin/hooks/service1/init` and - `myplugin/hooks/service2/init` templates in the plugin `templates` directory. - - `command` (click.Command): if a plugin exposes a `command` attribute, users will be able to run it from the command line as `tutor pluginname`. - """ - - INSTALLED = [] - _IS_LOADED = False - - def __init__(self, name, obj): - self.name = name - self.config = get_callable_attr(obj, "config", {}) - self.patches = get_callable_attr(obj, "patches", default={}) - self.hooks = get_callable_attr(obj, "hooks", default={}) - self.templates_root = get_callable_attr(obj, "templates", default=None) - self.command = getattr(obj, "command", None) - - def config_key(self, key): - """ - Config keys in the "add" and "defaults" dicts should be prefixed by the plugin name, in uppercase. - """ - return self.name.upper() + "_" + key - - @property - def config_add(self): - return self.config.get("add", {}) - - @property - def config_set(self): - return self.config.get("set", {}) - - @property - def config_defaults(self): - return self.config.get("defaults", {}) - - @property - def version(self): - raise NotImplementedError - - @classmethod - def iter_installed(cls): - if not cls._IS_LOADED: - for plugin in cls.iter_load(): - cls.INSTALLED.append(plugin) - cls._IS_LOADED = True - yield from cls.INSTALLED - - @classmethod - def iter_load(cls): - raise NotImplementedError - - -class EntrypointPlugin(BasePlugin): - """ - Entrypoint plugins are regular python packages that have a 'tutor.plugin.v0' entrypoint. - - The API for Tutor plugins is currently in development. The entrypoint will switch to - 'tutor.plugin.v1' once it is stabilised. 
- """ - - ENTRYPOINT = "tutor.plugin.v0" - - def __init__(self, entrypoint): - super().__init__(entrypoint.name, entrypoint.load()) - self.entrypoint = entrypoint - - @property - def version(self): - return self.entrypoint.dist.version - - @classmethod - def iter_load(cls): - for entrypoint in pkg_resources.iter_entry_points(cls.ENTRYPOINT): - yield cls(entrypoint) - - -class OfficialPlugin(BasePlugin): - """ - Official plugins have a "plugin" module which exposes a __version__ attribute. - Official plugins should be manually added by calling `OfficialPlugin.load()`. - """ - - @classmethod - def load(cls, name): - plugin = cls(name) - cls.INSTALLED.append(plugin) - return plugin - - def __init__(self, name): - self.module = importlib.import_module("tutor{}.plugin".format(name)) - super().__init__(name, self.module) - - @property - def version(self): - return self.module.__version__ - - @classmethod - def iter_load(cls): - yield from [] - - -class DictPlugin(BasePlugin): - ROOT_ENV_VAR_NAME = "TUTOR_PLUGINS_ROOT" - ROOT = os.path.expanduser( - os.environ.get(ROOT_ENV_VAR_NAME, "") - ) or appdirs.user_data_dir(appname="tutor-plugins") - - def __init__(self, data): - Module = namedtuple("Module", data.keys()) - obj = Module(**data) - super().__init__(data["name"], obj) - self._version = data["version"] - - @property - def version(self): - return self._version - - @classmethod - def iter_load(cls): - for path in glob(os.path.join(cls.ROOT, "*.yml")): - with open(path) as f: - data = serialize.load(f) - if not isinstance(data, dict): - raise exceptions.TutorError( - "Invalid plugin: {}. Expected dict.".format(path) - ) - try: - yield cls(data) - except KeyError as e: - raise exceptions.TutorError( - "Invalid plugin: {}. 
Missing key: {}".format(path, e.args[0]) - ) - - -class Plugins: - PLUGIN_CLASSES = [OfficialPlugin, EntrypointPlugin, DictPlugin] - - def __init__(self, config): - self.config = deepcopy(config) - self.patches = {} - self.hooks = {} - self.template_roots = {} - - for plugin in self.iter_enabled(): - for patch_name, content in plugin.patches.items(): - if patch_name not in self.patches: - self.patches[patch_name] = {} - self.patches[patch_name][plugin.name] = content - - for hook_name, services in plugin.hooks.items(): - if hook_name not in self.hooks: - self.hooks[hook_name] = {} - self.hooks[hook_name][plugin.name] = services - - @classmethod - def clear(cls): - for PluginClass in cls.PLUGIN_CLASSES: - PluginClass.INSTALLED.clear() - - @classmethod - def iter_installed(cls): - """ - Iterate on all installed plugins. Plugins are deduplicated by name. The list of installed plugins is cached to - prevent too many re-computations, which happens a lot. - """ - installed_plugin_names = set() - for PluginClass in cls.PLUGIN_CLASSES: - for plugin in PluginClass.iter_installed(): - if plugin.name not in installed_plugin_names: - installed_plugin_names.add(plugin.name) - yield plugin - - def iter_enabled(self): - for plugin in self.iter_installed(): - if is_enabled(self.config, plugin.name): - yield plugin - - def iter_patches(self, name): - plugin_patches = self.patches.get(name, {}) - plugins = sorted(plugin_patches.keys()) - for plugin in plugins: - yield plugin, plugin_patches[plugin] - - def iter_hooks(self, hook_name): - yield from self.hooks.get(hook_name, {}).items() - - -def get_callable_attr(plugin, attr_name, default=None): - attr = getattr(plugin, attr_name, default) - if callable(attr): - attr = attr() - return attr - - -def is_installed(name): - for plugin in iter_installed(): - if name == plugin.name: - return True - return False - - -def iter_installed(): - yield from Plugins.iter_installed() - - -def enable(config, name): - if not is_installed(name): - 
raise exceptions.TutorError("plugin '{}' is not installed.".format(name)) - if is_enabled(config, name): - return - if CONFIG_KEY not in config: - config[CONFIG_KEY] = [] - config[CONFIG_KEY].append(name) - config[CONFIG_KEY].sort() - - -def disable(config, name): - fmt.echo_info("Disabling plugin {}...".format(name)) - for plugin in Plugins(config).iter_enabled(): - if name == plugin.name: - # Remove "set" config entries - for key, value in plugin.config_set.items(): - config.pop(key, None) - fmt.echo_info(" Removed config entry {}={}".format(key, value)) - # Remove plugin from list - while name in config[CONFIG_KEY]: - config[CONFIG_KEY].remove(name) - fmt.echo_info(" Plugin disabled") - - -def iter_enabled(config): - yield from Plugins(config).iter_enabled() - - -def is_enabled(config, name): - return name in config.get(CONFIG_KEY, []) - - -def iter_patches(config, name): - yield from Plugins(config).iter_patches(name) - - -def iter_hooks(config, hook_name): - yield from Plugins(config).iter_hooks(hook_name) diff --git a/tutor/plugins/__init__.py b/tutor/plugins/__init__.py new file mode 100644 index 0000000000..b46106ad10 --- /dev/null +++ b/tutor/plugins/__init__.py @@ -0,0 +1,122 @@ +""" +Provide API for plugin features. +""" + +from __future__ import annotations + +import typing as t + +from tutor import exceptions, fmt, hooks +from tutor.types import Config + +# Import modules to trigger hook creation +from . import openedx, v0, v1 + + +def is_installed(name: str) -> bool: + """ + Return true if the plugin is installed. + """ + return name in iter_installed() + + +def iter_installed() -> t.Iterator[str]: + """ + Iterate on all installed plugins, sorted by name. + + This will yield all plugins, including those that have the same name. + + The CORE_READY action must have been triggered prior to calling this function, + otherwise no installed plugin will be detected. 
+ """ + yield from sorted(hooks.Filters.PLUGINS_INSTALLED.iterate()) + + +def iter_info() -> t.Iterator[tuple[str, t.Optional[str]]]: + """ + Iterate on the information of all installed plugins. + + Yields (, ) tuples. + """ + + def plugin_info_name(info: tuple[str, t.Optional[str]]) -> str: + return info[0] + + yield from sorted(hooks.Filters.PLUGINS_INFO.iterate(), key=plugin_info_name) + + +def is_loaded(name: str) -> bool: + return name in iter_loaded() + + +def load_all(names: t.Iterable[str]) -> None: + """ + Load all plugins one by one. + + Plugins are loaded in alphabetical order. We ignore plugins which failed to load. + After all plugins have been loaded, the PLUGINS_LOADED action is triggered. + """ + names = sorted(set(names)) + for name in names: + try: + load(name) + except Exception as e: # pylint: disable=broad-except + fmt.echo_alert(f"Failed to enable plugin '{name}': {e}") + hooks.Actions.PLUGINS_LOADED.do() + + +def load(name: str) -> None: + """ + Load a given plugin, thus declaring all its hooks. + + Loading a plugin is done within a context, such that we can remove all hooks when a + plugin is disabled, or during unit tests. + """ + if not is_installed(name): + raise exceptions.TutorError(f"plugin '{name}' is not installed.") + with hooks.Contexts.PLUGINS.enter(): + with hooks.Contexts.app(name).enter(): + hooks.Actions.PLUGIN_LOADED.do(name) + hooks.Filters.PLUGINS_LOADED.add_item(name) + + +def iter_loaded() -> t.Iterator[str]: + """ + Iterate on the list of loaded plugin names, sorted in alphabetical order. + + Note that loaded plugin names are deduplicated. Thus, if two plugins have + the same name, just one name will be displayed. 
+ """ + plugins: t.Iterable[str] = hooks.Filters.PLUGINS_LOADED.iterate() + yield from sorted(set(plugins)) + + +def iter_patches(name: str) -> t.Iterator[str]: + """ + Yields: patch (str) + """ + yield from _env_patches().get(name, []) + + +@hooks.lru_cache +def _env_patches() -> dict[str, list[str]]: + """ + Dictionary of patches, implemented for performance reasons. + """ + patches: dict[str, list[str]] = {} + for name, content in hooks.Filters.ENV_PATCHES.iterate(): + patches.setdefault(name, []) + patches[name].append(content) + return patches + + +def unload(plugin: str) -> None: + """ + Remove all filters and actions associated to a given plugin. + """ + hooks.clear_all(context=hooks.Contexts.app(plugin).name) + + +@hooks.Actions.PLUGIN_UNLOADED.add(priority=hooks.priorities.HIGH) +def _unload_on_disable(plugin: str, _root: str, _config: Config) -> None: + unload(plugin) diff --git a/tutor/plugins/base.py b/tutor/plugins/base.py new file mode 100644 index 0000000000..af6d8c08c6 --- /dev/null +++ b/tutor/plugins/base.py @@ -0,0 +1,16 @@ +import os + +import appdirs + +from tutor.__about__ import __app__ + +PLUGINS_ROOT_ENV_VAR_NAME = "TUTOR_PLUGINS_ROOT" + +# Folder path which contains *.yml and *.py file plugins. +# On linux this is typically ``~/.local/share/tutor-plugins``. On the nightly branch +# this will be ``~/.local/share/tutor-plugins-nightly``. +# The path can be overridden by defining the ``TUTOR_PLUGINS_ROOT`` environment +# variable. 
+PLUGINS_ROOT = os.path.expanduser( + os.environ.get(PLUGINS_ROOT_ENV_VAR_NAME, "") +) or appdirs.user_data_dir(appname=__app__ + "-plugins") diff --git a/tutor/plugins/indexes.py b/tutor/plugins/indexes.py new file mode 100644 index 0000000000..c32d1117af --- /dev/null +++ b/tutor/plugins/indexes.py @@ -0,0 +1,244 @@ +from __future__ import annotations + +import os +import typing as t + +from yaml.parser import ParserError + +from tutor import env, fmt, hooks, serialize, utils +from tutor.__about__ import __version__, __version_suffix__ +from tutor.exceptions import TutorError +from tutor.types import Config, get_typed + +PLUGIN_INDEXES_KEY = "PLUGIN_INDEXES" +# Current release name ('zebulon' or 'nightly') and version (1-26) +RELEASE = __version_suffix__ or env.get_current_open_edx_release_name() +MAJOR_VERSION = int(__version__.split(".", maxsplit=1)[0]) + + +class Indexes: + # Store index cache path in this singleton. + CACHE_PATH = "" + + +@hooks.Actions.PROJECT_ROOT_READY.add() +def _set_indexes_cache_path(root: str) -> None: + Indexes.CACHE_PATH = env.pathjoin(root, "plugins", "index", "cache.yml") + + +@hooks.Filters.PLUGIN_INDEX_URL.add() +def _get_index_url_from_alias(url: str) -> str: + known_aliases = { + "main": "https://overhang.io/tutor/main", + "contrib": "https://overhang.io/tutor/contrib", + } + return known_aliases.get(url, url) + + +@hooks.Filters.PLUGIN_INDEX_URL.add() +def _local_absolute_path(url: str) -> str: + if os.path.exists(url): + url = os.path.abspath(url) + return url + + +class IndexEntry: + def __init__(self, data: dict[str, str]): + self._data = data + + @property + def data(self) -> dict[str, str]: + return self._data + + @property + def name(self) -> str: + return self.data["name"] + + @property + def src(self) -> str: + return self.data["src"] + + @property + def short_description(self) -> str: + lines = self.description.splitlines() or [""] + return lines[0][:128] + + @property + def description(self) -> str: + return 
self.data.get("description", "").strip() + + @property + def author(self) -> str: + return self.data.get("author", "") + + @property + def maintainer(self) -> str: + return self.data.get("maintainer", "") + + @property + def url(self) -> str: + return self.data.get("url", "") + + @property + def index(self) -> str: + return self.data["index"] + + def match(self, pattern: str) -> bool: + """ + Simple case-insensitive pattern matching. + + Pattern matching is case-insensitive. Both the name and description fields are + searched. + """ + if not pattern: + return True + pattern = pattern.lower() + if pattern in self.name.lower() or pattern in self.description.lower(): + return True + return False + + +def add(url: str, config: Config) -> bool: + """ + Append an index to the list if not already present. + + Return True if the list of indexes was modified. + """ + indexes = get_all(config) + url = hooks.Filters.PLUGIN_INDEX_URL.apply(url) + if url in indexes: + return False + indexes.append(url) + return True + + +def remove(url: str, config: Config) -> bool: + """ + Remove an index from the list if present. + + Return True if the list of indexes was modified. + """ + indexes = get_all(config) + url = hooks.Filters.PLUGIN_INDEX_URL.apply(url) + if url not in indexes: + return False + indexes.remove(url) + return True + + +def get_all(config: Config) -> list[str]: + """ + Return the list of all plugin indexes. + """ + config.setdefault(PLUGIN_INDEXES_KEY, []) + indexes = get_typed(config, PLUGIN_INDEXES_KEY, list) + for url in indexes: + if not isinstance(url, str): + raise TutorError( + f"Invalid plugin index: {url}. Expected 'str', got '{url.__class__}'" + ) + return indexes + + +def fetch(config: Config) -> list[dict[str, str]]: + """ + Fetch the contents of all indexes. Return the list of plugin entries.
+ """ + all_plugins: list[dict[str, str]] = [] + indexes = get_all(config) + indexes = hooks.Filters.PLUGIN_INDEXES.apply(indexes) + for index in indexes: + url = named_index_url(index) + try: + fmt.echo_info(f"Fetching index {url}...") + all_plugins += fetch_url(url) + except TutorError as e: + fmt.echo_error(f" Failed to update index. {e.args[0]}") + + return deduplicate_plugins(all_plugins) + + +def deduplicate_plugins(plugins: list[dict[str, str]]) -> list[dict[str, str]]: + plugins_dict: dict[str, dict[str, str]] = {} + for plugin in plugins: + # Plugins from later indexes override others + plugin["name"] = plugin["name"].lower() + plugins_dict[plugin["name"]] = plugin + + return sorted(plugins_dict.values(), key=lambda p: p["name"]) + + +def fetch_url(url: str) -> list[dict[str, str]]: + content = utils.read_url(url) + plugins = parse_index(content) + for plugin in plugins: + # Store index url in the plugin itself + plugin["index"] = url + return plugins + + +def parse_index(content: str) -> list[dict[str, str]]: + try: + plugins = serialize.load(content) + except ParserError as e: + raise TutorError(f"Could not parse index: {e}") from e + validate_index(plugins) + valid_plugins = [] + for plugin in plugins: + # check plugin format + if "name" not in plugin: + fmt.echo_error(" Invalid plugin: missing 'name' attribute") + elif not isinstance(plugin["name"], str): + fmt.echo_error( + f" Invalid plugin name: expected str, got {plugin['name'].__class__}" + ) + else: + valid_plugins.append(plugin) + return valid_plugins + + +def validate_index(plugins: t.Any) -> list[dict[str, str]]: + if not isinstance(plugins, list): + raise TutorError( + f"Invalid plugin index format. 
Expected list, got {plugins.__class__}" + ) + return plugins + + +def named_index_url(url: str) -> str: + if utils.is_http(url): + separator = "" if url.endswith("/") else "/" + return f"{url}{separator}{RELEASE}/plugins.yml" + return os.path.join(url, RELEASE, "plugins.yml") + + +def find_in_cache(name: str) -> IndexEntry: + """ + Find entry in cache. If not found, raise error. + """ + name = name.lower() + for entry in iter_cache_entries(): + if entry.name == name: + return entry + raise TutorError(f"Plugin '{name}' could not be found in indexes") + + +def iter_cache_entries() -> t.Iterator[IndexEntry]: + for data in load_cache(): + yield IndexEntry(data) + + +def save_cache(plugins: list[dict[str, str]]) -> str: + env.write_to(serialize.dumps(plugins), Indexes.CACHE_PATH) + return Indexes.CACHE_PATH + + +def load_cache() -> list[dict[str, str]]: + try: + with open(Indexes.CACHE_PATH, encoding="utf8") as cache_if: + plugins = serialize.load(cache_if) + except FileNotFoundError as e: + raise TutorError( + f"Local index cache could not be found in {Indexes.CACHE_PATH}. Run `tutor plugins update`." 
+ ) from e + return validate_index(plugins) diff --git a/tutor/plugins/openedx.py b/tutor/plugins/openedx.py new file mode 100644 index 0000000000..f01288173e --- /dev/null +++ b/tutor/plugins/openedx.py @@ -0,0 +1,186 @@ +from __future__ import annotations + +import os +import re +import typing as t + +from tutor import bindmount, hooks +from tutor.__about__ import __version_suffix__ + + +@hooks.Filters.CONFIG_DEFAULTS.add() +def _set_openedx_common_version_in_nightly( + items: list[tuple[str, t.Any]] +) -> list[tuple[str, t.Any]]: + if __version_suffix__ == "nightly": + items.append(("OPENEDX_COMMON_VERSION", "master")) + return items + + +@hooks.Filters.APP_PUBLIC_HOSTS.add() +def _edx_platform_public_hosts( + hosts: list[str], context_name: t.Literal["local", "dev"] +) -> list[str]: + if context_name == "dev": + hosts += ["{{ LMS_HOST }}:8000", "{{ CMS_HOST }}:8001"] + else: + hosts += ["{{ LMS_HOST }}", "{{ CMS_HOST }}"] + return hosts + + +@hooks.Filters.IMAGES_BUILD_MOUNTS.add() +def _mount_edx_platform_build( + volumes: list[tuple[str, str]], path: str +) -> list[tuple[str, str]]: + """ + Automatically add an edx-platform repo from the host to the build context whenever + it is added to the `MOUNTS` setting. + """ + if os.path.basename(path) == "edx-platform": + volumes += [ + ("openedx", "edx-platform"), + ("openedx-dev", "edx-platform"), + ] + return volumes + + +@hooks.Filters.COMPOSE_MOUNTS.add() +def _mount_edx_platform_compose( + volumes: list[tuple[str, str]], name: str +) -> list[tuple[str, str]]: + """ + When mounting edx-platform with `tutor mounts add /path/to/edx-platform`, + bind-mount the host repo in the lms/cms containers. + """ + if name == "edx-platform": + path = "/openedx/edx-platform" + volumes.append(("openedx", path)) + return volumes + + +# Auto-magically bind-mount xblock directories and some common dependencies. 
+hooks.Filters.MOUNTED_DIRECTORIES.add_items( + [ + ("openedx", r".*[xX][bB]lock.*"), + ("openedx", "edx-enterprise"), + ("openedx", "edx-ora2"), + ("openedx", "edx-search"), + ("openedx", "openedx-learning"), + ("openedx", r"platform-plugin-.*"), + ] +) + + +@hooks.Filters.MOUNTED_DIRECTORIES.add(priority=hooks.priorities.LOW) +def _add_openedx_dev_mounted_python_packages( + image_regex: list[tuple[str, str]] +) -> list[tuple[str, str]]: + """ + Automatically add python packages to "openedx-dev" if they are already in the + "openedx" image. + """ + for image, regex in image_regex: + if image == "openedx": + image_regex.append(("openedx-dev", regex)) + return image_regex + + +@hooks.Filters.IMAGES_BUILD_MOUNTS.add() +def _mount_python_requirements_build( + volumes: list[tuple[str, str]], path: str +) -> list[tuple[str, str]]: + """ + Automatically bind-mount directories that match MOUNTED_DIRECTORIES at build-time. + These directories are mounted in the "mnt-{name}" layer. + """ + name = os.path.basename(path) + for image_name, regex in hooks.Filters.MOUNTED_DIRECTORIES.iterate(): + if re.match(regex, name): + volumes.append((image_name, f"mnt-{name}")) + return volumes + + +@hooks.Filters.COMPOSE_MOUNTS.add() +def _mount_edx_platform_python_requirements_compose( + volumes: list[tuple[str, str]], folder_name: str +) -> list[tuple[str, str]]: + """ + Automatically bind-mount edx-platform Python requirements at runtime. + """ + for image_name, regex in hooks.Filters.MOUNTED_DIRECTORIES.iterate(): + if re.match(regex, folder_name): + # Bind-mount requirement + # TODO this is a breaking change because we associate runtime bind-mounts to + # "openedx" and no longer to "lms", "cms", etc. + volumes.append((image_name, f"/mnt/{folder_name}")) + return volumes + + +def iter_mounted_directories(mounts: list[str], image_name: str) -> t.Iterator[str]: + """ + Parse the list of mounted directories and yield the directory names that are for + the request image. 
Returned names are sorted in alphabetical order. + """ + mounted_dirnames: set[str] = set() + for mount in mounts: + for _service, host_path, _container_path in bindmount.parse_mount(mount): + dirname = os.path.basename(host_path) + if is_directory_mounted(image_name, dirname): + mounted_dirnames.add(dirname) + break + + yield from sorted(mounted_dirnames) + + +def is_directory_mounted(image_name: str, dirname: str) -> bool: + for name, regex in hooks.Filters.MOUNTED_DIRECTORIES.iterate(): + if name == image_name and re.match(regex, dirname): + return True + return False + + +hooks.Filters.ENV_TEMPLATE_VARIABLES.add_item( + ("iter_mounted_directories", iter_mounted_directories) +) + + +hooks.Filters.LMS_WORKER_COMMAND.add_items( + [ + "celery", + "--app=lms.celery", + "worker", + "--loglevel=info", + "--hostname=edx.lms.core.default.%h", + "--queues=edx.lms.core.default,edx.lms.core.high,edx.lms.core.high_mem", + "--max-tasks-per-child=100", + ] +) + + +hooks.Filters.CMS_WORKER_COMMAND.add_items( + [ + "celery", + "--app=cms.celery", + "worker", + "--loglevel=info", + "--hostname=edx.cms.core.default.%h", + "--queues=edx.cms.core.default,edx.cms.core.high,edx.cms.core.low", + "--max-tasks-per-child=100", + ] +) + + +def iter_cms_celery_parameters() -> t.Iterator[str]: + yield from hooks.Filters.CMS_WORKER_COMMAND.iterate() + + +def iter_lms_celery_parameters() -> t.Iterator[str]: + yield from hooks.Filters.LMS_WORKER_COMMAND.iterate() + + +hooks.Filters.ENV_TEMPLATE_VARIABLES.add_items( + [ + ("iter_cms_celery_parameters", iter_cms_celery_parameters), + ("iter_lms_celery_parameters", iter_lms_celery_parameters), + ] +) diff --git a/tutor/plugins/v0.py b/tutor/plugins/v0.py new file mode 100644 index 0000000000..b1cf0f76d5 --- /dev/null +++ b/tutor/plugins/v0.py @@ -0,0 +1,401 @@ +import importlib +import importlib.util +import os +import typing as t +from glob import glob + +import click +import importlib_metadata + +from tutor import env, exceptions, fmt, hooks, 
serialize +from tutor.__about__ import __app__ +from tutor.types import Config + +from .base import PLUGINS_ROOT + + +class BasePlugin: + """ + Tutor plugins are defined by a name and an object that implements one or more of the + following properties: + + `config` (dict str->dict(str->str)): contains "add", "defaults", "set" keys. Entries + in these dicts will be added or override the global configuration. Keys in "add" and + "defaults" will be prefixed by the plugin name in uppercase. + + `patches` (dict str->str): entries in this dict will be used to patch the rendered + Tutor templates. For instance, to add "somecontent" to a template that includes '{{ + patch("mypatch") }}', set: `patches["mypatch"] = "somecontent"`. It is recommended + to store all patches in separate files, and to dynamically list patches by listing + the contents of a "patches" subdirectory. + + `templates` (str): path to a directory that includes new template files for the + plugin. It is recommended that all files in the template directory are stored in a + `myplugin` folder to avoid conflicts with other plugins. Plugin templates are useful + for content re-use, e.g: "{% include 'myplugin/mytemplate.html'}". + + `hooks` (dict str->list[str]): hooks are commands that will be run at various points + during the lifetime of the platform. For instance, to run `service1` and `service2` + in sequence during initialisation, you should define: + + hooks["init"] = ["service1", "service2"] + + It is then assumed that there are `myplugin/hooks/service1/init` and + `myplugin/hooks/service2/init` templates in the plugin `templates` directory. + + `command` (click.Command): if a plugin exposes a `command` attribute, users will be able to run it from the command line as `tutor pluginname`. 
+ """ + + def __init__(self, name: str, loader: t.Optional[t.Any] = None) -> None: + self.name = name + self.loader = loader + self.obj: t.Optional[t.Any] = None + self._discover() + + def _discover(self) -> None: + # Add itself to the list of installed plugins + hooks.Filters.PLUGINS_INSTALLED.add_item(self.name) + + # Add plugin version + hooks.Filters.PLUGINS_INFO.add_item((self.name, self._version() or "")) + + # Create actions and filters on load + @hooks.Actions.PLUGIN_LOADED.add() + def _load_plugin(name: str) -> None: + if name == self.name: + self.__load() + + def __load(self) -> None: + """ + On loading a plugin, we create all the required actions and filters. + + Note that this method is quite costly. Thus it is important that as little is + done as part of installing the plugin. For instance, we should not import + modules during installation, but only when the plugin is enabled. + """ + # Add all actions/filters + self._load_obj() + self._load_config() + self._load_patches() + self._load_tasks() + self._load_templates_root() + self._load_command() + + def _load_obj(self) -> None: + """ + Override this method to write to the `obj` attribute based on the `loader`. + """ + raise NotImplementedError + + def _load_config(self) -> None: + """ + Load config and check types. + """ + config = get_callable_attr(self.obj, "config", {}) + if not isinstance(config, dict): + raise exceptions.TutorError( + f"Invalid config in plugin {self.name}. Expected dict, got {config.__class__}." + ) + for name, subconfig in config.items(): + if not isinstance(name, str): + raise exceptions.TutorError( + f"Invalid config entry '{name}' in plugin {self.name}. Expected str, got {config.__class__}." + ) + if not isinstance(subconfig, dict): + raise exceptions.TutorError( + f"Invalid config entry '{name}' in plugin {self.name}. Expected str keys, got {config.__class__}." 
+ ) + for key in subconfig.keys(): + if not isinstance(key, str): + raise exceptions.TutorError( + f"Invalid config entry '{name}.{key}' in plugin {self.name}. Expected str, got {key.__class__}." + ) + + # Config keys in the "add" and "defaults" dicts must be prefixed by + # the plugin name, in uppercase. + key_prefix = self.name.upper() + "_" + + hooks.Filters.CONFIG_UNIQUE.add_items( + [ + (f"{key_prefix}{key}", value) + for key, value in config.get("add", {}).items() + ], + ) + hooks.Filters.CONFIG_DEFAULTS.add_items( + [ + (f"{key_prefix}{key}", value) + for key, value in config.get("defaults", {}).items() + ], + ) + hooks.Filters.CONFIG_OVERRIDES.add_items( + [(key, value) for key, value in config.get("set", {}).items()], + ) + + def _load_patches(self) -> None: + """ + Load patches and check the types are right. + """ + patches = get_callable_attr(self.obj, "patches", {}) + if not isinstance(patches, dict): + raise exceptions.TutorError( + f"Invalid patches in plugin {self.name}. Expected dict, got {patches.__class__}." + ) + for patch_name, content in patches.items(): + if not isinstance(patch_name, str): + raise exceptions.TutorError( + f"Invalid patch name '{patch_name}' in plugin {self.name}. Expected str, got {patch_name.__class__}." + ) + if not isinstance(content, str): + raise exceptions.TutorError( + f"Invalid patch '{patch_name}' in plugin {self.name}. Expected str, got {content.__class__}." + ) + hooks.Filters.ENV_PATCHES.add_item((patch_name, content)) + + def _load_tasks(self) -> None: + """ + Load hooks and check types. + """ + tasks = get_callable_attr(self.obj, "hooks", default={}) + if not isinstance(tasks, dict): + raise exceptions.TutorError( + f"Invalid hooks in plugin {self.name}. Expected dict, got {tasks.__class__}." 
+ ) + + build_image_tasks = tasks.get("build-image", {}) + remote_image_tasks = tasks.get("remote-image", {}) + pre_init_tasks = tasks.get("pre-init", []) + init_tasks = tasks.get("init", []) + + # Build images: hooks = {"build-image": {"myimage": "myimage:latest"}} + # We assume that the dockerfile is in the build/myimage folder. + for img, tag in build_image_tasks.items(): + hooks.Filters.IMAGES_BUILD.add_item( + (img, ("plugins", self.name, "build", img), tag, ()), + ) + # Remote images: hooks = {"remote-image": {"myimage": "myimage:latest"}} + for img, tag in remote_image_tasks.items(): + hooks.Filters.IMAGES_PULL.add_item( + (img, tag), + ) + hooks.Filters.IMAGES_PUSH.add_item( + (img, tag), + ) + # Pre-init scripts: hooks = {"pre-init": ["myservice1", "myservice2"]} + for service in pre_init_tasks: + hooks.Filters.CLI_DO_INIT_TASKS.add_item( + ( + service, + env.read_template_file(self.name, "hooks", service, "pre-init"), + ), + priority=hooks.priorities.HIGH, + ) + # Init scripts: hooks = {"init": ["myservice1", "myservice2"]} + for service in init_tasks: + hooks.Filters.CLI_DO_INIT_TASKS.add_item( + (service, env.read_template_file(self.name, "hooks", service, "init")) + ) + + def _load_templates_root(self) -> None: + templates_root = get_callable_attr(self.obj, "templates", default=None) + if templates_root is None: + return + if not isinstance(templates_root, str): + raise exceptions.TutorError( + f"Invalid templates in plugin {self.name}. Expected str, got {templates_root.__class__}." + ) + + hooks.Filters.ENV_TEMPLATE_ROOTS.add_item(templates_root) + # We only add the "apps" and "build" folders and we render them in the + # "plugins/" folder. 
+ hooks.Filters.ENV_TEMPLATE_TARGETS.add_items( + [ + ( + os.path.join(self.name, "apps"), + "plugins", + ), + ( + os.path.join(self.name, "build"), + "plugins", + ), + ] + ) + + def _load_command(self) -> None: + command = getattr(self.obj, "command", None) + if command is None: + return + if not isinstance(command, click.Command): + raise exceptions.TutorError( + f"Invalid command in plugin {self.name}. Expected click.Command, got {command.__class__}." + ) + # We force the command name to the plugin name + command.name = self.name + hooks.Filters.CLI_COMMANDS.add_item(command) + + def _version(self) -> t.Optional[str]: + return None + + +class EntrypointPlugin(BasePlugin): + """ + Entrypoint plugins are regular python packages that have a 'tutor.plugin.v0' entrypoint. + + The API for Tutor plugins is currently in development. The entrypoint will switch to + 'tutor.plugin.v1' once it is stabilised. + """ + + ENTRYPOINT = "tutor.plugin.v0" + + def __init__(self, entrypoint: importlib_metadata.EntryPoint) -> None: + self.loader: importlib_metadata.EntryPoint = entrypoint + super().__init__(entrypoint.name, entrypoint) + + def _load_obj(self) -> None: + self.obj = importlib.import_module(self.loader.value) + + def _version(self) -> t.Optional[str]: + if not self.loader.dist: + raise exceptions.TutorError(f"Entrypoint plugin '{self.name}' has no dist.") + return self.loader.dist.version + + @classmethod + def discover_all(cls) -> None: + entrypoints = importlib_metadata.entry_points(group=cls.ENTRYPOINT) + for entrypoint in entrypoints: + try: + error: t.Optional[str] = None + cls(entrypoint) + except Exception as e: # pylint: disable=broad-except + error = str(e) + if error: + fmt.echo_error( + f"Failed to load entrypoint '{entrypoint.name} = {entrypoint.module_name}' from distribution {entrypoint.dist}: {error}" + ) + + +class OfficialPlugin(BasePlugin): + """ + Official plugins have a "plugin" module which exposes a __version__ attribute. 
+ Official plugins should be manually added by instantiating them with: `OfficialPlugin('name')`.
+ """
+
+ NAMES = [
+ "android",
+ "discovery",
+ "ecommerce",
+ "forum",
+ "license",
+ "mfe",
+ "minio",
+ "notes",
+ "webui",
+ "xqueue",
+ ]
+
+ def _load_obj(self) -> None:
+ self.obj = importlib.import_module(f"tutor{self.name}.plugin")
+
+ def _version(self) -> t.Optional[str]:
+ try:
+ module = importlib.import_module(f"tutor{self.name}.__about__")
+ except ModuleNotFoundError:
+ return None
+ version = getattr(module, "__version__")
+ if version is None:
+ return None
+ if not isinstance(version, str):
+ raise TypeError("OfficialPlugin __version__ must be 'str'")
+ return version
+
+ @classmethod
+ def discover_all(cls) -> None:
+ """
+ This function must be called explicitly from the main. This is to handle
+ detection of official plugins from within the compiled binary. When not running
+ the binary, official plugins are treated as regular entrypoint plugins.
+ """
+ for plugin_name in cls.NAMES:
+ if importlib.util.find_spec(f"tutor{plugin_name}") is not None:
+ OfficialPlugin(plugin_name)
+
+
+class DictPlugin(BasePlugin):
+ def __init__(self, data: Config):
+ self.loader: Config
+ name = data["name"]
+ if not isinstance(name, str):
+ raise exceptions.TutorError(
+ f"Invalid plugin name: '{name}'. 
Expected str, got {name.__class__}" + ) + super().__init__(name, data) + + def _load_obj(self) -> None: + # Create a generic object (sort of a named tuple) which will contain all + # key/values from data + class Module: + pass + + self.obj = Module() + for key, value in self.loader.items(): + setattr(self.obj, key, value) + + def _version(self) -> t.Optional[str]: + version = self.loader.get("version", None) + if version is None: + return None + if not isinstance(version, str): + raise TypeError("DictPlugin.version must be str") + return version + + @classmethod + def discover_all(cls) -> None: + for path in glob(os.path.join(PLUGINS_ROOT, "*.yml")): + with open(path, encoding="utf-8") as f: + data = serialize.load(f) + if not isinstance(data, dict): + raise exceptions.TutorError( + f"Invalid plugin: {path}. Expected dict." + ) + try: + cls(data) + except KeyError as e: + raise exceptions.TutorError( + f"Invalid plugin: {path}. Missing key: {e.args[0]}" + ) + + +@hooks.Actions.CORE_READY.add() +def _discover_v0_plugins() -> None: + """ + Install all entrypoint and dict plugins. + + Plugins from both classes are discovered in a context, to make it easier to disable + them in tests. + + Note that official plugins are not discovered here. That's because they are expected + to be discovered manually from within the tutor binary. + + Installing entrypoint or dict plugins can be disabled by defining the + ``TUTOR_IGNORE_DICT_PLUGINS`` and ``TUTOR_IGNORE_ENTRYPOINT_PLUGINS`` + environment variables. + """ + with hooks.Contexts.PLUGINS.enter(): + if "TUTOR_IGNORE_ENTRYPOINT_PLUGINS" not in os.environ: + with hooks.Contexts.PLUGINS_V0_ENTRYPOINT.enter(): + EntrypointPlugin.discover_all() + if "TUTOR_IGNORE_DICT_PLUGINS" not in os.environ: + with hooks.Contexts.PLUGINS_V0_YAML.enter(): + DictPlugin.discover_all() + + +def get_callable_attr( + plugin: t.Any, attr_name: str, default: t.Optional[t.Any] = None +) -> t.Optional[t.Any]: + """ + Return the attribute of a plugin. 
If this attribute is a callable, return + the return value instead. + """ + attr = getattr(plugin, attr_name, default) + if callable(attr): + attr = attr() # pylint: disable=not-callable + return attr diff --git a/tutor/plugins/v1.py b/tutor/plugins/v1.py new file mode 100644 index 0000000000..88982b6f67 --- /dev/null +++ b/tutor/plugins/v1.py @@ -0,0 +1,78 @@ +import importlib.util +import os +from glob import glob + +import importlib_metadata + +from tutor import hooks + +from .base import PLUGINS_ROOT + + +@hooks.Actions.CORE_READY.add() +def _discover_module_plugins() -> None: + """ + Discover .py files in the plugins root folder. + """ + with hooks.Contexts.PLUGINS.enter(): + for path in glob(os.path.join(PLUGINS_ROOT, "*.py")): + discover_module(path) + + +@hooks.Actions.CORE_READY.add() +def _discover_entrypoint_plugins() -> None: + """ + Discover all plugins that declare a "tutor.plugin.v1" entrypoint. + """ + with hooks.Contexts.PLUGINS.enter(): + if "TUTOR_IGNORE_ENTRYPOINT_PLUGINS" not in os.environ: + for entrypoint in importlib_metadata.entry_points(group="tutor.plugin.v1"): + discover_package(entrypoint) + + +def discover_module(path: str) -> None: + """ + Install a plugin written as a single file module. 
+ """
+ name = os.path.splitext(os.path.basename(path))[0]
+
+ # Add plugin to the list of installed plugins
+ hooks.Filters.PLUGINS_INSTALLED.add_item(name)
+
+ # Add plugin information
+ hooks.Filters.PLUGINS_INFO.add_item((name, path))
+
+ # Import module on enable
+ @hooks.Actions.PLUGIN_LOADED.add()
+ def load(plugin_name: str) -> None:
+ if name == plugin_name:
+ # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
+ spec = importlib.util.spec_from_file_location(
+ f"tutor.plugin.v1.{name}", path
+ )
+ if spec is None or spec.loader is None:
+ raise ValueError(f"Plugin could not be found: {path}")
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
+
+
+def discover_package(entrypoint: importlib_metadata.EntryPoint) -> None:
+ """
+ Install a plugin from a python package.
+ """
+ name = entrypoint.name
+
+ # Add plugin to the list of installed plugins
+ hooks.Filters.PLUGINS_INSTALLED.add_item(name)
+
+ # Add plugin information
+ if entrypoint.dist is None:
+ raise ValueError(f"Could not read plugin version: {name}")
+ dist_version = entrypoint.dist.version if entrypoint.dist else "Unknown"
+ hooks.Filters.PLUGINS_INFO.add_item((name, dist_version))
+
+ # Import module on enable
+ @hooks.Actions.PLUGIN_LOADED.add()
+ def load(plugin_name: str) -> None:
+ if name == plugin_name:
+ importlib.import_module(entrypoint.value)
diff --git a/tutor/py.typed b/tutor/py.typed
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tutor/scripts.py b/tutor/scripts.py
deleted file mode 100644
index 538960699e..0000000000
--- a/tutor/scripts.py
+++ /dev/null
@@ -1,107 +0,0 @@
-from . import env
-from . import fmt
-from . 
import plugins - -BASE_OPENEDX_COMMAND = """ -export DJANGO_SETTINGS_MODULE=$SERVICE_VARIANT.envs.$SETTINGS -echo "Loading settings $DJANGO_SETTINGS_MODULE" -""" - - -class BaseRunner: - def __init__(self, root, config): - self.root = root - self.config = config - - def run_job_from_template(self, service, *path): - command = self.render(*path) - self.run_job(service, command) - - def render(self, *path): - return env.render_file(self.config, *path).strip() - - def run_job(self, service, command): - raise NotImplementedError - - def iter_plugin_hooks(self, hook): - yield from plugins.iter_hooks(self.config, hook) - - -def initialise(runner, limit_to=None): - fmt.echo_info("Initialising all services...") - if limit_to is None or limit_to == "mysql": - runner.run_job_from_template("mysql", "hooks", "mysql", "init") - for plugin_name, hook in runner.iter_plugin_hooks("pre-init"): - if limit_to is None or limit_to == plugin_name: - for service in hook: - fmt.echo_info( - "Plugin {}: running pre-init for service {}...".format( - plugin_name, service - ) - ) - runner.run_job_from_template( - service, plugin_name, "hooks", service, "pre-init" - ) - for service in ["lms", "cms", "forum"]: - if limit_to is None or limit_to == service: - fmt.echo_info("Initialising {}...".format(service)) - runner.run_job_from_template(service, "hooks", service, "init") - for plugin_name, hook in runner.iter_plugin_hooks("init"): - if limit_to is None or limit_to == plugin_name: - for service in hook: - fmt.echo_info( - "Plugin {}: running init for service {}...".format( - plugin_name, service - ) - ) - runner.run_job_from_template( - service, plugin_name, "hooks", service, "init" - ) - fmt.echo_info("All services initialised.") - - -def create_user_command(superuser, staff, username, email, password=None): - command = BASE_OPENEDX_COMMAND - - opts = "" - if superuser: - opts += " --superuser" - if staff: - opts += " --staff" - command += """ -./manage.py lms manage_user {opts} {username} 
{email} -""" - if password: - command += """ -./manage.py lms shell -c "from django.contrib.auth import get_user_model -u = get_user_model().objects.get(username='{username}') -u.set_password('{password}') -u.save()" -""" - else: - command += """ -./manage.py lms changepassword {username} -""" - - return command.format(opts=opts, username=username, email=email, password=password) - - -def import_demo_course(runner): - runner.run_job_from_template("cms", "hooks", "cms", "importdemocourse") - - -def set_theme(theme_name, domain_name, runner): - command = BASE_OPENEDX_COMMAND - command += """ -echo "Assigning theme {theme_name} to {domain_name}..." -./manage.py lms shell -c " -from django.contrib.sites.models import Site -site, _ = Site.objects.get_or_create(domain='{domain_name}') -if not site.name: - site.name = '{domain_name}' - site.save() -site.themes.all().delete() -site.themes.create(theme_dir_name='{theme_name}')" -""" - command = command.format(theme_name=theme_name, domain_name=domain_name) - runner.run_job("lms", command) diff --git a/tutor/serialize.py b/tutor/serialize.py index 98b7bdb856..7d77b8349a 100644 --- a/tutor/serialize.py +++ b/tutor/serialize.py @@ -1,25 +1,54 @@ +from __future__ import annotations + +import re +import typing as t + import yaml +from _io import TextIOWrapper from yaml.parser import ParserError from yaml.scanner import ScannerError -def load(stream): +def load(stream: t.Union[str, t.IO[str]]) -> t.Any: return yaml.load(stream, Loader=yaml.SafeLoader) -def load_all(stream): +def load_all(stream: str) -> t.Iterator[t.Any]: return yaml.load_all(stream, Loader=yaml.SafeLoader) -def dump(content, fileobj): - yaml.dump(content, stream=fileobj, default_flow_style=False) +def dump_all(documents: t.Sequence[t.Any], fileobj: TextIOWrapper) -> None: + yaml.safe_dump_all( + documents, stream=fileobj, default_flow_style=False, allow_unicode=True + ) + + +def dump(content: t.Any, fileobj: TextIOWrapper) -> None: + yaml.dump(content, 
stream=fileobj, default_flow_style=False, allow_unicode=True)
+
+
+def dumps(content: t.Any) -> str:
+ result = yaml.dump(content, default_flow_style=False, allow_unicode=True)
+ assert isinstance(result, str)
+ return result
+
+
+def str_format(content: t.Any) -> str:
+ """
+ Convert a value to str.
-def dumps(content):
- return yaml.dump(content, stream=None, default_flow_style=False)
+ This is almost like json, but more convenient for printing to the standard output.
+ """
+ if content is True:
+ return "true"
+ if content is False:
+ return "false"
+ if content is None:
+ return "null"
+ return str(content)
-def parse(v):
+def parse(v: t.Union[str, t.IO[str]]) -> t.Any:
"""
Parse a yaml-formatted string.
"""
@@ -28,3 +57,24 @@ def parse(v):
except (ParserError, ScannerError):
pass
return v
+
+
+def parse_key_value(text: str) -> t.Optional[tuple[str, t.Any]]:
+ """
+ Parse <KEY>=<VALUE> command line arguments.
+
+ Return None if text could not be parsed.
+ """
+ match = re.match(r"(?P<key>[a-zA-Z0-9_-]+)=(?P<value>(.|\n|\r)*)", text)
+ if not match:
+ return None
+ key = match.groupdict()["key"]
+ value = match.groupdict()["value"]
+ if not value:
+ # Empty strings are interpreted as null values, which is incorrect.
+ value = "''"
+ elif "\n" not in value and value.startswith("#"):
+ # Single-line string that starts with a pound (#) character.
+ # We need to escape the string, otherwise pound will be interpreted as a comment.
+ value = f'"{value}"'
+ return key, parse(value)
diff --git a/tutor/tasks.py b/tutor/tasks.py
new file mode 100644
index 0000000000..1e1de75994
--- /dev/null
+++ b/tutor/tasks.py
@@ -0,0 +1,42 @@
+from tutor import env
+from tutor.types import Config
+
+
+class BaseTaskRunner:
+ """
+ A task runner is responsible for running bash commands in the right context.
+
+ Commands may be loaded from string or template files. The `run_task` method must be
+ implemented by child classes. 
+ """ + + def __init__(self, root: str, config: Config): + self.root = root + self.config = config + + def run_task_from_template(self, service: str, *path: str) -> None: + command = self.render(*path) + self.run_task(service, command) + + def run_task_from_str(self, service: str, command: str) -> None: + rendered = env.render_str(self.config, command).strip() + self.run_task(service, rendered) + + def render(self, *path: str) -> str: + rendered = env.render_file(self.config, *path).strip() + if isinstance(rendered, bytes): + raise TypeError("Cannot load job from binary file") + return rendered + + def run_task(self, service: str, command: str) -> int: + """ + Given a (potentially large) string command, run it with the + corresponding service. Implementations will differ depending on the + deployment strategy. + """ + raise NotImplementedError + + +class BaseComposeTaskRunner(BaseTaskRunner): + def docker_compose(self, *command: str) -> int: + raise NotImplementedError diff --git a/tutor/templates/android/edx.properties b/tutor/templates/android/edx.properties deleted file mode 100644 index 69139982d3..0000000000 --- a/tutor/templates/android/edx.properties +++ /dev/null @@ -1,3 +0,0 @@ -edx.android { - configFiles = ['tutor.yaml'] -} diff --git a/tutor/templates/android/gradle.properties b/tutor/templates/android/gradle.properties deleted file mode 100644 index 486a13a5b1..0000000000 --- a/tutor/templates/android/gradle.properties +++ /dev/null @@ -1,4 +0,0 @@ -RELEASE_STORE_FILE=/openedx/config/app.keystore -RELEASE_STORE_PASSWORD={{ ANDROID_RELEASE_STORE_PASSWORD }} -RELEASE_KEY_PASSWORD={{ ANDROID_RELEASE_KEY_PASSWORD }} -RELEASE_KEY_ALIAS={{ ANDROID_RELEASE_KEY_ALIAS }} diff --git a/tutor/templates/android/tutor.yaml b/tutor/templates/android/tutor.yaml deleted file mode 100644 index d5ecc8d298..0000000000 --- a/tutor/templates/android/tutor.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# See docs: 
https://openedx.atlassian.net/wiki/spaces/LEARNER/pages/48792067/App+Configuration+Flags -API_HOST_URL: "{{ "https" if ENABLE_HTTPS else "http" }}://{{ LMS_HOST }}" -ENVIRONMENT_DISPLAY_NAME: "tutor" -PLATFORM_NAME: "{{ PLATFORM_NAME }}" -PLATFORM_DESTINATION_NAME: "{{ LMS_HOST }}" -FEEDBACK_EMAIL_ADDRESS: "{{ CONTACT_EMAIL }}" -OAUTH_CLIENT_ID: "android" - -COURSE_VIDEOS_ENABLED: true -CERTIFICATES_ENABLED: true -DISCUSSIONS_ENABLED: true -DISCOVERY: - COURSE: - TYPE: native -DOWNLOAD_TO_SD_CARD_ENABLED: true -NEW_LOGISTRATION_ENABLED: true -USER_PROFILES_ENABLED : true -VIDEO_TRANSCRIPT_ENABLED: true \ No newline at end of file diff --git a/tutor/templates/apps/caddy/Caddyfile b/tutor/templates/apps/caddy/Caddyfile index 270f00fcab..96a6d9891b 100644 --- a/tutor/templates/apps/caddy/Caddyfile +++ b/tutor/templates/apps/caddy/Caddyfile @@ -1,11 +1,85 @@ -{{ LMS_HOST }}{% if not ENABLE_HTTPS %}:80{% endif %} { - reverse_proxy nginx:80 +# Global configuration +{ + + {% if not ENABLE_WEB_PROXY %} + # Enable proxying from all servers by default. Otherwise, X-Forwarded-* headers will + # be overwritten. + # https://caddyserver.com/docs/caddyfile/directives/reverse_proxy#defaults + servers { + trusted_proxies static 0.0.0.0/0 ::/0 + } + {% endif %} + {{ patch("caddyfile-global")|indent(4) }} } -preview.{{ LMS_HOST }}{% if not ENABLE_HTTPS %}:80{% endif %} { - reverse_proxy nginx:80 + +# proxy directive snippet (with logging) to be used as follows: +# +# import proxy "containername:port" +(proxy) { + log { + output stdout + format filter { + wrap json + fields { + common_log delete + request>headers delete + resp_headers delete + tls delete + } + } + } + + # This will compress requests that matches the default criteria set by Caddy. + # see https://caddyserver.com/docs/caddyfile/directives/encode + # for information about the defaults; i.e. how/when this will be applied. 
+ encode gzip + + reverse_proxy {args.0} { + header_up X-Forwarded-Port {{ 443 if ENABLE_HTTPS else 80 }} + } + + {{ patch("caddyfile-proxy")|indent(4) }} } -{{ CMS_HOST }}{% if not ENABLE_HTTPS %}:80{% endif %} { - reverse_proxy nginx:80 + +{{ LMS_HOST }}{$default_site_port}, {{ PREVIEW_LMS_HOST }}{$default_site_port} { + @favicon_matcher { + path_regexp ^/favicon.ico$ + } + rewrite @favicon_matcher /theming/asset/images/favicon.ico + + # Limit profile image upload size + handle_path /api/profile_images/*/*/upload { + request_body { + max_size 1MB + } + } + + import proxy "lms:8000" + + {{ patch("caddyfile-lms")|indent(4) }} + + handle_path /* { + request_body { + max_size 4MB + } + } +} + +{{ CMS_HOST }}{$default_site_port} { + @favicon_matcher { + path_regexp ^/favicon.ico$ + } + rewrite @favicon_matcher /theming/asset/images/favicon.ico + + import proxy "cms:8000" + + {{ patch("caddyfile-cms")|indent(4) }} + + handle_path /* { + request_body { + max_size 250MB + } + } } -{{ patch("caddyfile") }} \ No newline at end of file +{{ patch("caddyfile") }} diff --git a/tutor/templates/apps/nginx/_tutor.conf b/tutor/templates/apps/nginx/_tutor.conf deleted file mode 100644 index 28225901dd..0000000000 --- a/tutor/templates/apps/nginx/_tutor.conf +++ /dev/null @@ -1,10 +0,0 @@ -# Allow long domain names -server_names_hash_bucket_size 128; - -# Set a short ttl for proxies to allow restarts -resolver 127.0.0.11 [::1]:5353 valid=10s; - -# Configure logging to include scheme and server name -log_format tutor '$remote_addr - $remote_user [$time_local] $scheme://$host "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; \ No newline at end of file diff --git a/tutor/templates/apps/nginx/cms.conf b/tutor/templates/apps/nginx/cms.conf deleted file mode 100644 index cbe909a9c7..0000000000 --- a/tutor/templates/apps/nginx/cms.conf +++ /dev/null @@ -1,28 +0,0 @@ -{% if RUN_CMS %} -upstream cms-backend { - server cms:8000 
fail_timeout=0; -} - -server { - listen 80; - server_name {{ CMS_HOST }}; - - access_log /var/log/nginx/access.log tutor; - client_max_body_size 250M; - server_tokens off; - - rewrite ^(.*)/favicon.ico$ /static/images/favicon.ico last; - - location @proxy_to_cms_app { - proxy_redirect off; - proxy_set_header Host $http_host; - proxy_pass http://cms-backend; - } - - location / { - try_files $uri @proxy_to_cms_app; - } - - {{ patch("nginx-cms")|indent(2) }} -} -{% endif %} diff --git a/tutor/templates/apps/nginx/extra.conf b/tutor/templates/apps/nginx/extra.conf deleted file mode 100644 index 73cf41bb0c..0000000000 --- a/tutor/templates/apps/nginx/extra.conf +++ /dev/null @@ -1 +0,0 @@ -{{ patch("nginx-extra") }} diff --git a/tutor/templates/apps/nginx/lms.conf b/tutor/templates/apps/nginx/lms.conf deleted file mode 100644 index 6b0d135f24..0000000000 --- a/tutor/templates/apps/nginx/lms.conf +++ /dev/null @@ -1,44 +0,0 @@ -{% if RUN_LMS %} -upstream lms-backend { - server lms:8000 fail_timeout=0; -} - -server { - listen 80; - server_name {{ LMS_HOST }} preview.{{ LMS_HOST }}; - - access_log /var/log/nginx/access.log tutor; - client_max_body_size 4M; - server_tokens off; - - rewrite ^(.*)/favicon.ico$ /static/images/favicon.ico last; - - location @proxy_to_lms_app { - proxy_redirect off; - proxy_set_header Host $http_host; - proxy_pass http://lms-backend; - } - - location / { - try_files $uri @proxy_to_lms_app; - } - - # /login?next= can be used by 3rd party sites in tags to - # determine whether a user on their site is logged into edX. - # The most common image to use is favicon.ico. 
- location /login { - if ( $arg_next ~* "favicon.ico" ) { - return 403; - } - try_files $uri @proxy_to_lms_app; - } - - # Need a separate location for the image uploads endpoint to limit upload sizes - location ~ ^/api/profile_images/[^/]*/[^/]*/upload$ { - try_files $uri @proxy_to_lms_app; - client_max_body_size 1049576; - } - - {{ patch("nginx-lms")|indent(2) }} -} -{% endif %} diff --git a/tutor/templates/apps/openedx/config/cms.env.json b/tutor/templates/apps/openedx/config/cms.env.json deleted file mode 100644 index 85543eec46..0000000000 --- a/tutor/templates/apps/openedx/config/cms.env.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "SITE_NAME": "{{ CMS_HOST }}", - "BOOK_URL": "", - "LOG_DIR": "/openedx/data/logs", - "LOGGING_ENV": "sandbox", - "OAUTH_OIDC_ISSUER": "{{ JWT_COMMON_ISSUER }}", - "PLATFORM_NAME": "{{ PLATFORM_NAME }}", - "FEATURES": { - {{ patch("common-env-features", separator=",\n", suffix=",")|indent(4) }} - {{ patch("cms-env-features", separator=",\n", suffix=",")|indent(4) }} - "CERTIFICATES_HTML_VIEW": true, - "PREVIEW_LMS_BASE": "preview.{{ LMS_HOST }}", - "ENABLE_COURSEWARE_INDEX": true, - "ENABLE_CSMH_EXTENDED": false, - "ENABLE_LEARNER_RECORDS": false, - "ENABLE_LIBRARY_INDEX": true - }, - "LMS_ROOT_URL": "{{ "https" if ENABLE_HTTPS else "http" }}://{{ LMS_HOST }}", - "CMS_ROOT_URL": "{{ "https" if ENABLE_HTTPS else "http" }}://{{ CMS_HOST }}", - "CMS_BASE": "{{ CMS_HOST }}", - "LMS_BASE": "{{ LMS_HOST }}", - "CONTACT_EMAIL": "{{ CONTACT_EMAIL }}", - "CELERY_BROKER_TRANSPORT": "redis", - "CELERY_BROKER_HOSTNAME": "{{ REDIS_HOST }}:{{ REDIS_PORT }}", - "CELERY_BROKER_USER": "{{ REDIS_USERNAME }}", - "CELERY_BROKER_PASSWORD": "{{ REDIS_PASSWORD }}", - "ALTERNATE_WORKER_QUEUES": "lms", - "ENABLE_COMPREHENSIVE_THEMING": true, - "COMPREHENSIVE_THEME_DIRS": ["/openedx/themes"], - "STATIC_ROOT_BASE": "/openedx/staticfiles", - "ELASTIC_SEARCH_CONFIG": [{ - {% if ELASTICSEARCH_SCHEME == "https" %}"use_ssl": true,{% endif %} - "host": "{{ 
ELASTICSEARCH_HOST }}", - "port": {{ ELASTICSEARCH_PORT }} - }], - "EMAIL_BACKEND": "django.core.mail.backends.smtp.EmailBackend", - "EMAIL_HOST": "{{ SMTP_HOST }}", - "EMAIL_PORT": {{ SMTP_PORT }}, - "EMAIL_USE_TLS": {{ "true" if SMTP_USE_TLS else "false" }}, - "HTTPS": "{{ "on" if ENABLE_HTTPS else "off" }}", - "LANGUAGE_CODE": "{{ LANGUAGE_CODE }}", - "SESSION_COOKIE_DOMAIN": ".{{ LMS_HOST|common_domain(CMS_HOST) }}", - {{ patch("cms-env", separator=",\n", suffix=",")|indent(2) }} - "CACHES": { - "default": { - "KEY_PREFIX": "default", - "VERSION": "1", - "BACKEND": "django_redis.cache.RedisCache", - "LOCATION": "redis://{% if REDIS_USERNAME and REDIS_PASSWORD %}{{ REDIS_USERNAME }}:{{ REDIS_PASSWORD }}{% endif %}@{{ REDIS_HOST }}:{{ REDIS_PORT }}/1" - }, - "general": { - "KEY_PREFIX": "general", - "BACKEND": "django_redis.cache.RedisCache", - "LOCATION": "redis://{% if REDIS_USERNAME and REDIS_PASSWORD %}{{ REDIS_USERNAME }}:{{ REDIS_PASSWORD }}{% endif %}@{{ REDIS_HOST }}:{{ REDIS_PORT }}/1" - }, - "mongo_metadata_inheritance": { - "KEY_PREFIX": "mongo_metadata_inheritance", - "TIMEOUT": 300, - "BACKEND": "django_redis.cache.RedisCache", - "LOCATION": "redis://{% if REDIS_USERNAME and REDIS_PASSWORD %}{{ REDIS_USERNAME }}:{{ REDIS_PASSWORD }}{% endif %}@{{ REDIS_HOST }}:{{ REDIS_PORT }}/1" - }, - "staticfiles": { - "KEY_PREFIX": "staticfiles_cms", - "BACKEND": "django.core.cache.backends.locmem.LocMemCache", - "LOCATION": "staticfiles_cms" - }, - "configuration": { - "KEY_PREFIX": "configuration", - "BACKEND": "django_redis.cache.RedisCache", - "LOCATION": "redis://{% if REDIS_USERNAME and REDIS_PASSWORD %}{{ REDIS_USERNAME }}:{{ REDIS_PASSWORD }}{% endif %}@{{ REDIS_HOST }}:{{ REDIS_PORT }}/1" - }, - "celery": { - "KEY_PREFIX": "celery", - "TIMEOUT": "7200", - "BACKEND": "django_redis.cache.RedisCache", - "LOCATION": "redis://{% if REDIS_USERNAME and REDIS_PASSWORD %}{{ REDIS_USERNAME }}:{{ REDIS_PASSWORD }}{% endif %}@{{ REDIS_HOST }}:{{ REDIS_PORT }}/1" - 
}, - "course_structure_cache": { - "KEY_PREFIX": "course_structure", - "TIMEOUT": "7200", - "BACKEND": "django_redis.cache.RedisCache", - "LOCATION": "redis://{% if REDIS_USERNAME and REDIS_PASSWORD %}{{ REDIS_USERNAME }}:{{ REDIS_PASSWORD }}{% endif %}@{{ REDIS_HOST }}:{{ REDIS_PORT }}/1" - } - }, -{% include "apps/openedx/config/partials/auth.json" %} -} diff --git a/tutor/templates/apps/openedx/config/cms.env.yml b/tutor/templates/apps/openedx/config/cms.env.yml new file mode 100644 index 0000000000..53e99b0c2d --- /dev/null +++ b/tutor/templates/apps/openedx/config/cms.env.yml @@ -0,0 +1,38 @@ +SITE_NAME: "{{ CMS_HOST }}" +BOOK_URL: "" +LOG_DIR: "/openedx/data/logs" +LOGGING_ENV: "sandbox" +OAUTH_OIDC_ISSUER: "{{ JWT_COMMON_ISSUER }}" +PLATFORM_NAME: "{{ PLATFORM_NAME }}" +FEATURES: + {{ patch("common-env-features")|indent(2) }} + {{ patch("cms-env-features")|indent(2) }} + CERTIFICATES_HTML_VIEW: true + PREVIEW_LMS_BASE: "{{ PREVIEW_LMS_HOST }}" + ENABLE_COURSEWARE_INDEX: true + ENABLE_CSMH_EXTENDED: false + ENABLE_LEARNER_RECORDS: false + ENABLE_LIBRARY_INDEX: true + MILESTONES_APP: true + ENABLE_PREREQUISITE_COURSES: true +LMS_ROOT_URL: "{{ "https" if ENABLE_HTTPS else "http" }}://{{ LMS_HOST }}" +CMS_ROOT_URL: "{{ "https" if ENABLE_HTTPS else "http" }}://{{ CMS_HOST }}" +CMS_BASE: "{{ CMS_HOST }}" +LMS_BASE: "{{ LMS_HOST }}" +CONTACT_EMAIL: "{{ CONTACT_EMAIL }}" +CELERY_BROKER_TRANSPORT: "redis" +CELERY_BROKER_HOSTNAME: "{{ REDIS_HOST }}:{{ REDIS_PORT }}" +CELERY_BROKER_VHOST: "{{ OPENEDX_CELERY_REDIS_DB }}" +CELERY_BROKER_USER: "{{ REDIS_USERNAME }}" +CELERY_BROKER_PASSWORD: "{{ REDIS_PASSWORD }}" +ALTERNATE_WORKER_QUEUES: "lms" +ENABLE_COMPREHENSIVE_THEMING: true +EMAIL_BACKEND: "django.core.mail.backends.smtp.EmailBackend" +EMAIL_HOST: "{{ SMTP_HOST }}" +EMAIL_PORT: {{ SMTP_PORT }} +EMAIL_USE_TLS: {{ "true" if SMTP_USE_TLS else "false" }} +HTTPS: "{{ "on" if ENABLE_HTTPS else "off" }}" +LANGUAGE_CODE: "{{ LANGUAGE_CODE }}" +SESSION_COOKIE_DOMAIN: "{{ 
CMS_HOST }}" +{{ patch("cms-env") }} +{% include "apps/openedx/config/partials/auth.yml" %} diff --git a/tutor/templates/apps/openedx/config/lms.env.json b/tutor/templates/apps/openedx/config/lms.env.json deleted file mode 100644 index 0d3707b486..0000000000 --- a/tutor/templates/apps/openedx/config/lms.env.json +++ /dev/null @@ -1,100 +0,0 @@ -{ - "SITE_NAME": "{{ LMS_HOST }}", - "BOOK_URL": "", - "LOG_DIR": "/openedx/data/logs", - "LOGGING_ENV": "sandbox", - "OAUTH_OIDC_ISSUER": "{{ JWT_COMMON_ISSUER }}", - "PLATFORM_NAME": "{{ PLATFORM_NAME }}", - "FEATURES": { - {{ patch("common-env-features", separator=",\n", suffix=",")|indent(4) }} - {{ patch("lms-env-features", separator=",\n", suffix=",")|indent(4) }} - "CERTIFICATES_HTML_VIEW": true, - "PREVIEW_LMS_BASE": "preview.{{ LMS_HOST }}", - "ENABLE_CORS_HEADERS": true, - "ENABLE_COURSE_DISCOVERY": true, - "ENABLE_COURSEWARE_SEARCH": true, - "ENABLE_CSMH_EXTENDED": false, - "ENABLE_DASHBOARD_SEARCH": true, - "ENABLE_COMBINED_LOGIN_REGISTRATION": true, - "ENABLE_GRADE_DOWNLOADS": true, - "ENABLE_LEARNER_RECORDS": false, - "ENABLE_MOBILE_REST_API": true, - "ENABLE_OAUTH2_PROVIDER": true, - "ENABLE_THIRD_PARTY_AUTH": true - }, - "LMS_ROOT_URL": "{{ "https" if ENABLE_HTTPS else "http" }}://{{ LMS_HOST }}", - "CMS_ROOT_URL": "{{ "https" if ENABLE_HTTPS else "http" }}://{{ CMS_HOST }}", - "CMS_BASE": "{{ CMS_HOST }}", - "LMS_BASE": "{{ LMS_HOST }}", - "CONTACT_EMAIL": "{{ CONTACT_EMAIL }}", - "CELERY_BROKER_TRANSPORT": "redis", - "CELERY_BROKER_HOSTNAME": "{{ REDIS_HOST }}:{{ REDIS_PORT }}", - "CELERY_BROKER_USER": "{{ REDIS_USERNAME }}", - "CELERY_BROKER_PASSWORD": "{{ REDIS_PASSWORD }}", - "ALTERNATE_WORKER_QUEUES": "cms", - "COMMENTS_SERVICE_URL": "http://{{ FORUM_HOST }}:4567", - "COMMENTS_SERVICE_KEY": "forumapikey", - "ENABLE_COMPREHENSIVE_THEMING": true, - "COMPREHENSIVE_THEME_DIRS": ["/openedx/themes"], - "STATIC_ROOT_BASE": "/openedx/staticfiles", - "ELASTIC_SEARCH_CONFIG": [{ - {% if ELASTICSEARCH_SCHEME == 
"https" %}"use_ssl": true,{% endif %} - "host": "{{ ELASTICSEARCH_HOST }}", - "port": {{ ELASTICSEARCH_PORT }} - }], - "EMAIL_BACKEND": "django.core.mail.backends.smtp.EmailBackend", - "EMAIL_HOST": "{{ SMTP_HOST }}", - "EMAIL_PORT": {{ SMTP_PORT }}, - "EMAIL_USE_TLS": {{ "true" if SMTP_USE_TLS else "false" }}, - "HTTPS": "{{ "on" if ENABLE_HTTPS else "off" }}", - "LANGUAGE_CODE": "{{ LANGUAGE_CODE }}", - "SESSION_COOKIE_DOMAIN": ".{{ LMS_HOST|common_domain(CMS_HOST) }}", - {{ patch("lms-env", separator=",\n", suffix=",")|indent(2) }} - "CACHES": { - "default": { - "KEY_PREFIX": "default", - "VERSION": "1", - "BACKEND": "django_redis.cache.RedisCache", - "LOCATION": "redis://{% if REDIS_USERNAME and REDIS_PASSWORD %}{{ REDIS_USERNAME }}:{{ REDIS_PASSWORD }}{% endif %}@{{ REDIS_HOST }}:{{ REDIS_PORT }}/1" - }, - "general": { - "KEY_PREFIX": "general", - "BACKEND": "django_redis.cache.RedisCache", - "LOCATION": "redis://{% if REDIS_USERNAME and REDIS_PASSWORD %}{{ REDIS_USERNAME }}:{{ REDIS_PASSWORD }}{% endif %}@{{ REDIS_HOST }}:{{ REDIS_PORT }}/1" - }, - "mongo_metadata_inheritance": { - "KEY_PREFIX": "mongo_metadata_inheritance", - "TIMEOUT": 300, - "BACKEND": "django_redis.cache.RedisCache", - "LOCATION": "redis://{% if REDIS_USERNAME and REDIS_PASSWORD %}{{ REDIS_USERNAME }}:{{ REDIS_PASSWORD }}{% endif %}@{{ REDIS_HOST }}:{{ REDIS_PORT }}/1" - }, - "staticfiles": { - "KEY_PREFIX": "staticfiles_lms", - "BACKEND": "django.core.cache.backends.locmem.LocMemCache", - "LOCATION": "staticfiles_lms" - }, - "configuration": { - "KEY_PREFIX": "configuration", - "BACKEND": "django_redis.cache.RedisCache", - "LOCATION": "redis://{% if REDIS_USERNAME and REDIS_PASSWORD %}{{ REDIS_USERNAME }}:{{ REDIS_PASSWORD }}{% endif %}@{{ REDIS_HOST }}:{{ REDIS_PORT }}/1" - }, - "celery": { - "KEY_PREFIX": "celery", - "TIMEOUT": "7200", - "BACKEND": "django_redis.cache.RedisCache", - "LOCATION": "redis://{% if REDIS_USERNAME and REDIS_PASSWORD %}{{ REDIS_USERNAME }}:{{ REDIS_PASSWORD 
}}{% endif %}@{{ REDIS_HOST }}:{{ REDIS_PORT }}/1" - }, - "course_structure_cache": { - "KEY_PREFIX": "course_structure", - "TIMEOUT": "7200", - "BACKEND": "django_redis.cache.RedisCache", - "LOCATION": "redis://{% if REDIS_USERNAME and REDIS_PASSWORD %}{{ REDIS_USERNAME }}:{{ REDIS_PASSWORD }}{% endif %}@{{ REDIS_HOST }}:{{ REDIS_PORT }}/1" - }, - "ora2-storage": { - "KEY_PREFIX": "ora2-storage", - "BACKEND": "django_redis.cache.RedisCache", - "LOCATION": "redis://{% if REDIS_USERNAME and REDIS_PASSWORD %}{{ REDIS_USERNAME }}:{{ REDIS_PASSWORD }}{% endif %}@{{ REDIS_HOST }}:{{ REDIS_PORT }}/1" - } - }, -{% include "apps/openedx/config/partials/auth.json" %} -} diff --git a/tutor/templates/apps/openedx/config/lms.env.yml b/tutor/templates/apps/openedx/config/lms.env.yml new file mode 100644 index 0000000000..565820c81b --- /dev/null +++ b/tutor/templates/apps/openedx/config/lms.env.yml @@ -0,0 +1,45 @@ +SITE_NAME: "{{ LMS_HOST }}" +BOOK_URL: "" +LOG_DIR: "/openedx/data/logs" +LOGGING_ENV: "sandbox" +OAUTH_OIDC_ISSUER: "{{ JWT_COMMON_ISSUER }}" +PLATFORM_NAME: "{{ PLATFORM_NAME }}" +FEATURES: + {{ patch("common-env-features")|indent(2) }} + {{ patch("lms-env-features")|indent(2) }} + CERTIFICATES_HTML_VIEW: true + PREVIEW_LMS_BASE: "{{ PREVIEW_LMS_HOST }}" + ENABLE_COURSE_DISCOVERY: true + ENABLE_COURSEWARE_SEARCH: true + ENABLE_CSMH_EXTENDED: false + ENABLE_DASHBOARD_SEARCH: true + ENABLE_COMBINED_LOGIN_REGISTRATION: true + ENABLE_GRADE_DOWNLOADS: true + ENABLE_LEARNER_RECORDS: false + ENABLE_MOBILE_REST_API: true + ENABLE_OAUTH2_PROVIDER: true + ENABLE_PREREQUISITE_COURSES: true + ENABLE_THIRD_PARTY_AUTH: true + MILESTONES_APP: true +LMS_ROOT_URL: "{{ "https" if ENABLE_HTTPS else "http" }}://{{ LMS_HOST }}" +CMS_ROOT_URL: "{{ "https" if ENABLE_HTTPS else "http" }}://{{ CMS_HOST }}" +CMS_BASE: "{{ CMS_HOST }}" +LMS_BASE: "{{ LMS_HOST }}" +CONTACT_EMAIL: "{{ CONTACT_EMAIL }}" +CELERY_BROKER_TRANSPORT: "redis" +CELERY_BROKER_HOSTNAME: "{{ REDIS_HOST }}:{{ REDIS_PORT 
}}" +CELERY_BROKER_VHOST: "{{ OPENEDX_CELERY_REDIS_DB }}" +CELERY_BROKER_USER: "{{ REDIS_USERNAME }}" +CELERY_BROKER_PASSWORD: "{{ REDIS_PASSWORD }}" +ALTERNATE_WORKER_QUEUES: "cms" +ENABLE_COMPREHENSIVE_THEMING: true +EMAIL_BACKEND: "django.core.mail.backends.smtp.EmailBackend" +EMAIL_HOST: "{{ SMTP_HOST }}" +EMAIL_PORT: {{ SMTP_PORT }} +EMAIL_USE_TLS: {{ "true" if SMTP_USE_TLS else "false" }} +ACE_ROUTING_KEY: "edx.lms.core.default" +HTTPS: "{{ "on" if ENABLE_HTTPS else "off" }}" +LANGUAGE_CODE: "{{ LANGUAGE_CODE }}" +SESSION_COOKIE_DOMAIN: "{{ LMS_HOST }}" +{{ patch("lms-env") }} +{% include "apps/openedx/config/partials/auth.yml" %} diff --git a/tutor/templates/apps/openedx/config/partials/auth.json b/tutor/templates/apps/openedx/config/partials/auth.json deleted file mode 100644 index 66e9422b60..0000000000 --- a/tutor/templates/apps/openedx/config/partials/auth.json +++ /dev/null @@ -1,26 +0,0 @@ - "SECRET_KEY": "{{ OPENEDX_SECRET_KEY }}", - "AWS_ACCESS_KEY_ID": "{{ OPENEDX_AWS_ACCESS_KEY }}", - "AWS_SECRET_ACCESS_KEY": "{{ OPENEDX_AWS_SECRET_ACCESS_KEY }}", - "CONTENTSTORE": null, - "DOC_STORE_CONFIG": null, - {{ patch("openedx-auth", separator=",\n", suffix=",")|indent(2) }} - "XQUEUE_INTERFACE": { - "django_auth": null, - "url": null - }, - "DATABASES": { - "default": { - "ENGINE": "django.db.backends.mysql", - "HOST": "{{ MYSQL_HOST }}", - "PORT": {{ MYSQL_PORT }}, - "NAME": "{{ OPENEDX_MYSQL_DATABASE }}", - "USER": "{{ OPENEDX_MYSQL_USERNAME }}", - "PASSWORD": "{{ OPENEDX_MYSQL_PASSWORD }}", - "ATOMIC_REQUESTS": true, - "OPTIONS": { - "init_command": "SET sql_mode='STRICT_TRANS_TABLES'" - } - } - }, - "EMAIL_HOST_USER": "{{ SMTP_USERNAME }}", - "EMAIL_HOST_PASSWORD": "{{ SMTP_PASSWORD }}" \ No newline at end of file diff --git a/tutor/templates/apps/openedx/config/partials/auth.yml b/tutor/templates/apps/openedx/config/partials/auth.yml new file mode 100644 index 0000000000..56f2a8b94c --- /dev/null +++ 
b/tutor/templates/apps/openedx/config/partials/auth.yml @@ -0,0 +1,24 @@ +SECRET_KEY: "{{ OPENEDX_SECRET_KEY }}" +AWS_ACCESS_KEY_ID: "{{ OPENEDX_AWS_ACCESS_KEY }}" +AWS_SECRET_ACCESS_KEY: "{{ OPENEDX_AWS_SECRET_ACCESS_KEY }}" +DOC_STORE_CONFIG: null +{{ patch("openedx-auth") }} +XQUEUE_INTERFACE: + django_auth: null + url: null +DATABASES: + default: + ENGINE: "django.db.backends.mysql" + HOST: "{{ MYSQL_HOST }}" + PORT: {{ MYSQL_PORT }} + NAME: "{{ OPENEDX_MYSQL_DATABASE }}" + USER: "{{ OPENEDX_MYSQL_USERNAME }}" + PASSWORD: "{{ OPENEDX_MYSQL_PASSWORD }}" + ATOMIC_REQUESTS: true + OPTIONS: + init_command: "SET sql_mode='STRICT_TRANS_TABLES'" + {%- if RUN_MYSQL %} + charset: "utf8mb4" + {%- endif %} +EMAIL_HOST_USER: "{{ SMTP_USERNAME }}" +EMAIL_HOST_PASSWORD: "{{ SMTP_PASSWORD }}" diff --git a/tutor/templates/apps/openedx/settings/cms/__init__.py b/tutor/templates/apps/openedx/settings/cms/__init__.py index f0c04d958b..e69de29bb2 100644 --- a/tutor/templates/apps/openedx/settings/cms/__init__.py +++ b/tutor/templates/apps/openedx/settings/cms/__init__.py @@ -1 +0,0 @@ -{% include "apps/openedx/settings/partials/pre_common_all.py" %} diff --git a/tutor/templates/apps/openedx/settings/cms/development.py b/tutor/templates/apps/openedx/settings/cms/development.py index 349edca142..af7af18e12 100644 --- a/tutor/templates/apps/openedx/settings/cms/development.py +++ b/tutor/templates/apps/openedx/settings/cms/development.py @@ -4,7 +4,15 @@ LMS_BASE = "{{ LMS_HOST }}:8000" LMS_ROOT_URL = "http://" + LMS_BASE -FEATURES["PREVIEW_LMS_BASE"] = "preview." 
+ LMS_BASE + +CMS_BASE = "{{ CMS_HOST }}:8001" +CMS_ROOT_URL = "http://" + CMS_BASE + +# Authentication +SOCIAL_AUTH_EDX_OAUTH2_KEY = "{{ CMS_OAUTH2_KEY_SSO_DEV }}" +SOCIAL_AUTH_EDX_OAUTH2_PUBLIC_URL_ROOT = LMS_ROOT_URL + +FEATURES["PREVIEW_LMS_BASE"] = "{{ PREVIEW_LMS_HOST }}:8000" {% include "apps/openedx/settings/partials/common_cms.py" %} diff --git a/tutor/templates/apps/openedx/settings/cms/production.py b/tutor/templates/apps/openedx/settings/cms/production.py index 6b04a882cc..03cae79b09 100644 --- a/tutor/templates/apps/openedx/settings/cms/production.py +++ b/tutor/templates/apps/openedx/settings/cms/production.py @@ -8,5 +8,10 @@ ENV_TOKENS.get("CMS_BASE"), "cms", ] +CORS_ORIGIN_WHITELIST.append("{% if ENABLE_HTTPS %}https{% else %}http{% endif %}://{{ CMS_HOST }}") + +# Authentication +SOCIAL_AUTH_EDX_OAUTH2_KEY = "{{ CMS_OAUTH2_KEY_SSO }}" +SOCIAL_AUTH_EDX_OAUTH2_PUBLIC_URL_ROOT = "{% if ENABLE_HTTPS %}https{% else %}http{% endif %}://{{ LMS_HOST }}" {{ patch("openedx-cms-production-settings") }} diff --git a/tutor/templates/apps/openedx/settings/cms/test.py b/tutor/templates/apps/openedx/settings/cms/test.py new file mode 100644 index 0000000000..14a3e3db47 --- /dev/null +++ b/tutor/templates/apps/openedx/settings/cms/test.py @@ -0,0 +1,3 @@ +from cms.envs.test import * + +{% include "apps/openedx/settings/partials/common_test.py" %} diff --git a/tutor/templates/apps/openedx/settings/lms/__init__.py b/tutor/templates/apps/openedx/settings/lms/__init__.py index f0c04d958b..e69de29bb2 100644 --- a/tutor/templates/apps/openedx/settings/lms/__init__.py +++ b/tutor/templates/apps/openedx/settings/lms/__init__.py @@ -1 +0,0 @@ -{% include "apps/openedx/settings/partials/pre_common_all.py" %} diff --git a/tutor/templates/apps/openedx/settings/lms/development.py b/tutor/templates/apps/openedx/settings/lms/development.py index 09c92a493f..ed0c277366 100644 --- a/tutor/templates/apps/openedx/settings/lms/development.py +++ 
b/tutor/templates/apps/openedx/settings/lms/development.py @@ -7,8 +7,6 @@ # Setup correct webpack configuration file for development WEBPACK_CONFIG_PATH = "webpack.dev.config.js" -SESSION_COOKIE_DOMAIN = ".{{ LMS_HOST|common_domain(CMS_HOST) }}" - LMS_BASE = "{{ LMS_HOST}}:8000" LMS_ROOT_URL = "http://{}".format(LMS_BASE) LMS_INTERNAL_ROOT_URL = LMS_ROOT_URL @@ -17,8 +15,20 @@ CMS_ROOT_URL = "http://{}".format(CMS_BASE) LOGIN_REDIRECT_WHITELIST.append(CMS_BASE) -FEATURES['ENABLE_COURSEWARE_MICROFRONTEND'] = False -COMMENTS_SERVICE_URL = "http://{{ FORUM_HOST }}:4567" +# Session cookie +SESSION_COOKIE_DOMAIN = "{{ LMS_HOST }}" +SESSION_COOKIE_SECURE = False +CSRF_COOKIE_SECURE = False +SESSION_COOKIE_SAMESITE = "Lax" + +# CMS authentication +IDA_LOGOUT_URI_LIST.append("http://{{ CMS_HOST }}:8001/logout/") + +FEATURES["ENABLE_COURSEWARE_MICROFRONTEND"] = False + +# Disable enterprise integration +FEATURES["ENABLE_ENTERPRISE_INTEGRATION"] = False +SYSTEM_WIDE_ROLE_CLASSES.remove("enterprise.SystemWideEnterpriseUserRoleAssignment") LOGGING["loggers"]["oauth2_provider"] = { "handlers": ["console"], diff --git a/tutor/templates/apps/openedx/settings/lms/production.py b/tutor/templates/apps/openedx/settings/lms/production.py index fad463ce1a..6c2793b3bc 100644 --- a/tutor/templates/apps/openedx/settings/lms/production.py +++ b/tutor/templates/apps/openedx/settings/lms/production.py @@ -9,20 +9,24 @@ FEATURES["PREVIEW_LMS_BASE"], "lms", ] +CORS_ORIGIN_WHITELIST.append("{% if ENABLE_HTTPS %}https{% else %}http{% endif %}://{{ LMS_HOST }}") {% if ENABLE_HTTPS %} # Properly set the "secure" attribute on session/csrf cookies. This is required in # Chrome to support samesite=none cookies. 
SESSION_COOKIE_SECURE = True CSRF_COOKIE_SECURE = True -DCS_SESSION_COOKIE_SAMESITE = "None" +SESSION_COOKIE_SAMESITE = "None" {% else %} # When we cannot provide secure session/csrf cookies, we must disable samesite=none SESSION_COOKIE_SECURE = False CSRF_COOKIE_SECURE = False -DCS_SESSION_COOKIE_SAMESITE = "Lax" +SESSION_COOKIE_SAMESITE = "Lax" {% endif %} +# CMS authentication +IDA_LOGOUT_URI_LIST.append("{% if ENABLE_HTTPS %}https{% else %}http{% endif %}://{{ CMS_HOST }}/logout/") + # Required to display all courses on start page SEARCH_SKIP_ENROLLMENT_START_DATE_FILTERING = True diff --git a/tutor/templates/apps/openedx/settings/lms/test.py b/tutor/templates/apps/openedx/settings/lms/test.py new file mode 100644 index 0000000000..a5fc351859 --- /dev/null +++ b/tutor/templates/apps/openedx/settings/lms/test.py @@ -0,0 +1,3 @@ +from lms.envs.test import * + +{% include "apps/openedx/settings/partials/common_test.py" %} diff --git a/tutor/templates/apps/openedx/settings/partials/common_all.py b/tutor/templates/apps/openedx/settings/partials/common_all.py index b46de5355a..c317f4c06f 100644 --- a/tutor/templates/apps/openedx/settings/partials/common_all.py +++ b/tutor/templates/apps/openedx/settings/partials/common_all.py @@ -6,16 +6,17 @@ # Mongodb connection parameters: simply modify `mongodb_parameters` to affect all connections to MongoDb. 
mongodb_parameters = { + "db": "{{ MONGODB_DATABASE }}", "host": "{{ MONGODB_HOST }}", "port": {{ MONGODB_PORT }}, - {% if MONGODB_USERNAME and MONGODB_PASSWORD %} - "user": "{{ MONGODB_USERNAME }}", - "password": "{{ MONGODB_PASSWORD }}", - {% else %} - "user": None, - "password": None, - {% endif %} - "db": "{{ MONGODB_DATABASE }}", + "user": {% if MONGODB_USERNAME %}"{{ MONGODB_USERNAME }}"{% else %}None{% endif %}, + "password": {% if MONGODB_PASSWORD %}"{{ MONGODB_PASSWORD }}"{% else %}None{% endif %}, + # Connection/Authentication + "connect": False, + "ssl": {{ MONGODB_USE_SSL }}, + "authsource": "{{ MONGODB_AUTH_SOURCE }}", + "replicaSet": {% if MONGODB_REPLICA_SET %}"{{ MONGODB_REPLICA_SET }}"{% else %}None{% endif %}, + {% if MONGODB_AUTH_MECHANISM %}"authMechanism": "{{ MONGODB_AUTH_MECHANISM }}",{% endif %} } DOC_STORE_CONFIG = mongodb_parameters CONTENTSTORE = { @@ -25,13 +26,70 @@ } # Load module store settings from config files update_module_store_settings(MODULESTORE, doc_store_settings=DOC_STORE_CONFIG) -DATA_DIR = "/openedx/data/" +DATA_DIR = "/openedx/data/modulestore" + for store in MODULESTORE["default"]["OPTIONS"]["stores"]: store["OPTIONS"]["fs_root"] = DATA_DIR # Behave like memcache when it comes to connection errors DJANGO_REDIS_IGNORE_EXCEPTIONS = True +# Elasticsearch connection parameters +ELASTIC_SEARCH_CONFIG = [{ + {% if ELASTICSEARCH_SCHEME == "https" %}"use_ssl": True,{% endif %} + "host": "{{ ELASTICSEARCH_HOST }}", + "port": {{ ELASTICSEARCH_PORT }}, +}] + +# Common cache config +CACHES = { + "default": { + "KEY_PREFIX": "default", + "VERSION": "1", + "BACKEND": "django_redis.cache.RedisCache", + "LOCATION": "redis://{% if REDIS_USERNAME and REDIS_PASSWORD %}{{ REDIS_USERNAME }}:{{ REDIS_PASSWORD }}{% endif %}@{{ REDIS_HOST }}:{{ REDIS_PORT }}/{{ OPENEDX_CACHE_REDIS_DB }}", + }, + "general": { + "KEY_PREFIX": "general", + "BACKEND": "django_redis.cache.RedisCache", + "LOCATION": "redis://{% if REDIS_USERNAME and REDIS_PASSWORD 
%}{{ REDIS_USERNAME }}:{{ REDIS_PASSWORD }}{% endif %}@{{ REDIS_HOST }}:{{ REDIS_PORT }}/{{ OPENEDX_CACHE_REDIS_DB }}", + }, + "mongo_metadata_inheritance": { + "KEY_PREFIX": "mongo_metadata_inheritance", + "TIMEOUT": 300, + "BACKEND": "django_redis.cache.RedisCache", + "LOCATION": "redis://{% if REDIS_USERNAME and REDIS_PASSWORD %}{{ REDIS_USERNAME }}:{{ REDIS_PASSWORD }}{% endif %}@{{ REDIS_HOST }}:{{ REDIS_PORT }}/{{ OPENEDX_CACHE_REDIS_DB }}", + }, + "configuration": { + "KEY_PREFIX": "configuration", + "BACKEND": "django_redis.cache.RedisCache", + "LOCATION": "redis://{% if REDIS_USERNAME and REDIS_PASSWORD %}{{ REDIS_USERNAME }}:{{ REDIS_PASSWORD }}{% endif %}@{{ REDIS_HOST }}:{{ REDIS_PORT }}/{{ OPENEDX_CACHE_REDIS_DB }}", + }, + "celery": { + "KEY_PREFIX": "celery", + "TIMEOUT": 7200, + "BACKEND": "django_redis.cache.RedisCache", + "LOCATION": "redis://{% if REDIS_USERNAME and REDIS_PASSWORD %}{{ REDIS_USERNAME }}:{{ REDIS_PASSWORD }}{% endif %}@{{ REDIS_HOST }}:{{ REDIS_PORT }}/{{ OPENEDX_CACHE_REDIS_DB }}", + }, + "course_structure_cache": { + "KEY_PREFIX": "course_structure", + "TIMEOUT": 604800, # 1 week + "BACKEND": "django_redis.cache.RedisCache", + "LOCATION": "redis://{% if REDIS_USERNAME and REDIS_PASSWORD %}{{ REDIS_USERNAME }}:{{ REDIS_PASSWORD }}{% endif %}@{{ REDIS_HOST }}:{{ REDIS_PORT }}/{{ OPENEDX_CACHE_REDIS_DB }}", + }, + "ora2-storage": { + "KEY_PREFIX": "ora2-storage", + "BACKEND": "django_redis.cache.RedisCache", + "LOCATION": "redis://{% if REDIS_USERNAME and REDIS_PASSWORD %}{{ REDIS_USERNAME }}:{{ REDIS_PASSWORD }}{% endif %}@{{ REDIS_HOST }}:{{ REDIS_PORT }}/{{ OPENEDX_CACHE_REDIS_DB }}", + } +} + +# The default Django contrib site is the one associated to the LMS domain name. 1 is +# usually "example.com", so it's the next available integer. 
+SITE_ID = 2 + +# Contact addresses +CONTACT_MAILING_ADDRESS = "{{ PLATFORM_NAME }} - {% if ENABLE_HTTPS %}https{% else %}http{% endif %}://{{ LMS_HOST }}" DEFAULT_FROM_EMAIL = ENV_TOKENS.get("DEFAULT_FROM_EMAIL", ENV_TOKENS["CONTACT_EMAIL"]) DEFAULT_FEEDBACK_EMAIL = ENV_TOKENS.get("DEFAULT_FEEDBACK_EMAIL", ENV_TOKENS["CONTACT_EMAIL"]) SERVER_EMAIL = ENV_TOKENS.get("SERVER_EMAIL", ENV_TOKENS["CONTACT_EMAIL"]) @@ -54,9 +112,6 @@ # Set uploaded media file path MEDIA_ROOT = "/openedx/media/" -# Add your MFE and third-party app domains here -CORS_ORIGIN_WHITELIST = [] - # Video settings VIDEO_IMAGE_SETTINGS["STORAGE_KWARGS"]["location"] = MEDIA_ROOT VIDEO_TRANSCRIPTS_SETTINGS["STORAGE_KWARGS"]["location"] = MEDIA_ROOT @@ -69,8 +124,10 @@ }, } +# ORA2 ORA2_FILEUPLOAD_BACKEND = "filesystem" ORA2_FILEUPLOAD_ROOT = "/openedx/data/ora2" +FILE_UPLOAD_STORAGE_BUCKET_NAME = "openedxuploads" ORA2_FILEUPLOAD_CACHE_NAME = "ora2-storage" # Change syslog-based loggers which don't work inside docker containers @@ -87,6 +144,31 @@ } LOGGING["loggers"]["tracking"]["handlers"] = ["console", "local", "tracking"] +# Silence some loggers (note: we must attempt to get rid of these when upgrading from one release to the next) +LOGGING["loggers"]["blockstore.apps.bundles.storage"] = {"handlers": ["console"], "level": "WARNING"} + +# These warnings are visible in simple commands and init tasks +import warnings + +try: + from django.utils.deprecation import RemovedInDjango50Warning, RemovedInDjango51Warning + warnings.filterwarnings("ignore", category=RemovedInDjango50Warning) + warnings.filterwarnings("ignore", category=RemovedInDjango51Warning) +except ImportError: + # REMOVE-AFTER-V18: + # In Quince, edx-platform uses Django 5. But on master, edx-platform still uses Django 3. + # So, Tutor v17 needs to silence these warnings, whereas Tutor v17-nightly fails to import them. + # Once edx-platform master is upgraded to Django 5, the try-except wrapper can be removed. 
+ pass + +warnings.filterwarnings("ignore", category=DeprecationWarning, module="wiki.plugins.links.wiki_plugin") +warnings.filterwarnings("ignore", category=DeprecationWarning, module="boto.plugin") +warnings.filterwarnings("ignore", category=DeprecationWarning, module="botocore.vendored.requests.packages.urllib3._collections") +warnings.filterwarnings("ignore", category=DeprecationWarning, module="pkg_resources") +warnings.filterwarnings("ignore", category=DeprecationWarning, module="fs") +warnings.filterwarnings("ignore", category=DeprecationWarning, module="fs.opener") +SILENCED_SYSTEM_CHECKS = ["2_0.W001", "fields.W903"] + # Email EMAIL_USE_SSL = {{ SMTP_USE_SSL }} # Forward all emails from edX's Automated Communication Engine (ACE) to django. @@ -95,13 +177,13 @@ ACE_CHANNEL_TRANSACTIONAL_EMAIL = "django_email" EMAIL_FILE_PATH = "/tmp/openedx/emails" -LOCALE_PATHS.append("/openedx/locale/contrib/locale") -LOCALE_PATHS.append("/openedx/locale/user/locale") +# Language/locales +LANGUAGE_COOKIE_NAME = "openedx-language-preference" # Allow the platform to include itself in an iframe X_FRAME_OPTIONS = "SAMEORIGIN" -{% set jwt_rsa_key = rsa_import_key(JWT_RSA_PRIVATE_KEY) %} +{% set jwt_rsa_key | rsa_import_key %}{{ JWT_RSA_PRIVATE_KEY }}{% endset %} JWT_AUTH["JWT_ISSUER"] = "{{ JWT_COMMON_ISSUER }}" JWT_AUTH["JWT_AUDIENCE"] = "{{ JWT_COMMON_AUDIENCE }}" JWT_AUTH["JWT_SECRET_KEY"] = "{{ JWT_COMMON_SECRET_KEY }}" @@ -114,6 +196,9 @@ "n": "{{ jwt_rsa_key.n|long_to_base64 }}", "p": "{{ jwt_rsa_key.p|long_to_base64 }}", "q": "{{ jwt_rsa_key.q|long_to_base64 }}", + "dq": "{{ jwt_rsa_key.dq|long_to_base64 }}", + "dp": "{{ jwt_rsa_key.dp|long_to_base64 }}", + "qi": "{{ jwt_rsa_key.invq|long_to_base64 }}", } ) JWT_AUTH["JWT_PUBLIC_SIGNING_JWK_SET"] = json.dumps( @@ -136,5 +221,30 @@ } ] +# Enable/Disable some features globally +FEATURES["ENABLE_DISCUSSION_SERVICE"] = False +FEATURES["PREVENT_CONCURRENT_LOGINS"] = False +FEATURES["ENABLE_CORS_HEADERS"] = True + +# CORS 
+CORS_ALLOW_CREDENTIALS = True +CORS_ORIGIN_ALLOW_ALL = False +CORS_ALLOW_INSECURE = {% if ENABLE_HTTPS %}False{% else %}True{% endif %} +# Note: CORS_ALLOW_HEADERS is intentionally not defined here, because it should +# be consistent across deployments, and is therefore set in edx-platform. + +# Add your MFE and third-party app domains here +CORS_ORIGIN_WHITELIST = [] + +# Disable codejail support +# explicitely configuring python is necessary to prevent unsafe calls +import codejail.jail_code +codejail.jail_code.configure("python", "nonexistingpythonbinary", user=None) +# another configuration entry is required to override prod/dev settings +CODE_JAIL = { + "python_bin": "nonexistingpythonbinary", + "user": None, +} + {{ patch("openedx-common-settings") }} ######## End of settings common to LMS and CMS diff --git a/tutor/templates/apps/openedx/settings/partials/common_cms.py b/tutor/templates/apps/openedx/settings/partials/common_cms.py index 7cfeb07c65..c5dde04400 100644 --- a/tutor/templates/apps/openedx/settings/partials/common_cms.py +++ b/tutor/templates/apps/openedx/settings/partials/common_cms.py @@ -1,19 +1,30 @@ {% include "apps/openedx/settings/partials/common_all.py" %} ######## Common CMS settings +STUDIO_NAME = "{{ PLATFORM_NAME }} - Studio" + +CACHES["staticfiles"] = { + "KEY_PREFIX": "staticfiles_cms", + "BACKEND": "django.core.cache.backends.locmem.LocMemCache", + "LOCATION": "staticfiles_cms", +} + +# Authentication +SOCIAL_AUTH_EDX_OAUTH2_SECRET = "{{ CMS_OAUTH2_SECRET }}" +SOCIAL_AUTH_EDX_OAUTH2_URL_ROOT = "http://lms:8000" +SOCIAL_AUTH_REDIRECT_IS_HTTPS = False # scheme is correctly included in redirect_uri +SESSION_COOKIE_NAME = "studio_session_id" -STUDIO_NAME = u"{{ PLATFORM_NAME }} - Studio" MAX_ASSET_UPLOAD_FILE_SIZE_IN_MB = 100 FRONTEND_LOGIN_URL = LMS_ROOT_URL + '/login' -FRONTEND_LOGOUT_URL = LMS_ROOT_URL + '/logout' FRONTEND_REGISTER_URL = LMS_ROOT_URL + '/register' # Create folders if necessary -for folder in [LOG_DIR, MEDIA_ROOT, 
STATIC_ROOT_BASE]: +for folder in [LOG_DIR, MEDIA_ROOT, STATIC_ROOT, ORA2_FILEUPLOAD_ROOT]: if not os.path.exists(folder): - os.makedirs(folder) + os.makedirs(folder, exist_ok=True) {{ patch("openedx-cms-common-settings") }} -######## End of common CMS settings \ No newline at end of file +######## End of common CMS settings diff --git a/tutor/templates/apps/openedx/settings/partials/common_lms.py b/tutor/templates/apps/openedx/settings/partials/common_lms.py index ef9bd6be37..1a06d613c2 100644 --- a/tutor/templates/apps/openedx/settings/partials/common_lms.py +++ b/tutor/templates/apps/openedx/settings/partials/common_lms.py @@ -4,12 +4,9 @@ LOGIN_REDIRECT_WHITELIST = ["{{ CMS_HOST }}"] # Better layout of honor code/tos links during registration -REGISTRATION_EXTRA_FIELDS["terms_of_service"] = "required" +REGISTRATION_EXTRA_FIELDS["terms_of_service"] = "hidden" REGISTRATION_EXTRA_FIELDS["honor_code"] = "hidden" -# This url must not be None and should not be used anywhere -LEARNING_MICROFRONTEND_URL = "http://learn.openedx.org" - # Fix media files paths PROFILE_IMAGE_BACKEND["options"]["location"] = os.path.join( MEDIA_ROOT, "profile-images/" @@ -21,11 +18,30 @@ # Allow insecure oauth2 for local interaction with local containers OAUTH_ENFORCE_SECURE = False +# Email settings +DEFAULT_EMAIL_LOGO_URL = LMS_ROOT_URL + "/theming/asset/images/logo.png" +BULK_EMAIL_SEND_USING_EDX_ACE = True +FEATURES["ENABLE_FOOTER_MOBILE_APP_LINKS"] = False + +# Branding +MOBILE_STORE_ACE_URLS = {} +SOCIAL_MEDIA_FOOTER_ACE_URLS = {} + +# Make it possible to hide courses by default from the studio +SEARCH_SKIP_SHOW_IN_CATALOG_FILTERING = False + +# Caching +CACHES["staticfiles"] = { + "KEY_PREFIX": "staticfiles_lms", + "BACKEND": "django.core.cache.backends.locmem.LocMemCache", + "LOCATION": "staticfiles_lms", +} + # Create folders if necessary -for folder in [DATA_DIR, LOG_DIR, MEDIA_ROOT, STATIC_ROOT_BASE, ORA2_FILEUPLOAD_ROOT]: +for folder in [DATA_DIR, LOG_DIR, MEDIA_ROOT, 
STATIC_ROOT, ORA2_FILEUPLOAD_ROOT]: if not os.path.exists(folder): - os.makedirs(folder) + os.makedirs(folder, exist_ok=True) {{ patch("openedx-lms-common-settings") }} -######## End of common LMS settings \ No newline at end of file +######## End of common LMS settings diff --git a/tutor/templates/apps/openedx/settings/partials/common_test.py b/tutor/templates/apps/openedx/settings/partials/common_test.py new file mode 100644 index 0000000000..290ac2929c --- /dev/null +++ b/tutor/templates/apps/openedx/settings/partials/common_test.py @@ -0,0 +1,3 @@ +# Fix MongoDb connection credentials +DOC_STORE_CONFIG["user"] = None +DOC_STORE_CONFIG["password"] = None diff --git a/tutor/templates/apps/openedx/settings/partials/pre_common_all.py b/tutor/templates/apps/openedx/settings/partials/pre_common_all.py deleted file mode 100644 index 282db6331f..0000000000 --- a/tutor/templates/apps/openedx/settings/partials/pre_common_all.py +++ /dev/null @@ -1,10 +0,0 @@ -# Silence overly verbose warnings -import logging -import warnings -from django.utils.deprecation import RemovedInDjango30Warning, RemovedInDjango31Warning -from rest_framework import RemovedInDRF310Warning, RemovedInDRF311Warning -warnings.simplefilter('ignore', RemovedInDjango30Warning) -warnings.simplefilter('ignore', RemovedInDjango31Warning) -warnings.simplefilter('ignore', RemovedInDRF310Warning) -warnings.simplefilter('ignore', RemovedInDRF311Warning) -warnings.simplefilter('ignore', DeprecationWarning) diff --git a/tutor/templates/apps/openedx/uwsgi.ini b/tutor/templates/apps/openedx/uwsgi.ini new file mode 100644 index 0000000000..8ff0484370 --- /dev/null +++ b/tutor/templates/apps/openedx/uwsgi.ini @@ -0,0 +1,3 @@ +{% include "build/openedx/settings/uwsgi.ini" %} +{{ patch("uwsgi-config") }} + diff --git a/tutor/templates/apps/permissions/setowners.sh b/tutor/templates/apps/permissions/setowners.sh new file mode 100644 index 0000000000..d4044f9067 --- /dev/null +++ 
b/tutor/templates/apps/permissions/setowners.sh @@ -0,0 +1,8 @@ +#! /bin/sh +setowner $OPENEDX_USER_ID /mounts/lms /mounts/cms /mounts/openedx +{% if RUN_ELASTICSEARCH %}setowner 1000 /mounts/elasticsearch{% endif %} +{% if RUN_MONGODB %}setowner 999 /mounts/mongodb{% endif %} +{% if RUN_MYSQL %}setowner 999 /mounts/mysql{% endif %} +{% if RUN_REDIS %}setowner 1000 /mounts/redis{% endif %} + +{{ patch("local-docker-compose-permissions-command") }} diff --git a/tutor/templates/apps/redis/redis.conf b/tutor/templates/apps/redis/redis.conf index cfdf709d31..1fdd69baaa 100644 --- a/tutor/templates/apps/redis/redis.conf +++ b/tutor/templates/apps/redis/redis.conf @@ -39,3 +39,10 @@ auto-aof-rewrite-percentage 100 auto-aof-rewrite-min-size 64mb aof-load-truncated yes aof-use-rdb-preamble yes + +############################## MEMORY MANAGEMENT ################################ + +maxmemory 4gb +maxmemory-policy allkeys-lru + +{{ patch("redis-conf") }} diff --git a/tutor/templates/build/android/Dockerfile b/tutor/templates/build/android/Dockerfile deleted file mode 100644 index b586322431..0000000000 --- a/tutor/templates/build/android/Dockerfile +++ /dev/null @@ -1,40 +0,0 @@ -FROM docker.io/ubuntu:20.04 -MAINTAINER Overhang.io - -ENV DEBIAN_FRONTEND=noninteractive -RUN apt update && \ - apt upgrade -y && \ - apt install -y wget unzip git openjdk-8-jre openjdk-8-jdk - -RUN mkdir /openedx - -# Install Android SDK -# Inspired from https://github.com/LiveXP/docker-android-sdk/blob/master/Dockerfile -ENV ANDROID_SDK_VERSION 6200805 -ENV ANDROID_SDK_PATH /openedx/android-sdk -ENV ANDROID_HOME /openedx/android-sdk -RUN mkdir ${ANDROID_HOME} -WORKDIR /openedx/android-sdk -RUN wget https://dl.google.com/android/repository/commandlinetools-linux-${ANDROID_SDK_VERSION}_latest.zip && \ - unzip commandlinetools-linux-${ANDROID_SDK_VERSION}_latest.zip && \ - rm commandlinetools-linux-${ANDROID_SDK_VERSION}_latest.zip - -# Accept licenses -# 
https://developer.android.com/studio/command-line/sdkmanager -ARG ANDROID_API_LEVEL=28 -RUN yes | /openedx/android-sdk/tools/bin/sdkmanager --sdk_root=${ANDROID_HOME} --install "platforms;android-$ANDROID_API_LEVEL" 1> /dev/null - -# Install android app repo -ARG ANDROID_APP_REPOSITORY=https://github.com/edx/edx-app-android -ARG ANDROID_APP_VERSION=release/2.23.2 -RUN git clone $ANDROID_APP_REPOSITORY --branch $ANDROID_APP_VERSION /openedx/edx-app-android -WORKDIR /openedx/edx-app-android - -# Install gradle and all dependencies -RUN ./gradlew -v -RUN ./gradlew tasks - -# User-customized config -COPY ./edx.properties ./OpenEdXMobile/edx.properties -RUN mkdir /openedx/config -RUN ln -s /openedx/config/gradle.properties ./OpenEdXMobile/gradle.properties diff --git a/tutor/templates/build/android/edx.properties b/tutor/templates/build/android/edx.properties deleted file mode 100644 index f49574b029..0000000000 --- a/tutor/templates/build/android/edx.properties +++ /dev/null @@ -1 +0,0 @@ -edx.dir = '/openedx/config' diff --git a/tutor/templates/build/forum/Dockerfile b/tutor/templates/build/forum/Dockerfile deleted file mode 100644 index 33ac5e7980..0000000000 --- a/tutor/templates/build/forum/Dockerfile +++ /dev/null @@ -1,52 +0,0 @@ -FROM docker.io/ubuntu:20.04 -MAINTAINER Overhang.io - -ENV DEBIAN_FRONTEND=noninteractive -RUN apt update && \ - apt upgrade -y && \ - apt install -y git wget autoconf bison build-essential libssl-dev libyaml-dev libreadline6-dev zlib1g-dev libncurses5-dev libffi-dev libgdbm-dev - -# Install dockerize to wait for mongodb/elasticsearch availability -ARG DOCKERIZE_VERSION=v0.6.1 -RUN wget -O /tmp/dockerize.tar.gz https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_VERSION/dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz \ - && tar -C /usr/local/bin -xzvf /tmp/dockerize.tar.gz \ - && rm /tmp/dockerize.tar.gz - -RUN mkdir /openedx - -# Install ruby-build for building specific version of ruby -# The ruby-build version should be 
periodically updated to reflect the latest release -ARG RUBY_BUILD_VERSION=v20200401 -RUN git clone https://github.com/rbenv/ruby-build.git --branch $RUBY_BUILD_VERSION /openedx/ruby-build -WORKDIR /openedx/ruby-build -RUN PREFIX=/usr/local ./install.sh - -# Install ruby and some specific dependencies -ARG RUBY_VERSION=2.5.7 -ARG BUNDLER_VERSION=1.17.3 -ARG RAKE_VERSION=13.0.1 -RUN ruby-build $RUBY_VERSION /openedx/ruby -ENV PATH "/openedx/ruby/bin:$PATH" -RUN gem install bundler -v $BUNDLER_VERSION -RUN gem install rake -v $RAKE_VERSION - -# Install forum -RUN git clone https://github.com/edx/cs_comments_service.git --branch {{ OPENEDX_COMMON_VERSION }} --depth 1 /openedx/cs_comments_service -WORKDIR /openedx/cs_comments_service -RUN bundle install --deployment - -COPY ./bin /openedx/bin -RUN chmod a+x /openedx/bin/* -ENV PATH /openedx/bin:${PATH} -ENTRYPOINT ["docker-entrypoint.sh"] - -ENV SINATRA_ENV staging -ENV NEW_RELIC_ENABLE false -ENV API_KEY forumapikey -ENV SEARCH_SERVER "http://elasticsearch:9200" -ENV MONGODB_AUTH "" -ENV MONGOID_AUTH_MECH "" -ENV MONGODB_HOST "mongodb" -ENV MONGODB_PORT "27017" -EXPOSE 4567 -CMD ./bin/unicorn -c config/unicorn_tcp.rb -I '.' diff --git a/tutor/templates/build/forum/bin/docker-entrypoint.sh b/tutor/templates/build/forum/bin/docker-entrypoint.sh deleted file mode 100755 index 6ad2f892b1..0000000000 --- a/tutor/templates/build/forum/bin/docker-entrypoint.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh -e - -export MONGOHQ_URL="mongodb://$MONGODB_AUTH$MONGODB_HOST:$MONGODB_PORT/cs_comments_service" - -echo "Waiting for mongodb/elasticsearch..." 
-dockerize -wait tcp://$MONGODB_HOST:$MONGODB_PORT -wait $SEARCH_SERVER -wait-retry-interval 5s -timeout 600s - -exec "$@" diff --git a/tutor/templates/build/openedx-dev/Dockerfile b/tutor/templates/build/openedx-dev/Dockerfile deleted file mode 100644 index 84bc19877d..0000000000 --- a/tutor/templates/build/openedx-dev/Dockerfile +++ /dev/null @@ -1,32 +0,0 @@ -FROM {{ DOCKER_IMAGE_OPENEDX }} -MAINTAINER Overhang.io - -# Install useful system requirements -RUN apt update && \ - apt install -y vim iputils-ping dnsutils telnet \ - && rm -rf /var/lib/apt/lists/* - -# Install dev python requirements -RUN pip install -r requirements/edx/development.txt -# We install ipython from source to avoid too many deprecation warnings -# https://github.com/ipython/ipython/issues/12206 -# We might be able to avoid this once they make a release later than 7.19.0. -RUN pip install ipdb==0.13.4 git+https://github.com/ipython/ipython.git@d0649a54a8936a8019d54549779dc92bcbde4e68#egg=ipython - -# Recompile static assets: in development mode all static assets are stored in edx-platform, -# and the location of these files is stored in webpack-stats.json. If we don't recompile -# static assets, then production assets will be served instead. -RUN rm -r /openedx/staticfiles && \ - mkdir /openedx/staticfiles && \ - openedx-assets webpack --env=dev - -# Copy new entrypoint (to take care of permission issues at runtime) -COPY ./bin /openedx/bin -RUN chmod a+x /openedx/bin/* - -# Configure new user -ARG USERID=1000 -RUN create-user.sh $USERID - -# Default django settings -ENV SETTINGS tutor.development diff --git a/tutor/templates/build/openedx-dev/bin/create-user.sh b/tutor/templates/build/openedx-dev/bin/create-user.sh deleted file mode 100755 index 4b91302ab8..0000000000 --- a/tutor/templates/build/openedx-dev/bin/create-user.sh +++ /dev/null @@ -1,11 +0,0 @@ -#! 
/bin/sh -e -USERID=$1 - -if [ "$USERID" != "" ] && [ "$USERID" != "0" ] -then - echo "Creating 'openedx' user with id $USERID" - useradd --home-dir /openedx --uid $USERID openedx - chown -R openedx:openedx /openedx -else - echo "Running as root" -fi \ No newline at end of file diff --git a/tutor/templates/build/openedx-dev/bin/docker-entrypoint.sh b/tutor/templates/build/openedx-dev/bin/docker-entrypoint.sh deleted file mode 100644 index 7c393e6175..0000000000 --- a/tutor/templates/build/openedx-dev/bin/docker-entrypoint.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/sh -e -export DJANGO_SETTINGS_MODULE=$SERVICE_VARIANT.envs.$SETTINGS - -if id -u openedx > /dev/null 2>&1; then - # Change owners of mounted volumes - echo "Setting file permissions for user openedx..." - find /openedx \ - -not -path "/openedx/edx-platform/*" \ - -not -user openedx \ - -perm /u=w \ - -exec chown openedx:openedx {} \+ - echo "File permissions set." - - # Run CMD as user openedx - exec chroot --userspec="openedx:openedx" --skip-chdir / env HOME=/openedx "$@" -else - echo "Running openedx-dev as root user" - exec "$@" -fi diff --git a/tutor/templates/build/openedx/Dockerfile b/tutor/templates/build/openedx/Dockerfile index 9ae9cd6e45..621b463422 100644 --- a/tutor/templates/build/openedx/Dockerfile +++ b/tutor/templates/build/openedx/Dockerfile @@ -1,214 +1,309 @@ +# syntax=docker/dockerfile:1 ###### Minimal image with base system requirements for most stages -FROM docker.io/ubuntu:20.04 as minimal -MAINTAINER Overhang.io +FROM docker.io/ubuntu:20.04 AS minimal +LABEL maintainer="Overhang.io " ENV DEBIAN_FRONTEND=noninteractive -RUN apt update && \ +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + apt update && \ apt install -y build-essential curl git language-pack-en -ENV LC_ALL en_US.UTF-8 +ENV LC_ALL=en_US.UTF-8 +{{ patch("openedx-dockerfile-minimal") }} ###### Install python with pyenv in /opt/pyenv and create 
virtualenv in /openedx/venv -FROM minimal as python +FROM minimal AS python # https://github.com/pyenv/pyenv/wiki/Common-build-problems#prerequisites -RUN apt update && \ +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + apt update && \ apt install -y libssl-dev zlib1g-dev libbz2-dev \ libreadline-dev libsqlite3-dev wget curl llvm libncurses5-dev libncursesw5-dev \ xz-utils tk-dev libffi-dev liblzma-dev python-openssl git -ARG PYTHON_VERSION=3.8.6 -ENV PYENV_ROOT /opt/pyenv -RUN git clone https://github.com/pyenv/pyenv $PYENV_ROOT --branch v1.2.21 --depth 1 + +# Install pyenv +# https://www.python.org/downloads/ +# https://github.com/pyenv/pyenv/releases +ARG PYTHON_VERSION=3.11.8 +ENV PYENV_ROOT=/opt/pyenv +RUN git clone https://github.com/pyenv/pyenv $PYENV_ROOT --branch v2.3.36 --depth 1 + +# Install Python RUN $PYENV_ROOT/bin/pyenv install $PYTHON_VERSION -RUN $PYENV_ROOT/versions/$PYTHON_VERSION/bin/python -m venv /openedx/venv -###### Install Dockerize to wait for mysql DB availability -FROM minimal as dockerize -ARG DOCKERIZE_VERSION=v0.6.1 -RUN curl -L -o /tmp/dockerize.tar.gz https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_VERSION/dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz \ - && tar -C /usr/local/bin -xzvf /tmp/dockerize.tar.gz \ - && rm /tmp/dockerize.tar.gz +# Create virtualenv +RUN $PYENV_ROOT/versions/$PYTHON_VERSION/bin/python -m venv /openedx/venv ###### Checkout edx-platform code -FROM minimal as code -ARG EDX_PLATFORM_REPOSITORY=https://github.com/edx/edx-platform.git -ARG EDX_PLATFORM_VERSION={{ OPENEDX_COMMON_VERSION }} -RUN mkdir -p /openedx/edx-platform && \ - git clone $EDX_PLATFORM_REPOSITORY --branch $EDX_PLATFORM_VERSION --depth 1 /openedx/edx-platform +FROM minimal AS code +ARG EDX_PLATFORM_REPOSITORY={{ EDX_PLATFORM_REPOSITORY }} +ARG EDX_PLATFORM_VERSION={{ EDX_PLATFORM_VERSION }} +RUN mkdir -p /openedx/edx-platform WORKDIR 
/openedx/edx-platform - -# Patch edx-platform -# Make it possible to disable learner records globally -# https://github.com/edx/edx-platform/pull/25182 -# https://github.com/overhangio/edx-platform/tree/overhangio/disable-learner-records-from-settings -RUN curl https://github.com/overhangio/edx-platform/commit/bd038bab3cf02df147e754f7743e46b68b43bac8.patch | git apply - -# Fix inconvenient pavelib warning -# https://github.com/edx/edx-platform/pull/25771 -# https://github.com/overhangio/edx-platform/tree/overhangio/fix-paver-warning -RUN curl https://github.com/overhangio/edx-platform/commit/bc0ab09f9945bd14aa6be1dbbf928cce58f079d2.patch | git apply - -# Apply security fixes -RUN curl https://github.com/overhangio/edx-platform/commit/fab755d80083575d3466b990cfcef3b9fd97e755.patch | git apply - -# Fix lost celery tasks -# https://github.com/edx/edx-platform/pull/25840 -RUN curl https://github.com/overhangio/edx-platform/commit/928fe14a5656303b538281edce897faf082b549c.patch | git apply - -# Fix video transcript upload to S3 -# https://github.com/edx/edx-platform/pull/24800 -RUN curl https://github.com/overhangio/edx-platform/commit/81b18b1b97ca89e2d941c9f7283e3f6e385c5f92.patch | git apply - - - - -###### Download extra locales to /openedx/locale/contrib/locale -FROM minimal as locales -# TODO: openedx-i18n is not yet tagged for koa.1: replace versions below by OPENEDX_COMMON_VERSION -ARG OPENEDX_I18N_VERSION=open-release/koa.test02 -RUN cd /tmp \ - && curl -L -o openedx-i18n.tar.gz https://github.com/openedx/openedx-i18n/archive/{{ OPENEDX_COMMON_VERSION }}.tar.gz \ - && tar xzf /tmp/openedx-i18n.tar.gz \ - && mkdir -p /openedx/locale/contrib \ - && mv openedx-i18n-{{ OPENEDX_COMMON_VERSION|replace("/", "-") }}/edx-platform/locale /openedx/locale/contrib \ - && rm -rf openedx-i18n* +ADD --keep-git-dir=true $EDX_PLATFORM_REPOSITORY#$EDX_PLATFORM_VERSION . 
+ +# Identify tutor user to apply patches using git +RUN git config --global user.email "tutor@overhang.io" \ + && git config --global user.name "Tutor" + +{%- if patch("openedx-dockerfile-git-patches-default") %} +# Custom edx-platform patches +{{ patch("openedx-dockerfile-git-patches-default") }} +{%- elif EDX_PLATFORM_VERSION == "master" %} +# Patches in nightly mode +{%- else %} +# Patches in non-nightly mode +{%- endif %} + +{# Example: RUN curl -fsSL https://github.com/openedx/edx-platform/commit/.patch | git am #} +{{ patch("openedx-dockerfile-post-git-checkout") }} + +##### Empty layer with just the repo at the root. +# This is useful when overriding the build context with a host repo: +# docker build --build-context edx-platform=/path/to/edx-platform +FROM scratch AS edx-platform +COPY --from=code /openedx/edx-platform / + +{# Create empty layers for all bind-mounted directories #} +{% for name in iter_mounted_directories(MOUNTS, "openedx") %} +FROM scratch AS mnt-{{ name }} +{% endfor %} ###### Install python requirements in virtualenv -FROM python as python-requirements -ENV PATH /openedx/venv/bin:${PATH} -ENV VIRTUAL_ENV /openedx/venv/ - -RUN apt update && apt install -y software-properties-common libmysqlclient-dev libxmlsec1-dev +FROM python AS python-requirements +ENV PATH=/openedx/venv/bin:${PATH} +ENV VIRTUAL_ENV=/openedx/venv/ +ENV XDG_CACHE_HOME=/openedx/.cache -# Note that this means that we need to reinstall all requirements whenever there is a -# change in edx-platform, which sucks. But there is no obvious alternative, as we need -# to install some packages from edx-platform.
-COPY --from=code /openedx/edx-platform /openedx/edx-platform -WORKDIR /openedx/edx-platform +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + apt update \ + && apt install -y software-properties-common libmysqlclient-dev libxmlsec1-dev libgeos-dev # Install the right version of pip/setuptools -RUN pip install setuptools==44.1.0 pip==20.0.2 wheel==0.34.2 +RUN --mount=type=cache,target=/openedx/.cache/pip,sharing=shared \ + pip install \ + # https://pypi.org/project/setuptools/ + # https://pypi.org/project/pip/ + # https://pypi.org/project/wheel/ + setuptools==69.1.1 pip==24.0 wheel==0.43.0 # Install base requirements -RUN pip install -r ./requirements/edx/base.txt +RUN --mount=type=bind,from=edx-platform,source=/requirements/edx/base.txt,target=/openedx/edx-platform/requirements/edx/base.txt \ + --mount=type=cache,target=/openedx/.cache/pip,sharing=shared \ + pip install -r /openedx/edx-platform/requirements/edx/base.txt -# Install scorm xblock -RUN pip install "openedx-scorm-xblock<12.0.0,>=11.0.0" +# Install extra requirements +RUN --mount=type=cache,target=/openedx/.cache/pip,sharing=shared \ + pip install \ + # Use redis as a django cache https://pypi.org/project/django-redis/ + django-redis==5.4.0 \ + # uwsgi server https://pypi.org/project/uWSGI/ + uwsgi==2.0.24 -# Install django-redis for using redis as a django cache -RUN pip install django-redis==4.12.1 +{{ patch("openedx-dockerfile-post-python-requirements") }} -# Install uwsgi -RUN pip install uwsgi==2.0.19.1 +# Install scorm xblock +RUN pip install "openedx-scorm-xblock>=18.0.0,<19.0.0" -# Install private requirements: this is useful for installing custom xblocks. 
-COPY ./requirements/ /openedx/requirements -RUN cd /openedx/requirements/ \ - && touch ./private.txt \ - && pip install -r ./private.txt +{% for extra_requirements in OPENEDX_EXTRA_PIP_REQUIREMENTS %} +RUN --mount=type=cache,target=/openedx/.cache/pip,sharing=shared \ + pip install '{{ extra_requirements }}' +{% endfor %} ###### Install nodejs with nodeenv in /openedx/nodeenv -FROM python as nodejs-requirements -ENV PATH /openedx/nodeenv/bin:/openedx/venv/bin:${PATH} +FROM python AS nodejs-requirements +ENV PATH=/openedx/nodeenv/bin:/openedx/venv/bin:${PATH} # Install nodeenv with the version provided by edx-platform -RUN pip install nodeenv==1.4.0 -RUN nodeenv /openedx/nodeenv --node=12.13.0 --prebuilt +# https://github.com/openedx/edx-platform/blob/master/requirements/edx/base.txt +RUN pip install nodeenv==1.8.0 +RUN nodeenv /openedx/nodeenv --node=18.20.1 --prebuilt # Install nodejs requirements -ARG NPM_REGISTRY=https://registry.npmjs.org/ -COPY --from=code /openedx/edx-platform/package.json /openedx/edx-platform/package.json +ARG NPM_REGISTRY={{ NPM_REGISTRY }} WORKDIR /openedx/edx-platform -RUN npm install --verbose --registry=$NPM_REGISTRY +RUN --mount=type=bind,from=edx-platform,source=/package.json,target=/openedx/edx-platform/package.json \ + --mount=type=bind,from=edx-platform,source=/package-lock.json,target=/openedx/edx-platform/package-lock.json \ + --mount=type=bind,from=edx-platform,source=/scripts/copy-node-modules.sh,target=/openedx/edx-platform/scripts/copy-node-modules.sh \ + --mount=type=cache,target=/root/.npm,sharing=shared \ + npm clean-install --no-audit --registry=$NPM_REGISTRY ###### Production image with system and python requirements -FROM minimal as production +FROM minimal AS production # Install system requirements -RUN apt update && \ - apt install -y gettext gfortran graphviz graphviz-dev libffi-dev libfreetype6-dev libgeos-dev libjpeg8-dev liblapack-dev libmysqlclient-dev libpng-dev libsqlite3-dev libxmlsec1-dev lynx ntp 
pkg-config rdfind && \ - rm -rf /var/lib/apt/lists/* - -COPY --from=dockerize /usr/local/bin/dockerize /usr/local/bin/dockerize -COPY --from=code /openedx/edx-platform /openedx/edx-platform -COPY --from=locales /openedx/locale/contrib/locale /openedx/locale/contrib/locale -COPY --from=python /opt/pyenv /opt/pyenv -COPY --from=python-requirements /openedx/venv /openedx/venv -COPY --from=python-requirements /openedx/requirements /openedx/requirements -COPY --from=nodejs-requirements /openedx/nodeenv /openedx/nodeenv -COPY --from=nodejs-requirements /openedx/edx-platform/node_modules /openedx/edx-platform/node_modules - -ENV PATH /openedx/venv/bin:./node_modules/.bin:/openedx/nodeenv/bin:${PATH} -ENV VIRTUAL_ENV /openedx/venv/ +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + apt update \ + && apt install -y gettext gfortran graphviz graphviz-dev libffi-dev libfreetype6-dev libgeos-dev libjpeg8-dev liblapack-dev libmysqlclient-dev libpng-dev libsqlite3-dev libxmlsec1-dev lynx mysql-client ntp pkg-config rdfind + +# From then on, run as unprivileged "app" user +# Note that this must always be different from root (APP_USER_ID=0) +ARG APP_USER_ID=1000 +RUN if [ "$APP_USER_ID" = 0 ]; then echo "app user may not be root" && false; fi +RUN useradd --no-log-init --home-dir /openedx --create-home --shell /bin/bash --uid ${APP_USER_ID} app +USER ${APP_USER_ID} + +# Note: +# For directories from other stages, we prefer 'COPY --link' to plain 'COPY' because it copies +# without regard to files from previous layers, providing significant caching benefits. However, +# since Linux's username->userid mapping is stored in a file (/etc/passwd), it means that we must +# --chown with an integer user id ($APP_USER_ID) rather the a username (app). 
+ +# https://hub.docker.com/r/powerman/dockerize/tags +COPY --link --from=docker.io/powerman/dockerize:0.19.0 /usr/local/bin/dockerize /usr/local/bin/dockerize +COPY --link --chown=$APP_USER_ID:$APP_USER_ID --from=edx-platform / /openedx/edx-platform +COPY --link --chown=$APP_USER_ID:$APP_USER_ID --from=python /opt/pyenv /opt/pyenv +COPY --link --chown=$APP_USER_ID:$APP_USER_ID --from=python-requirements /openedx/venv /openedx/venv +COPY --link --chown=$APP_USER_ID:$APP_USER_ID --from=python-requirements /mnt /mnt +COPY --link --chown=$APP_USER_ID:$APP_USER_ID --from=nodejs-requirements /openedx/nodeenv /openedx/nodeenv +COPY --link --chown=$APP_USER_ID:$APP_USER_ID --from=nodejs-requirements /openedx/edx-platform/node_modules /openedx/node_modules + +# Symlink node_modules such that we can bind-mount the edx-platform repository +RUN ln -s /openedx/node_modules /openedx/edx-platform/node_modules + +ENV PATH=/openedx/venv/bin:./node_modules/.bin:/openedx/nodeenv/bin:${PATH} +ENV VIRTUAL_ENV=/openedx/venv/ +ENV COMPREHENSIVE_THEME_DIRS=/openedx/themes +ENV STATIC_ROOT_LMS=/openedx/staticfiles +ENV STATIC_ROOT_CMS=/openedx/staticfiles/studio + WORKDIR /openedx/edx-platform -# Re-install local requirements, otherwise egg-info folders are missing -RUN pip install -r requirements/edx/local.in +{# Install auto-mounted directories as Python packages. #} +{% for name in iter_mounted_directories(MOUNTS, "openedx") %} +COPY --link --chown=$APP_USER_ID:$APP_USER_ID --from=mnt-{{ name }} / /mnt/{{ name }} +RUN pip install -e "/mnt/{{ name }}" +{% endfor %} -# Create folder that will store lms/cms.env.json files, as well as +# We install edx-platform here because it creates an egg-info folder in the current +# repo. We need both the source code and the virtualenv to run this command. +RUN pip install -e . + +# Create folder that will store lms/cms.env.yml files, as well as # the tutor-specific settings files. 
RUN mkdir -p /openedx/config ./lms/envs/tutor ./cms/envs/tutor -COPY revisions.yml /openedx/config/ -ENV LMS_CFG /openedx/config/lms.env.json -ENV STUDIO_CFG /openedx/config/cms.env.json -ENV REVISION_CFG /openedx/config/revisions.yml -COPY settings/lms/*.py ./lms/envs/tutor/ -COPY settings/cms/*.py ./cms/envs/tutor/ - -# Copy user-specific locales to /openedx/locale/user/locale and compile them -RUN mkdir -p /openedx/locale/user -COPY ./locale/ /openedx/locale/user/locale/ -RUN cd /openedx/locale/user && \ - django-admin.py compilemessages -v1 - -# Compile i18n strings: in some cases, js locales are not properly compiled out of the box -# and we need to do a pass ourselves. Also, we need to compile the djangojs.js files for -# the downloaded locales. +COPY --chown=app:app revisions.yml /openedx/config/ +ENV LMS_CFG=/openedx/config/lms.env.yml +ENV CMS_CFG=/openedx/config/cms.env.yml +ENV REVISION_CFG=/openedx/config/revisions.yml +COPY --chown=app:app settings/lms/*.py ./lms/envs/tutor/ +COPY --chown=app:app settings/cms/*.py ./cms/envs/tutor/ + +# Pull latest translations via atlas +RUN make clean_translations +RUN ./manage.py lms --settings=tutor.i18n pull_plugin_translations --verbose --repository='{{ ATLAS_REPOSITORY }}' --revision='{{ ATLAS_REVISION }}' {{ ATLAS_OPTIONS }} +RUN ./manage.py lms --settings=tutor.i18n pull_xblock_translations --repository='{{ ATLAS_REPOSITORY }}' --revision='{{ ATLAS_REVISION }}' {{ ATLAS_OPTIONS }} +RUN atlas pull --repository='{{ ATLAS_REPOSITORY }}' --revision='{{ ATLAS_REVISION }}' {{ ATLAS_OPTIONS }} \ + translations/edx-platform/conf/locale:conf/locale \ + translations/studio-frontend/src/i18n/messages:conf/plugins-locale/studio-frontend +RUN ./manage.py lms --settings=tutor.i18n compile_xblock_translations +RUN ./manage.py cms --settings=tutor.i18n compile_xblock_translations +RUN ./manage.py lms --settings=tutor.i18n compile_plugin_translations +RUN ./manage.py lms --settings=tutor.i18n compilemessages -v1 RUN 
./manage.py lms --settings=tutor.i18n compilejsi18n RUN ./manage.py cms --settings=tutor.i18n compilejsi18n # Copy scripts -COPY ./bin /openedx/bin +COPY --chown=app:app ./bin /openedx/bin RUN chmod a+x /openedx/bin/* -ENV PATH /openedx/bin:${PATH} +ENV PATH=/openedx/bin:${PATH} {{ patch("openedx-dockerfile-pre-assets") }} -# Collect production assets. By default, only assets from the default theme +# Build & collect production assets. By default, only assets from the default theme # will be processed. This makes the docker image lighter and faster to build. -# Only the custom themes added to /openedx/themes will be compiled. -# Here, we don't run "paver update_assets" which is slow, compiles all themes -# and requires a complex settings file. Instead, we decompose the commands -# and run each one individually to collect the production static assets to -# /openedx/staticfiles. -ENV NO_PYTHON_UNINSTALL 1 -ENV NO_PREREQ_INSTALL 1 -# We need to rely on a separate openedx-assets command to accelerate asset processing. -# For instance, we don't want to run all steps of asset collection every time the theme -# is modified. -RUN openedx-assets xmodule \ - && openedx-assets npm \ - && openedx-assets webpack --env=prod \ - && openedx-assets common -COPY ./themes/ /openedx/themes/ -RUN openedx-assets themes \ - && openedx-assets collect --settings=tutor.assets \ - # De-duplicate static assets with symlinks - && rdfind -makesymlinks true -followsymlinks true /openedx/staticfiles/ +RUN npm run postinstall # Postinstall artifacts are stuck in nodejs-requirements layer. Create them here too. +RUN npm run compile-sass -- --skip-themes +RUN npm run webpack + +# Now that the default theme is built, build any custom themes +COPY --chown=app:app ./themes/ /openedx/themes +RUN npm run compile-sass -- --skip-default + +# and finally, collect assets for the production image, +# de-duping assets with symlinks. 
+RUN ./manage.py lms collectstatic --noinput --settings=tutor.assets && \ + ./manage.py cms collectstatic --noinput --settings=tutor.assets && \ + # De-duplicate static assets with symlinks \ + rdfind -makesymlinks true -followsymlinks true /openedx/staticfiles/ # Create a data directory, which might be used (or not) RUN mkdir /openedx/data +# If this "canary" file is missing from a container, then that indicates that a +# local edx-platform was bind-mounted into that container, thus overwriting the +# canary. This information is useful during edx-platform initialisation. +RUN echo \ + "This copy of edx-platform was built into a Docker image." \ + > bindmount-canary + # service variant is "lms" or "cms" -ENV SERVICE_VARIANT lms -ENV SETTINGS tutor.production +ENV SERVICE_VARIANT=lms +ENV DJANGO_SETTINGS_MODULE=lms.envs.tutor.production {{ patch("openedx-dockerfile") }} -# Entrypoint will set right environment variables -ENTRYPOINT ["docker-entrypoint.sh"] +EXPOSE 8000 + +###### Intermediate image with dev/test dependencies +FROM production AS development + +# Install useful system requirements (as root) +USER root +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + apt update && \ + apt install -y vim iputils-ping dnsutils telnet +USER app + +# Install dev python requirements +RUN --mount=type=cache,target=/openedx/.cache/pip,sharing=shared \ + pip install -r requirements/edx/development.txt +# https://pypi.org/project/ipdb/ +# https://pypi.org/project/ipython (>=Python 3.10 started with 8.20) +RUN --mount=type=cache,target=/openedx/.cache/pip,sharing=shared \ + pip install ipdb==0.13.13 ipython==8.24.0 + +{# Re-install mounted requirements, otherwise they will be superseded by upstream reqs #} +{% for name in iter_mounted_directories(MOUNTS, "openedx") %} +COPY --link --chown=$APP_USER_ID:$APP_USER_ID --from=mnt-{{ name }} / /mnt/{{ name }} +RUN pip install -e "/mnt/{{ name }}" +{% endfor %} + 
+ +# Add ipdb as default PYTHONBREAKPOINT +ENV PYTHONBREAKPOINT=ipdb.set_trace + +# Point unit tests at the MongoDB container +ENV EDXAPP_TEST_MONGO_HOST={{ MONGODB_HOST }} +ENV EDXAPP_TEST_MONGO_PORT_NUM={{ MONGODB_PORT }} + +# Recompile static assets: in development mode all static assets are stored in edx-platform, +# and the location of these files is stored in webpack-stats.json. If we don't recompile +# static assets, then production assets will be served instead. +RUN rm -r /openedx/staticfiles && \ + mkdir /openedx/staticfiles && \ + npm run build-dev + +{{ patch("openedx-dev-dockerfile-post-python-requirements") }} + +# Default django settings +ENV DJANGO_SETTINGS_MODULE=lms.envs.tutor.development + +# Shell form is required here: exec-form CMD does not perform variable +# substitution, so $SERVICE_VARIANT would be passed literally to manage.py. +CMD ./manage.py ${SERVICE_VARIANT} runserver 0.0.0.0:8000 + +###### Final image with production cmd +FROM production AS final + +# Default amount of uWSGI processes +ENV UWSGI_WORKERS=2 + +# Copy the default uWSGI configuration +COPY --chown=app:app settings/uwsgi.ini /openedx # Run server -EXPOSE 8000 -CMD uwsgi \ - --static-map /static=/openedx/staticfiles/ \ - --static-map /media=/openedx/media/ \ - --http 0.0.0.0:8000 \ - --thunder-lock \ - --single-interpreter \ - --enable-threads \ - --processes=${UWSGI_WORKERS:-2} \ - --wsgi-file ${SERVICE_VARIANT}/wsgi.py +CMD ["uwsgi", "/openedx/uwsgi.ini"] + +{{ patch("openedx-dockerfile-final") }} + diff --git a/tutor/templates/build/openedx/bin/docker-entrypoint.sh b/tutor/templates/build/openedx/bin/docker-entrypoint.sh deleted file mode 100755 index 235d2a6138..0000000000 --- a/tutor/templates/build/openedx/bin/docker-entrypoint.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh -e -export DJANGO_SETTINGS_MODULE=$SERVICE_VARIANT.envs.$SETTINGS -exec "$@" \ No newline at end of file diff --git a/tutor/templates/build/openedx/bin/openedx-assets b/tutor/templates/build/openedx/bin/openedx-assets deleted file mode 100755 index 1d2103876b..0000000000 --- a/tutor/templates/build/openedx/bin/openedx-assets +++
/dev/null @@ -1,218 +0,0 @@ -#! /usr/bin/env python -from __future__ import print_function -import argparse -import os -import subprocess -import sys -import traceback - -from path import Path - -from pavelib import assets - - -DEFAULT_STATIC_ROOT = "/openedx/staticfiles" -DEFAULT_THEMES_DIR = "/openedx/themes" - - -def main(): - parser = argparse.ArgumentParser( - description="Various assets processing/building/collection utility for Open edX" - ) - subparsers = parser.add_subparsers() - - npm = subparsers.add_parser("npm", help="Copy static assets from node_modules") - npm.set_defaults(func=run_npm) - - build = subparsers.add_parser("build", help="Build all assets") - build.add_argument("-e", "--env", choices=["prod", "dev"], default="prod") - build.add_argument("--theme-dirs", nargs="+", default=[DEFAULT_THEMES_DIR]) - build.add_argument("--themes", nargs="+", default=["all"]) - build.add_argument("-r", "--static-root", default=DEFAULT_STATIC_ROOT) - build.add_argument("--systems", nargs="+", default=["lms", "cms"]) - build.set_defaults(func=run_build) - - xmodule = subparsers.add_parser("xmodule", help="Process assets from xmodule") - xmodule.set_defaults(func=run_xmodule) - - webpack = subparsers.add_parser("webpack", help="Run webpack") - webpack.add_argument("-r", "--static-root", default=DEFAULT_STATIC_ROOT) - webpack.add_argument("-e", "--env", choices=["prod", "dev"], default="prod") - webpack.set_defaults(func=run_webpack) - - common = subparsers.add_parser( - "common", help="Compile static assets for common theme" - ) - common.add_argument("--systems", nargs="+", default=["lms", "cms"]) - common.set_defaults(func=run_common) - - themes = subparsers.add_parser( - "themes", help="Compile static assets for custom themes" - ) - themes.add_argument("--theme-dirs", nargs="+", default=[DEFAULT_THEMES_DIR]) - themes.add_argument("--themes", nargs="+", default=["all"]) - themes.add_argument("--systems", nargs="+", default=["lms", "cms"]) - 
themes.set_defaults(func=run_themes) - - collect = subparsers.add_parser( - "collect", help="Collect static assets to be served by webserver" - ) - collect.add_argument( - "-s", - "--settings", - default=os.environ.get("SETTINGS"), - help="Django settings module", - ) - collect.add_argument( - "--systems", - nargs="+", - choices=["lms", "cms"], - default=["lms", "cms"], - help="Limit collection to lms or cms", - ) - collect.set_defaults(func=run_collect) - - watch_themes = subparsers.add_parser( - "watch-themes", help="Watch theme assets for changes and recompile on-the-fly" - ) - watch_themes.add_argument( - "-e", - "--env", - choices=["prod", "dev"], - default="prod", - help="Webpack target to run", - ) - watch_themes.add_argument("--theme-dirs", default=[DEFAULT_THEMES_DIR]) - watch_themes.set_defaults(func=run_watch_themes) - - args = parser.parse_args() - args.func(args) - - -def run_build(args): - run_xmodule(args) - run_npm(args) - run_webpack(args) - run_common(args) - run_themes(args) - - -def run_xmodule(_args): - # Collecting xmodule assets is incompatible with setting the django path, because - # of an unfortunate call to settings.configure() - django_settings_module = os.environ.get("DJANGO_SETTINGS_MODULE") - if django_settings_module: - os.environ.pop("DJANGO_SETTINGS_MODULE") - - sys.argv[1:] = ["common/static/xmodule"] - import xmodule.static_content - - xmodule.static_content.main() - - if django_settings_module: - os.environ["DJANGO_SETTINGS_MODULE"] = django_settings_module - - -def run_npm(_args): - assets.process_npm_assets() - - -def run_webpack(args): - os.environ["STATIC_ROOT_LMS"] = args.static_root - os.environ["STATIC_ROOT_CMS"] = os.path.join(args.static_root, "studio") - os.environ["NODE_ENV"] = {"prod": "production", "dev": "development"}[args.env] - subprocess.check_call( - [ - "webpack", - "--progress", - "--config=webpack.{env}.config.js".format(env=args.env), - ] - ) - - -def run_common(args): - for system in args.systems: - 
print("Compiling {} sass assets from common theme...".format(system)) - assets._compile_sass(system, None, False, False, []) - - -def run_themes(args): - for theme_dir in args.theme_dirs: - local_themes = ( - list_subdirectories(theme_dir) if "all" in args.themes else args.themes - ) - for theme in local_themes: - theme_path = os.path.join(theme_dir, theme) - if os.path.exists(theme_path): - for system in args.systems: - print( - "Compiling {} sass assets from theme {}...".format( - system, theme_path - ) - ) - assets._compile_sass(system, Path(theme_path), False, False, []) - - -def run_collect(args): - assets.collect_assets(args.systems, args.settings) - - -def run_watch_themes(args): - """ - Watch static assets for changes and re-compile those changes when - necessary. This piece of code is heavily inspired from the - edx-platform/pavelib/assets.py:watch_assets function, which could not be - used directly because it does not properly read the platform settings - environment variable. - - Note that this function will only work for watching assets in development - mode. In production, watching changes does not make much sense anyway. 
- """ - observer = assets.Observer() - for theme_dir in args.theme_dirs: - print("Watching changes in {}...".format(theme_dir)) - ThemeWatcher(theme_dir).register(observer) - observer.start() - try: - while True: - observer.join(2) - except KeyboardInterrupt: - observer.stop() - - -def list_subdirectories(path): - return [ - subpath - for subpath in os.listdir(path) - if os.path.isdir(os.path.join(path, subpath)) - ] - - -class ThemeWatcher(assets.SassWatcher): - def __init__(self, theme_dir): - super(ThemeWatcher, self).__init__() - self.theme_dir = theme_dir - - # pylint: disable=arguments-differ - def register(self, observer): - return super(ThemeWatcher, self).register(observer, [self.theme_dir]) - - @assets.debounce() - def on_any_event(self, event): - components = os.path.relpath(event.src_path, self.theme_dir).split("/") - try: - theme = components[0] - system = components[1] - except IndexError: - return - try: - print("Detected change:", event.src_path) - print("\tRecompiling {} theme for {}".format(theme, system)) - assets._compile_sass(system, Path(self.theme_dir) / theme, False, False, []) - print("\tDone recompiling {} theme for {}".format(theme, system)) - except Exception: # pylint: disable=broad-except - traceback.print_exc() - - -if __name__ == "__main__": - main() diff --git a/tutor/templates/build/openedx/bin/site-configuration b/tutor/templates/build/openedx/bin/site-configuration new file mode 100644 index 0000000000..22e7788e77 --- /dev/null +++ b/tutor/templates/build/openedx/bin/site-configuration @@ -0,0 +1,83 @@ +#! 
/usr/bin/env python3 +import argparse +import lms.startup + +lms.startup.run() + +from django.conf import settings +from django.contrib.sites.models import Site + +from openedx.core.djangoapps.site_configuration.models import SiteConfiguration + + +def main(): + parser = argparse.ArgumentParser(description="Manage site configuration") + subparsers = parser.add_subparsers() + + # Set command + parser_set = subparsers.add_parser("set", help="Set a site configuration key/value") + parser_set.add_argument( + "-d", "--domain", help="Site domain: by default this will be the LMS domain" + ) + parser_set.add_argument("key", help="Configuration key") + parser_set.add_argument( + "value", + help="Configuration value: 'true' and 'false' will be converted to booleans.", + ) + parser_set.set_defaults(func=set_command) + + # Unset command + parser_unset = subparsers.add_parser( + "unset", help="Remove a site configuration key" + ) + parser_unset.add_argument( + "-d", "--domain", help="Site domain: by default this will be the LMS domain" + ) + parser_unset.add_argument("key", help="Configuration key") + parser_unset.set_defaults(func=unset_command) + + args = parser.parse_args() + if hasattr(args, "func"): + args.func(args) + else: + parser.print_help() + + +def set_command(args): + configuration = get_site_configuration(args.domain) + + value = args.value + if value == "true": + value = True + elif value == "false": + value = False + + # Store the converted value, not the raw CLI string, so that "true"/"false" + # are persisted as booleans as documented in the argument help text. + configuration.site_values[args.key] = value + configuration.save() + + +def get_site_configuration(domain): + domain = domain or settings.LMS_BASE + site, site_created = Site.objects.get_or_create(domain=domain) + if site_created: + # Limit the site name to 50 characters + # https://github.com/django/django/blob/4.2.16/django/contrib/sites/models.py#L86 + site.name = domain[:50] + site.save() + configuration, configuration_created = SiteConfiguration.objects.get_or_create(site=site) + if configuration_created: + # Configuration is disabled by
default + configuration.enabled = True + configuration.save() + return configuration + + +def unset_command(args): + configuration = get_site_configuration(args.domain) + if args.key in configuration.site_values: + configuration.site_values.pop(args.key) + configuration.save() + + +if __name__ == "__main__": + main() diff --git a/tutor/templates/build/openedx/locale/customlocales.md b/tutor/templates/build/openedx/locale/customlocales.md deleted file mode 100644 index a098936305..0000000000 --- a/tutor/templates/build/openedx/locale/customlocales.md +++ /dev/null @@ -1,13 +0,0 @@ -Add your custom translations to this folder, with the following filesystem structure: - - languagecode/ - LC_MESSAGES/ - django.po - djangojs.po - -Where "languagecode" is one of "fr", "de_DE", "zh_CN", etc. - -The localized string in the *.po file should have the following format: - - msgid "String to translate" - msgstr "Your custom translation δ½ ε‘ιŸ³ηš„δΈœθ₯Ώ le bidule que vous voulez traduire" diff --git a/tutor/templates/build/openedx/requirements/private-sample.txt b/tutor/templates/build/openedx/requirements/private-sample.txt deleted file mode 100644 index a0ea63a612..0000000000 --- a/tutor/templates/build/openedx/requirements/private-sample.txt +++ /dev/null @@ -1,6 +0,0 @@ -# Add your additional requirements, such as xblocks, to this file. For -# requirements coming from private repositories, clone the repository to this -# folder and then add your requirement with the `-e` flag. 
Ex: -# -# git clone git@myserver:myprivaterepo.git -# echo "-e ./myprivaterepo/" >> private.txt diff --git a/tutor/templates/build/openedx/revisions.yml b/tutor/templates/build/openedx/revisions.yml index a62b76995d..d30d0cbc0e 100644 --- a/tutor/templates/build/openedx/revisions.yml +++ b/tutor/templates/build/openedx/revisions.yml @@ -1 +1 @@ -EDX_PLATFORM_REVISION: koa \ No newline at end of file +EDX_PLATFORM_REVISION: redwood diff --git a/tutor/templates/build/openedx/settings/cms/assets.py b/tutor/templates/build/openedx/settings/cms/assets.py index 59b1dd0d30..010df2b44a 100644 --- a/tutor/templates/build/openedx/settings/cms/assets.py +++ b/tutor/templates/build/openedx/settings/cms/assets.py @@ -1,9 +1,5 @@ {% include "build/openedx/settings/partials/assets.py" %} -STATIC_ROOT = path(STATIC_ROOT_BASE) / 'studio' -WEBPACK_LOADER['DEFAULT']['STATS_FILE'] = STATIC_ROOT / "webpack-stats.json" +WEBPACK_LOADER['DEFAULT']['STATS_FILE'] = path(STATIC_ROOT) / "webpack-stats.json" derive_settings(__name__) - -LOCALE_PATHS.append("/openedx/locale/contrib/locale") -LOCALE_PATHS.append("/openedx/locale/user/locale") diff --git a/tutor/templates/build/openedx/settings/lms/assets.py b/tutor/templates/build/openedx/settings/lms/assets.py index 4cf476d1ca..010df2b44a 100644 --- a/tutor/templates/build/openedx/settings/lms/assets.py +++ b/tutor/templates/build/openedx/settings/lms/assets.py @@ -1,9 +1,5 @@ {% include "build/openedx/settings/partials/assets.py" %} -STATIC_ROOT = path(STATIC_ROOT_BASE) -WEBPACK_LOADER['DEFAULT']['STATS_FILE'] = STATIC_ROOT / "webpack-stats.json" +WEBPACK_LOADER['DEFAULT']['STATS_FILE'] = path(STATIC_ROOT) / "webpack-stats.json" derive_settings(__name__) - -LOCALE_PATHS.append("/openedx/locale/contrib/locale") -LOCALE_PATHS.append("/openedx/locale/user/locale") diff --git a/tutor/templates/build/openedx/settings/partials/assets.py b/tutor/templates/build/openedx/settings/partials/assets.py index 1f08d4788b..86ef9110ab 100644 --- 
a/tutor/templates/build/openedx/settings/partials/assets.py +++ b/tutor/templates/build/openedx/settings/partials/assets.py @@ -5,9 +5,6 @@ from openedx.core.lib.derived import derive_settings ENABLE_COMPREHENSIVE_THEMING = True -COMPREHENSIVE_THEME_DIRS.append('/openedx/themes') - -STATIC_ROOT_BASE = '/openedx/staticfiles' SECRET_KEY = 'secret' XQUEUE_INTERFACE = { @@ -17,3 +14,5 @@ DATABASES = { "default": {}, } + +{{ patch("openedx-common-assets-settings") }} diff --git a/tutor/templates/build/openedx/settings/partials/i18n.py b/tutor/templates/build/openedx/settings/partials/i18n.py index fb3046626b..4c5e98a77f 100644 --- a/tutor/templates/build/openedx/settings/partials/i18n.py +++ b/tutor/templates/build/openedx/settings/partials/i18n.py @@ -1,8 +1,6 @@ from ..common import * from openedx.core.lib.derived import derive_settings -STATIC_ROOT_BASE = '/openedx/staticfiles' - SECRET_KEY = 'secret' XQUEUE_INTERFACE = { 'django_auth': None, @@ -14,5 +12,5 @@ derive_settings(__name__) -LOCALE_PATHS.append("/openedx/locale/contrib/locale") -LOCALE_PATHS.append("/openedx/locale/user/locale") + +{{ patch("openedx-common-i18n-settings") }} diff --git a/tutor/templates/build/openedx/settings/uwsgi.ini b/tutor/templates/build/openedx/settings/uwsgi.ini new file mode 100644 index 0000000000..d612bd25fb --- /dev/null +++ b/tutor/templates/build/openedx/settings/uwsgi.ini @@ -0,0 +1,23 @@ +[uwsgi] +static-map = /static=/openedx/staticfiles/ +static-map = /media=/openedx/media/ +http = 0.0.0.0:8000 +buffer-size = 8192 +wsgi-file = $(SERVICE_VARIANT)/wsgi.py +processes = $(UWSGI_WORKERS) +thunder-lock = true +single-interpreter = true +enable-threads = true +# Fix 502 errors for closed connections +http-keepalive = 1 +add-header = Connection: Keep-Alive +# Better startup/shutdown in docker: +die-on-term = true +lazy-apps = false +need-app = true +no-defer-accept = true +# Enable the master process for performance +master = true +# Clean up settings +py-call-osafterfork = true 
+vacuum = true diff --git a/tutor/templates/build/permissions/Dockerfile b/tutor/templates/build/permissions/Dockerfile new file mode 100644 index 0000000000..44146b08d6 --- /dev/null +++ b/tutor/templates/build/permissions/Dockerfile @@ -0,0 +1,7 @@ +FROM docker.io/alpine:3.13.6 +LABEL maintainer="Overhang.io " + +COPY ./setowner.sh /usr/local/bin/setowner +RUN chmod a+x /usr/local/bin/setowner + +ENTRYPOINT ["setowner"] diff --git a/tutor/templates/build/permissions/setowner.sh b/tutor/templates/build/permissions/setowner.sh new file mode 100644 index 0000000000..f0a3ea96ae --- /dev/null +++ b/tutor/templates/build/permissions/setowner.sh @@ -0,0 +1,14 @@ +#! /bin/sh +set -e +user_id="$1" +shift +for path in $@; do + path_user_id="$(stat -c '%u' $path)" + if [ "$path_user_id" != "$user_id" ] + then + echo "$path changing UID from $path_user_id to $user_id..." + chown --recursive $user_id $path + else + echo "$path already owned by $user_id" + fi +done diff --git a/tutor/templates/config.yml b/tutor/templates/config.yml deleted file mode 100644 index 7e08d2c7d9..0000000000 --- a/tutor/templates/config.yml +++ /dev/null @@ -1,81 +0,0 @@ ---- -# These configuration values must be stored in the user's config.yml. 
-MYSQL_ROOT_PASSWORD: "{{ 8|random_string }}" -OPENEDX_MYSQL_PASSWORD: "{{ 8|random_string }}" -OPENEDX_SECRET_KEY: "{{ 24|random_string }}" -ANDROID_OAUTH2_SECRET: "{{ 24|random_string }}" -ID: "{{ 24|random_string }}" - -# This must be defined early -LMS_HOST: "www.myopenedx.com" - -# The following are default values -RUN_CADDY: true -RUN_LMS: true -RUN_CMS: true -RUN_FORUM: true -RUN_ELASTICSEARCH: true -ENABLE_HTTPS: false -RUN_MONGODB: true -RUN_MYSQL: true -RUN_REDIS: true -RUN_SMTP: true -CMS_HOST: "studio.{{ LMS_HOST }}" -CONTACT_EMAIL: "contact@{{ LMS_HOST }}" -OPENEDX_AWS_ACCESS_KEY: "" -OPENEDX_AWS_SECRET_ACCESS_KEY: "" -ANDROID_RELEASE_STORE_PASSWORD: "android store password" -ANDROID_RELEASE_KEY_PASSWORD: "android release key password" -ANDROID_RELEASE_KEY_ALIAS: "android release key alias" -DEV_PROJECT_NAME: "tutor_dev" -DOCKER_REGISTRY: "docker.io/" -DOCKER_IMAGE_OPENEDX: "{{ DOCKER_REGISTRY }}overhangio/openedx:{{ TUTOR_VERSION }}" -DOCKER_IMAGE_OPENEDX_DEV: "{{ DOCKER_REGISTRY }}overhangio/openedx-dev:{{ TUTOR_VERSION }}" -DOCKER_IMAGE_ANDROID: "{{ DOCKER_REGISTRY }}overhangio/openedx-android:{{ TUTOR_VERSION }}" -DOCKER_IMAGE_CADDY: "{{ DOCKER_REGISTRY }}caddy:2.2.1" -DOCKER_IMAGE_FORUM: "{{ DOCKER_REGISTRY }}overhangio/openedx-forum:{{ TUTOR_VERSION }}" -DOCKER_IMAGE_MONGODB: "{{ DOCKER_REGISTRY }}mongo:3.6.18" -DOCKER_IMAGE_MYSQL: "{{ DOCKER_REGISTRY }}mysql:5.7.32" -DOCKER_IMAGE_ELASTICSEARCH: "{{ DOCKER_REGISTRY }}elasticsearch:1.5.2" -DOCKER_IMAGE_NGINX: "{{ DOCKER_REGISTRY }}nginx:1.13" -DOCKER_IMAGE_REDIS: "{{ DOCKER_REGISTRY }}redis:6.0.9" -DOCKER_IMAGE_SMTP: "{{ DOCKER_REGISTRY }}namshi/smtp:latest" -LOCAL_PROJECT_NAME: "tutor_local" -ELASTICSEARCH_HOST: "elasticsearch" -ELASTICSEARCH_PORT: 9200 -ELASTICSEARCH_SCHEME: "http" -ELASTICSEARCH_HEAP_SIZE: 1g -FORUM_HOST: "forum" -JWT_COMMON_AUDIENCE: "openedx" -JWT_COMMON_ISSUER: "{% if ENABLE_HTTPS %}https{% else %}http{% endif %}://{{ LMS_HOST }}/oauth2" -JWT_COMMON_SECRET_KEY: "{{ 
OPENEDX_SECRET_KEY }}" -JWT_RSA_PRIVATE_KEY: "{{ 2048|rsa_private_key }}" -K8S_NAMESPACE: "openedx" -LANGUAGE_CODE: "en" -MONGODB_HOST: "mongodb" -MONGODB_DATABASE: "openedx" -MONGODB_PORT: 27017 -MONGODB_USERNAME: "" -MONGODB_PASSWORD: "" -OPENEDX_CMS_UWSGI_WORKERS: 2 -OPENEDX_LMS_UWSGI_WORKERS: 2 -OPENEDX_MYSQL_DATABASE: "openedx" -OPENEDX_CSMH_MYSQL_DATABASE: "{{ OPENEDX_MYSQL_DATABASE }}_csmh" -OPENEDX_MYSQL_USERNAME: "openedx" -OPENEDX_COMMON_VERSION: "open-release/koa.1" -MYSQL_HOST: "mysql" -MYSQL_PORT: 3306 -MYSQL_ROOT_USERNAME: "root" -NGINX_HTTP_PORT: 80 -PLATFORM_NAME: "My Open edX" -PLUGINS: [] -REDIS_HOST: "redis" -REDIS_PORT: 6379 -REDIS_USERNAME: "" -REDIS_PASSWORD: "" -SMTP_HOST: "smtp" -SMTP_PORT: 25 -SMTP_USERNAME: "" -SMTP_PASSWORD: "" -SMTP_USE_TLS: false -SMTP_USE_SSL: false diff --git a/tutor/templates/config/base.yml b/tutor/templates/config/base.yml new file mode 100644 index 0000000000..b1d6a14afa --- /dev/null +++ b/tutor/templates/config/base.yml @@ -0,0 +1,15 @@ +--- +CMS_OAUTH2_SECRET: "{{ 24|random_string }}" +ID: "{{ 24|random_string }}" +JWT_RSA_PRIVATE_KEY: "{{ 2048|rsa_private_key }}" +MYSQL_ROOT_PASSWORD: "{{ 8|random_string }}" +OPENEDX_MYSQL_PASSWORD: "{{ 8|random_string }}" +OPENEDX_SECRET_KEY: "{{ 24|random_string }}" +PLUGINS: + # enabled by default + - mfe + - indigo +PLUGIN_INDEXES: + # Indexes in this list will be suffixed with the Open edX named version and + # "plugins.yml". E.g: https://overhang.io/tutor/main/olive/plugins.yml + - https://overhang.io/tutor/main diff --git a/tutor/templates/config/defaults.yml b/tutor/templates/config/defaults.yml new file mode 100644 index 0000000000..816ce21747 --- /dev/null +++ b/tutor/templates/config/defaults.yml @@ -0,0 +1,86 @@ +--- +# This file includes all Tutor setting defaults. Settings that do not have a +# default value, such as passwords, should be stored in base.yml. 
+# This must be defined early +ATLAS_REVISION: "{% if OPENEDX_COMMON_VERSION == 'master' %}main{% else %}{{ OPENEDX_COMMON_VERSION }}{% endif %}" +ATLAS_REPOSITORY: "openedx/openedx-translations" +ATLAS_OPTIONS: "" +CADDY_HTTP_PORT: 80 +CMS_HOST: "studio.{{ LMS_HOST }}" +CMS_OAUTH2_KEY_SSO: "cms-sso" +CMS_OAUTH2_KEY_SSO_DEV: "cms-sso-dev" +CONTACT_EMAIL: "contact@{{ LMS_HOST }}" +DEV_PROJECT_NAME: "{{ TUTOR_APP }}_dev" +DOCKER_REGISTRY: "docker.io/" +DOCKER_IMAGE_OPENEDX: "{{ DOCKER_REGISTRY }}overhangio/openedx:{{ TUTOR_VERSION }}" +DOCKER_IMAGE_OPENEDX_DEV: "openedx-dev:{{ TUTOR_VERSION }}" +# https://hub.docker.com/_/caddy/tags +DOCKER_IMAGE_CADDY: "docker.io/caddy:2.7.4" +# https://hub.docker.com/_/elasticsearch/tags +DOCKER_IMAGE_ELASTICSEARCH: "docker.io/elasticsearch:7.17.13" +# https://hub.docker.com/_/mongo/tags +DOCKER_IMAGE_MONGODB: "docker.io/mongo:7.0.7" +# https://hub.docker.com/_/mysql/tags +DOCKER_IMAGE_MYSQL: "docker.io/mysql:8.4.0" +DOCKER_IMAGE_PERMISSIONS: "{{ DOCKER_REGISTRY }}overhangio/openedx-permissions:{{ TUTOR_VERSION }}" +# https://hub.docker.com/_/redis/tags +DOCKER_IMAGE_REDIS: "docker.io/redis:7.2.4" +# https://hub.docker.com/r/devture/exim-relay/tags +DOCKER_IMAGE_SMTP: "docker.io/devture/exim-relay:4.96-r1-0" +EDX_PLATFORM_REPOSITORY: "https://github.com/openedx/edx-platform.git" +EDX_PLATFORM_VERSION: "{{ OPENEDX_COMMON_VERSION }}" +ELASTICSEARCH_HOST: "elasticsearch" +ELASTICSEARCH_PORT: 9200 +ELASTICSEARCH_SCHEME: "http" +ELASTICSEARCH_HEAP_SIZE: 1g +ENABLE_HTTPS: false +ENABLE_WEB_PROXY: true +JWT_COMMON_AUDIENCE: "openedx" +JWT_COMMON_ISSUER: "{% if ENABLE_HTTPS %}https{% else %}http{% endif %}://{{ LMS_HOST }}/oauth2" +JWT_COMMON_SECRET_KEY: "{{ OPENEDX_SECRET_KEY }}" +K8S_NAMESPACE: "openedx" +LANGUAGE_CODE: "en" +LMS_HOST: "www.myopenedx.com" +LOCAL_PROJECT_NAME: "{{ TUTOR_APP }}_local" +MONGODB_AUTH_MECHANISM: "" +MONGODB_AUTH_SOURCE: "admin" +MONGODB_HOST: "mongodb" +MONGODB_DATABASE: "openedx" +MONGODB_PORT: 27017 
+MONGODB_USERNAME: "" +MONGODB_PASSWORD: "" +MONGODB_REPLICA_SET: "" +MONGODB_USE_SSL: false +MOUNTS: [] +OPENEDX_AWS_ACCESS_KEY: "" +OPENEDX_AWS_SECRET_ACCESS_KEY: "" +OPENEDX_CACHE_REDIS_DB: 1 +OPENEDX_CELERY_REDIS_DB: 0 +OPENEDX_CMS_UWSGI_WORKERS: 2 +OPENEDX_LMS_UWSGI_WORKERS: 2 +OPENEDX_MYSQL_DATABASE: "openedx" +OPENEDX_MYSQL_USERNAME: "openedx" +# the common version will be automatically set to "master" in the nightly branch +OPENEDX_COMMON_VERSION: "open-release/redwood.3" +OPENEDX_EXTRA_PIP_REQUIREMENTS: [] +MYSQL_HOST: "mysql" +MYSQL_PORT: 3306 +MYSQL_ROOT_USERNAME: "root" +NPM_REGISTRY: "https://registry.npmjs.org/" +PLATFORM_NAME: "My Open edX" +PREVIEW_LMS_HOST: "preview.{{ LMS_HOST }}" +REDIS_HOST: "redis" +REDIS_PORT: 6379 +REDIS_USERNAME: "" +REDIS_PASSWORD: "" +RUN_ELASTICSEARCH: true +RUN_MONGODB: true +RUN_MYSQL: true +RUN_REDIS: true +RUN_SMTP: true +SMTP_HOST: "smtp" +SMTP_PORT: 8025 +SMTP_USERNAME: "" +SMTP_PASSWORD: "" +SMTP_USE_TLS: false +SMTP_USE_SSL: false diff --git a/tutor/templates/dev/docker-compose.jobs.yml b/tutor/templates/dev/docker-compose.jobs.yml new file mode 100644 index 0000000000..130a801a62 --- /dev/null +++ b/tutor/templates/dev/docker-compose.jobs.yml @@ -0,0 +1,26 @@ +x-openedx-job-service: + &openedx-job-service + image: {{ DOCKER_IMAGE_OPENEDX_DEV }} + build: + context: ../build/openedx/ + target: development + args: + # Note that we never build the openedx-dev image with root user ID, as it would simply fail. 
+ APP_USER_ID: "{{ HOST_USER_ID or 1000 }}" + volumes: + # Settings & config + - ../apps/openedx/settings/lms:/openedx/edx-platform/lms/envs/tutor:ro + - ../apps/openedx/settings/cms:/openedx/edx-platform/cms/envs/tutor:ro + - ../apps/openedx/config:/openedx/config:ro + # theme files + - ../build/openedx/themes:/openedx/themes + +services: + + lms-job: + <<: *openedx-job-service + + cms-job: + <<: *openedx-job-service + + {{ patch("dev-docker-compose-jobs-services")|indent(2) }} diff --git a/tutor/templates/dev/docker-compose.yml b/tutor/templates/dev/docker-compose.yml index 228ed0857f..acbaa56925 100644 --- a/tutor/templates/dev/docker-compose.yml +++ b/tutor/templates/dev/docker-compose.yml @@ -1,24 +1,22 @@ -version: "3.7" - x-openedx-service: &openedx-service image: {{ DOCKER_IMAGE_OPENEDX_DEV }} - environment: - SETTINGS: ${TUTOR_EDX_PLATFORM_SETTINGS:-tutor.development} + stdin_open: true + tty: true volumes: - # Settings & config - - ../apps/openedx/settings/lms/:/openedx/edx-platform/lms/envs/tutor/:ro - - ../apps/openedx/settings/cms/:/openedx/edx-platform/cms/envs/tutor/:ro - - ../apps/openedx/config/:/openedx/config/:ro # theme files - ../build/openedx/themes:/openedx/themes - # editable requirements - - ../build/openedx/requirements:/openedx/requirements services: + permissions: + environment: + OPENEDX_USER_ID: "{{ HOST_USER_ID or 1000 }}" + lms: <<: *openedx-service command: ./manage.py lms runserver 0.0.0.0:8000 + environment: + DJANGO_SETTINGS_MODULE: lms.envs.tutor.development ports: - "8000:8000" networks: @@ -29,19 +27,24 @@ services: cms: <<: *openedx-service command: ./manage.py cms runserver 0.0.0.0:8000 + environment: + DJANGO_SETTINGS_MODULE: cms.envs.tutor.development ports: - "8001:8000" - lms-worker: - <<: *openedx-service - - cms-worker: - <<: *openedx-service - # Additional service for watching theme changes watchthemes: <<: *openedx-service - command: openedx-assets watch-themes --env dev + command: npm run watch-sass restart: 
unless-stopped - {{ patch("local-docker-compose-dev-services")|indent(2) }} \ No newline at end of file + {% if RUN_ELASTICSEARCH and is_docker_rootless() %} + elasticsearch: + ulimits: + memlock: + # Fixes error setting rlimits for ready process in rootless docker + soft: 0 # zero means "unset" in the memlock context + hard: 0 + {% endif %} + + {{ patch("local-docker-compose-dev-services")|indent(2) }} diff --git a/tutor/templates/hooks/cms/importdemocourse b/tutor/templates/hooks/cms/importdemocourse deleted file mode 100644 index c3297d8c0b..0000000000 --- a/tutor/templates/hooks/cms/importdemocourse +++ /dev/null @@ -1,8 +0,0 @@ -echo "Loading settings $DJANGO_SETTINGS_MODULE" - -# Import demo course -git clone https://github.com/edx/edx-demo-course --branch {{ OPENEDX_COMMON_VERSION }} --depth 1 ../edx-demo-course -python ./manage.py cms import ../data ../edx-demo-course - -# Re-index courses -./manage.py cms reindex_course --all --setup \ No newline at end of file diff --git a/tutor/templates/hooks/forum/init b/tutor/templates/hooks/forum/init deleted file mode 100644 index 2e9cf9fef4..0000000000 --- a/tutor/templates/hooks/forum/init +++ /dev/null @@ -1,2 +0,0 @@ -bundle exec rake search:initialize -bundle exec rake search:rebuild_index diff --git a/tutor/templates/hooks/lms/init b/tutor/templates/hooks/lms/init deleted file mode 100644 index 0eb92705eb..0000000000 --- a/tutor/templates/hooks/lms/init +++ /dev/null @@ -1,31 +0,0 @@ -dockerize -wait tcp://{{ MYSQL_HOST }}:{{ MYSQL_PORT }} -timeout 20s - -echo "Loading settings $DJANGO_SETTINGS_MODULE" - -./manage.py lms migrate - -# Delete obsolete credentials for Android application -./manage.py lms shell -c 'from oauth2_provider.models import get_application_model -get_application_model().objects.filter(name="android").exclude(user__username="login_service_user").delete()' -# Create oauth credentials for Android application -./manage.py lms create_dot_application \ - --client-id android \ - --client-secret 
{{ ANDROID_OAUTH2_SECRET }} \ - --grant-type password \ - --public \ - --update \ - android \ - login_service_user - -# Fix incorrect uploaded file path -if [ -d /openedx/data/uploads/ ]; then - if [ -n "$(ls -A /openedx/data/uploads/)" ]; then - echo "Migrating LMS uploaded files to shared directory" - mv /openedx/data/uploads/* /openedx/media/ - rm -rf /openedx/data/uploads/ - fi -fi - -# Create waffle switches to enable some features, if they have not been explicitely defined before -# Completion tracking: add green ticks to every completed unit -(./manage.py lms waffle_switch --list | grep completion.enable_completion_tracking) || ./manage.py lms waffle_switch --create completion.enable_completion_tracking on diff --git a/tutor/templates/hooks/cms/init b/tutor/templates/jobs/init/cms.sh similarity index 53% rename from tutor/templates/hooks/cms/init rename to tutor/templates/jobs/init/cms.sh index 2f732da22f..1d420a48ab 100644 --- a/tutor/templates/hooks/cms/init +++ b/tutor/templates/jobs/init/cms.sh @@ -11,4 +11,8 @@ if [ -d /openedx/data/uploads/ ]; then mv /openedx/data/uploads/* /openedx/media/ rm -rf /openedx/data/uploads/ fi -fi \ No newline at end of file +fi + +# Create waffle switches to enable some features, if they have not been explicitly defined before +# Copy-paste of units in Studio (highly requested new feature, but defaults to off in Quince) +(./manage.py cms waffle_flag --list | grep contentstore.enable_copy_paste_units) || ./manage.py cms waffle_flag --create contentstore.enable_copy_paste_units --everyone diff --git a/tutor/templates/jobs/init/lms.sh b/tutor/templates/jobs/init/lms.sh new file mode 100644 index 0000000000..88c94625c5 --- /dev/null +++ b/tutor/templates/jobs/init/lms.sh @@ -0,0 +1,45 @@ +dockerize -wait tcp://{{ MYSQL_HOST }}:{{ MYSQL_PORT }} -timeout 20s + +{%- if MONGODB_HOST.startswith("mongodb+srv://") %} +echo "MongoDB is using SRV records, so we cannot wait for it to be ready" +{%- else %} +dockerize -wait tcp://{{ 
MONGODB_HOST }}:{{ MONGODB_PORT }} -timeout 20s +{%- endif %} + +echo "Loading settings $DJANGO_SETTINGS_MODULE" + +./manage.py lms migrate + +# Create oauth2 apps for CMS SSO +# https://github.com/openedx/edx-platform/blob/master/docs/guides/studio_oauth.rst +./manage.py lms manage_user cms cms@openedx --unusable-password +./manage.py lms create_dot_application \ + --grant-type authorization-code \ + --redirect-uris "{% if ENABLE_HTTPS %}https{% else %}http{% endif %}://{{ CMS_HOST }}/complete/edx-oauth2/" \ + --client-id {{ CMS_OAUTH2_KEY_SSO }} \ + --client-secret {{ CMS_OAUTH2_SECRET }} \ + --scopes user_id \ + --skip-authorization \ + --update cms-sso cms +./manage.py lms create_dot_application \ + --grant-type authorization-code \ + --redirect-uris "http://{{ CMS_HOST }}:8001/complete/edx-oauth2/" \ + --client-id {{ CMS_OAUTH2_KEY_SSO_DEV }} \ + --client-secret {{ CMS_OAUTH2_SECRET }} \ + --scopes user_id \ + --skip-authorization \ + --update cms-sso-dev cms + + +# Fix incorrect uploaded file path +if [ -d /openedx/data/uploads/ ]; then + if [ -n "$(ls -A /openedx/data/uploads/)" ]; then + echo "Migrating LMS uploaded files to shared directory" + mv /openedx/data/uploads/* /openedx/media/ + rm -rf /openedx/data/uploads/ + fi +fi + +# Create waffle switches to enable some features, if they have not been explicitly defined before +# Completion tracking: add green ticks to every completed unit +(./manage.py lms waffle_switch --list | grep completion.enable_completion_tracking) || ./manage.py lms waffle_switch --create completion.enable_completion_tracking on diff --git a/tutor/templates/jobs/init/mounted-directories.sh b/tutor/templates/jobs/init/mounted-directories.sh new file mode 100644 index 0000000000..0f7c615b8b --- /dev/null +++ b/tutor/templates/jobs/init/mounted-directories.sh @@ -0,0 +1,43 @@ +# The initialization job contains various re-install operations needed to be done +# on mounted directories (edx-platform, /mnt/*xblock, /mnt/) +# 1. 
/mnt/* +# Whenever xblocks or other installable packages are mounted, during the image build, they are copied over to container +# and installed. This results in egg_info generation for the mounted directories. However, the egg_info is not carried +# over to host. When the containers are launched, the host directories without egg_info are mounted on runtime +# and disappear from pip list. +# +# 2. edx-platform +# When a new local copy of edx-platform is bind-mounted, certain build +# artifacts from the openedx image's edx-platform directory are lost. +# We regenerate them here. + + +for mounted_dir in /mnt/*; do + if [ -f $mounted_dir/setup.py ] && ! ls $mounted_dir/*.egg-info >/dev/null 2>&1 ; then + echo "Unable to locate egg-info in $mounted_dir" + pip install -e $mounted_dir + fi +done + +if [ -f /openedx/edx-platform/bindmount-canary ] ; then + # If this file exists, then edx-platform has not been bind-mounted, + # so no build artifacts need to be regenerated. + echo "Using edx-platform from image (not bind-mount)." + echo "No extra setup is required." + exit +fi + +echo "Performing additional setup for bind-mounted edx-platform." +set -x # Echo out executed lines + +# Regenerate Open_edX.egg-info +pip install -e . + +# Regenerate node_modules +npm clean-install + +# Regenerate static assets. +npm run build-dev + +set +x +echo "Done setting up bind-mounted edx-platform." 
diff --git a/tutor/templates/hooks/mysql/init b/tutor/templates/jobs/init/mysql.sh similarity index 53% rename from tutor/templates/hooks/mysql/init rename to tutor/templates/jobs/init/mysql.sh index 43a10efd70..25d1d51db6 100644 --- a/tutor/templates/hooks/mysql/init +++ b/tutor/templates/jobs/init/mysql.sh @@ -15,5 +15,7 @@ done echo "MySQL is up and running" # edx-platform database -mysql -u {{ MYSQL_ROOT_USERNAME }} --password="{{ MYSQL_ROOT_PASSWORD }}" --host "{{ MYSQL_HOST }}" --port {{ MYSQL_PORT }} -e 'CREATE DATABASE IF NOT EXISTS {{ OPENEDX_MYSQL_DATABASE }};' -mysql -u {{ MYSQL_ROOT_USERNAME }} --password="{{ MYSQL_ROOT_PASSWORD }}" --host "{{ MYSQL_HOST }}" --port {{ MYSQL_PORT }} -e 'GRANT ALL ON {{ OPENEDX_MYSQL_DATABASE }}.* TO "{{ OPENEDX_MYSQL_USERNAME }}"@"%" IDENTIFIED BY "{{ OPENEDX_MYSQL_PASSWORD }}";' +mysql -u {{ MYSQL_ROOT_USERNAME }} --password="{{ MYSQL_ROOT_PASSWORD }}" --host "{{ MYSQL_HOST }}" --port {{ MYSQL_PORT }} -e "CREATE DATABASE IF NOT EXISTS {{ OPENEDX_MYSQL_DATABASE }};" +mysql -u {{ MYSQL_ROOT_USERNAME }} --password="{{ MYSQL_ROOT_PASSWORD }}" --host "{{ MYSQL_HOST }}" --port {{ MYSQL_PORT }} -e "CREATE USER IF NOT EXISTS '{{ OPENEDX_MYSQL_USERNAME }}';" +mysql -u {{ MYSQL_ROOT_USERNAME }} --password="{{ MYSQL_ROOT_PASSWORD }}" --host "{{ MYSQL_HOST }}" --port {{ MYSQL_PORT }} -e "ALTER USER '{{ OPENEDX_MYSQL_USERNAME }}'@'%' IDENTIFIED BY '{{ OPENEDX_MYSQL_PASSWORD }}';" +mysql -u {{ MYSQL_ROOT_USERNAME }} --password="{{ MYSQL_ROOT_PASSWORD }}" --host "{{ MYSQL_HOST }}" --port {{ MYSQL_PORT }} -e "GRANT ALL ON {{ OPENEDX_MYSQL_DATABASE }}.* TO '{{ OPENEDX_MYSQL_USERNAME }}'@'%';" diff --git a/tutor/templates/k8s/deployments.yml b/tutor/templates/k8s/deployments.yml index 34d515661a..f4ba005b53 100644 --- a/tutor/templates/k8s/deployments.yml +++ b/tutor/templates/k8s/deployments.yml @@ -1,4 +1,3 @@ -{% if RUN_CADDY %} --- apiVersion: apps/v1 kind: Deployment @@ -15,26 +14,49 @@ spec: labels: app.kubernetes.io/name: caddy 
spec: + {%- if ENABLE_WEB_PROXY %} + # This Deployment uses a persistent volume claim. This requires + # that in order to enable rolling updates (i.e. use a deployment + # strategy other than Replace), we schedule the new Pod to the + # same node as the original Pod. + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - caddy + topologyKey: "kubernetes.io/hostname" + {%- endif %} containers: - name: caddy image: {{ DOCKER_IMAGE_CADDY }} + env: + - name: default_site_port + value: "{% if not ENABLE_HTTPS or not ENABLE_WEB_PROXY %}:80{% endif %}" volumeMounts: - mountPath: /etc/caddy/ name: config + {%- if ENABLE_WEB_PROXY %} - mountPath: /data/ name: data + {%- endif %} ports: - containerPort: 80 + {%- if ENABLE_WEB_PROXY %} - containerPort: 443 + {%- endif %} volumes: - name: config configMap: name: caddy-config + {%- if ENABLE_WEB_PROXY %} - name: data persistentVolumeClaim: claimName: caddy -{% endif %} -{% if RUN_CMS %} + {%- endif %} --- apiVersion: apps/v1 kind: Deployment @@ -51,12 +73,19 @@ spec: labels: app.kubernetes.io/name: cms spec: + securityContext: + runAsUser: 1000 + runAsGroup: 1000 containers: - name: cms image: {{ DOCKER_IMAGE_OPENEDX }} env: - name: SERVICE_VARIANT value: cms + - name: DJANGO_SETTINGS_MODULE + value: cms.envs.tutor.production + - name: UWSGI_WORKERS + value: "{{ OPENEDX_CMS_UWSGI_WORKERS }}" ports: - containerPort: 8000 volumeMounts: @@ -66,9 +95,14 @@ spec: name: settings-cms - mountPath: /openedx/config name: config + - mountPath: /openedx/uwsgi.ini + name: uwsgi-config + subPath: uwsgi.ini resources: requests: memory: 2Gi + securityContext: + allowPrivilegeEscalation: false volumes: - name: settings-lms configMap: @@ -79,6 +113,12 @@ spec: - name: config configMap: name: openedx-config + - name: uwsgi-config + configMap: + name: openedx-uwsgi-config + items: + - key: uwsgi.ini + path: uwsgi.ini --- 
apiVersion: apps/v1 kind: Deployment @@ -95,15 +135,20 @@ spec: labels: app.kubernetes.io/name: cms-worker spec: + securityContext: + runAsUser: 1000 + runAsGroup: 1000 containers: - name: cms-worker image: {{ DOCKER_IMAGE_OPENEDX }} - args: ["celery", "worker", "--app=cms.celery", "--loglevel=info", "--hostname=edx.cms.core.default.%%h", "--maxtasksperchild", "100", "--exclude-queues=edx.lms.core.default"] + args: + {% for value in iter_cms_celery_parameters() %} + - "{{value}}"{% endfor %} env: - name: SERVICE_VARIANT value: cms - - name: C_FORCE_ROOT - value: "1" + - name: DJANGO_SETTINGS_MODULE + value: cms.envs.tutor.production volumeMounts: - mountPath: /openedx/edx-platform/lms/envs/tutor/ name: settings-lms @@ -111,6 +156,8 @@ spec: name: settings-cms - mountPath: /openedx/config name: config + securityContext: + allowPrivilegeEscalation: false volumes: - name: settings-lms configMap: @@ -121,40 +168,6 @@ spec: - name: config configMap: name: openedx-config -{% endif %} -{% if RUN_FORUM %} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: forum - labels: - app.kubernetes.io/name: forum -spec: - selector: - matchLabels: - app.kubernetes.io/name: forum - template: - metadata: - labels: - app.kubernetes.io/name: forum - spec: - containers: - - name: forum - image: {{ DOCKER_IMAGE_FORUM }} - ports: - - containerPort: 4567 - env: - - name: SEARCH_SERVER - value: "{{ ELASTICSEARCH_SCHEME }}://{{ ELASTICSEARCH_HOST }}:{{ ELASTICSEARCH_PORT }}" - - name: MONGODB_AUTH - value: "{% if MONGODB_USERNAME and MONGODB_PASSWORD %}{{ MONGODB_USERNAME}}:{{ MONGODB_PASSWORD }}@{% endif %}" - - name: MONGODB_HOST - value: "{{ MONGODB_HOST }}" - - name: MONGODB_PORT - value: "{{ MONGODB_PORT }}" -{% endif %} -{% if RUN_LMS %} --- apiVersion: apps/v1 kind: Deployment @@ -171,9 +184,19 @@ spec: labels: app.kubernetes.io/name: lms spec: + securityContext: + runAsUser: 1000 + runAsGroup: 1000 containers: - name: lms image: {{ DOCKER_IMAGE_OPENEDX }} + env: + - name: 
SERVICE_VARIANT + value: lms + - name: DJANGO_SETTINGS_MODULE + value: lms.envs.tutor.production + - name: UWSGI_WORKERS + value: "{{ OPENEDX_LMS_UWSGI_WORKERS }}" ports: - containerPort: 8000 volumeMounts: @@ -183,9 +206,14 @@ spec: name: settings-cms - mountPath: /openedx/config name: config + - mountPath: /openedx/uwsgi.ini + name: uwsgi-config + subPath: uwsgi.ini resources: requests: memory: 2Gi + securityContext: + allowPrivilegeEscalation: false volumes: - name: settings-lms configMap: @@ -196,6 +224,12 @@ spec: - name: config configMap: name: openedx-config + - name: uwsgi-config + configMap: + name: openedx-uwsgi-config + items: + - key: uwsgi.ini + path: uwsgi.ini --- apiVersion: apps/v1 kind: Deployment @@ -212,15 +246,19 @@ spec: labels: app.kubernetes.io/name: lms-worker spec: + securityContext: + runAsUser: 1000 + runAsGroup: 1000 containers: - name: lms-worker image: {{ DOCKER_IMAGE_OPENEDX }} - args: ["celery", "worker", "--app=cms.celery", "--loglevel=info", "--hostname=edx.lms.core.default.%%h", "--maxtasksperchild", "100", "--exclude-queues=edx.cms.core.default"] + args: {% for value in iter_lms_celery_parameters() %} + - "{{value}}"{% endfor %} env: - name: SERVICE_VARIANT value: lms - - name: C_FORCE_ROOT - value: "1" + - name: DJANGO_SETTINGS_MODULE + value: lms.envs.tutor.production volumeMounts: - mountPath: /openedx/edx-platform/lms/envs/tutor/ name: settings-lms @@ -228,6 +266,8 @@ spec: name: settings-cms - mountPath: /openedx/config name: config + securityContext: + allowPrivilegeEscalation: false volumes: - name: settings-lms configMap: @@ -238,7 +278,6 @@ spec: - name: config configMap: name: openedx-config -{% endif %} {% if RUN_ELASTICSEARCH %} --- apiVersion: apps/v1 @@ -258,18 +297,29 @@ spec: labels: app.kubernetes.io/name: elasticsearch spec: + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: "OnRootMismatch" containers: - name: elasticsearch image: {{ DOCKER_IMAGE_ELASTICSEARCH }} env: 
- - name: ES_JAVA_OPTS - value: "-Xms1g -Xmx1g" - - name: "cluster.name" - value: openedx - - name: "bootstrap.memory_lock" + - name: cluster.name + value: "openedx" + - name: bootstrap.memory_lock value: "true" + - name: discovery.type + value: "single-node" + - name: ES_JAVA_OPTS + value: "-Xms{{ ELASTICSEARCH_HEAP_SIZE }} -Xmx{{ ELASTICSEARCH_HEAP_SIZE }}" + - name: TAKE_FILE_OWNERSHIP + value: "1" ports: - containerPort: 9200 + securityContext: + allowPrivilegeEscalation: false volumeMounts: - mountPath: /usr/share/elasticsearch/data name: data @@ -297,16 +347,22 @@ spec: labels: app.kubernetes.io/name: mongodb spec: + securityContext: + runAsUser: 999 + runAsGroup: 999 + fsGroup: 999 + fsGroupChangePolicy: "OnRootMismatch" containers: - name: mongodb image: {{ DOCKER_IMAGE_MONGODB }} - args: ["mongod", "--smallfiles", "--nojournal", "--storageEngine", "wiredTiger"] + args: ["mongod", "--storageEngine", "wiredTiger"] ports: - containerPort: 27017 volumeMounts: - mountPath: /data/db name: data - + securityContext: + allowPrivilegeEscalation: false volumes: - name: data persistentVolumeClaim: @@ -331,12 +387,20 @@ spec: labels: app.kubernetes.io/name: mysql spec: + securityContext: + runAsUser: 999 + runAsGroup: 999 + fsGroup: 999 + fsGroupChangePolicy: "OnRootMismatch" containers: - name: mysql image: {{ DOCKER_IMAGE_MYSQL }} - # Note the ignore-db-dir: this is because ext4 volumes are created with a lost+found directory in them, which causes mysql - # initialization to fail - args: ["mysqld", "--character-set-server=utf8", "--collation-server=utf8_general_ci", "--ignore-db-dir=lost+found"] + args: + - "mysqld" + - "--character-set-server=utf8mb4" + - "--collation-server=utf8mb4_unicode_ci" + - "--binlog-expire-logs-seconds=259200" + - "--mysql-native-password=ON" env: - name: MYSQL_ROOT_PASSWORD value: "{{ MYSQL_ROOT_PASSWORD }}" @@ -345,6 +409,8 @@ spec: volumeMounts: - mountPath: /var/lib/mysql name: data + securityContext: + allowPrivilegeEscalation: false 
volumes: - name: data persistentVolumeClaim: @@ -367,42 +433,15 @@ spec: labels: app.kubernetes.io/name: smtp spec: + securityContext: + runAsUser: 100 + runAsGroup: 101 containers: - name: smtp image: {{ DOCKER_IMAGE_SMTP }} ports: - - containerPort: 25 + - containerPort: 8025 {% endif %} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nginx - labels: - app.kubernetes.io/name: nginx -spec: - selector: - matchLabels: - app.kubernetes.io/name: nginx - template: - metadata: - labels: - app.kubernetes.io/name: nginx - spec: - containers: - - name: nginx - image: {{ DOCKER_IMAGE_NGINX }} - volumeMounts: - - mountPath: /etc/nginx/conf.d/ - name: config - {{ patch("k8s-deployments-nginx-volume-mounts")|indent(12) }} - ports: - - containerPort: 80 - volumes: - - name: config - configMap: - name: nginx-config - {{ patch("k8s-deployments-nginx-volumes")|indent(8) }} {% if RUN_REDIS %} --- apiVersion: apps/v1 @@ -422,9 +461,16 @@ spec: labels: app.kubernetes.io/name: redis spec: + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: "OnRootMismatch" containers: - name: redis image: {{ DOCKER_IMAGE_REDIS }} + args: ["redis-server", "/openedx/redis/config/redis.conf"] + workingDir: /openedx/redis/data ports: - containerPort: {{ REDIS_PORT }} volumeMounts: @@ -432,6 +478,8 @@ spec: name: config - mountPath: /openedx/redis/data name: data + securityContext: + allowPrivilegeEscalation: false volumes: - name: config configMap: diff --git a/tutor/templates/k8s/jobs.yml b/tutor/templates/k8s/jobs.yml index 810b811a1e..7b75136006 100644 --- a/tutor/templates/k8s/jobs.yml +++ b/tutor/templates/k8s/jobs.yml @@ -12,6 +12,11 @@ spec: containers: - name: lms image: {{ DOCKER_IMAGE_OPENEDX }} + env: + - name: SERVICE_VARIANT + value: lms + - name: DJANGO_SETTINGS_MODULE + value: lms.envs.tutor.production volumeMounts: - mountPath: /openedx/edx-platform/lms/envs/tutor/ name: settings-lms @@ -46,6 +51,8 @@ spec: env: - name: 
SERVICE_VARIANT value: cms + - name: DJANGO_SETTINGS_MODULE + value: cms.envs.tutor.production volumeMounts: - mountPath: /openedx/edx-platform/lms/envs/tutor/ name: settings-lms @@ -77,30 +84,5 @@ spec: containers: - name: mysql image: {{ DOCKER_IMAGE_MYSQL }} - command: [] ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: forum-job - labels: - app.kubernetes.io/component: job -spec: - template: - spec: - restartPolicy: Never - containers: - - name: forum - image: {{ DOCKER_IMAGE_FORUM }} - env: - - name: SEARCH_SERVER - value: "{{ ELASTICSEARCH_SCHEME }}://{{ ELASTICSEARCH_HOST }}:{{ ELASTICSEARCH_PORT }}" - - name: MONGODB_AUTH - value: "{% if MONGODB_USERNAME and MONGODB_PASSWORD %}{{ MONGODB_USERNAME}}:{{ MONGODB_PASSWORD }}@{% endif %}" - - name: MONGODB_HOST - value: "{{ MONGODB_HOST }}" - - name: MONGODB_PORT - value: "{{ MONGODB_PORT }}" {{ patch("k8s-jobs") }} - diff --git a/tutor/templates/k8s/override.yml b/tutor/templates/k8s/override.yml new file mode 100644 index 0000000000..5c105154f6 --- /dev/null +++ b/tutor/templates/k8s/override.yml @@ -0,0 +1 @@ +{{ patch("k8s-override") }} diff --git a/tutor/templates/k8s/services.yml b/tutor/templates/k8s/services.yml index f50da216db..c34d2255d8 100644 --- a/tutor/templates/k8s/services.yml +++ b/tutor/templates/k8s/services.yml @@ -1,69 +1,82 @@ -{% if RUN_CADDY %} +{% if ENABLE_WEB_PROXY %} --- apiVersion: v1 kind: Service metadata: name: caddy + labels: + app.kubernetes.io/name: caddy + app.kubernetes.io/component: loadbalancer spec: type: LoadBalancer ports: - port: 80 name: http + {%- if ENABLE_HTTPS %} - port: 443 + protocol: TCP name: https + # include support for http/3 + - port: 443 + protocol: UDP + name: http3 + {%- endif %} selector: app.kubernetes.io/name: caddy -{% endif %} -{% if RUN_CMS %} +{% else %} --- apiVersion: v1 kind: Service metadata: - name: cms + name: caddy + labels: + app.kubernetes.io/name: caddy spec: - type: NodePort + type: ClusterIP ports: - - port: 8000 - protocol: 
TCP + - port: {{ CADDY_HTTP_PORT }} + name: http selector: - app.kubernetes.io/name: cms + app.kubernetes.io/name: caddy {% endif %} -{% if RUN_FORUM %} --- apiVersion: v1 kind: Service metadata: - name: forum + name: cms + labels: + app.kubernetes.io/name: cms spec: - type: NodePort + type: ClusterIP ports: - - port: 4567 + - port: 8000 protocol: TCP selector: - app.kubernetes.io/name: forum -{% endif %} -{% if RUN_LMS %} + app.kubernetes.io/name: cms --- apiVersion: v1 kind: Service metadata: name: lms + labels: + app.kubernetes.io/name: lms spec: - type: NodePort + type: ClusterIP ports: - port: 8000 protocol: TCP selector: app.kubernetes.io/name: lms -{% endif %} {% if RUN_ELASTICSEARCH %} --- apiVersion: v1 kind: Service metadata: name: elasticsearch + labels: + app.kubernetes.io/name: elasticsearch spec: - type: NodePort + type: ClusterIP ports: - port: 9200 protocol: TCP @@ -76,8 +89,10 @@ apiVersion: v1 kind: Service metadata: name: mongodb + labels: + app.kubernetes.io/name: mongodb spec: - type: NodePort + type: ClusterIP ports: - port: 27017 protocol: TCP @@ -90,34 +105,26 @@ apiVersion: v1 kind: Service metadata: name: mysql + labels: + app.kubernetes.io/name: mysql spec: - type: NodePort + type: ClusterIP ports: - port: 3306 protocol: TCP selector: app.kubernetes.io/name: mysql {% endif %} ---- -apiVersion: v1 -kind: Service -metadata: - name: nginx -spec: - type: NodePort - ports: - - port: 80 - name: http - selector: - app.kubernetes.io/name: nginx {% if RUN_REDIS %} --- apiVersion: v1 kind: Service metadata: name: redis + labels: + app.kubernetes.io/name: redis spec: - type: NodePort + type: ClusterIP ports: - port: {{ REDIS_PORT }} protocol: TCP @@ -130,12 +137,14 @@ apiVersion: v1 kind: Service metadata: name: smtp + labels: + app.kubernetes.io/name: smtp spec: - type: NodePort + type: ClusterIP ports: - - port: 25 + - port: 8025 protocol: TCP selector: app.kubernetes.io/name: smtp {% endif %} -{{ patch("k8s-services") }} \ No newline at end of 
file +{{ patch("k8s-services") }} diff --git a/tutor/templates/k8s/volumes.yml b/tutor/templates/k8s/volumes.yml index 20d1dcfca6..ffb4b66486 100644 --- a/tutor/templates/k8s/volumes.yml +++ b/tutor/templates/k8s/volumes.yml @@ -1,4 +1,4 @@ -{% if RUN_CADDY %} +{% if ENABLE_WEB_PROXY %} --- apiVersion: v1 kind: PersistentVolumeClaim diff --git a/tutor/templates/kustomization.yml b/tutor/templates/kustomization.yml index a337c7f320..f29fea1e81 100644 --- a/tutor/templates/kustomization.yml +++ b/tutor/templates/kustomization.yml @@ -12,11 +12,16 @@ resources: # namespace to deploy all Resources to namespace: {{ K8S_NAMESPACE }} -# labels added to all Resources +# annotations added to all Resources +# https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/commonannotations/ +commonAnnotations: + app.kubernetes.io/version: {{ TUTOR_VERSION }} + +# labels (and label selectors) added to all Resources # https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ +# https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/commonlabels/ commonLabels: app.kubernetes.io/instance: openedx-{{ ID }} - app.kubernetes.io/version: {{ TUTOR_VERSION }} app.kubernetes.io/part-of: openedx app.kubernetes.io/managed-by: tutor {{ patch("kustomization-commonlabels")|indent(2) }} @@ -25,21 +30,47 @@ configMapGenerator: - name: caddy-config files: - apps/caddy/Caddyfile + options: + labels: + app.kubernetes.io/name: caddy - name: openedx-settings-lms files:{% for file in "apps/openedx/settings/lms"|walk_templates %} - {{ file }}{% endfor %} + options: + labels: + app.kubernetes.io/name: openedx - name: openedx-settings-cms files:{% for file in "apps/openedx/settings/cms"|walk_templates %} - {{ file }}{% endfor %} + options: + labels: + app.kubernetes.io/name: openedx - name: openedx-config files:{% for file in "apps/openedx/config"|walk_templates %} - {{ file }}{% endfor %} -- name: nginx-config - files:{% for file in 
"apps/nginx"|walk_templates %} - - {{ file }}{% endfor %} + options: + labels: + app.kubernetes.io/name: openedx +- name: openedx-uwsgi-config + files: + - apps/openedx/uwsgi.ini + options: + labels: + app.kubernetes.io/name: openedx - name: redis-config files: - apps/redis/redis.conf + options: + labels: + app.kubernetes.io/name: redis {{ patch("kustomization-configmapgenerator") }} -{{ patch("kustomization") }} \ No newline at end of file +{%- if patch("k8s-override") or patch("kustomization-patches-strategic-merge") %} +patchesStrategicMerge: +{%- if patch("k8s-override") %} +- k8s/override.yml +{%- endif %} +{{ patch("kustomization-patches-strategic-merge") }} +{%- endif %} + +{{ patch("kustomization") }} diff --git a/tutor/templates/local/docker-compose.jobs.yml b/tutor/templates/local/docker-compose.jobs.yml index 9546a313d5..6155cd688a 100644 --- a/tutor/templates/local/docker-compose.jobs.yml +++ b/tutor/templates/local/docker-compose.jobs.yml @@ -1,47 +1,48 @@ -version: "3.7" +# Tutor provides the `tutor MODE do JOB ...` CLI as a consistent way to execute jobs +# across the dev, local, and k8s modes. To support jobs in the docker compose modes +# (dev and local), we must define a `-job` variant service in which jobs could be run. + +# When `tutor local do JOB ...` is invoked, we `docker compose run` each of JOB's +# tasks against the appropriate `-job` services, as defined here. +# When `tutor dev do JOB ...` is invoked, we do the same, but also include any +# compose overrides in ../dev/docker-compose.jobs.yml. + +# Note that these services will all be `run` rather than `start`ed and `exec`ed. +# This is because jobs are often used for initialization tasks, which may need to +# happen before the service can be successfully `start`ed. 
+ services: mysql-job: image: {{ DOCKER_IMAGE_MYSQL }} - entrypoint: [] - command: ["echo", "done"] depends_on: {{ [("mysql", RUN_MYSQL)]|list_if }} lms-job: image: {{ DOCKER_IMAGE_OPENEDX }} environment: SERVICE_VARIANT: lms - SETTINGS: ${EDX_PLATFORM_SETTINGS:-tutor.production} + DJANGO_SETTINGS_MODULE: lms.envs.tutor.production volumes: - - ../apps/openedx/settings/lms/:/openedx/edx-platform/lms/envs/tutor/:ro - - ../apps/openedx/settings/cms/:/openedx/edx-platform/cms/envs/tutor/:ro - - ../apps/openedx/config/:/openedx/config/:ro - depends_on: {{ [("mysql", RUN_MYSQL)]|list_if }} + - ../apps/openedx/settings/lms:/openedx/edx-platform/lms/envs/tutor:ro + - ../apps/openedx/settings/cms:/openedx/edx-platform/cms/envs/tutor:ro + - ../apps/openedx/config:/openedx/config:ro + {%- for mount in iter_mounts(MOUNTS, "openedx", "lms-job") %} + - {{ mount }} + {%- endfor %} + depends_on: {{ [("mysql", RUN_MYSQL), ("mongodb", RUN_MONGODB)]|list_if }} cms-job: image: {{ DOCKER_IMAGE_OPENEDX }} environment: SERVICE_VARIANT: cms - SETTINGS: ${EDX_PLATFORM_SETTINGS:-tutor.production} - volumes: - - ../apps/openedx/settings/lms/:/openedx/edx-platform/lms/envs/tutor/:ro - - ../apps/openedx/settings/cms/:/openedx/edx-platform/cms/envs/tutor/:ro - - ../apps/openedx/config/:/openedx/config/:ro - depends_on: {{ [("mysql", RUN_MYSQL)]|list_if }} - - forum-job: - image: {{ DOCKER_IMAGE_FORUM }} - environment: - SEARCH_SERVER: "{{ ELASTICSEARCH_SCHEME }}://{{ ELASTICSEARCH_HOST }}:{{ ELASTICSEARCH_PORT }}" - MONGODB_AUTH: "{% if MONGODB_USERNAME and MONGODB_PASSWORD %}{{ MONGODB_USERNAME}}:{{ MONGODB_PASSWORD }}@{% endif %}" - MONGODB_HOST: "{{ MONGODB_HOST }}" - MONGODB_PORT: "{{ MONGODB_PORT }}" - depends_on: {{ [("elasticsearch", RUN_ELASTICSEARCH), ("mongodb", RUN_MONGODB)]|list_if }} - - android-job: - image: {{ DOCKER_IMAGE_ANDROID }} + DJANGO_SETTINGS_MODULE: cms.envs.tutor.production volumes: - - "../android/:/openedx/config/" - - "../../data/android/:/openedx/data/" + - 
../apps/openedx/settings/lms:/openedx/edx-platform/lms/envs/tutor:ro + - ../apps/openedx/settings/cms:/openedx/edx-platform/cms/envs/tutor:ro + - ../apps/openedx/config:/openedx/config:ro + {%- for mount in iter_mounts(MOUNTS, "openedx", "cms-job") %} + - {{ mount }} + {%- endfor %} + depends_on: {{ [("mysql", RUN_MYSQL), ("mongodb", RUN_MONGODB), ("elasticsearch", RUN_ELASTICSEARCH), ("redis", RUN_REDIS)]|list_if }} - {{ patch("local-docker-compose-jobs-services")|indent(4) }} \ No newline at end of file + {{ patch("local-docker-compose-jobs-services")|indent(4) }} diff --git a/tutor/templates/local/docker-compose.prod.yml b/tutor/templates/local/docker-compose.prod.yml index c18441986b..ed72ab6b04 100644 --- a/tutor/templates/local/docker-compose.prod.yml +++ b/tutor/templates/local/docker-compose.prod.yml @@ -1,36 +1,69 @@ -version: "3.7" services: - {% if RUN_CADDY %} - # Web proxy for SSL termination + # Web proxy for load balancing and SSL termination caddy: image: {{ DOCKER_IMAGE_CADDY }} restart: unless-stopped ports: - - "80:80" - {% if ENABLE_HTTPS %}- "443:443"{% endif %} + - "{{ CADDY_HTTP_PORT }}:80" + {% if ENABLE_HTTPS and ENABLE_WEB_PROXY %} + - "443:443" + # include support for http/3 + - "443:443/udp" + {% endif %} + environment: + default_site_port: "{% if not ENABLE_HTTPS or not ENABLE_WEB_PROXY %}:80{% endif %}" volumes: - ../apps/caddy/Caddyfile:/etc/caddy/Caddyfile:ro - {% if ENABLE_HTTPS %}- ../../data/caddy:/data{% endif %} - {% endif %} - - # Web server - nginx: - image: {{ DOCKER_IMAGE_NGINX }} - restart: unless-stopped - {% if not RUN_CADDY %} - ports: - - "{{ NGINX_HTTP_PORT }}:80" - {% endif %} - {% if RUN_CADDY and not ENABLE_HTTPS %} + {% if ENABLE_HTTPS and ENABLE_WEB_PROXY %}- ../../data/caddy:/data{% endif %} + {% if not ENABLE_HTTPS %} networks: default: - # These aliases are for internal communication between containers when running locally with *.local.overhang.io hostnames. 
+ # These aliases are for internal communication between containers when running locally + # with *.local.edly.io hostnames. aliases: - "{{ LMS_HOST }}" - {{ patch("local-docker-compose-nginx-aliases")|indent(10) }} + {{ patch("local-docker-compose-caddy-aliases")|indent(10) }} {% endif %} + + ############# LMS and CMS workers + lms-worker: + image: {{ DOCKER_IMAGE_OPENEDX }} + environment: + SERVICE_VARIANT: lms + DJANGO_SETTINGS_MODULE: lms.envs.tutor.production + command: {% for value in iter_lms_celery_parameters() %} + - "{{value}}"{% endfor %} + restart: unless-stopped + volumes: + - ../apps/openedx/settings/lms:/openedx/edx-platform/lms/envs/tutor:ro + - ../apps/openedx/settings/cms:/openedx/edx-platform/cms/envs/tutor:ro + - ../apps/openedx/config:/openedx/config:ro + - ../../data/lms:/openedx/data + - ../../data/openedx-media:/openedx/media + {%- for mount in iter_mounts(MOUNTS, "openedx", "lms-worker") %} + - {{ mount }} + {%- endfor %} + depends_on: + - lms + + cms-worker: + image: {{ DOCKER_IMAGE_OPENEDX }} + environment: + SERVICE_VARIANT: cms + DJANGO_SETTINGS_MODULE: cms.envs.tutor.production + command: {% for value in iter_lms_celery_parameters() %} + - "{{value}}"{% endfor %} + restart: unless-stopped volumes: - - ../apps/nginx:/etc/nginx/conf.d/:ro - depends_on: {{ [("lms", RUN_LMS), ("cms", RUN_CMS)]|list_if }} + - ../apps/openedx/settings/lms:/openedx/edx-platform/lms/envs/tutor:ro + - ../apps/openedx/settings/cms:/openedx/edx-platform/cms/envs/tutor:ro + - ../apps/openedx/config:/openedx/config:ro + - ../../data/cms:/openedx/data + - ../../data/openedx-media:/openedx/media + {%- for mount in iter_mounts(MOUNTS, "openedx", "cms-worker") %} + - {{ mount }} + {%- endfor %} + depends_on: + - cms - {{ patch("local-docker-compose-prod-services")|indent(2) }} \ No newline at end of file + {{ patch("local-docker-compose-prod-services")|indent(2) }} diff --git a/tutor/templates/local/docker-compose.yml b/tutor/templates/local/docker-compose.yml index 
1d0de011e5..f4bfeaad76 100644 --- a/tutor/templates/local/docker-compose.yml +++ b/tutor/templates/local/docker-compose.yml @@ -1,160 +1,154 @@ -version: "3.7" services: + # Set bind-mounted folder ownership + permissions: + image: {{ DOCKER_IMAGE_PERMISSIONS }} + restart: on-failure + entrypoint: [] + command: ["sh", "/usr/local/bin/setowners.sh"] + environment: + OPENEDX_USER_ID: "1000" + volumes: + # Command script + - ../apps/permissions/setowners.sh:/usr/local/bin/setowners.sh:ro + # Bind-mounted volumes to set ownership + - ../../data/lms:/mounts/lms + - ../../data/cms:/mounts/cms + - ../../data/openedx-media:/mounts/openedx + {% if RUN_MONGODB %}- ../../data/mongodb:/mounts/mongodb{% endif %} + {% if RUN_MYSQL %}- ../../data/mysql:/mounts/mysql{% endif %} + {% if RUN_ELASTICSEARCH %}- ../../data/elasticsearch:/mounts/elasticsearch{% endif %} + {% if RUN_REDIS %}- ../../data/redis:/mounts/redis{% endif %} + {{ patch("local-docker-compose-permissions-volumes")|indent(6) }} + ############# External services - {% if RUN_MONGODB %} + {% if RUN_MONGODB -%} mongodb: image: {{ DOCKER_IMAGE_MONGODB }} # Use WiredTiger in all environments, just like at edx.org - command: mongod --smallfiles --nojournal --storageEngine wiredTiger + command: mongod --storageEngine wiredTiger restart: unless-stopped + user: "999:999" volumes: - ../../data/mongodb:/data/db - {% endif %} + depends_on: + - permissions + {%- endif %} - {% if RUN_MYSQL %} + {% if RUN_MYSQL -%} mysql: image: {{ DOCKER_IMAGE_MYSQL }} - command: mysqld --character-set-server=utf8 --collation-server=utf8_general_ci + command: > + mysqld + --character-set-server=utf8mb4 + --collation-server=utf8mb4_unicode_ci + --binlog-expire-logs-seconds=259200 + --mysql-native-password=ON restart: unless-stopped + user: "999:999" volumes: - ../../data/mysql:/var/lib/mysql environment: MYSQL_ROOT_PASSWORD: "{{ MYSQL_ROOT_PASSWORD }}" - {% endif %} + {%- endif %} - {% if RUN_ELASTICSEARCH %} + {% if RUN_ELASTICSEARCH -%} 
elasticsearch: image: {{ DOCKER_IMAGE_ELASTICSEARCH }} - command: ["elasticsearch", "-Xms{{ ELASTICSEARCH_HEAP_SIZE }}", "-Xmx{{ ELASTICSEARCH_HEAP_SIZE }}", "--cluster.name=openedx", "--bootstrap.mlockall=true"] + environment: + - cluster.name=openedx + - bootstrap.memory_lock=true + - discovery.type=single-node + - "ES_JAVA_OPTS=-Xms{{ ELASTICSEARCH_HEAP_SIZE }} -Xmx{{ ELASTICSEARCH_HEAP_SIZE }}" ulimits: memlock: soft: -1 hard: -1 restart: unless-stopped + user: "1000:1000" volumes: - ../../data/elasticsearch:/usr/share/elasticsearch/data - {% endif %} + depends_on: + - permissions + {%- endif %} - {% if RUN_REDIS %} + {% if RUN_REDIS -%} redis: image: {{ DOCKER_IMAGE_REDIS }} + working_dir: /openedx/redis/data + user: "1000:1000" volumes: - - ../../env/redis/redis.conf:/openedx/redis/config/redis.conf:ro + - ../apps/redis/redis.conf:/openedx/redis/config/redis.conf:ro - ../../data/redis:/openedx/redis/data command: redis-server /openedx/redis/config/redis.conf restart: unless-stopped - {% endif %} + depends_on: + - permissions + {%- endif %} - {% if RUN_SMTP %} + {% if RUN_SMTP -%} smtp: image: {{ DOCKER_IMAGE_SMTP }} restart: unless-stopped - {% endif %} - - ############# Forum - - {% if RUN_FORUM %} - forum: - image: {{ DOCKER_IMAGE_FORUM }} + user: "100:101" environment: - SEARCH_SERVER: "{{ ELASTICSEARCH_SCHEME }}://{{ ELASTICSEARCH_HOST }}:{{ ELASTICSEARCH_PORT }}" - MONGODB_AUTH: "{% if MONGODB_USERNAME and MONGODB_PASSWORD %}{{ MONGODB_USERNAME}}:{{ MONGODB_PASSWORD }}@{% endif %}" - MONGODB_HOST: "{{ MONGODB_HOST }}" - MONGODB_PORT: "{{ MONGODB_PORT }}" - restart: unless-stopped - depends_on: {{ [("elasticsearch", RUN_ELASTICSEARCH), ("mongodb", RUN_MONGODB)]|list_if }} - {% endif %} + HOSTNAME: "{{ LMS_HOST }}" + {%- endif %} ############# LMS and CMS - {% if RUN_LMS %} lms: image: {{ DOCKER_IMAGE_OPENEDX }} environment: SERVICE_VARIANT: lms + DJANGO_SETTINGS_MODULE: lms.envs.tutor.production UWSGI_WORKERS: {{ OPENEDX_LMS_UWSGI_WORKERS }} - SETTINGS: 
${EDX_PLATFORM_SETTINGS:-tutor.production} restart: unless-stopped volumes: - - ../apps/openedx/settings/lms/:/openedx/edx-platform/lms/envs/tutor/:ro - - ../apps/openedx/settings/cms/:/openedx/edx-platform/cms/envs/tutor/:ro - - ../apps/openedx/config/:/openedx/config/:ro + - ../apps/openedx/settings/lms:/openedx/edx-platform/lms/envs/tutor:ro + - ../apps/openedx/settings/cms:/openedx/edx-platform/cms/envs/tutor:ro + - ../apps/openedx/config:/openedx/config:ro + - ../apps/openedx/uwsgi.ini:/openedx/uwsgi.ini:ro - ../../data/lms:/openedx/data - ../../data/openedx-media:/openedx/media + {%- for mount in iter_mounts(MOUNTS, "openedx", "lms") %} + - {{ mount }} + {%- endfor %} depends_on: + - permissions {% if RUN_MYSQL %}- mysql{% endif %} {% if RUN_ELASTICSEARCH %}- elasticsearch{% endif %} - {% if RUN_FORUM %}- forum{% endif %} {% if RUN_MONGODB %}- mongodb{% endif %} {% if RUN_REDIS %}- redis{% endif %} {% if RUN_SMTP %}- smtp{% endif %} {{ patch("local-docker-compose-lms-dependencies")|indent(6) }} - {% endif %} - {% if RUN_CMS %} cms: image: {{ DOCKER_IMAGE_OPENEDX }} environment: SERVICE_VARIANT: cms + DJANGO_SETTINGS_MODULE: cms.envs.tutor.production UWSGI_WORKERS: {{ OPENEDX_CMS_UWSGI_WORKERS }} - SETTINGS: ${EDX_PLATFORM_SETTINGS:-tutor.production} restart: unless-stopped volumes: - - ../apps/openedx/settings/lms/:/openedx/edx-platform/lms/envs/tutor/:ro - - ../apps/openedx/settings/cms/:/openedx/edx-platform/cms/envs/tutor/:ro - - ../apps/openedx/config/:/openedx/config/:ro + - ../apps/openedx/settings/lms:/openedx/edx-platform/lms/envs/tutor:ro + - ../apps/openedx/settings/cms:/openedx/edx-platform/cms/envs/tutor:ro + - ../apps/openedx/config:/openedx/config:ro + - ../apps/openedx/uwsgi.ini:/openedx/uwsgi.ini:ro - ../../data/cms:/openedx/data - ../../data/openedx-media:/openedx/media + {%- for mount in iter_mounts(MOUNTS, "openedx", "cms") %} + - {{ mount }} + {%- endfor %} depends_on: + - permissions + - lms {% if RUN_MYSQL %}- mysql{% endif %} {% if 
RUN_ELASTICSEARCH %}- elasticsearch{% endif %} {% if RUN_MONGODB %}- mongodb{% endif %} {% if RUN_REDIS %}- redis{% endif %} {% if RUN_SMTP %}- smtp{% endif %} - {% if RUN_LMS %}- lms{% endif %} {{ patch("local-docker-compose-cms-dependencies")|indent(6) }} - {% endif %} - - ############# LMS and CMS workers - - {% if RUN_LMS %} - lms-worker: - image: {{ DOCKER_IMAGE_OPENEDX }} - environment: - SERVICE_VARIANT: lms - SETTINGS: ${EDX_PLATFORM_SETTINGS:-tutor.production} - C_FORCE_ROOT: "1" # run celery tasks as root #nofear - command: celery worker --app=cms.celery --loglevel=info --hostname=edx.lms.core.default.%%h --maxtasksperchild 100 --exclude-queues=edx.cms.core.default - restart: unless-stopped - volumes: - - ../apps/openedx/settings/lms/:/openedx/edx-platform/lms/envs/tutor/:ro - - ../apps/openedx/settings/cms/:/openedx/edx-platform/cms/envs/tutor/:ro - - ../apps/openedx/config/:/openedx/config/:ro - - ../../data/lms:/openedx/data - - ../../data/openedx-media:/openedx/media - depends_on: - - lms - {% endif %} - - {% if RUN_CMS %} - cms-worker: - image: {{ DOCKER_IMAGE_OPENEDX }} - environment: - SERVICE_VARIANT: cms - SETTINGS: ${EDX_PLATFORM_SETTINGS:-tutor.production} - C_FORCE_ROOT: "1" # run celery tasks as root #nofear - command: celery worker --app=cms.celery --loglevel=info --hostname=edx.cms.core.default.%%h --maxtasksperchild 100 --exclude-queues=edx.lms.core.default - restart: unless-stopped - volumes: - - ../apps/openedx/settings/lms/:/openedx/edx-platform/lms/envs/tutor/:ro - - ../apps/openedx/settings/cms/:/openedx/edx-platform/cms/envs/tutor/:ro - - ../apps/openedx/config/:/openedx/config/:ro - - ../../data/cms:/openedx/data - - ../../data/openedx-media:/openedx/media - depends_on: - - cms - {% endif %} {{ patch("local-docker-compose-services")|indent(2) }} diff --git a/tutor/types.py b/tutor/types.py new file mode 100644 index 0000000000..c7813d4b38 --- /dev/null +++ b/tutor/types.py @@ -0,0 +1,54 @@ +from __future__ import annotations + +# 
The Tutor plugin system is licensed under the terms of the Apache 2.0 license. +__license__ = "Apache 2.0" + +import typing as t + +from typing_extensions import TypeAlias + +from . import exceptions + +ConfigValue: TypeAlias = t.Union[ + str, + float, + None, + bool, + t.List[str], + t.List[t.Any], + t.Dict[str, t.Any], + t.Dict[t.Any, t.Any], +] + +#: Type alias for the user configuration. +Config: TypeAlias = t.Dict[str, ConfigValue] + + +def cast_config(config: t.Any) -> Config: + if not isinstance(config, dict): + raise exceptions.TutorError( + f"Invalid configuration: expected dict, got {config.__class__}" + ) + for key in config.keys(): + if not isinstance(key, str): + raise exceptions.TutorError( + f"Invalid configuration: expected str, got {key.__class__} for key '{key}'" + ) + return config + + +T = t.TypeVar("T") + + +def get_typed( + config: dict[str, t.Any], + key: str, + expected_type: type[T], + default: t.Optional[T] = None, +) -> T: + value = config.get(key, default) + if not isinstance(value, expected_type): + raise exceptions.TutorError( + f"Invalid config entry: expected {expected_type.__name__}, got {value.__class__} for key '{key}'" + ) + return value diff --git a/tutor/utils.py b/tutor/utils.py index f7b859f013..cdb082f38a 100644 --- a/tutor/utils.py +++ b/tutor/utils.py @@ -2,21 +2,27 @@ import json import os import random +import re +import shlex import shutil import string import struct import subprocess import sys +from functools import lru_cache +from typing import List, Tuple +from urllib.error import URLError +from urllib.request import urlopen import click -from Crypto.PublicKey import RSA from Crypto.Protocol.KDF import bcrypt, bcrypt_check +from Crypto.PublicKey import RSA +from Crypto.PublicKey.RSA import RsaKey -from . import exceptions -from . import fmt +from . import exceptions, fmt -def encrypt(text): +def encrypt(text: str) -> str: """ Encrypt some textual content with bcrypt. 
https://pycryptodome.readthedocs.io/en/latest/src/protocol/kdf.html#bcrypt @@ -26,7 +32,7 @@ def encrypt(text): return bcrypt(text.encode(), 12).decode() -def verify_encrypted(encrypted, text): +def verify_encrypted(encrypted: str, text: str) -> bool: """ Return True/False if the encrypted content corresponds to the unencrypted text. """ @@ -37,26 +43,40 @@ def verify_encrypted(encrypted, text): return False -def ensure_file_directory_exists(path): +def ensure_file_directory_exists(path: str) -> None: """ Create file's base directory if it does not exist. """ - directory = os.path.dirname(path) - if not os.path.exists(directory): - os.makedirs(directory) + if os.path.isdir(path): + raise exceptions.TutorError( + f"Attempting to write to a file, but a directory with the same name already exists: {path}" + ) + ensure_directory_exists(os.path.dirname(path)) + + +def ensure_directory_exists(path: str) -> None: + """ + Create directory if it does not exist. + """ + if os.path.isfile(path): + raise exceptions.TutorError( + f"Attempting to create a directory, but a file with the same name already exists: {path}" + ) + if not os.path.exists(path): + os.makedirs(path) -def random_string(length): +def random_string(length: int) -> str: return "".join( [random.choice(string.ascii_letters + string.digits) for _ in range(length)] ) -def list_if(services): +def list_if(services: List[Tuple[str, bool]]) -> str: return json.dumps([service[0] for service in services if service[1]]) -def common_domain(d1, d2): +def common_domain(d1: str, d2: str) -> str: """ Return the common domain between two domain names. @@ -73,7 +93,7 @@ def common_domain(d1, d2): return ".".join(common[::-1]) -def reverse_host(domain): +def reverse_host(domain: str) -> str: """ Return the reverse domain name, java-style. 
@@ -82,7 +102,7 @@ def reverse_host(domain): return ".".join(domain.split(".")[::-1]) -def rsa_private_key(bits=2048): +def rsa_private_key(bits: int = 2048) -> str: """ Export an RSA private key in PEM format. """ @@ -90,43 +110,34 @@ def rsa_private_key(bits=2048): return key.export_key().decode() -def rsa_import_key(key): +def rsa_import_key(key: str) -> RsaKey: """ Import PEM-formatted RSA key and return the corresponding object. """ return RSA.import_key(key.encode()) -def long_to_base64(n): +def long_to_base64(n: int) -> str: """ Borrowed from jwkest.__init__ """ - def long2intarr(long_int): - _bytes = [] + def long2intarr(long_int: int) -> List[int]: + _bytes: List[int] = [] while long_int: long_int, r = divmod(long_int, 256) _bytes.insert(0, r) return _bytes bys = long2intarr(n) - data = struct.pack("%sB" % len(bys), *bys) + data = struct.pack(f"{len(bys)}B", *bys) if not data: - data = "\x00" + data = b"\x00" s = base64.urlsafe_b64encode(data).rstrip(b"=") return s.decode("ascii") -def walk_files(path): - """ - Iterate on file paths located in directory. - """ - for dirpath, _, filenames in os.walk(path): - for filename in filenames: - yield os.path.join(dirpath, filename) - - -def is_root(): +def is_root() -> bool: """ Check whether tutor is being run as root/sudo. """ @@ -136,24 +147,25 @@ def is_root(): return get_user_id() == 0 -def get_user_id(): +def get_user_id() -> int: """ Portable way to get user ID. Note: I have no idea if it actually works on windows... """ - if sys.platform == "win32": - # Don't even try - return 0 - return os.getuid() + if sys.platform != "win32": + return os.getuid() + + # Don't even try for windows + return 0 -def docker_run(*command): +def docker_run(*command: str) -> int: args = ["run", "--rm"] if is_a_tty(): args.append("-it") return docker(*args, *command) -def docker(*command): +def docker(*command: str) -> int: if shutil.which("docker") is None: raise exceptions.TutorError( "docker is not installed. 
Please follow instructions from https://docs.docker.com/install/" @@ -161,15 +173,25 @@ def docker(*command): return execute("docker", *command) -def docker_compose(*command): - if shutil.which("docker-compose") is None: - raise exceptions.TutorError( - "docker-compose is not installed. Please follow instructions from https://docs.docker.com/compose/install/" - ) - return execute("docker-compose", *command) +@lru_cache(maxsize=None) +def is_docker_rootless() -> bool: + """ + A helper function to determine if Docker is running in rootless mode. + + - https://docs.docker.com/engine/security/rootless/ + """ + try: + results = subprocess.run(["docker", "info"], capture_output=True, check=True) + return "rootless" in results.stdout.decode() + except subprocess.CalledProcessError: + return False -def kubectl(*command): +def docker_compose(*command: str) -> int: + return execute("docker", "compose", *command) + + +def kubectl(*command: str) -> int: if shutil.which("kubectl") is None: raise exceptions.TutorError( "kubectl is not installed. Please follow instructions from https://kubernetes.io/docs/tasks/tools/install-kubectl/" @@ -177,16 +199,20 @@ def kubectl(*command): return execute("kubectl", *command) -def is_a_tty(): +def is_a_tty() -> bool: """ Return True if stdin is able to allocate a tty. 
Tty allocation sometimes cannot be enabled, for instance in cron jobs """ - return os.isatty(sys.stdin.fileno()) + return sys.stdin.isatty() + + +def execute(*command: str) -> int: + click.echo(fmt.command(shlex.join(command))) + return execute_silent(*command) -def execute(*command): - click.echo(fmt.command(" ".join(command))) +def execute_silent(*command: str) -> int: with subprocess.Popen(command) as p: try: result = p.wait(timeout=None) @@ -197,20 +223,146 @@ def execute(*command): except Exception as e: p.kill() p.wait() - raise exceptions.TutorError( - "Command failed: {}".format(" ".join(command)) - ) from e + raise exceptions.TutorError(f"Command failed: {' '.join(command)}") from e if result > 0: raise exceptions.TutorError( - "Command failed with status {}: {}".format(result, " ".join(command)) + f"Command failed with status {result}: {' '.join(command)}" ) + return result -def check_output(*command): - click.echo(fmt.command(" ".join(command))) +def check_output(*command: str) -> bytes: + literal_command = shlex.join(command) + click.echo(fmt.command(literal_command)) try: return subprocess.check_output(command) except Exception as e: + raise exceptions.TutorError(f"Command failed: {literal_command}") from e + + +def warn_macos_docker_memory() -> None: + try: + check_macos_docker_memory() + except exceptions.TutorError as e: + fmt.echo_alert( + f"""Could not verify sufficient RAM allocation in Docker: + + {e} + +Tutor may not work if Docker is configured with < 4 GB RAM. Please follow instructions from: + https://docs.tutor.edly.io/install.html""" + ) + + +def check_macos_docker_memory() -> None: + """ + Try to check that the RAM allocated to the Docker VM on macOS is at least 4 GB. + + Parse macOS Docker settings file from user directory and return the max + allocated memory. Will raise TutorError in case of parsing/loading error. 
+ """ + if sys.platform != "darwin": + return + + settings_path = os.path.expanduser( + "~/Library/Group Containers/group.com.docker/settings.json" + ) + + try: + with open(settings_path, encoding="utf-8") as fp: + data = json.load(fp) + memory_mib = int(data["memoryMiB"]) + except OSError as e: + raise exceptions.TutorError(f"Error accessing Docker settings file: {e}") from e + except json.JSONDecodeError as e: + raise exceptions.TutorError( + f"Error reading {settings_path}, invalid JSON: {e}" + ) from e + except ValueError as e: raise exceptions.TutorError( - "Command failed: {}".format(" ".join(command)) + f"Unexpected JSON data in {settings_path}: {e}" ) from e + except KeyError as e: + # Value is absent (Docker creates the file with the default setting of 2048 explicitly + # written in, so we shouldn't need to assume a default value here.) + raise exceptions.TutorError( + f"key 'memoryMiB' not found in {settings_path}" + ) from e + except (TypeError, OverflowError) as e: + # TypeError from open() indicates an encoding error + raise exceptions.TutorError( + f"Text encoding error in {settings_path}: {e}" + ) from e + + if memory_mib < 4096: + raise exceptions.TutorError( + f"Docker is configured to allocate {memory_mib} MiB RAM, less than the recommended {4096} MiB" + ) + + +def read_url(url: str) -> str: + """ + Read an index url, either remote (http/https) or local. 
+ """ + if is_http(url): + # web index + try: + response = urlopen(url) + content: str = response.read().decode() + return content + except URLError as e: + raise exceptions.TutorError(f"Request error: {e}") from e + except UnicodeDecodeError as e: + raise exceptions.TutorError( + f"Remote response must be encoded as utf8: {e}" + ) from e + try: + with open(url, encoding="utf8") as f: + # local file index + return f.read() + except FileNotFoundError as e: + raise exceptions.TutorError(f"File could not be found: {e}") from e + except UnicodeDecodeError as e: + raise exceptions.TutorError(f"File must be encoded as utf8: {e}") from e + + +def is_url(text: str) -> bool: + """ + Return true if the string points to a file on disk or a web URL. + """ + return os.path.isfile(text) or is_http(text) + + +def is_http(url: str) -> bool: + """ + Basic test to check whether a string is a web URL. Use only for basic use cases. + """ + return re.match(r"^https?://", url) is not None + + +def format_table(rows: List[Tuple[str, ...]], separator: str = "\t") -> str: + """ + Format a list of values as a tab-separated table. Column sizes are determined such + that row values are vertically aligned. + """ + formatted = "" + if not rows: + return formatted + columns_count = len(rows[0]) + # Determine each column size + col_sizes = [1] * columns_count + for row in rows: + for c, value in enumerate(row): + col_sizes[c] = max(col_sizes[c], len(value)) + # Print all values + for r, row in enumerate(rows): + for c, value in enumerate(row): + if c < len(col_sizes) - 1: + formatted += f"{value:{col_sizes[c]}}{separator}" + else: + # The last column is not left-justified + formatted += f"{value}" + if r < len(rows) - 1: + # Append EOL at all lines but the last one + formatted += "\n" + return formatted