diff --git a/.github/actions/verify-tests-count/action.yml b/.github/actions/verify-tests-count/action.yml
index 11afb7367180..576ffc751a65 100644
--- a/.github/actions/verify-tests-count/action.yml
+++ b/.github/actions/verify-tests-count/action.yml
@@ -7,7 +7,7 @@ runs:
shell: bash
run: |
echo "root_cms_unit_tests_count=$(pytest --disable-warnings --collect-only --ds=cms.envs.test cms/ -q | head -n -2 | wc -l)" >> $GITHUB_ENV
- echo "root_lms_unit_tests_count=$(pytest --disable-warnings --collect-only --ds=lms.envs.test lms/ openedx/ common/djangoapps/ xmodule/ pavelib/ -q | head -n -2 | wc -l)" >> $GITHUB_ENV
+ echo "root_lms_unit_tests_count=$(pytest --disable-warnings --collect-only --ds=lms.envs.test lms/ openedx/ common/djangoapps/ xmodule/ -q | head -n -2 | wc -l)" >> $GITHUB_ENV
- name: get GHA unit test paths
shell: bash
diff --git a/.github/workflows/pylint-checks.yml b/.github/workflows/pylint-checks.yml
index 3e3c87568cb4..c2e04fc191d3 100644
--- a/.github/workflows/pylint-checks.yml
+++ b/.github/workflows/pylint-checks.yml
@@ -22,7 +22,7 @@ jobs:
- module-name: openedx-2
path: "--django-settings-module=lms.envs.test openedx/core/djangoapps/geoinfo/ openedx/core/djangoapps/header_control/ openedx/core/djangoapps/heartbeat/ openedx/core/djangoapps/lang_pref/ openedx/core/djangoapps/models/ openedx/core/djangoapps/monkey_patch/ openedx/core/djangoapps/oauth_dispatch/ openedx/core/djangoapps/olx_rest_api/ openedx/core/djangoapps/password_policy/ openedx/core/djangoapps/plugin_api/ openedx/core/djangoapps/plugins/ openedx/core/djangoapps/profile_images/ openedx/core/djangoapps/programs/ openedx/core/djangoapps/safe_sessions/ openedx/core/djangoapps/schedules/ openedx/core/djangoapps/service_status/ openedx/core/djangoapps/session_inactivity_timeout/ openedx/core/djangoapps/signals/ openedx/core/djangoapps/site_configuration/ openedx/core/djangoapps/system_wide_roles/ openedx/core/djangoapps/theming/ openedx/core/djangoapps/user_api/ openedx/core/djangoapps/user_authn/ openedx/core/djangoapps/util/ openedx/core/djangoapps/verified_track_content/ openedx/core/djangoapps/video_config/ openedx/core/djangoapps/video_pipeline/ openedx/core/djangoapps/waffle_utils/ openedx/core/djangoapps/xblock/ openedx/core/djangoapps/xmodule_django/ openedx/core/tests/ openedx/features/ openedx/testing/ openedx/tests/ openedx/core/djangoapps/learner_pathway/ openedx/core/djangoapps/notifications/ openedx/core/djangoapps/staticfiles/ openedx/core/djangoapps/content_tagging/"
- module-name: common
- path: "--django-settings-module=lms.envs.test common pavelib"
+ path: "--django-settings-module=lms.envs.test common"
- module-name: cms
path: "--django-settings-module=cms.envs.test cms"
- module-name: xmodule
diff --git a/.github/workflows/unit-test-shards.json b/.github/workflows/unit-test-shards.json
index a96f6a9f67cb..3afd691daf58 100644
--- a/.github/workflows/unit-test-shards.json
+++ b/.github/workflows/unit-test-shards.json
@@ -259,15 +259,13 @@
"common-with-lms": {
"settings": "lms.envs.test",
"paths": [
- "common/djangoapps/",
- "pavelib/"
+ "common/djangoapps/"
]
},
"common-with-cms": {
"settings": "cms.envs.test",
"paths": [
- "common/djangoapps/",
- "pavelib/"
+ "common/djangoapps/"
]
},
"xmodule-with-lms": {
diff --git a/pavelib/__init__.py b/pavelib/__init__.py
deleted file mode 100644
index 3307f120714a..000000000000
--- a/pavelib/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-""" # lint-amnesty, pylint: disable=django-not-configured
-paver commands
-"""
-
-
-from . import js_test, quality
diff --git a/pavelib/js_test.py b/pavelib/js_test.py
deleted file mode 100644
index 1ab19271f473..000000000000
--- a/pavelib/js_test.py
+++ /dev/null
@@ -1,144 +0,0 @@
-"""
-Javascript test tasks
-"""
-
-
-import os
-import re
-import sys
-
-from paver.easy import cmdopts, needs, sh, task
-
-from pavelib.utils.envs import Env
-from pavelib.utils.test.suites import JestSnapshotTestSuite, JsTestSuite
-from pavelib.utils.timer import timed
-
-try:
- from pygments.console import colorize
-except ImportError:
- colorize = lambda color, text: text
-
-__test__ = False # do not collect
-
-
-@task
-@needs(
- 'pavelib.utils.test.utils.clean_reports_dir',
-)
-@cmdopts([
- ("suite=", "s", "Test suite to run"),
- ("mode=", "m", "dev or run"),
- ("coverage", "c", "Run test under coverage"),
- ("port=", "p", "Port to run test server on (dev mode only)"),
- ('skip-clean', 'C', 'skip cleaning repository before running tests'),
- ('skip_clean', None, 'deprecated in favor of skip-clean'),
-], share_with=["pavelib.utils.tests.utils.clean_reports_dir"])
-@timed
-def test_js(options):
- """
- Run the JavaScript tests
- """
- sh("npm clean-install")
- mode = getattr(options, 'mode', 'run')
- port = None
- skip_clean = getattr(options, 'skip_clean', False)
-
- if mode == 'run':
- suite = getattr(options, 'suite', 'all')
- coverage = getattr(options, 'coverage', False)
- elif mode == 'dev':
- suite = getattr(options, 'suite', None)
- coverage = False
- port = getattr(options, 'port', None)
- else:
- sys.stderr.write("Invalid mode. Please choose 'dev' or 'run'.")
- return
-
- if (suite != 'all') and (suite not in Env.JS_TEST_ID_KEYS):
- sys.stderr.write(
- "Unknown test suite. Please choose from ({suites})\n".format(
- suites=", ".join(Env.JS_TEST_ID_KEYS)
- )
- )
- return
-
- if suite != 'jest-snapshot':
- test_suite = JsTestSuite(suite, mode=mode, with_coverage=coverage, port=port, skip_clean=skip_clean)
- test_suite.run()
-
- if (suite == 'jest-snapshot') or (suite == 'all'): # lint-amnesty, pylint: disable=consider-using-in
- test_suite = JestSnapshotTestSuite('jest')
- test_suite.run()
-
-
-@task
-@cmdopts([
- ("suite=", "s", "Test suite to run"),
- ("coverage", "c", "Run test under coverage"),
-])
-@timed
-def test_js_run(options):
- """
- Run the JavaScript tests and print results to the console
- """
- options.mode = 'run'
- test_js(options)
-
-
-@task
-@cmdopts([
- ("suite=", "s", "Test suite to run"),
- ("port=", "p", "Port to run test server on"),
-])
-@timed
-def test_js_dev(options):
- """
- Run the JavaScript tests in your default browsers
- """
- options.mode = 'dev'
- test_js(options)
-
-
-@task
-@cmdopts([
- ("compare-branch=", "b", "Branch to compare against, defaults to origin/master"),
-], share_with=['coverage'])
-@timed
-def diff_coverage(options):
- """
- Build the diff coverage reports
- """
- sh("pip install -r requirements/edx/coverage.txt")
-
- compare_branch = options.get('compare_branch', 'origin/master')
-
- # Find all coverage XML files (both Python and JavaScript)
- xml_reports = []
-
- for filepath in Env.REPORT_DIR.walk():
- if bool(re.match(r'^coverage.*\.xml$', filepath.basename())):
- xml_reports.append(filepath)
-
- if not xml_reports:
- err_msg = colorize(
- 'red',
- "No coverage info found. Run `paver test` before running "
- "`paver coverage`.\n"
- )
- sys.stderr.write(err_msg)
- else:
- xml_report_str = ' '.join(xml_reports)
- diff_html_path = os.path.join(Env.REPORT_DIR, 'diff_coverage_combined.html')
-
- # Generate the diff coverage reports (HTML and console)
- # The --diff-range-notation parameter is a workaround for https://github.com/Bachmann1234/diff_cover/issues/153
- sh(
- "diff-cover {xml_report_str} --diff-range-notation '..' --compare-branch={compare_branch} "
- "--html-report {diff_html_path}".format(
- xml_report_str=xml_report_str,
- compare_branch=compare_branch,
- diff_html_path=diff_html_path,
- )
- )
-
- print("\n")
diff --git a/pavelib/paver_tests/__init__.py b/pavelib/paver_tests/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/pavelib/paver_tests/conftest.py b/pavelib/paver_tests/conftest.py
deleted file mode 100644
index 214a35e3fe85..000000000000
--- a/pavelib/paver_tests/conftest.py
+++ /dev/null
@@ -1,22 +0,0 @@
-"""
-Pytest fixtures for the pavelib unit tests.
-"""
-
-
-import os
-from shutil import rmtree
-
-import pytest
-
-from pavelib.utils.envs import Env
-
-
-@pytest.fixture(autouse=True, scope='session')
-def delete_quality_junit_xml():
- """
- Delete the JUnit XML results files for quality check tasks run during the
- unit tests.
- """
- yield
- if os.path.exists(Env.QUALITY_DIR):
- rmtree(Env.QUALITY_DIR, ignore_errors=True)
diff --git a/pavelib/paver_tests/pylint_test_list.json b/pavelib/paver_tests/pylint_test_list.json
deleted file mode 100644
index d0e8b43aa93d..000000000000
--- a/pavelib/paver_tests/pylint_test_list.json
+++ /dev/null
@@ -1,6 +0,0 @@
-[
- "foo/bar.py:192: [C0111(missing-docstring), Bliptv] Missing docstring",
- "foo/bar/test.py:74: [C0322(no-space-before-operator)] Operator not preceded by a space",
- "ugly/string/test.py:16: [C0103(invalid-name)] Invalid name \"whats up\" for type constant (should match (([A-Z_][A-Z0-9_]*)|(__.*__)|log|urlpatterns)$)",
- "multiple/lines/test.py:72: [C0322(no-space-before-operator)] Operator not preceded by a space\nFOO_BAR='pipeline.storage.NonPackagingPipelineStorage'\n ^"
-]
diff --git a/pavelib/paver_tests/test_eslint.py b/pavelib/paver_tests/test_eslint.py
deleted file mode 100644
index 5802d7d0d21b..000000000000
--- a/pavelib/paver_tests/test_eslint.py
+++ /dev/null
@@ -1,54 +0,0 @@
-"""
-Tests for Paver's ESLint tasks.
-"""
-
-
-import unittest
-from unittest.mock import patch
-
-import pytest
-from paver.easy import BuildFailure, call_task
-
-import pavelib.quality
-
-
-class TestPaverESLint(unittest.TestCase):
- """
- For testing run_eslint
- """
-
- def setUp(self):
- super().setUp()
-
- # Mock the paver @needs decorator
- self._mock_paver_needs = patch.object(pavelib.quality.run_eslint, 'needs').start()
- self._mock_paver_needs.return_value = 0
-
- # Mock shell commands
- patcher = patch('pavelib.quality.sh')
- self._mock_paver_sh = patcher.start()
-
- # Cleanup mocks
- self.addCleanup(patcher.stop)
- self.addCleanup(self._mock_paver_needs.stop)
-
- @patch.object(pavelib.quality, '_write_metric')
- @patch.object(pavelib.quality, '_prepare_report_dir')
- @patch.object(pavelib.quality, '_get_count_from_last_line')
- def test_eslint_violation_number_not_found(self, mock_count, mock_report_dir, mock_write_metric): # pylint: disable=unused-argument
- """
- run_eslint encounters an error parsing the eslint output log
- """
- mock_count.return_value = None
- with pytest.raises(BuildFailure):
- call_task('pavelib.quality.run_eslint', args=[''])
-
- @patch.object(pavelib.quality, '_write_metric')
- @patch.object(pavelib.quality, '_prepare_report_dir')
- @patch.object(pavelib.quality, '_get_count_from_last_line')
- def test_eslint_vanilla(self, mock_count, mock_report_dir, mock_write_metric): # pylint: disable=unused-argument
- """
- eslint finds violations, but a limit was not set
- """
- mock_count.return_value = 1
- pavelib.quality.run_eslint("")
diff --git a/pavelib/paver_tests/test_js_test.py b/pavelib/paver_tests/test_js_test.py
deleted file mode 100644
index 4b165a156674..000000000000
--- a/pavelib/paver_tests/test_js_test.py
+++ /dev/null
@@ -1,148 +0,0 @@
-"""Unit tests for the Paver JavaScript testing tasks."""
-
-from unittest.mock import patch
-
-import ddt
-from paver.easy import call_task
-
-import pavelib.js_test
-from pavelib.utils.envs import Env
-
-from .utils import PaverTestCase
-
-
-@ddt.ddt
-class TestPaverJavaScriptTestTasks(PaverTestCase):
- """
- Test the Paver JavaScript testing tasks.
- """
-
- EXPECTED_DELETE_JAVASCRIPT_REPORT_COMMAND = 'find {platform_root}/reports/javascript -type f -delete'
- EXPECTED_KARMA_OPTIONS = (
- "{config_file} "
- "--single-run={single_run} "
- "--capture-timeout=60000 "
- "--junitreportpath="
- "{platform_root}/reports/javascript/javascript_xunit-{suite}.xml "
- "--browsers={browser}"
- )
- EXPECTED_COVERAGE_OPTIONS = (
- ' --coverage --coveragereportpath={platform_root}/reports/javascript/coverage-{suite}.xml'
- )
-
- EXPECTED_COMMANDS = [
- "make report_dir",
- 'git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads',
- "find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \\;",
- 'rm -rf test_root/log/auto_screenshots/*',
- "rm -rf /tmp/mako_[cl]ms",
- ]
-
- def setUp(self):
- super().setUp()
-
- # Mock the paver @needs decorator
- self._mock_paver_needs = patch.object(pavelib.js_test.test_js, 'needs').start()
- self._mock_paver_needs.return_value = 0
-
- # Cleanup mocks
- self.addCleanup(self._mock_paver_needs.stop)
-
- @ddt.data(
- [""],
- ["--coverage"],
- ["--suite=lms"],
- ["--suite=lms --coverage"],
- )
- @ddt.unpack
- def test_test_js_run(self, options_string):
- """
- Test the "test_js_run" task.
- """
- options = self.parse_options_string(options_string)
- self.reset_task_messages()
- call_task("pavelib.js_test.test_js_run", options=options)
- self.verify_messages(options=options, dev_mode=False)
-
- @ddt.data(
- [""],
- ["--port=9999"],
- ["--suite=lms"],
- ["--suite=lms --port=9999"],
- )
- @ddt.unpack
- def test_test_js_dev(self, options_string):
- """
- Test the "test_js_run" task.
- """
- options = self.parse_options_string(options_string)
- self.reset_task_messages()
- call_task("pavelib.js_test.test_js_dev", options=options)
- self.verify_messages(options=options, dev_mode=True)
-
- def parse_options_string(self, options_string):
- """
- Parse a string containing the options for a test run
- """
- parameters = options_string.split(" ")
- suite = "all"
- if "--system=lms" in parameters:
- suite = "lms"
- elif "--system=common" in parameters:
- suite = "common"
- coverage = "--coverage" in parameters
- port = None
- if "--port=9999" in parameters:
- port = 9999
- return {
- "suite": suite,
- "coverage": coverage,
- "port": port,
- }
-
- def verify_messages(self, options, dev_mode):
- """
- Verify that the messages generated when running tests are as expected
- for the specified options and dev_mode.
- """
- is_coverage = options['coverage']
- port = options['port']
- expected_messages = []
- suites = Env.JS_TEST_ID_KEYS if options['suite'] == 'all' else [options['suite']]
-
- expected_messages.extend(self.EXPECTED_COMMANDS)
- if not dev_mode and not is_coverage:
- expected_messages.append(self.EXPECTED_DELETE_JAVASCRIPT_REPORT_COMMAND.format(
- platform_root=self.platform_root
- ))
-
- command_template = (
- 'node --max_old_space_size=4096 node_modules/.bin/karma start {options}'
- )
-
- for suite in suites:
- # Karma test command
- if suite != 'jest-snapshot':
- karma_config_file = Env.KARMA_CONFIG_FILES[Env.JS_TEST_ID_KEYS.index(suite)]
- expected_test_tool_command = command_template.format(
- options=self.EXPECTED_KARMA_OPTIONS.format(
- config_file=karma_config_file,
- single_run='false' if dev_mode else 'true',
- suite=suite,
- platform_root=self.platform_root,
- browser=Env.KARMA_BROWSER,
- ),
- )
- if is_coverage:
- expected_test_tool_command += self.EXPECTED_COVERAGE_OPTIONS.format(
- platform_root=self.platform_root,
- suite=suite
- )
- if port:
- expected_test_tool_command += f" --port={port}"
- else:
- expected_test_tool_command = 'jest'
-
- expected_messages.append(expected_test_tool_command)
-
- assert self.task_messages == expected_messages
diff --git a/pavelib/paver_tests/test_paver_quality.py b/pavelib/paver_tests/test_paver_quality.py
deleted file mode 100644
index 36d6dd59e172..000000000000
--- a/pavelib/paver_tests/test_paver_quality.py
+++ /dev/null
@@ -1,156 +0,0 @@
-""" # lint-amnesty, pylint: disable=django-not-configured
-Tests for paver quality tasks
-"""
-
-
-import os
-import shutil # lint-amnesty, pylint: disable=unused-import
-import tempfile
-import textwrap
-import unittest
-from unittest.mock import MagicMock, mock_open, patch # lint-amnesty, pylint: disable=unused-import
-
-import pytest # lint-amnesty, pylint: disable=unused-import
-from ddt import data, ddt, file_data, unpack # lint-amnesty, pylint: disable=unused-import
-from path import Path as path
-from paver.easy import BuildFailure # lint-amnesty, pylint: disable=unused-import
-
-import pavelib.quality
-from pavelib.paver_tests.utils import PaverTestCase, fail_on_eslint # lint-amnesty, pylint: disable=unused-import
-
-OPEN_BUILTIN = 'builtins.open'
-
-
-@ddt
-class TestPaverQualityViolations(unittest.TestCase):
- """
- For testing the paver violations-counting tasks
- """
- def setUp(self):
- super().setUp()
- self.f = tempfile.NamedTemporaryFile(delete=False) # lint-amnesty, pylint: disable=consider-using-with
- self.f.close()
- self.addCleanup(os.remove, self.f.name)
-
- def test_pep8_parser(self):
- with open(self.f.name, 'w') as f:
- f.write("hello\nhithere")
- num = len(pavelib.quality._pep8_violations(f.name)) # pylint: disable=protected-access
- assert num == 2
-
-
-class TestPaverReportViolationsCounts(unittest.TestCase):
- """
- For testing utility functions for getting counts from reports for
- run_eslint and run_xsslint.
- """
-
- def setUp(self):
- super().setUp()
-
- # Temporary file infrastructure
- self.f = tempfile.NamedTemporaryFile(delete=False) # lint-amnesty, pylint: disable=consider-using-with
- self.f.close()
-
- # Cleanup various mocks and tempfiles
- self.addCleanup(os.remove, self.f.name)
-
- def test_get_eslint_violations_count(self):
- with open(self.f.name, 'w') as f:
- f.write("3000 violations found")
- actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access
- assert actual_count == 3000
-
- def test_get_eslint_violations_no_number_found(self):
- with open(self.f.name, 'w') as f:
- f.write("Not expected string regex")
- actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access
- assert actual_count is None
-
- def test_get_eslint_violations_count_truncated_report(self):
- """
- A truncated report (i.e. last line is just a violation)
- """
- with open(self.f.name, 'w') as f:
- f.write("foo/bar/js/fizzbuzz.js: line 45, col 59, Missing semicolon.")
- actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "eslint") # pylint: disable=protected-access
- assert actual_count is None
-
- def test_generic_value(self):
- """
- Default behavior is to look for an integer appearing at head of line
- """
- with open(self.f.name, 'w') as f:
- f.write("5.777 good to see you")
- actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "foo") # pylint: disable=protected-access
- assert actual_count == 5
-
- def test_generic_value_none_found(self):
- """
- Default behavior is to look for an integer appearing at head of line
- """
- with open(self.f.name, 'w') as f:
- f.write("hello 5.777 good to see you")
- actual_count = pavelib.quality._get_count_from_last_line(self.f.name, "foo") # pylint: disable=protected-access
- assert actual_count is None
-
- def test_get_xsslint_counts_happy(self):
- """
- Test happy path getting violation counts from xsslint report.
- """
- report = textwrap.dedent("""
- test.html: 30:53: javascript-jquery-append: $('#test').append(print_tos);
-
- javascript-concat-html: 310 violations
- javascript-escape: 7 violations
-
- 2608 violations total
- """)
- with open(self.f.name, 'w') as f:
- f.write(report)
- counts = pavelib.quality._get_xsslint_counts(self.f.name) # pylint: disable=protected-access
- self.assertDictEqual(counts, {
- 'rules': {
- 'javascript-concat-html': 310,
- 'javascript-escape': 7,
- },
- 'total': 2608,
- })
-
- def test_get_xsslint_counts_bad_counts(self):
- """
- Test getting violation counts from truncated and malformed xsslint
- report.
- """
- report = textwrap.dedent("""
- javascript-concat-html: violations
- """)
- with open(self.f.name, 'w') as f:
- f.write(report)
- counts = pavelib.quality._get_xsslint_counts(self.f.name) # pylint: disable=protected-access
- self.assertDictEqual(counts, {
- 'rules': {},
- 'total': None,
- })
-
-
-class TestPrepareReportDir(unittest.TestCase):
- """
- Tests the report directory preparation
- """
-
- def setUp(self):
- super().setUp()
- self.test_dir = tempfile.mkdtemp()
- self.test_file = tempfile.NamedTemporaryFile(delete=False, dir=self.test_dir) # lint-amnesty, pylint: disable=consider-using-with
- self.addCleanup(os.removedirs, self.test_dir)
-
- def test_report_dir_with_files(self):
- assert os.path.exists(self.test_file.name)
- pavelib.quality._prepare_report_dir(path(self.test_dir)) # pylint: disable=protected-access
- assert not os.path.exists(self.test_file.name)
-
- def test_report_dir_without_files(self):
- os.remove(self.test_file.name)
- pavelib.quality._prepare_report_dir(path(self.test_dir)) # pylint: disable=protected-access
- assert os.listdir(path(self.test_dir)) == []
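
The tests above fully pin down the parsing contract of the deleted pavelib.quality._get_count_from_last_line helper. A hedged reconstruction that satisfies exactly those cases follows; the helper's own source is not in this diff, so this is inferred behavior, not the original implementation:

    import re

    def get_count_from_last_line(report_file, file_type):
        """Return the leading integer from the last non-blank line, or None."""
        with open(report_file) as report:
            lines = [line for line in report.read().splitlines() if line.strip()]
        if not lines:
            return None
        last_line = lines[-1]
        if file_type == "eslint":
            # e.g. "3000 violations found" -> 3000
            match = re.search(r"^(\d+) violations", last_line)
        else:
            # Default: integer at the head of the line, "5.777 ..." -> 5
            match = re.search(r"^(\d+)", last_line)
        return int(match.group(1)) if match else None
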
diff --git a/pavelib/paver_tests/test_pii_check.py b/pavelib/paver_tests/test_pii_check.py
deleted file mode 100644
index d034360acde0..000000000000
--- a/pavelib/paver_tests/test_pii_check.py
+++ /dev/null
@@ -1,79 +0,0 @@
-"""
-Tests for Paver's PII checker task.
-"""
-
-import shutil
-import tempfile
-import unittest
-from unittest.mock import patch
-
-from path import Path as path
-from paver.easy import call_task, BuildFailure
-
-import pavelib.quality
-from pavelib.utils.envs import Env
-
-
-class TestPaverPIICheck(unittest.TestCase):
- """
- For testing the paver run_pii_check task
- """
- def setUp(self):
- super().setUp()
- self.report_dir = path(tempfile.mkdtemp())
- self.addCleanup(shutil.rmtree, self.report_dir)
-
- @patch.object(pavelib.quality.run_pii_check, 'needs')
- @patch('pavelib.quality.sh')
- def test_pii_check_report_dir_override(self, mock_paver_sh, mock_needs):
- """
- run_pii_check succeeds with proper report dir
- """
- # Make the expected stdout files.
- cms_stdout_report = self.report_dir / 'pii_check_cms.report'
- cms_stdout_report.write_lines(['Coverage found 33 uncovered models:\n'])
- lms_stdout_report = self.report_dir / 'pii_check_lms.report'
- lms_stdout_report.write_lines(['Coverage found 66 uncovered models:\n'])
-
- mock_needs.return_value = 0
- call_task('pavelib.quality.run_pii_check', options={"report_dir": str(self.report_dir)})
- mock_calls = [str(call) for call in mock_paver_sh.mock_calls]
- assert len(mock_calls) == 2
- assert any('lms.envs.test' in call for call in mock_calls)
- assert any('cms.envs.test' in call for call in mock_calls)
- assert all(str(self.report_dir) in call for call in mock_calls)
- metrics_file = Env.METRICS_DIR / 'pii'
- assert open(metrics_file).read() == 'Number of PII Annotation violations: 66\n'
-
- @patch.object(pavelib.quality.run_pii_check, 'needs')
- @patch('pavelib.quality.sh')
- def test_pii_check_failed(self, mock_paver_sh, mock_needs):
- """
- run_pii_check fails due to crossing the threshold.
- """
- # Make the expected stdout files.
- cms_stdout_report = self.report_dir / 'pii_check_cms.report'
- cms_stdout_report.write_lines(['Coverage found 33 uncovered models:\n'])
- lms_stdout_report = self.report_dir / 'pii_check_lms.report'
- lms_stdout_report.write_lines([
- 'Coverage found 66 uncovered models:',
- 'Coverage threshold not met! Needed 100.0, actually 95.0!',
- ])
-
- mock_needs.return_value = 0
- try:
- with self.assertRaises(BuildFailure):
- call_task('pavelib.quality.run_pii_check', options={"report_dir": str(self.report_dir)})
- except SystemExit:
- # Sometimes the BuildFailure raises a SystemExit, sometimes it doesn't, not sure why.
- # As a hack, we just wrap it in try-except.
- # This is not good, but these tests weren't even running for years, and we're removing this whole test
- # suite soon anyway.
- pass
- mock_calls = [str(call) for call in mock_paver_sh.mock_calls]
- assert len(mock_calls) == 2
- assert any('lms.envs.test' in call for call in mock_calls)
- assert any('cms.envs.test' in call for call in mock_calls)
- assert all(str(self.report_dir) in call for call in mock_calls)
- metrics_file = Env.METRICS_DIR / 'pii'
- assert open(metrics_file).read() == 'Number of PII Annotation violations: 66\n'
diff --git a/pavelib/paver_tests/test_stylelint.py b/pavelib/paver_tests/test_stylelint.py
deleted file mode 100644
index 3e1c79c93f28..000000000000
--- a/pavelib/paver_tests/test_stylelint.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""
-Tests for Paver's Stylelint tasks.
-"""
-
-from unittest.mock import MagicMock, patch
-
-import pytest
-import ddt
-from paver.easy import call_task
-
-from .utils import PaverTestCase
-
-
-@ddt.ddt
-class TestPaverStylelint(PaverTestCase):
- """
- Tests for Paver's Stylelint tasks.
- """
- @ddt.data(
- [False],
- [True],
- )
- @ddt.unpack
- def test_run_stylelint(self, should_pass):
- """
- Verify that the quality task fails with Stylelint violations.
- """
- if should_pass:
- _mock_stylelint_violations = MagicMock(return_value=0)
- with patch('pavelib.quality._get_stylelint_violations', _mock_stylelint_violations):
- call_task('pavelib.quality.run_stylelint')
- else:
- _mock_stylelint_violations = MagicMock(return_value=100)
- with patch('pavelib.quality._get_stylelint_violations', _mock_stylelint_violations):
- with pytest.raises(SystemExit):
- call_task('pavelib.quality.run_stylelint')
diff --git a/pavelib/paver_tests/test_timer.py b/pavelib/paver_tests/test_timer.py
deleted file mode 100644
index 5ccbf74abcf9..000000000000
--- a/pavelib/paver_tests/test_timer.py
+++ /dev/null
@@ -1,190 +0,0 @@
-"""
-Tests of the pavelib.utils.timer module.
-"""
-
-
-from datetime import datetime, timedelta
-from unittest import TestCase
-
-from unittest.mock import MagicMock, patch
-
-from pavelib.utils import timer
-
-
-@timer.timed
-def identity(*args, **kwargs):
- """
- An identity function used as a default task to test the timing of.
- """
- return args, kwargs
-
-
-MOCK_OPEN = MagicMock(spec=open)
-
-
-@patch.dict('pavelib.utils.timer.__builtins__', open=MOCK_OPEN)
-class TimedDecoratorTests(TestCase):
- """
- Tests of the pavelib.utils.timer:timed decorator.
- """
- def setUp(self):
- super().setUp()
-
- patch_dumps = patch.object(timer.json, 'dump', autospec=True)
- self.mock_dump = patch_dumps.start()
- self.addCleanup(patch_dumps.stop)
-
- patch_makedirs = patch.object(timer.os, 'makedirs', autospec=True)
- self.mock_makedirs = patch_makedirs.start()
- self.addCleanup(patch_makedirs.stop)
-
- patch_datetime = patch.object(timer, 'datetime', autospec=True)
- self.mock_datetime = patch_datetime.start()
- self.addCleanup(patch_datetime.stop)
-
- patch_exists = patch.object(timer, 'exists', autospec=True)
- self.mock_exists = patch_exists.start()
- self.addCleanup(patch_exists.stop)
-
- MOCK_OPEN.reset_mock()
-
- def get_log_messages(self, task=identity, args=None, kwargs=None, raises=None):
- """
- Return all timing messages recorded during the execution of ``task``.
- """
- if args is None:
- args = []
- if kwargs is None:
- kwargs = {}
-
- if raises is None:
- task(*args, **kwargs)
- else:
- self.assertRaises(raises, task, *args, **kwargs)
-
- return [
- call[0][0] # log_message
- for call in self.mock_dump.call_args_list
- ]
-
- @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log')
- def test_times(self):
- start = datetime(2016, 7, 20, 10, 56, 19)
- end = start + timedelta(seconds=35.6)
-
- self.mock_datetime.utcnow.side_effect = [start, end]
-
- messages = self.get_log_messages()
- assert len(messages) == 1
-
- # I'm not using assertDictContainsSubset because it is
- # removed in python 3.2 (because the arguments were backwards)
- # and it wasn't ever replaced by anything *headdesk*
- assert 'duration' in messages[0]
- assert 35.6 == messages[0]['duration']
-
- assert 'started_at' in messages[0]
- assert start.isoformat(' ') == messages[0]['started_at']
-
- assert 'ended_at' in messages[0]
- assert end.isoformat(' ') == messages[0]['ended_at']
-
- @patch.object(timer, 'PAVER_TIMER_LOG', None)
- def test_no_logs(self):
- messages = self.get_log_messages()
- assert len(messages) == 0
-
- @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log')
- def test_arguments(self):
- messages = self.get_log_messages(args=(1, 'foo'), kwargs=dict(bar='baz'))
- assert len(messages) == 1
-
- # I'm not using assertDictContainsSubset because it is
- # removed in python 3.2 (because the arguments were backwards)
- # and it wasn't ever replaced by anything *headdesk*
- assert 'args' in messages[0]
- assert [repr(1), repr('foo')] == messages[0]['args']
- assert 'kwargs' in messages[0]
- assert {'bar': repr('baz')} == messages[0]['kwargs']
-
- @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log')
- def test_task_name(self):
- messages = self.get_log_messages()
- assert len(messages) == 1
-
- # I'm not using assertDictContainsSubset because it is
- # removed in python 3.2 (because the arguments were backwards)
- # and it wasn't ever replaced by anything *headdesk*
- assert 'task' in messages[0]
- assert 'pavelib.paver_tests.test_timer.identity' == messages[0]['task']
-
- @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log')
- def test_exceptions(self):
-
- @timer.timed
- def raises():
- """
- A task used for testing exception handling of the timed decorator.
- """
- raise Exception('The Message!')
-
- messages = self.get_log_messages(task=raises, raises=Exception)
- assert len(messages) == 1
-
- # I'm not using assertDictContainsSubset because it is
- # removed in python 3.2 (because the arguments were backwards)
- # and it wasn't ever replaced by anything *headdesk*
- assert 'exception' in messages[0]
- assert 'Exception: The Message!' == messages[0]['exception']
-
- @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log-%Y-%m-%d-%H-%M-%S.log')
- def test_date_formatting(self):
- start = datetime(2016, 7, 20, 10, 56, 19)
- end = start + timedelta(seconds=35.6)
-
- self.mock_datetime.utcnow.side_effect = [start, end]
-
- messages = self.get_log_messages()
- assert len(messages) == 1
-
- MOCK_OPEN.assert_called_once_with('/tmp/some-log-2016-07-20-10-56-19.log', 'a')
-
- @patch.object(timer, 'PAVER_TIMER_LOG', '/tmp/some-log')
- def test_nested_tasks(self):
-
- @timer.timed
- def parent():
- """
- A timed task that calls another task
- """
- identity()
-
- parent_start = datetime(2016, 7, 20, 10, 56, 19)
- parent_end = parent_start + timedelta(seconds=60)
- child_start = parent_start + timedelta(seconds=10)
- child_end = parent_end - timedelta(seconds=10)
-
- self.mock_datetime.utcnow.side_effect = [parent_start, child_start, child_end, parent_end]
-
- messages = self.get_log_messages(task=parent)
- assert len(messages) == 2
-
- # Child messages first
- assert 'duration' in messages[0]
- assert 40 == messages[0]['duration']
-
- assert 'started_at' in messages[0]
- assert child_start.isoformat(' ') == messages[0]['started_at']
-
- assert 'ended_at' in messages[0]
- assert child_end.isoformat(' ') == messages[0]['ended_at']
-
- # Parent messages after
- assert 'duration' in messages[1]
- assert 60 == messages[1]['duration']
-
- assert 'started_at' in messages[1]
- assert parent_start.isoformat(' ') == messages[1]['started_at']
-
- assert 'ended_at' in messages[1]
- assert parent_end.isoformat(' ') == messages[1]['ended_at']
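
The deleted tests above define the full contract of pavelib.utils.timer.timed: one JSON record per call with the task path, repr'd args/kwargs, start/end timestamps, duration in seconds, and any exception text, appended to a strftime-formatted PAVER_TIMER_LOG path. A sketch of a decorator meeting that contract, reconstructed from the tests rather than taken from the deleted module:

    import functools
    import json
    import os
    from datetime import datetime

    # Destination template; may contain strftime codes. Reading it from an
    # environment variable here is an assumption about the original module.
    PAVER_TIMER_LOG = os.environ.get('PAVER_TIMER_LOG')

    def timed(func):
        """Record one JSON timing message per call of ``func``."""
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            start = datetime.utcnow()
            exception = None
            try:
                return func(*args, **kwargs)
            except Exception as exc:
                exception = f"{type(exc).__name__}: {exc}"
                raise
            finally:
                end = datetime.utcnow()
                if PAVER_TIMER_LOG:
                    message = {
                        'task': f"{func.__module__}.{func.__name__}",
                        'args': [repr(arg) for arg in args],
                        'kwargs': {key: repr(value) for key, value in kwargs.items()},
                        'started_at': start.isoformat(' '),
                        'ended_at': end.isoformat(' '),
                        'duration': (end - start).total_seconds(),
                    }
                    if exception:
                        message['exception'] = exception
                    with open(start.strftime(PAVER_TIMER_LOG), 'a') as outfile:
                        json.dump(message, outfile)
                        outfile.write('\n')
        return wrapper
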
diff --git a/pavelib/paver_tests/test_xsslint.py b/pavelib/paver_tests/test_xsslint.py
deleted file mode 100644
index a9b4a41e1600..000000000000
--- a/pavelib/paver_tests/test_xsslint.py
+++ /dev/null
@@ -1,120 +0,0 @@
-"""
-Tests for paver xsslint quality tasks
-"""
-from unittest.mock import patch
-
-import pytest
-from paver.easy import call_task
-
-import pavelib.quality
-
-from .utils import PaverTestCase
-
-
-class PaverXSSLintTest(PaverTestCase):
- """
- Test run_xsslint with a mocked environment in order to pass in opts
- """
-
- def setUp(self):
- super().setUp()
- self.reset_task_messages()
-
- @patch.object(pavelib.quality, '_write_metric')
- @patch.object(pavelib.quality, '_prepare_report_dir')
- @patch.object(pavelib.quality, '_get_xsslint_counts')
- def test_xsslint_violation_number_not_found(self, _mock_counts, _mock_report_dir, _mock_write_metric):
- """
- run_xsslint encounters an error parsing the xsslint output log
- """
- _mock_counts.return_value = {}
- with pytest.raises(SystemExit):
- call_task('pavelib.quality.run_xsslint')
-
- @patch.object(pavelib.quality, '_write_metric')
- @patch.object(pavelib.quality, '_prepare_report_dir')
- @patch.object(pavelib.quality, '_get_xsslint_counts')
- def test_xsslint_vanilla(self, _mock_counts, _mock_report_dir, _mock_write_metric):
- """
- run_xsslint finds violations, but a limit was not set
- """
- _mock_counts.return_value = {'total': 0}
- call_task('pavelib.quality.run_xsslint')
-
- @patch.object(pavelib.quality, '_write_metric')
- @patch.object(pavelib.quality, '_prepare_report_dir')
- @patch.object(pavelib.quality, '_get_xsslint_counts')
- def test_xsslint_invalid_thresholds_option(self, _mock_counts, _mock_report_dir, _mock_write_metric):
- """
- run_xsslint fails when thresholds option is poorly formatted
- """
- _mock_counts.return_value = {'total': 0}
- with pytest.raises(SystemExit):
- call_task('pavelib.quality.run_xsslint', options={"thresholds": "invalid"})
-
- @patch.object(pavelib.quality, '_write_metric')
- @patch.object(pavelib.quality, '_prepare_report_dir')
- @patch.object(pavelib.quality, '_get_xsslint_counts')
- def test_xsslint_invalid_thresholds_option_key(self, _mock_counts, _mock_report_dir, _mock_write_metric):
- """
- run_xsslint fails when thresholds option is poorly formatted
- """
- _mock_counts.return_value = {'total': 0}
- with pytest.raises(SystemExit):
- call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"invalid": 3}'})
-
- @patch.object(pavelib.quality, '_write_metric')
- @patch.object(pavelib.quality, '_prepare_report_dir')
- @patch.object(pavelib.quality, '_get_xsslint_counts')
- def test_xsslint_too_many_violations(self, _mock_counts, _mock_report_dir, _mock_write_metric):
- """
- run_xsslint finds more violations than are allowed
- """
- _mock_counts.return_value = {'total': 4}
- with pytest.raises(SystemExit):
- call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"total": 3}'})
-
- @patch.object(pavelib.quality, '_write_metric')
- @patch.object(pavelib.quality, '_prepare_report_dir')
- @patch.object(pavelib.quality, '_get_xsslint_counts')
- def test_xsslint_under_limit(self, _mock_counts, _mock_report_dir, _mock_write_metric):
- """
- run_xsslint finds fewer violations than are allowed
- """
- _mock_counts.return_value = {'total': 4}
- # No System Exit is expected
- call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"total": 5}'})
-
- @patch.object(pavelib.quality, '_write_metric')
- @patch.object(pavelib.quality, '_prepare_report_dir')
- @patch.object(pavelib.quality, '_get_xsslint_counts')
- def test_xsslint_rule_violation_number_not_found(self, _mock_counts, _mock_report_dir, _mock_write_metric):
- """
- run_xsslint encounters an error parsing the xsslint output log for a
- given rule threshold that was set.
- """
- _mock_counts.return_value = {'total': 4}
- with pytest.raises(SystemExit):
- call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 3}}'})
-
- @patch.object(pavelib.quality, '_write_metric')
- @patch.object(pavelib.quality, '_prepare_report_dir')
- @patch.object(pavelib.quality, '_get_xsslint_counts')
- def test_xsslint_too_many_rule_violations(self, _mock_counts, _mock_report_dir, _mock_write_metric):
- """
- run_xsslint finds more rule violations than are allowed
- """
- _mock_counts.return_value = {'total': 4, 'rules': {'javascript-escape': 4}}
- with pytest.raises(SystemExit):
- call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 3}}'})
-
- @patch.object(pavelib.quality, '_write_metric')
- @patch.object(pavelib.quality, '_prepare_report_dir')
- @patch.object(pavelib.quality, '_get_xsslint_counts')
- def test_xsslint_under_rule_limit(self, _mock_counts, _mock_report_dir, _mock_write_metric):
- """
- run_xsslint finds fewer rule violations than are allowed
- """
- _mock_counts.return_value = {'total': 4, 'rules': {'javascript-escape': 4}}
- # No System Exit is expected
- call_task('pavelib.quality.run_xsslint', options={"thresholds": '{"rules": {"javascript-escape": 5}}'})
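
Between the report-parsing cases in test_paver_quality.py earlier in this diff and the threshold cases above, the behavior of the deleted _get_xsslint_counts helper is fully specified: per-rule "name: N violations" lines plus a trailing "N violations total" line. A hedged reconstruction consistent with those tests (inferred, since the helper's source is not shown here):

    import re

    RULE_LINE = re.compile(r"^(?P<rule>[a-z][a-z-]*):\s+(?P<count>\d+) violations$")
    TOTAL_LINE = re.compile(r"^(?P<count>\d+) violations total$")

    def get_xsslint_counts(report_file):
        """Parse an xsslint report into {'rules': {...}, 'total': N-or-None}."""
        counts = {'rules': {}, 'total': None}
        with open(report_file) as report:
            for line in report:
                line = line.strip()
                rule_match = RULE_LINE.match(line)
                if rule_match:
                    counts['rules'][rule_match.group('rule')] = int(rule_match.group('count'))
                    continue
                total_match = TOTAL_LINE.match(line)
                if total_match:
                    counts['total'] = int(total_match.group('count'))
        return counts
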
diff --git a/pavelib/paver_tests/utils.py b/pavelib/paver_tests/utils.py
deleted file mode 100644
index 1db26cf76a4c..000000000000
--- a/pavelib/paver_tests/utils.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""Unit tests for the Paver server tasks."""
-
-
-import os
-from unittest import TestCase
-from uuid import uuid4
-
-from paver import tasks
-from paver.easy import BuildFailure
-
-
-class PaverTestCase(TestCase):
- """
- Base class for Paver test cases.
- """
- def setUp(self):
- super().setUp()
-
- # Show full length diffs upon test failure
- self.maxDiff = None # pylint: disable=invalid-name
-
- # Create a mock Paver environment
- tasks.environment = MockEnvironment()
-
- # Don't run pre-reqs
- os.environ['NO_PREREQ_INSTALL'] = 'true'
-
- def tearDown(self):
- super().tearDown()
- tasks.environment = tasks.Environment()
- del os.environ['NO_PREREQ_INSTALL']
-
- @property
- def task_messages(self):
- """Returns the messages output by the Paver task."""
- return tasks.environment.messages
-
- @property
- def platform_root(self):
- """Returns the current platform's root directory."""
- return os.getcwd()
-
- def reset_task_messages(self):
- """Clear the recorded message"""
- tasks.environment.messages = []
-
-
-class MockEnvironment(tasks.Environment):
- """
- Mock environment that collects information about Paver commands.
- """
- def __init__(self):
- super().__init__()
- self.dry_run = True
- self.messages = []
-
- def info(self, message, *args):
- """Capture any messages that have been recorded"""
- if args:
- output = message % args
- else:
- output = message
- if not output.startswith("--->"):
- self.messages.append(str(output))
-
-
-def fail_on_eslint(*args, **kwargs):
- """
- For our tests, we need the call for diff-quality running eslint reports
- to fail, since that is what is going to fail when we pass in a
- percentage ("p") requirement.
- """
- if "eslint" in args[0]: # lint-amnesty, pylint: disable=no-else-raise
- raise BuildFailure('Subprocess return code: 1')
- else:
- if kwargs.get('capture', False):
- return uuid4().hex
- else:
- return
-
-
-def fail_on_npm_install():
- """
- Used to simulate an error when executing "npm install"
- """
- return 1
-
-
-def unexpected_fail_on_npm_install(*args, **kwargs): # pylint: disable=unused-argument
- """
-    For our tests, we need a call to "npm install" to fail with an
-    unexpected subprocess return code.
- """
- if ["npm", "install", "--verbose"] == args[0]: # lint-amnesty, pylint: disable=no-else-raise
- raise BuildFailure('Subprocess return code: 50')
- else:
- return
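
MockEnvironment above is what made these tests side-effect free: with dry_run=True, Paver's sh() routes each command string through info() instead of executing it, and every message not starting with "--->" is recorded. A minimal usage sketch under that assumption about Paver's dry-run behavior:

    from paver import tasks
    from paver.easy import sh

    tasks.environment = MockEnvironment()    # defined in the deleted utils.py above
    sh("echo hello")                         # dry run: recorded, never executed
    assert tasks.environment.messages == ["echo hello"]
    tasks.environment = tasks.Environment()  # restore the real environment
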
diff --git a/pavelib/prereqs.py b/pavelib/prereqs.py
deleted file mode 100644
index 4453176c94da..000000000000
--- a/pavelib/prereqs.py
+++ /dev/null
@@ -1,351 +0,0 @@
-"""
-Install Python and Node prerequisites.
-"""
-
-
-import hashlib
-import os
-import re
-import subprocess
-import sys
-from distutils import sysconfig # pylint: disable=deprecated-module
-
-from paver.easy import sh, task # lint-amnesty, pylint: disable=unused-import
-
-from .utils.envs import Env
-from .utils.timer import timed
-
-PREREQS_STATE_DIR = os.getenv('PREREQ_CACHE_DIR', Env.REPO_ROOT / '.prereqs_cache')
-NO_PREREQ_MESSAGE = "NO_PREREQ_INSTALL is set, not installing prereqs"
-NO_PYTHON_UNINSTALL_MESSAGE = 'NO_PYTHON_UNINSTALL is set. No attempts will be made to uninstall old Python libs.'
-COVERAGE_REQ_FILE = 'requirements/edx/coverage.txt'
-
-# If you make any changes to this list you also need to make
-# a corresponding change to circle.yml, which is how the python
-# prerequisites are installed for builds on circleci.com
-toxenv = os.environ.get('TOXENV')
-if toxenv and toxenv != 'quality':
- PYTHON_REQ_FILES = ['requirements/edx/testing.txt']
-else:
- PYTHON_REQ_FILES = ['requirements/edx/development.txt']
-
-# Developers can have private requirements, for local copies of github repos,
-# or favorite debugging tools, etc.
-PRIVATE_REQS = 'requirements/edx/private.txt'
-if os.path.exists(PRIVATE_REQS):
- PYTHON_REQ_FILES.append(PRIVATE_REQS)
-
-
-def str2bool(s):
- s = str(s)
- return s.lower() in ('yes', 'true', 't', '1')
-
-
-def no_prereq_install():
- """
- Determine if NO_PREREQ_INSTALL should be truthy or falsy.
- """
- return str2bool(os.environ.get('NO_PREREQ_INSTALL', 'False'))
-
-
-def no_python_uninstall():
- """ Determine if we should run the uninstall_python_packages task. """
- return str2bool(os.environ.get('NO_PYTHON_UNINSTALL', 'False'))
-
-
-def create_prereqs_cache_dir():
- """Create the directory for storing the hashes, if it doesn't exist already."""
- try:
- os.makedirs(PREREQS_STATE_DIR)
- except OSError:
- if not os.path.isdir(PREREQS_STATE_DIR):
- raise
-
-
-def compute_fingerprint(path_list):
- """
- Hash the contents of all the files and directories in `path_list`.
- Returns the hex digest.
- """
-
- hasher = hashlib.sha1()
-
- for path_item in path_list:
-
- # For directories, create a hash based on the modification times
- # of first-level subdirectories
- if os.path.isdir(path_item):
- for dirname in sorted(os.listdir(path_item)):
- path_name = os.path.join(path_item, dirname)
- if os.path.isdir(path_name):
- hasher.update(str(os.stat(path_name).st_mtime).encode('utf-8'))
-
- # For files, hash the contents of the file
- if os.path.isfile(path_item):
- with open(path_item, "rb") as file_handle:
- hasher.update(file_handle.read())
-
- return hasher.hexdigest()
-
-
-def prereq_cache(cache_name, paths, install_func):
- """
- Conditionally execute `install_func()` only if the files/directories
- specified by `paths` have changed.
-
- If the code executes successfully (no exceptions are thrown), the cache
- is updated with the new hash.
- """
- # Retrieve the old hash
- cache_filename = cache_name.replace(" ", "_")
- cache_file_path = os.path.join(PREREQS_STATE_DIR, f"{cache_filename}.sha1")
- old_hash = None
- if os.path.isfile(cache_file_path):
- with open(cache_file_path) as cache_file:
- old_hash = cache_file.read()
-
- # Compare the old hash to the new hash
- # If they do not match (either the cache hasn't been created, or the files have changed),
- # then execute the code within the block.
- new_hash = compute_fingerprint(paths)
- if new_hash != old_hash:
- install_func()
-
- # Update the cache with the new hash
- # If the code executed within the context fails (throws an exception),
- # then this step won't get executed.
- create_prereqs_cache_dir()
- with open(cache_file_path, "wb") as cache_file:
- # Since the pip requirement files are modified during the install
- # process, we need to store the hash generated AFTER the installation
- post_install_hash = compute_fingerprint(paths)
- cache_file.write(post_install_hash.encode('utf-8'))
- else:
- print(f'{cache_name} unchanged, skipping...')
-
-
-def node_prereqs_installation():
- """
- Configures npm and installs Node prerequisites
- """
- # Before July 2023, these directories were created and written to
- # as root. Afterwards, they are created as being owned by the
- # `app` user -- but also need to be deleted by that user (due to
- # how npm runs post-install scripts.) Developers with an older
- # devstack installation who are reprovisioning will see errors
- # here if the files are still owned by root. Deleting the files in
- # advance prevents this error.
- #
- # This hack should probably be left in place for at least a year.
- # See ADR 17 for more background on the transition.
- sh("rm -rf common/static/common/js/vendor/ common/static/common/css/vendor/")
- # At the time of this writing, the js dir has git-versioned files
- # but the css dir does not, so the latter would have been created
- # as root-owned (in the process of creating the vendor
- # subdirectory). Delete it only if empty, just in case
- # git-versioned files are added later.
- sh("rmdir common/static/common/css || true")
-
- # NPM installs hang sporadically. Log the installation process so that we
- # determine if any packages are chronic offenders.
- npm_log_file_path = f'{Env.GEN_LOG_DIR}/npm-install.log'
- npm_log_file = open(npm_log_file_path, 'wb') # lint-amnesty, pylint: disable=consider-using-with
- npm_command = 'npm ci --verbose'.split()
-
-    # The implementation of Paver's `sh` function returns before the forked
-    # process actually returns. Using a Popen object so that we can ensure that
- # the forked process has returned
- proc = subprocess.Popen(npm_command, stderr=npm_log_file) # lint-amnesty, pylint: disable=consider-using-with
- retcode = proc.wait()
- if retcode == 1:
- raise Exception(f"npm install failed: See {npm_log_file_path}")
- print("Successfully clean-installed NPM packages. Log found at {}".format(
- npm_log_file_path
- ))
-
-
-def python_prereqs_installation():
- """
- Installs Python prerequisites
- """
- # edx-platform installs some Python projects from within the edx-platform repo itself.
- sh("pip install -e .")
- for req_file in PYTHON_REQ_FILES:
- pip_install_req_file(req_file)
-
-
-def pip_install_req_file(req_file):
- """Pip install the requirements file."""
- pip_cmd = 'pip install -q --disable-pip-version-check --exists-action w'
- sh(f"{pip_cmd} -r {req_file}")
-
-
-@task
-@timed
-def install_node_prereqs():
- """
- Installs Node prerequisites
- """
- if no_prereq_install():
- print(NO_PREREQ_MESSAGE)
- return
-
- prereq_cache("Node prereqs", ["package.json", "package-lock.json"], node_prereqs_installation)
-
-
-# To add a package to the uninstall list, just add it to this list! No need
-# to touch any other part of this file.
-PACKAGES_TO_UNINSTALL = [
- "MySQL-python", # Because mysqlclient shares the same directory name
- "South", # Because it interferes with Django 1.8 migrations.
- "edxval", # Because it was bork-installed somehow.
- "django-storages",
- "django-oauth2-provider", # Because now it's called edx-django-oauth2-provider.
- "edx-oauth2-provider", # Because it moved from github to pypi
- "enum34", # Because enum34 is not needed in python>3.4
- "i18n-tools", # Because now it's called edx-i18n-tools
- "moto", # Because we no longer use it and it conflicts with recent jsondiff versions
- "python-saml", # Because python3-saml shares the same directory name
- "pytest-faulthandler", # Because it was bundled into pytest
- "djangorestframework-jwt", # Because now its called drf-jwt.
-]
-
-
-@task
-@timed
-def uninstall_python_packages():
- """
- Uninstall Python packages that need explicit uninstallation.
-
- Some Python packages that we no longer want need to be explicitly
- uninstalled, notably, South. Some other packages were once installed in
- ways that were resistant to being upgraded, like edxval. Also uninstall
- them.
- """
-
- if no_python_uninstall():
- print(NO_PYTHON_UNINSTALL_MESSAGE)
- return
-
- # So that we don't constantly uninstall things, use a hash of the packages
- # to be uninstalled. Check it, and skip this if we're up to date.
- hasher = hashlib.sha1()
- hasher.update(repr(PACKAGES_TO_UNINSTALL).encode('utf-8'))
- expected_version = hasher.hexdigest()
- state_file_path = os.path.join(PREREQS_STATE_DIR, "Python_uninstall.sha1")
- create_prereqs_cache_dir()
-
- if os.path.isfile(state_file_path):
- with open(state_file_path) as state_file:
- version = state_file.read()
- if version == expected_version:
- print('Python uninstalls unchanged, skipping...')
- return
-
- # Run pip to find the packages we need to get rid of. Believe it or not,
- # edx-val is installed in a way that it is present twice, so we have a loop
- # to really really get rid of it.
- for _ in range(3):
- uninstalled = False
- frozen = sh("pip freeze", capture=True)
-
- for package_name in PACKAGES_TO_UNINSTALL:
- if package_in_frozen(package_name, frozen):
-                # Uninstall the package
- sh(f"pip uninstall --disable-pip-version-check -y {package_name}")
- uninstalled = True
- if not uninstalled:
- break
- else:
- # We tried three times and didn't manage to get rid of the pests.
- print("Couldn't uninstall unwanted Python packages!")
- return
-
- # Write our version.
- with open(state_file_path, "wb") as state_file:
- state_file.write(expected_version.encode('utf-8'))
-
-
-def package_in_frozen(package_name, frozen_output):
- """Is this package in the output of 'pip freeze'?"""
- # Look for either:
- #
- # PACKAGE-NAME==
- #
- # or:
- #
- # blah_blah#egg=package_name-version
- #
- pattern = r"(?mi)^{pkg}==|#egg={pkg_under}-".format(
- pkg=re.escape(package_name),
- pkg_under=re.escape(package_name.replace("-", "_")),
- )
- return bool(re.search(pattern, frozen_output))
-
-
-@task
-@timed
-def install_coverage_prereqs():
- """ Install python prereqs for measuring coverage. """
- if no_prereq_install():
- print(NO_PREREQ_MESSAGE)
- return
- pip_install_req_file(COVERAGE_REQ_FILE)
-
-
-@task
-@timed
-def install_python_prereqs():
- """
- Installs Python prerequisites.
- """
- if no_prereq_install():
- print(NO_PREREQ_MESSAGE)
- return
-
- uninstall_python_packages()
-
- # Include all of the requirements files in the fingerprint.
- files_to_fingerprint = list(PYTHON_REQ_FILES)
-
- # Also fingerprint the directories where packages get installed:
- # ("/edx/app/edxapp/venvs/edxapp/lib/python2.7/site-packages")
- files_to_fingerprint.append(sysconfig.get_python_lib())
-
- # In a virtualenv, "-e installs" get put in a src directory.
- if Env.PIP_SRC:
- src_dir = Env.PIP_SRC
- else:
- src_dir = os.path.join(sys.prefix, "src")
- if os.path.isdir(src_dir):
- files_to_fingerprint.append(src_dir)
-
- # Also fingerprint this source file, so that if the logic for installations
- # changes, we will redo the installation.
- this_file = __file__
- if this_file.endswith(".pyc"):
- this_file = this_file[:-1] # use the .py file instead of the .pyc
- files_to_fingerprint.append(this_file)
-
- prereq_cache("Python prereqs", files_to_fingerprint, python_prereqs_installation)
-
-
-@task
-@timed
-def install_prereqs():
- """
- Installs Node and Python prerequisites
- """
- if no_prereq_install():
- print(NO_PREREQ_MESSAGE)
- return
-
- if not str2bool(os.environ.get('SKIP_NPM_INSTALL', 'False')):
- install_node_prereqs()
- install_python_prereqs()
- log_installed_python_prereqs()
-
-
-def log_installed_python_prereqs():
- """ Logs output of pip freeze for debugging. """
- sh("pip freeze > {}".format(Env.GEN_LOG_DIR + "/pip_freeze.log"))
diff --git a/pavelib/quality.py b/pavelib/quality.py
deleted file mode 100644
index de2f6dc6665a..000000000000
--- a/pavelib/quality.py
+++ /dev/null
@@ -1,600 +0,0 @@
-""" # lint-amnesty, pylint: disable=django-not-configured
-Check code quality using pycodestyle, pylint, and diff_quality.
-"""
-
-import json
-import os
-import re
-from datetime import datetime
-from xml.sax.saxutils import quoteattr
-
-from paver.easy import BuildFailure, cmdopts, needs, sh, task
-
-from .utils.envs import Env
-from .utils.timer import timed
-
-ALL_SYSTEMS = 'lms,cms,common,openedx,pavelib,scripts'
-JUNIT_XML_TEMPLATE = """
-
-{failure_element}
-
-"""
-JUNIT_XML_FAILURE_TEMPLATE = ''
-START_TIME = datetime.utcnow()
-
-
-def write_junit_xml(name, message=None):
- """
- Write a JUnit results XML file describing the outcome of a quality check.
- """
- if message:
- failure_element = JUNIT_XML_FAILURE_TEMPLATE.format(message=quoteattr(message))
- else:
- failure_element = ''
- data = {
- 'failure_count': 1 if message else 0,
- 'failure_element': failure_element,
- 'name': name,
- 'seconds': (datetime.utcnow() - START_TIME).total_seconds(),
- }
- Env.QUALITY_DIR.makedirs_p()
- filename = Env.QUALITY_DIR / f'{name}.xml'
- with open(filename, 'w') as f:
- f.write(JUNIT_XML_TEMPLATE.format(**data))
-
-
-def fail_quality(name, message):
- """
- Fail the specified quality check by generating the JUnit XML results file
- and raising a ``BuildFailure``.
- """
- write_junit_xml(name, message)
- raise BuildFailure(message)
-
-
-def top_python_dirs(dirname):
- """
- Find the directories to start from in order to find all the Python files in `dirname`.
- """
- top_dirs = []
-
- dir_init = os.path.join(dirname, "__init__.py")
- if os.path.exists(dir_init):
- top_dirs.append(dirname)
-
- for directory in ['djangoapps', 'lib']:
- subdir = os.path.join(dirname, directory)
- subdir_init = os.path.join(subdir, "__init__.py")
- if os.path.exists(subdir) and not os.path.exists(subdir_init):
- dirs = os.listdir(subdir)
- top_dirs.extend(d for d in dirs if os.path.isdir(os.path.join(subdir, d)))
-
- modules_to_remove = ['__pycache__']
- for module in modules_to_remove:
- if module in top_dirs:
- top_dirs.remove(module)
-
- return top_dirs
-
-
-def _get_pep8_violations(clean=True):
- """
- Runs pycodestyle. Returns a tuple of (number_of_violations, violations_string)
- where violations_string is a string of all PEP 8 violations found, separated
- by new lines.
- """
- report_dir = (Env.REPORT_DIR / 'pep8')
- if clean:
- report_dir.rmtree(ignore_errors=True)
- report_dir.makedirs_p()
- report = report_dir / 'pep8.report'
-
- # Make sure the metrics subdirectory exists
- Env.METRICS_DIR.makedirs_p()
-
- if not report.exists():
- sh(f'pycodestyle . | tee {report} -a')
-
- violations_list = _pep8_violations(report)
-
- return len(violations_list), violations_list
-
-
-def _pep8_violations(report_file):
- """
- Returns the list of all PEP 8 violations in the given report_file.
- """
- with open(report_file) as f:
- return f.readlines()
-
-
-def run_pep8(options): # pylint: disable=unused-argument
- """
- Run pycodestyle on system code.
- Fail the task if any violations are found.
- """
- (count, violations_list) = _get_pep8_violations()
- violations_list = ''.join(violations_list)
-
- # Print number of violations to log
- violations_count_str = f"Number of PEP 8 violations: {count}"
- print(violations_count_str)
- print(violations_list)
-
- # Also write the number of violations to a file
- with open(Env.METRICS_DIR / "pep8", "w") as f:
- f.write(violations_count_str + '\n\n')
- f.write(violations_list)
-
- # Fail if any violations are found
- if count:
- failure_string = "FAILURE: Too many PEP 8 violations. " + violations_count_str
- failure_string += f"\n\nViolations:\n{violations_list}"
- fail_quality('pep8', failure_string)
- else:
- write_junit_xml('pep8')
-
-
-@task
-@needs(
- 'pavelib.utils.test.utils.ensure_clean_package_lock',
-)
-@cmdopts([
- ("limit=", "l", "limit for number of acceptable violations"),
-])
-@timed
-def run_eslint(options):
- """
- Runs eslint on static asset directories.
- If limit option is passed, fails build if more violations than the limit are found.
- """
- sh("npm clean-install")
-
- eslint_report_dir = (Env.REPORT_DIR / "eslint")
- eslint_report = eslint_report_dir / "eslint.report"
- _prepare_report_dir(eslint_report_dir)
- violations_limit = int(getattr(options, 'limit', -1))
-
- sh(
- "node --max_old_space_size=4096 node_modules/.bin/eslint "
- "--ext .js --ext .jsx --format=compact . | tee {eslint_report}".format(
- eslint_report=eslint_report
- ),
- ignore_error=True
- )
-
- try:
- num_violations = int(_get_count_from_last_line(eslint_report, "eslint"))
- except TypeError:
- fail_quality(
- 'eslint',
- "FAILURE: Number of eslint violations could not be found in {eslint_report}".format(
- eslint_report=eslint_report
- )
- )
-
- # Record the metric
- _write_metric(num_violations, (Env.METRICS_DIR / "eslint"))
-
- # Fail if number of violations is greater than the limit
- if num_violations > violations_limit > -1:
- fail_quality(
- 'eslint',
- "FAILURE: Too many eslint violations ({count}).\nThe limit is {violations_limit}.".format(
- count=num_violations, violations_limit=violations_limit
- )
- )
- else:
- write_junit_xml('eslint')
-
-
-def _get_stylelint_violations():
- """
- Returns the number of Stylelint violations.
- """
- stylelint_report_dir = (Env.REPORT_DIR / "stylelint")
- stylelint_report = stylelint_report_dir / "stylelint.report"
- _prepare_report_dir(stylelint_report_dir)
- formatter = 'node_modules/stylelint-formatter-pretty'
-
- sh(
- "stylelint **/*.scss --custom-formatter={formatter} | tee {stylelint_report}".format(
- formatter=formatter,
- stylelint_report=stylelint_report,
- ),
- ignore_error=True
- )
-
- try:
- return int(_get_count_from_last_line(stylelint_report, "stylelint"))
- except TypeError:
- fail_quality(
- 'stylelint',
- "FAILURE: Number of stylelint violations could not be found in {stylelint_report}".format(
- stylelint_report=stylelint_report
- )
- )
-
-
-@task
-@cmdopts([
- ("limit=", "l", "limit for number of acceptable violations"),
-])
-@timed
-def run_stylelint(options):
- """
- Runs stylelint on Sass files.
- If limit option is passed, fails build if more violations than the limit are found.
- """
- sh("npm clean-install")
-
- violations_limit = 0
- num_violations = _get_stylelint_violations()
-
- # Record the metric
- _write_metric(num_violations, (Env.METRICS_DIR / "stylelint"))
-
- # Fail if number of violations is greater than the limit
- if num_violations > violations_limit:
- fail_quality(
- 'stylelint',
- "FAILURE: Stylelint failed with too many violations: ({count}).\nThe limit is {violations_limit}.".format(
- count=num_violations,
- violations_limit=violations_limit,
- )
- )
- else:
- write_junit_xml('stylelint')
-
-
-@task
-@cmdopts([
- ("thresholds=", "t", "json containing limit for number of acceptable violations per rule"),
-])
-@timed
-def run_xsslint(options):
- """
- Runs xsslint/xss_linter.py on the codebase
- """
- sh("pip install -r requirements/edx/development.txt -e .")
-
- thresholds_option = getattr(options, 'thresholds', '{}')
- try:
- violation_thresholds = json.loads(thresholds_option)
- except ValueError:
- violation_thresholds = None
-    if not isinstance(violation_thresholds, dict) or \
- any(key not in ("total", "rules") for key in violation_thresholds.keys()):
-
- fail_quality(
- 'xsslint',
- """FAILURE: Thresholds option "{thresholds_option}" was not supplied using proper format.\n"""
- """Here is a properly formatted example, '{{"total":100,"rules":{{"javascript-escape":0}}}}' """
- """with property names in double-quotes.""".format(
- thresholds_option=thresholds_option
- )
- )
-
- xsslint_script = "xss_linter.py"
- xsslint_report_dir = (Env.REPORT_DIR / "xsslint")
- xsslint_report = xsslint_report_dir / "xsslint.report"
- _prepare_report_dir(xsslint_report_dir)
-
- sh(
- "{repo_root}/scripts/xsslint/{xsslint_script} --rule-totals --config={cfg_module} >> {xsslint_report}".format(
- repo_root=Env.REPO_ROOT,
- xsslint_script=xsslint_script,
- xsslint_report=xsslint_report,
- cfg_module='scripts.xsslint_config'
- ),
- ignore_error=True
- )
-
- xsslint_counts = _get_xsslint_counts(xsslint_report)
-
- try:
- metrics_str = "Number of {xsslint_script} violations: {num_violations}\n".format(
- xsslint_script=xsslint_script, num_violations=int(xsslint_counts['total'])
- )
- if 'rules' in xsslint_counts and any(xsslint_counts['rules']):
- metrics_str += "\n"
- rule_keys = sorted(xsslint_counts['rules'].keys())
- for rule in rule_keys:
- metrics_str += "{rule} violations: {count}\n".format(
- rule=rule,
- count=int(xsslint_counts['rules'][rule])
- )
- except TypeError:
- fail_quality(
- 'xsslint',
- "FAILURE: Number of {xsslint_script} violations could not be found in {xsslint_report}".format(
- xsslint_script=xsslint_script, xsslint_report=xsslint_report
- )
- )
-
- metrics_report = (Env.METRICS_DIR / "xsslint")
- # Record the metric
- _write_metric(metrics_str, metrics_report)
- # Print number of violations to log.
- sh(f"cat {metrics_report}", ignore_error=True)
-
- error_message = ""
-
- # Test total violations against threshold.
- if 'total' in list(violation_thresholds.keys()):
- if violation_thresholds['total'] < xsslint_counts['total']:
- error_message = "Too many violations total ({count}).\nThe limit is {violations_limit}.".format(
- count=xsslint_counts['total'], violations_limit=violation_thresholds['total']
- )
-
- # Test rule violations against thresholds.
- if 'rules' in violation_thresholds:
- threshold_keys = sorted(violation_thresholds['rules'].keys())
- for threshold_key in threshold_keys:
- if threshold_key not in xsslint_counts['rules']:
- error_message += (
- "\nNumber of {xsslint_script} violations for {rule} could not be found in "
- "{xsslint_report}."
- ).format(
- xsslint_script=xsslint_script, rule=threshold_key, xsslint_report=xsslint_report
- )
- elif violation_thresholds['rules'][threshold_key] < xsslint_counts['rules'][threshold_key]:
- error_message += \
- "\nToo many {rule} violations ({count}).\nThe {rule} limit is {violations_limit}.".format(
- rule=threshold_key, count=xsslint_counts['rules'][threshold_key],
- violations_limit=violation_thresholds['rules'][threshold_key],
- )
-
- if error_message:
- fail_quality(
- 'xsslint',
- "FAILURE: XSSLinter Failed.\n{error_message}\n"
- "See {xsslint_report} or run the following command to hone in on the problem:\n"
- " ./scripts/xss-commit-linter.sh -h".format(
- error_message=error_message, xsslint_report=xsslint_report
- )
- )
- else:
- write_junit_xml('xsslint')
-
-
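The thresholds option must be a JSON object whose only top-level keys are "total" and "rules", as the failure message above describes. A hedged sketch of that validation in isolation (raising ValueError where the task calls fail_quality):

```python
import json

def parse_thresholds(thresholds_option):
    """Parse and validate an xsslint thresholds option. Sketch only."""
    try:
        thresholds = json.loads(thresholds_option)
    except ValueError:
        thresholds = None
    # Reject anything that is not a dict with only "total"/"rules" keys.
    if not isinstance(thresholds, dict) or \
            any(key not in ("total", "rules") for key in thresholds):
        raise ValueError(
            'Expected e.g. \'{"total":100,"rules":{"javascript-escape":0}}\''
        )
    return thresholds

assert parse_thresholds('{"total":100,"rules":{"javascript-escape":0}}')["total"] == 100
```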
-def _write_metric(metric, filename):
- """
-    Write a given metric to a given file.
-    Used for things like reports/metrics/eslint, which simply records the number
-    of eslint violations found.
- """
- Env.METRICS_DIR.makedirs_p()
-
- with open(filename, "w") as metric_file:
- metric_file.write(str(metric))
-
-
-def _prepare_report_dir(dir_name):
- """
-    Ensures that a given directory exists and is empty.
- """
- dir_name.rmtree_p()
- dir_name.mkdir_p()
-
-
-def _get_report_contents(filename, report_name, last_line_only=False):
- """
- Returns the contents of the given file. Use last_line_only to only return
- the last line, which can be used for getting output from quality output
- files.
-
- Arguments:
- last_line_only: True to return the last line only, False to return a
- string with full contents.
-
- Returns:
- String containing full contents of the report, or the last line.
-
- """
- if os.path.isfile(filename):
- with open(filename) as report_file:
- if last_line_only:
- lines = report_file.readlines()
- for line in reversed(lines):
- if line != '\n':
- return line
- return None
- else:
- return report_file.read()
- else:
- file_not_found_message = f"FAILURE: The following log file could not be found: {filename}"
- fail_quality(report_name, file_not_found_message)
-
-
-def _get_count_from_last_line(filename, file_type):
- """
-    Returns the number found at the start of the last line of a file,
-    as a float.
- """
- report_contents = _get_report_contents(filename, file_type, last_line_only=True)
-
- if report_contents is None:
- return 0
-
- last_line = report_contents.strip()
-    # Example of the last line of a compact-formatted eslint report: "62829 problems"
- regex = r'^\d+'
-
- try:
- return float(re.search(regex, last_line).group(0))
- # An AttributeError will occur if the regex finds no matches.
-    # A ValueError will occur if the matched text cannot be cast as a float.
- except (AttributeError, ValueError):
- return None
-
-
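A standalone illustration of the last-line parsing above; the report text is a made-up example of eslint's compact-formatter output:

```python
import re

sample_report = "\n".join([
    "lms/static/js/foo.js: line 1, col 1, Error - Unexpected var. (no-var)",
    "",
    "62829 problems",
    "",
])

# Take the last non-blank line and read the leading integer, as above.
last_line = next(line for line in reversed(sample_report.splitlines()) if line.strip())
match = re.search(r'^\d+', last_line.strip())
count = float(match.group(0)) if match else None
assert count == 62829.0
```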
-def _get_xsslint_counts(filename):
- """
- This returns a dict of violations from the xsslint report.
-
- Arguments:
- filename: The name of the xsslint report.
-
- Returns:
- A dict containing the following:
- rules: A dict containing the count for each rule as follows:
- violation-rule-id: N, where N is the number of violations
- total: M, where M is the number of total violations
-
- """
- report_contents = _get_report_contents(filename, 'xsslint')
-    rule_count_regex = re.compile(r"^(?P<rule_id>[a-z-]+):\s+(?P<count>\d+) violations", re.MULTILINE)
-    total_count_regex = re.compile(r"^(?P<count>\d+) violations total", re.MULTILINE)
- violations = {'rules': {}}
- for violation_match in rule_count_regex.finditer(report_contents):
- try:
- violations['rules'][violation_match.group('rule_id')] = int(violation_match.group('count'))
- except ValueError:
- violations['rules'][violation_match.group('rule_id')] = None
- try:
- violations['total'] = int(total_count_regex.search(report_contents).group('count'))
- # An AttributeError will occur if the regex finds no matches.
-    # A ValueError will occur if the matched count cannot be cast as an int.
- except (AttributeError, ValueError):
- violations['total'] = None
- return violations
-
-
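Illustrative input and output for the parser above; the report lines are a made-up example of the `--rule-totals` output format:

```python
import re

sample = (
    "javascript-escape:  3 violations\n"
    "python-wrap-html:   1 violations\n"
    "4 violations total\n"
)

rule_re = re.compile(r"^(?P<rule_id>[a-z-]+):\s+(?P<count>\d+) violations", re.MULTILINE)
total_re = re.compile(r"^(?P<count>\d+) violations total", re.MULTILINE)

rules = {m.group('rule_id'): int(m.group('count')) for m in rule_re.finditer(sample)}
total = int(total_re.search(sample).group('count'))
assert rules == {"javascript-escape": 3, "python-wrap-html": 1} and total == 4
```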
-def _extract_missing_pii_annotations(filename):
- """
- Returns the number of uncovered models from the stdout report of django_find_annotations.
-
- Arguments:
- filename: Filename where stdout of django_find_annotations was captured.
-
- Returns:
- three-tuple containing:
- 1. The number of uncovered models,
-        2. A bool indicating whether the coverage threshold was met, and
- 3. The full report as a string.
- """
- uncovered_models = 0
- pii_check_passed = True
- if os.path.isfile(filename):
- with open(filename) as report_file:
- lines = report_file.readlines()
-
- # Find the count of uncovered models.
- uncovered_regex = re.compile(r'^Coverage found ([\d]+) uncovered')
- for line in lines:
- uncovered_match = uncovered_regex.match(line)
- if uncovered_match:
- uncovered_models = int(uncovered_match.groups()[0])
- break
-
- # Find a message which suggests the check failed.
- failure_regex = re.compile(r'^Coverage threshold not met!')
- for line in lines:
- failure_match = failure_regex.match(line)
- if failure_match:
- pii_check_passed = False
- break
-
- # Each line in lines already contains a newline.
- full_log = ''.join(lines)
- else:
- fail_quality('pii', f'FAILURE: Log file could not be found: {filename}')
-
- return (uncovered_models, pii_check_passed, full_log)
-
-
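A made-up sample of the django_find_annotations stdout that the two regexes above look for (the threshold numbers are invented):

```python
import re

sample_lines = [
    "Coverage found 12 uncovered models:\n",
    "Coverage threshold not met! Needed 94.5, actually 91.2!\n",
]

uncovered = 0
passed = True
for line in sample_lines:
    found = re.match(r'^Coverage found ([\d]+) uncovered', line)
    if found:
        uncovered = int(found.group(1))
    if re.match(r'^Coverage threshold not met!', line):
        passed = False

assert (uncovered, passed) == (12, False)
```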
-@task
-@cmdopts([
- ("report-dir=", "r", "Directory in which to put PII reports"),
-])
-@timed
-def run_pii_check(options):
- """
- Guarantee that all Django models are PII-annotated.
- """
- sh("pip install -r requirements/edx/development.txt -e .")
-
- pii_report_name = 'pii'
- default_report_dir = (Env.REPORT_DIR / pii_report_name)
- report_dir = getattr(options, 'report_dir', default_report_dir)
- output_file = os.path.join(report_dir, 'pii_check_{}.report')
- env_report = []
- pii_check_passed = True
- for env_name, env_settings_file in (("CMS", "cms.envs.test"), ("LMS", "lms.envs.test")):
- try:
- print()
- print(f"Running {env_name} PII Annotation check and report")
- print("-" * 45)
- run_output_file = str(output_file).format(env_name.lower())
- sh(
- "mkdir -p {} && " # lint-amnesty, pylint: disable=duplicate-string-formatting-argument
- "export DJANGO_SETTINGS_MODULE={}; "
- "code_annotations django_find_annotations "
- "--config_file .pii_annotations.yml --report_path {} --app_name {} "
- "--lint --report --coverage | tee {}".format(
- report_dir, env_settings_file, report_dir, env_name.lower(), run_output_file
- )
- )
- uncovered_model_count, pii_check_passed_env, full_log = _extract_missing_pii_annotations(run_output_file)
- env_report.append((
- uncovered_model_count,
- full_log,
- ))
-
- except BuildFailure as error_message:
- fail_quality(pii_report_name, f'FAILURE: {error_message}')
-
- if not pii_check_passed_env:
- pii_check_passed = False
-
- # Determine which suite is the worst offender by obtaining the max() keying off uncovered_count.
- uncovered_count, full_log = max(env_report, key=lambda r: r[0])
-
- # Write metric file.
- if uncovered_count is None:
- uncovered_count = 0
- metrics_str = f"Number of PII Annotation violations: {uncovered_count}\n"
- _write_metric(metrics_str, (Env.METRICS_DIR / pii_report_name))
-
- # Finally, fail the paver task if code_annotations suggests that the check failed.
- if not pii_check_passed:
- fail_quality('pii', full_log)
-
-
-@task
-@timed
-def check_keywords():
- """
- Check Django model fields for names that conflict with a list of reserved keywords
- """
- sh("pip install -r requirements/edx/development.txt -e .")
-
- report_path = os.path.join(Env.REPORT_DIR, 'reserved_keywords')
- sh(f"mkdir -p {report_path}")
-
- overall_status = True
- for env, env_settings_file in [('lms', 'lms.envs.test'), ('cms', 'cms.envs.test')]:
- report_file = f"{env}_reserved_keyword_report.csv"
- override_file = os.path.join(Env.REPO_ROOT, "db_keyword_overrides.yml")
- try:
- sh(
- "export DJANGO_SETTINGS_MODULE={settings_file}; "
- "python manage.py {app} check_reserved_keywords "
- "--override_file {override_file} "
- "--report_path {report_path} "
- "--report_file {report_file}".format(
- settings_file=env_settings_file, app=env, override_file=override_file,
- report_path=report_path, report_file=report_file
- )
- )
- except BuildFailure:
- overall_status = False
-
- if not overall_status:
- fail_quality(
- 'keywords',
- 'Failure: reserved keyword checker failed. Reports can be found here: {}'.format(
- report_path
- )
- )
diff --git a/pavelib/utils/__init__.py b/pavelib/utils/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/pavelib/utils/cmd.py b/pavelib/utils/cmd.py
deleted file mode 100644
index ddb591301cc2..000000000000
--- a/pavelib/utils/cmd.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""
-Helper functions for constructing shell commands.
-"""
-
-
-def cmd(*args):
- """
- Concatenate the arguments into a space-separated shell command.
- """
- return " ".join(str(arg) for arg in args if arg)
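Usage sketch for the deleted helper: falsy arguments are dropped, so callers can pass optional flags as None or "" (the pytest arguments here are illustrative):

```python
verbose = False
command = cmd("pytest", "--ds=lms.envs.test", "-v" if verbose else None, "lms/")
assert command == "pytest --ds=lms.envs.test lms/"
```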
diff --git a/pavelib/utils/envs.py b/pavelib/utils/envs.py
deleted file mode 100644
index 1e08e5dd6fd0..000000000000
--- a/pavelib/utils/envs.py
+++ /dev/null
@@ -1,133 +0,0 @@
-"""
-Helper functions for loading environment settings.
-"""
-import os
-import sys
-from time import sleep
-
-from path import Path as path
-
-
-def repo_root():
- """
- Get the root of the git repository (edx-platform).
-
- This sometimes fails on Docker Devstack, so it's been broken
- down with some additional error handling. It usually starts
- working within 30 seconds or so; for more details, see
- https://openedx.atlassian.net/browse/PLAT-1629 and
- https://github.com/docker/for-mac/issues/1509
- """
- file_path = path(__file__)
- attempt = 1
- while True:
- try:
- absolute_path = file_path.abspath()
- break
- except OSError:
- print(f'Attempt {attempt}/180 to get an absolute path failed')
- if attempt < 180:
- attempt += 1
- sleep(1)
- else:
- print('Unable to determine the absolute path of the edx-platform repo, aborting')
- raise
- return absolute_path.parent.parent.parent
-
-
-class Env:
- """
- Load information about the execution environment.
- """
-
- # Root of the git repository (edx-platform)
- REPO_ROOT = repo_root()
-
- # Reports Directory
- REPORT_DIR = REPO_ROOT / 'reports'
- METRICS_DIR = REPORT_DIR / 'metrics'
- QUALITY_DIR = REPORT_DIR / 'quality_junitxml'
-
- # Generic log dir
- GEN_LOG_DIR = REPO_ROOT / "test_root" / "log"
-
- # Python unittest dirs
- PYTHON_COVERAGERC = REPO_ROOT / ".coveragerc"
-
- # Which Python version should be used in xdist workers?
- PYTHON_VERSION = os.environ.get("PYTHON_VERSION", "2.7")
-
- # Directory that videos are served from
- VIDEO_SOURCE_DIR = REPO_ROOT / "test_root" / "data" / "video"
-
- PRINT_SETTINGS_LOG_FILE = GEN_LOG_DIR / "print_settings.log"
-
- # Detect if in a Docker container, and if so which one
- FRONTEND_TEST_SERVER_HOST = os.environ.get('FRONTEND_TEST_SERVER_HOSTNAME', '0.0.0.0')
- USING_DOCKER = FRONTEND_TEST_SERVER_HOST != '0.0.0.0'
- DEVSTACK_SETTINGS = 'devstack_docker' if USING_DOCKER else 'devstack'
- TEST_SETTINGS = 'test'
-
- # Mongo databases that will be dropped before/after the tests run
- MONGO_HOST = 'localhost'
-
- # Test Ids Directory
- TEST_DIR = REPO_ROOT / ".testids"
-
- # Configured browser to use for the js test suites
- SELENIUM_BROWSER = os.environ.get('SELENIUM_BROWSER', 'firefox')
- if USING_DOCKER:
- KARMA_BROWSER = 'ChromeDocker' if SELENIUM_BROWSER == 'chrome' else 'FirefoxDocker'
- else:
- KARMA_BROWSER = 'FirefoxNoUpdates'
-
- # Files used to run each of the js test suites
- # TODO: Store this as a dict. Order seems to matter for some
- # reason. See issue TE-415.
- KARMA_CONFIG_FILES = [
- REPO_ROOT / 'cms/static/karma_cms.conf.js',
- REPO_ROOT / 'cms/static/karma_cms_squire.conf.js',
- REPO_ROOT / 'cms/static/karma_cms_webpack.conf.js',
- REPO_ROOT / 'lms/static/karma_lms.conf.js',
- REPO_ROOT / 'xmodule/js/karma_xmodule.conf.js',
- REPO_ROOT / 'xmodule/js/karma_xmodule_webpack.conf.js',
- REPO_ROOT / 'common/static/karma_common.conf.js',
- REPO_ROOT / 'common/static/karma_common_requirejs.conf.js',
- ]
-
- JS_TEST_ID_KEYS = [
- 'cms',
- 'cms-squire',
- 'cms-webpack',
- 'lms',
- 'xmodule',
- 'xmodule-webpack',
- 'common',
- 'common-requirejs',
- 'jest-snapshot'
- ]
-
- JS_REPORT_DIR = REPORT_DIR / 'javascript'
-
- # Directories used for pavelib/ tests
- IGNORED_TEST_DIRS = ('__pycache__', '.cache', '.pytest_cache')
- LIB_TEST_DIRS = [path("pavelib/paver_tests"), path("scripts/xsslint/tests")]
-
- # Directory for i18n test reports
- I18N_REPORT_DIR = REPORT_DIR / 'i18n'
-
- # Directory for keeping src folder that comes with pip installation.
-    # Setting this is equivalent to passing `--src <dir>` to pip directly.
- PIP_SRC = os.environ.get("PIP_SRC")
-
- # Service variant (lms, cms, etc.) configured with an environment variable
- # We use this to determine which envs.json file to load.
- SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
-
- # If service variant not configured in env, then pass the correct
- # environment for lms / cms
- if not SERVICE_VARIANT: # this will intentionally catch "";
- if any(i in sys.argv[1:] for i in ('cms', 'studio')):
- SERVICE_VARIANT = 'cms'
- else:
- SERVICE_VARIANT = 'lms'
diff --git a/pavelib/utils/process.py b/pavelib/utils/process.py
deleted file mode 100644
index da2dafa8803d..000000000000
--- a/pavelib/utils/process.py
+++ /dev/null
@@ -1,121 +0,0 @@
-"""
-Helper functions for managing processes.
-"""
-
-
-import atexit
-import os
-import signal
-import subprocess
-import sys
-
-import psutil
-from paver import tasks
-
-
-def kill_process(proc):
- """
- Kill the process `proc` created with `subprocess`.
- """
- p1_group = psutil.Process(proc.pid)
- child_pids = p1_group.children(recursive=True)
-
- for child_pid in child_pids:
- os.kill(child_pid.pid, signal.SIGKILL)
-
-
-def run_multi_processes(cmd_list, out_log=None, err_log=None):
- """
- Run each shell command in `cmd_list` in a separate process,
- piping stdout to `out_log` (a path) and stderr to `err_log` (also a path).
-
- Terminates the processes on CTRL-C and ensures the processes are killed
- if an error occurs.
- """
- kwargs = {'shell': True, 'cwd': None}
- pids = []
-
- if out_log:
- out_log_file = open(out_log, 'w') # lint-amnesty, pylint: disable=consider-using-with
- kwargs['stdout'] = out_log_file
-
- if err_log:
- err_log_file = open(err_log, 'w') # lint-amnesty, pylint: disable=consider-using-with
- kwargs['stderr'] = err_log_file
-
- # If the user is performing a dry run of a task, then just log
- # the command strings and return so that no destructive operations
- # are performed.
- if tasks.environment.dry_run:
- for cmd in cmd_list:
- tasks.environment.info(cmd)
- return
-
- try:
- for cmd in cmd_list:
- pids.extend([subprocess.Popen(cmd, **kwargs)])
-
- # pylint: disable=unused-argument
- def _signal_handler(*args):
- """
- What to do when process is ended
- """
- print("\nEnding...")
-
- signal.signal(signal.SIGINT, _signal_handler)
-        print("Enter CTRL-C to end")
- signal.pause()
- print("Processes ending")
-
- # pylint: disable=broad-except
- except Exception as err:
- print(f"Error running process {err}", file=sys.stderr)
-
- finally:
- for pid in pids:
- kill_process(pid)
-
-
-def run_process(cmd, out_log=None, err_log=None):
- """
- Run the shell command `cmd` in a separate process,
- piping stdout to `out_log` (a path) and stderr to `err_log` (also a path).
-
- Terminates the process on CTRL-C or if an error occurs.
- """
- return run_multi_processes([cmd], out_log=out_log, err_log=err_log)
-
-
-def run_background_process(cmd, out_log=None, err_log=None, cwd=None):
- """
- Runs a command as a background process. Sends SIGINT at exit.
- """
-
- kwargs = {'shell': True, 'cwd': cwd}
- if out_log:
- out_log_file = open(out_log, 'w') # lint-amnesty, pylint: disable=consider-using-with
- kwargs['stdout'] = out_log_file
-
- if err_log:
- err_log_file = open(err_log, 'w') # lint-amnesty, pylint: disable=consider-using-with
- kwargs['stderr'] = err_log_file
-
- proc = subprocess.Popen(cmd, **kwargs) # lint-amnesty, pylint: disable=consider-using-with
-
- def exit_handler():
- """
- Send SIGINT to the process's children. This is important
- for running commands under coverage, as coverage will not
- produce the correct artifacts if the child process isn't
- killed properly.
- """
- p1_group = psutil.Process(proc.pid)
- child_pids = p1_group.children(recursive=True)
-
- for child_pid in child_pids:
- os.kill(child_pid.pid, signal.SIGINT)
-
- # Wait for process to actually finish
- proc.wait()
-
- atexit.register(exit_handler)
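A minimal sketch of the exit-handler pattern used by run_background_process, assuming psutil is installed; the sleep command stands in for a real background process:

```python
import atexit
import os
import signal
import subprocess

import psutil

proc = subprocess.Popen("sleep 30", shell=True)  # stand-in background process

def _interrupt_children():
    # SIGINT (rather than SIGKILL) lets tools like coverage.py flush their data.
    for child in psutil.Process(proc.pid).children(recursive=True):
        os.kill(child.pid, signal.SIGINT)
    proc.wait()

atexit.register(_interrupt_children)
```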
diff --git a/pavelib/utils/test/__init__.py b/pavelib/utils/test/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/pavelib/utils/test/suites/__init__.py b/pavelib/utils/test/suites/__init__.py
deleted file mode 100644
index 34ecd49c1c74..000000000000
--- a/pavelib/utils/test/suites/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""
-TestSuite class and subclasses
-"""
-from .js_suite import JestSnapshotTestSuite, JsTestSuite
-from .suite import TestSuite
diff --git a/pavelib/utils/test/suites/js_suite.py b/pavelib/utils/test/suites/js_suite.py
deleted file mode 100644
index 4e53d454fee5..000000000000
--- a/pavelib/utils/test/suites/js_suite.py
+++ /dev/null
@@ -1,109 +0,0 @@
-"""
-Javascript test tasks
-"""
-
-
-from paver import tasks
-
-from pavelib.utils.envs import Env
-from pavelib.utils.test import utils as test_utils
-from pavelib.utils.test.suites.suite import TestSuite
-
-__test__ = False # do not collect
-
-
-class JsTestSuite(TestSuite):
- """
- A class for running JavaScript tests.
- """
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
- self.run_under_coverage = kwargs.get('with_coverage', True)
- self.mode = kwargs.get('mode', 'run')
- self.report_dir = Env.JS_REPORT_DIR
- self.opts = kwargs
-
- suite = args[0]
- self.subsuites = self._default_subsuites if suite == 'all' else [JsTestSubSuite(*args, **kwargs)]
-
- def __enter__(self):
- super().__enter__()
- if tasks.environment.dry_run:
- tasks.environment.info("make report_dir")
- else:
- self.report_dir.makedirs_p()
- if not self.skip_clean:
- test_utils.clean_test_files()
-
- if self.mode == 'run' and not self.run_under_coverage:
- test_utils.clean_dir(self.report_dir)
-
- @property
- def _default_subsuites(self):
- """
- Returns all JS test suites
- """
- return [JsTestSubSuite(test_id, **self.opts) for test_id in Env.JS_TEST_ID_KEYS if test_id != 'jest-snapshot']
-
-
-class JsTestSubSuite(TestSuite):
- """
- Class for JS suites like cms, cms-squire, lms, common,
- common-requirejs and xmodule
- """
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
- self.test_id = args[0]
- self.run_under_coverage = kwargs.get('with_coverage', True)
- self.mode = kwargs.get('mode', 'run')
- self.port = kwargs.get('port')
- self.root = self.root + ' javascript'
- self.report_dir = Env.JS_REPORT_DIR
-
- try:
- self.test_conf_file = Env.KARMA_CONFIG_FILES[Env.JS_TEST_ID_KEYS.index(self.test_id)]
- except ValueError:
- self.test_conf_file = Env.KARMA_CONFIG_FILES[0]
-
- self.coverage_report = self.report_dir / f'coverage-{self.test_id}.xml'
- self.xunit_report = self.report_dir / f'javascript_xunit-{self.test_id}.xml'
-
- @property
- def cmd(self):
- """
- Run the tests using karma runner.
- """
- cmd = [
- "node",
- "--max_old_space_size=4096",
- "node_modules/.bin/karma",
- "start",
- self.test_conf_file,
- "--single-run={}".format('false' if self.mode == 'dev' else 'true'),
- "--capture-timeout=60000",
- f"--junitreportpath={self.xunit_report}",
- f"--browsers={Env.KARMA_BROWSER}",
- ]
-
- if self.port:
- cmd.append(f"--port={self.port}")
-
- if self.run_under_coverage:
- cmd.extend([
- "--coverage",
- f"--coveragereportpath={self.coverage_report}",
- ])
-
- return cmd
-
-
-class JestSnapshotTestSuite(TestSuite):
- """
- A class for running Jest Snapshot tests.
- """
- @property
- def cmd(self):
- """
- Run the tests using Jest.
- """
- return ["jest"]
diff --git a/pavelib/utils/test/suites/suite.py b/pavelib/utils/test/suites/suite.py
deleted file mode 100644
index 5a423c827c21..000000000000
--- a/pavelib/utils/test/suites/suite.py
+++ /dev/null
@@ -1,149 +0,0 @@
-"""
-A class used for defining and running test suites
-"""
-
-
-import os
-import subprocess
-import sys
-
-from paver import tasks
-
-from pavelib.utils.process import kill_process
-
-try:
- from pygments.console import colorize
-except ImportError:
- colorize = lambda color, text: text
-
-__test__ = False # do not collect
-
-
-class TestSuite:
- """
- TestSuite is a class that defines how groups of tests run.
- """
- def __init__(self, *args, **kwargs):
- self.root = args[0]
- self.subsuites = kwargs.get('subsuites', [])
- self.failed_suites = []
- self.verbosity = int(kwargs.get('verbosity', 1))
- self.skip_clean = kwargs.get('skip_clean', False)
- self.passthrough_options = kwargs.get('passthrough_options', [])
-
- def __enter__(self):
- """
- This will run before the test suite is run with the run_suite_tests method.
- If self.run_test is called directly, it should be run in a 'with' block to
- ensure that the proper context is created.
-
- Specific setup tasks should be defined in each subsuite.
-
-        e.g. checking for and creating required directories.
- """
- print(f"\nSetting up for {self.root}")
- self.failed_suites = []
-
- def __exit__(self, exc_type, exc_value, traceback):
- """
-        This runs after the tests started by the run_suite_tests method finish.
- Specific clean up tasks should be defined in each subsuite.
-
- If self.run_test is called directly, it should be run in a 'with' block
- to ensure that clean up happens properly.
-
-        e.g. cleaning mongo after the lms tests run.
- """
- print(f"\nCleaning up after {self.root}")
-
- @property
- def cmd(self):
- """
-        The command to run tests, as a list of arguments. For this base class there is none.
- """
- return None
-
- @staticmethod
- def is_success(exit_code):
- """
- Determine if the given exit code represents a success of the test
- suite. By default, only a zero counts as a success.
- """
- return exit_code == 0
-
- def run_test(self):
- """
-        Runs self.cmd in a subprocess and waits for it to finish.
- It returns False if errors or failures occur. Otherwise, it
- returns True.
- """
- cmd = " ".join(self.cmd)
-
- if tasks.environment.dry_run:
- tasks.environment.info(cmd)
- return
-
- sys.stdout.write(cmd)
-
- msg = colorize(
- 'green',
- '\n{bar}\n Running tests for {suite_name} \n{bar}\n'.format(suite_name=self.root, bar='=' * 40),
- )
-
- sys.stdout.write(msg)
- sys.stdout.flush()
-
- if 'TEST_SUITE' not in os.environ:
- os.environ['TEST_SUITE'] = self.root.replace("/", "_")
- kwargs = {'shell': True, 'cwd': None}
- process = None
-
- try:
- process = subprocess.Popen(cmd, **kwargs) # lint-amnesty, pylint: disable=consider-using-with
- return self.is_success(process.wait())
- except KeyboardInterrupt:
- kill_process(process)
- sys.exit(1)
-
- def run_suite_tests(self):
- """
- Runs each of the suites in self.subsuites while tracking failures
- """
- # Uses __enter__ and __exit__ for context
- with self:
- # run the tests for this class, and for all subsuites
- if self.cmd:
- passed = self.run_test()
- if not passed:
- self.failed_suites.append(self)
-
- for suite in self.subsuites:
- suite.run_suite_tests()
- if suite.failed_suites:
- self.failed_suites.extend(suite.failed_suites)
-
- def report_test_results(self):
- """
- Writes a list of failed_suites to sys.stderr
- """
- if self.failed_suites:
- msg = colorize('red', "\n\n{bar}\nTests failed in the following suites:\n* ".format(bar="=" * 48))
- msg += colorize('red', '\n* '.join([s.root for s in self.failed_suites]) + '\n\n')
- else:
- msg = colorize('green', "\n\n{bar}\nNo test failures ".format(bar="=" * 48))
-
- print(msg)
-
- def run(self):
- """
- Runs the tests in the suite while tracking and reporting failures.
- """
- self.run_suite_tests()
-
- if tasks.environment.dry_run:
- return
-
- self.report_test_results()
-
- if self.failed_suites:
- sys.exit(1)
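A hypothetical subclass showing how the TestSuite contract is consumed: cmd supplies the argument list, and run() drives setup, failure tracking, and reporting through __enter__/__exit__ (note this sketch would actually spawn pytest):

```python
class PytestSuite(TestSuite):
    """Sketch only: run pytest over the suite's root path."""
    @property
    def cmd(self):
        return ["pytest", "--ds=lms.envs.test", self.root]

PytestSuite("lms/djangoapps").run()
```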
diff --git a/pavelib/utils/test/utils.py b/pavelib/utils/test/utils.py
deleted file mode 100644
index 0851251e2222..000000000000
--- a/pavelib/utils/test/utils.py
+++ /dev/null
@@ -1,91 +0,0 @@
-"""
-Helper functions for test tasks
-"""
-
-
-import os
-
-from paver.easy import cmdopts, sh, task
-
-from pavelib.utils.envs import Env
-from pavelib.utils.timer import timed
-
-
-MONGO_PORT_NUM = int(os.environ.get('EDXAPP_TEST_MONGO_PORT', '27017'))
-
-COVERAGE_CACHE_BUCKET = "edx-tools-coverage-caches"
-COVERAGE_CACHE_BASEPATH = "test_root/who_tests_what"
-COVERAGE_CACHE_BASELINE = "who_tests_what.{}.baseline".format(os.environ.get('WTW_CONTEXT', 'all'))
-WHO_TESTS_WHAT_DIFF = "who_tests_what.diff"
-
-
-__test__ = False # do not collect
-
-
-@task
-@timed
-def clean_test_files():
- """
- Clean fixture files used by tests and .pyc files
- """
- sh("git clean -fqdx test_root/logs test_root/data test_root/staticfiles test_root/uploads")
- # This find command removes all the *.pyc files that aren't in the .git
- # directory. See this blog post for more details:
- # http://nedbatchelder.com/blog/201505/be_careful_deleting_files_around_git.html
- sh(r"find . -name '.git' -prune -o -name '*.pyc' -exec rm {} \;")
- sh("rm -rf test_root/log/auto_screenshots/*")
- sh("rm -rf /tmp/mako_[cl]ms")
-
-
-@task
-@timed
-def ensure_clean_package_lock():
- """
- Ensure no untracked changes have been made in the current git context.
- """
- sh("""
- git diff --name-only --exit-code package-lock.json ||
- (echo \"Dirty package-lock.json, run 'npm install' and commit the generated changes\" && exit 1)
- """)
-
-
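A rough Python equivalent of the shell check above; `git diff --exit-code` returns non-zero when the file has uncommitted changes:

```python
import subprocess

result = subprocess.run(
    ["git", "diff", "--name-only", "--exit-code", "package-lock.json"],
    check=False,
)
if result.returncode != 0:
    raise SystemExit(
        "Dirty package-lock.json, run 'npm install' and commit the generated changes"
    )
```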
-def clean_dir(directory):
- """
- Delete all the files from the specified directory.
- """
- # We delete the files but preserve the directory structure
- # so that coverage.py has a place to put the reports.
- sh(f'find {directory} -type f -delete')
-
-
-@task
-@cmdopts([
- ('skip-clean', 'C', 'skip cleaning repository before running tests'),
- ('skip_clean', None, 'deprecated in favor of skip-clean'),
-])
-@timed
-def clean_reports_dir(options):
- """
- Clean coverage files, to ensure that we don't use stale data to generate reports.
- """
- if getattr(options, 'skip_clean', False):
- print('--skip-clean is set, skipping...')
- return
-
- # We delete the files but preserve the directory structure
- # so that coverage.py has a place to put the reports.
- reports_dir = Env.REPORT_DIR.makedirs_p()
- clean_dir(reports_dir)
-
-
-@task
-@timed
-def clean_mongo():
- """
- Clean mongo test databases
- """
- sh("mongo {host}:{port} {repo_root}/scripts/delete-mongo-test-dbs.js".format(
- host=Env.MONGO_HOST,
- port=MONGO_PORT_NUM,
- repo_root=Env.REPO_ROOT,
- ))
diff --git a/pavelib/utils/timer.py b/pavelib/utils/timer.py
deleted file mode 100644
index fc6f3003736a..000000000000
--- a/pavelib/utils/timer.py
+++ /dev/null
@@ -1,83 +0,0 @@
-"""
-Tools for timing paver tasks
-"""
-
-
-import json
-import logging
-import os
-import sys
-import traceback
-from datetime import datetime
-from os.path import dirname, exists
-
-import wrapt
-
-LOGGER = logging.getLogger(__file__)
-PAVER_TIMER_LOG = os.environ.get('PAVER_TIMER_LOG')
-
-
-@wrapt.decorator
-def timed(wrapped, instance, args, kwargs): # pylint: disable=unused-argument
- """
- Log execution time for a function to a log file.
-
- Logging is only actually executed if the PAVER_TIMER_LOG environment variable
- is set. That variable is expanded for the current user and current
-    environment variables. It also can have :meth:`~datetime.datetime.strftime` format
- identifiers which are substituted using the time when the task started.
-
- For example, ``PAVER_TIMER_LOG='~/.paver.logs/%Y-%d-%m.log'`` will create a new
-    log file every day containing records for paver tasks run that day, and
-    will put those log files in the ``.paver.logs`` directory inside the user's
-    home.
-
- Must be earlier in the decorator stack than the paver task declaration.
- """
- start = datetime.utcnow()
- exception_info = {}
- try:
- return wrapped(*args, **kwargs)
- except Exception as exc:
- exception_info = {
- 'exception': "".join(traceback.format_exception_only(type(exc), exc)).strip()
- }
- raise
- finally:
- end = datetime.utcnow()
-
- # N.B. This is intended to provide a consistent interface and message format
- # across all of Open edX tooling, so it deliberately eschews standard
- # python logging infrastructure.
- if PAVER_TIMER_LOG is not None:
-
- log_path = start.strftime(PAVER_TIMER_LOG)
-
- log_message = {
- 'python_version': sys.version,
- 'task': f"{wrapped.__module__}.{wrapped.__name__}",
- 'args': [repr(arg) for arg in args],
- 'kwargs': {key: repr(value) for key, value in kwargs.items()},
- 'started_at': start.isoformat(' '),
- 'ended_at': end.isoformat(' '),
- 'duration': (end - start).total_seconds(),
- }
- log_message.update(exception_info)
-
- try:
- log_dir = dirname(log_path)
- if log_dir and not exists(log_dir):
- os.makedirs(log_dir)
-
- with open(log_path, 'a') as outfile:
- json.dump(
- log_message,
- outfile,
- separators=(',', ':'),
- sort_keys=True,
- )
- outfile.write('\n')
- except OSError:
- # Squelch OSErrors, because we expect them and they shouldn't
- # interrupt the rest of the process.
- LOGGER.exception("Unable to write timing logs")
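How the strftime expansion of PAVER_TIMER_LOG described above behaves (the date is illustrative):

```python
from datetime import datetime

start = datetime(2024, 3, 1, 12, 0, 0)
log_path = start.strftime('~/.paver.logs/%Y-%d-%m.log')
assert log_path == '~/.paver.logs/2024-01-03.log'  # one file per day
```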
diff --git a/requirements/edx/bundled.in b/requirements/edx/bundled.in
index b4685a3a2cbe..fd3257593fe5 100644
--- a/requirements/edx/bundled.in
+++ b/requirements/edx/bundled.in
@@ -25,7 +25,6 @@
# Follow up issue to remove this fork: https://github.com/openedx/edx-platform/issues/33456
https://github.com/overhangio/py2neo/releases/download/2021.2.3/py2neo-2021.2.3.tar.gz
-pygments # Used to support colors in paver command output
# i18n_tool is needed at build time for pulling translations
edx-i18n-tools>=0.4.6 # Commands for developers and translators to extract, compile and validate translations
diff --git a/requirements/edx/paver.in b/requirements/edx/paver.in
deleted file mode 100644
index a4d779722091..000000000000
--- a/requirements/edx/paver.in
+++ /dev/null
@@ -1,18 +0,0 @@
-# Requirements to run and test Paver
-#
-# DON'T JUST ADD NEW DEPENDENCIES!!!
-#
-# If you open a pull request that adds a new dependency, you should:
-# * verify that the dependency has a license compatible with AGPLv3
-# * confirm that it has no system requirements beyond what we already install
-# * run "make upgrade" to update the detailed requirements files
-#
-
--c ../constraints.txt
-
-path # Easier manipulation of filesystem paths
-paver # Build, distribution and deployment scripting tool
-psutil # Library for retrieving information on running processes and system utilization
-python-memcached # Python interface to the memcached memory cache daemon
-pymemcache # Python interface to the memcached memory cache daemon
-wrapt # Decorator utilities used in the @timed paver task decorator
diff --git a/requirements/edx/paver.txt b/requirements/edx/paver.txt
deleted file mode 100644
index 4d7bfc7291d0..000000000000
--- a/requirements/edx/paver.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.11
-# by the following command:
-#
-# make upgrade
-#
-path==16.11.0
- # via
- # -c requirements/edx/../constraints.txt
- # -r requirements/edx/paver.in
-paver==1.3.4
- # via -r requirements/edx/paver.in
-psutil==5.9.8
- # via -r requirements/edx/paver.in
-pymemcache==4.0.0
- # via -r requirements/edx/paver.in
-python-memcached==1.62
- # via -r requirements/edx/paver.in
-six==1.16.0
- # via paver
-wrapt==1.16.0
- # via -r requirements/edx/paver.in
diff --git a/requirements/edx/testing.in b/requirements/edx/testing.in
index 79d7964607ba..b903768f4de6 100644
--- a/requirements/edx/testing.in
+++ b/requirements/edx/testing.in
@@ -16,7 +16,6 @@
-r base.txt # Core edx-platform production dependencies
-r coverage.txt # Utilities for calculating test coverage
--r paver.txt # Quality and JS testing, for now (https://github.com/openedx/edx-platform/issues/@@TODO)
beautifulsoup4 # Library for extracting data from HTML and XML files
code-annotations # Perform code annotation checking, such as for PII annotations