diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..ed618d0 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,32 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: unconfirmed +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Run command '...' +3. Scroll down to '...' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**System (please complete the following information):** +- `VOLTTRON Historian Base` version: [e.g. 0.2.1] +- Python version: [e.g. 3.8] +- OS: [Windows/Linux] + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..4fe86d5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: feature +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/workflows/auto-assign-project.yml b/.github/workflows/auto-assign-project.yml new file mode 100644 index 0000000..8427606 --- /dev/null +++ b/.github/workflows/auto-assign-project.yml @@ -0,0 +1,22 @@ +name: Add bugs to bugs project + +on: + issues: + types: + - opened + +jobs: + add-to-project: + name: Add issue to project + runs-on: ubuntu-latest + steps: + - uses: actions/add-to-project@v0.3.0 + with: + # You can target a repository in a different organization + # to the issue + project-url: https://github.com/orgs/eclipse-volttron/projects/3 + # project-url: https://github.com/orgs//projects/ + # github-token: ${{ secrets.ADD_TO_PROJECT_PAT }} + github-token: ${{ secrets.AUTO_PROJECT_PAT }} + # labeled: bug, needs-triage + # label-operator: OR \ No newline at end of file diff --git a/.github/workflows/code-analysis.yml b/.github/workflows/code-analysis.yml new file mode 100644 index 0000000..e64de06 --- /dev/null +++ b/.github/workflows/code-analysis.yml @@ -0,0 +1,70 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. 
+# +name: "CodeQL" + +on: + push: + branches: [ main, develop, releases ] + pull_request: + # The branches below must be a subset of the branches above + branches: [ main, develop, releases ] + schedule: + - cron: '34 10 * * 4' + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'python' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] + # Learn more about CodeQL language support at https://git.io/codeql-language-support + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + # queries: ./path/to/local/query, your-org/your-repo/queries@main + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v1 + + # ℹ️ Command-line programs to run using the OS shell. + # πŸ“š https://git.io/JvXDl + + # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language + + #- run: | + # make bootstrap + # make release + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 \ No newline at end of file diff --git a/.github/workflows/deploy-pre-release.yml b/.github/workflows/deploy-pre-release.yml new file mode 100644 index 0000000..f06a4c1 --- /dev/null +++ b/.github/workflows/deploy-pre-release.yml @@ -0,0 +1,126 @@ +name: Deploy Pre-Release Artifacts + +on: + push: + branches: + - develop + +defaults: + run: + shell: bash + +env: + LANG: en_US.utf-8 + LC_ALL: en_US.utf-8 + PYTHON_VERSION: '3.10' + PROJECT_NAME: volttron-postgresql-historian + POETRY_VERSION: '1.2.2' + +jobs: + + deploy-pre-release: + if: github.ref_name != 'main' + runs-on: ubuntu-22.04 + steps: + - run: echo "πŸŽ‰ The job was automatically triggered by a ${{ github.event_name }} event." + - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by GitHub!" + - run: echo "πŸ”Ž The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}." 
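+      # Note: the checkout below uses fetch-depth: 0 so the full history and
+      # all tags are available; the Bump Version step later derives the next
+      # version from existing git tags.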
+
+      #----------------------------------------------
+      #       check-out repo and set-up python
+      #----------------------------------------------
+      - name: Checkout code
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Set up Python ${{ env.PYTHON_VERSION }}
+        id: setup-python
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ env.PYTHON_VERSION }}
+
+      #----------------------------------------------
+      #  -----  install & configure poetry  -----
+      #----------------------------------------------
+      - name: Install Poetry
+        uses: snok/install-poetry@v1
+        with:
+          version: ${{ env.POETRY_VERSION }}
+          virtualenvs-create: true
+          virtualenvs-in-project: true
+          installer-parallel: true
+
+      #----------------------------------------------
+      #       load cached venv if cache exists
+      #----------------------------------------------
+      - name: Load cached venv
+        id: cached-poetry-dependencies
+        uses: actions/cache@v3
+        with:
+          path: .venv
+          key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }}
+
+      #----------------------------------------------
+      # install dependencies if cache does not exist
+      #----------------------------------------------
+      - name: Install dependencies
+        if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
+        run: poetry install --no-interaction --no-root
+
+      #----------------------------------------------
+      #    install your root project, if required
+      #----------------------------------------------
+      - name: Install library
+        run: |
+          poetry install --no-interaction
+
+      #----------------------------------------------
+      #    bump version number for the pre-release
+      #----------------------------------------------
+      - name: Bump Version
+        run: |
+          # current_tag is the last tagged release in the repository. From there
+          # we need to remove the v from the beginning of the tag.
+          if [ -n "$(git tag -l 'v*')" ]; then
+            # uses -V (version sort) to keep the ordering monotonically increasing.
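+            # grep --invert-match '-' drops pre-release tags (anything with a
+            # hyphen) and 'sed -n 1p' keeps only the newest remaining tag.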
+            current_tag=$(git tag -l "v*" | grep --invert-match '-' | sort --reverse -V | sed -n 1p)
+          else
+            current_tag=v0.1.0
+          fi
+
+          current_tag=${current_tag#?}
+
+          # current_tag is now the version to give poetry so that poetry can
+          # bump it to the next pre-release.
+          poetry version ${current_tag}
+          poetry version prerelease --no-interaction
+
+          NEW_TAG=v$(poetry version --short)
+
+          # Finally, because we want to be able to use the variable in later
+          # steps, we set a NEW_TAG environment variable.
+          echo "NEW_TAG=${NEW_TAG}" >> $GITHUB_ENV
+
+      #---------------------------------------------------------------
+      #  create build artifacts to be included as part of release
+      #---------------------------------------------------------------
+      - name: Create build artifacts
+        run: |
+          poetry build -vvv
+
+      - uses: ncipollo/release-action@v1
+        with:
+          artifacts: "dist/*.gz,dist/*.whl"
+          artifactErrorsFailBuild: true
+          generateReleaseNotes: true
+          commit: ${{ github.ref }}
+          prerelease: true
+          tag: ${{ env.NEW_TAG }}
+          token: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Publish pre-release to pypi
+        if: github.repository == format('eclipse-volttron/{0}', env.PROJECT_NAME)
+        run: |
+          poetry config pypi-token.pypi ${{ secrets.PYPI_TOKEN }}
+          poetry publish
diff --git a/.github/workflows/make-release.yml b/.github/workflows/make-release.yml
index 4ae4745..96d093d 100644
--- a/.github/workflows/make-release.yml
+++ b/.github/workflows/make-release.yml
@@ -1,59 +1,292 @@
-name: Make full release
-
-on:
-  workflow_dispatch:
-    inputs:
-      publish-to-test-pypi:
-        description: 'Publish to test pypi instead of pypi'
-        required: false
-        default: false
-        type: boolean
-      bump-rule:
-        description: 'Rule for computing next release version'
-        required: false
-        default: 'prerelease'
-        type: choice
-        options:
-          - patch
-          - minor
-          - major
-          - prepatch
-          - preminor
-          - premajor
-          - prerelease
-      release-version:
-        description: 'Version number to use(instead of computing). Should be of the format x.y.z[rcn]. Do not use hyphens.'
-        required: false
-        default: ''
-        type: string
-      merge-strategy:
-        description: 'Merge strategy and strategy options. Used only in case of merge conflicts'
-        required: false
-        default: ''
-        type: string
-
-defaults:
-  run:
-    shell: bash
-
-env:
-  LANG: en_US.utf-8
-  LC_ALL: en_US.utf-8
-  PYTHON_VERSION: '3.10'
-
-jobs:
-  call-deploy-release:
-    permissions:
-      contents: write  # To push a branch
-      pull-requests: write  # To create a PR from that branch
-
-    uses: eclipse-volttron/github-tooling/.github/workflows/deploy-release.yml@main
-    with:
-      merge-strategy: ${{ inputs.merge-strategy }}
-      release-version: ${{ inputs.release-version }}
-      bump-rule: ${{ inputs.bump-rule }}
-      run-tests-wait: 600
-      publish-to-test-pypi: false
-    secrets:
-      git-token: ${{ secrets.AUTO_PROJECT_PAT }}
-      pypi-token: ${{ secrets.PYPI_TOKEN }}
\ No newline at end of file
+name: Make full release
+
+on:
+  workflow_dispatch:
+    inputs:
+      publish-to-test-pypi:
+        description: 'Publish to test pypi instead of pypi'
+        required: false
+        default: false
+        type: boolean
+      bump-rule:
+        description: 'Rule for computing next release version'
+        required: false
+        default: 'prerelease'
+        type: choice
+        options:
+          - patch
+          - minor
+          - major
+          - prepatch
+          - preminor
+          - premajor
+          - prerelease
+      release-version:
+        description: 'Version number to use (instead of computing). Should be of the format x.y.z[rcn]. Do not use hyphens.'
+        required: false
+        default: ''
+        type: string
+      merge-strategy:
+        description: 'Merge strategy and strategy options. 
Used only in case of merge conflicts' + required: false + default: '' + type: string + +defaults: + run: + shell: bash + +env: + LANG: en_US.utf-8 + LC_ALL: en_US.utf-8 + PYTHON_VERSION: '3.10' + PGPASSWORD: 'postgres' + +jobs: + deploy-release: + runs-on: ubuntu-22.04 + + permissions: + contents: write # To push a branch + pull-requests: write # To create a PR from that branch + + services: + # Label used to access the service container + postgres: + # Docker Hub image + image: postgres + # Provide the password for postgres + env: + POSTGRES_PASSWORD: postgres + ports: + # Maps tcp port 5432 on service container to the host + - 5432:5432 + + steps: + - run: echo "πŸŽ‰ The job was automatically triggered by a ${{ github.event_name }} event." + - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by GitHub!" + - run: echo "πŸ”Ž The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}." + + #---------------------------------------------- + # check-out repo and set-up python + #---------------------------------------------- + - name: Checkout code + uses: actions/checkout@v3 + with: + fetch-depth: 0 + ref: develop + token: ${{ secrets.AUTO_PROJECT_PAT }} + + - name: Do a git merge dry run + id: merge-dry-run + run: | + git config --global user.email "deploy-release-action@pnl.gov" + git config --global user.name "Deploy Release Github Action" + git checkout main + git merge --no-commit --no-ff develop + continue-on-error: true + + - name: Abort merge dry-run + run: | + git merge --abort + + - name: Check if merge had conflicts. + # if there is conflict and there is no merge strategy set then abort merge and exit + if: steps.merge-dry-run.outcome != 'success' && github.event.inputs.merge-strategy == '' + run: | + echo "merge strategy is ${{ inputs.merge-strategy }}" + echo "Merge to main has conflicts. Either do a manual merge and release or set input merge-strategy and re-run action" + exit 1 + + - name: Recheckout develop + run: | + git checkout develop + + #------------------ + # setup database + #---------------- + - run: | + psql -h localhost -U postgres -c "CREATE DATABASE test_historian;" + psql -h localhost -U postgres -c "CREATE USER historian with encrypted password 'historian';" + psql -h localhost -U postgres -c "GRANT ALL PRIVILEGES on database test_historian to historian;" + + - name: Set up Python ${{ env.PYTHON_VERSION }} + id: setup-python + uses: actions/setup-python@v4 + with: + python-version: ${{ env.PYTHON_VERSION }} + + #---------------------------------------------- + # ----- install & configure poetry ----- + #---------------------------------------------- + - name: Install Poetry + uses: snok/install-poetry@v1.3.3 + with: + virtualenvs-create: true + virtualenvs-in-project: true + installer-parallel: true + + + #---------------------------------------------- + # install your root project, if required + #---------------------------------------------- + - name: Install library + run: | + poetry lock --no-update + poetry install --no-interaction + + - name: Use given release-version number + if: inputs.release-version != '' + run: | + echo "Using given release version is ${{ inputs.release-version }}" + poetry version ${{ inputs.release-version }} + + NEW_TAG=v$(poetry version --short) + + # we want to be able to use the variable in later + # steps we set a NEW_TAG environmental variable + echo "NEW_TAG=$(echo ${NEW_TAG})" >> $GITHUB_ENV + # we don't want to update pyproject.toml yet. 
don't want this change to create a merge conflict.
+          # pyproject.toml does not persist the release version; git tags are
+          # the source of truth for computing the next version.
+          git restore pyproject.toml
+
+      #----------------------------------------------
+      #  bump version number
+      #----------------------------------------------
+      - name: Bump Version
+        if: inputs.release-version == ''
+        run: |
+          # current_tag is the last tagged release in the repository. From there
+          # we need to remove the v from the beginning of the tag.
+          echo "Bump rule is ${{ inputs.bump-rule }}"
+          echo "Given release version is ${{ inputs.release-version }}"
+          if [ -n "$(git tag -l 'v*')" ]; then
+            # uses -V (version sort) to keep the ordering monotonically increasing.
+            current_tag=$(git tag -l "v*" | grep --invert-match '-' | sort --reverse -V | sed -n 1p)
+            echo "current git tag is ${current_tag}"
+            current_tag=${current_tag#?}
+            # current_tag is now the version to give poetry so that poetry can
+            # bump it by the given bump rule.
+            poetry version ${current_tag}
+            poetry version ${{ inputs.bump-rule }} --no-interaction
+
+          else
+            # very first release. start with 0.1.0rc0
+            echo "First release. Setting tag as 0.1.0rc0"
+            current_tag='0.1.0rc0'
+            poetry version ${current_tag}
+          fi
+
+          NEW_TAG=v$(poetry version --short)
+
+          # Finally, because we want to be able to use the variable in later
+          # steps, we set a NEW_TAG environment variable.
+          echo "NEW_TAG=${NEW_TAG}" >> $GITHUB_ENV
+          # we don't want to update pyproject.toml yet; that would create a merge conflict.
+          # pyproject.toml does not persist the release version; git tags are
+          # the source of truth for computing the next version.
+          git restore pyproject.toml
+
+
+      #--------------------------------------------------------------
+      # Create a new releases/new_tag branch
+      #--------------------------------------------------------------
+      - name: Create a new releases branch
+        run: |
+          git checkout -b releases/${NEW_TAG}
+          git push --set-upstream origin releases/${NEW_TAG}
+
+      #--------------------------------------------------------------
+      # merge changes back to main
+      #--------------------------------------------------------------
+      - name: Merge changes back to main
+        run: |
+          git checkout main
+          git merge ${{ inputs.merge-strategy }} releases/${NEW_TAG}
+          git push
+
+      - name: Run tests on main branch
+        id: run-tests-on-main
+        run: |
+          if [[ -d tests ]]; then
+            poetry add pytest-timeout --group dev
+            poetry install --no-interaction
+            poetry run pytest --timeout=600 tests
+          fi
+        continue-on-error: true
+
+      - name: Revert main and delete the release branch if tests failed
+        if: steps.run-tests-on-main.outcome != 'success'
+        run: |
+          echo "tests on main did not succeed. Outcome is ${{ steps.run-tests-on-main.outcome }}"
+          git reset --hard HEAD~1
+          git push origin HEAD --force
+          git branch -d releases/${NEW_TAG}
+          git push origin --delete releases/${NEW_TAG}
+          echo "reverted changes to main and removed release branch"
+          exit 1
+
+      - name: Create build artifacts
+        run: |
+          # set the right version in pyproject.toml before build and publish
+          poetry version ${NEW_TAG#?}
+          poetry build -vvv
+
+      - uses: ncipollo/release-action@v1
+        with:
+          artifacts: "dist/*.gz,dist/*.whl"
+          artifactErrorsFailBuild: true
+          generateReleaseNotes: true
+          commit: ${{ github.ref }}
+          # a full release only when bump-rule is not 'prerelease'
+          prerelease: ${{ inputs.bump-rule == 'prerelease' }}
+          tag: ${{ env.NEW_TAG }}
+          token: ${{ secrets.AUTO_PROJECT_PAT }}
+
+      - name: Publish to pypi
+        id: publish-to-pypi
+        if: ${{ ! inputs.publish-to-test-pypi }}
+        run: |
+          poetry config pypi-token.pypi ${{ secrets.PYPI_TOKEN }}
+          poetry publish
+        continue-on-error: true
+
+      - name: Publish to test-pypi
+        id: publish-to-test-pypi
+        if: ${{ inputs.publish-to-test-pypi }}
+        run: |
+          poetry config repositories.test-pypi https://test.pypi.org/legacy/
+          poetry config pypi-token.test-pypi ${{ secrets.TEST_PYPI_TOKEN }}
+          poetry publish -r test-pypi
+        continue-on-error: true
+
+      - name: If publish to pypi/test-pypi failed, revert main and delete the release branch
+        if: ${{ steps.publish-to-pypi.outcome != 'success' && steps.publish-to-test-pypi.outcome != 'success' }}
+        run: |
+          echo "publish to pypi/test-pypi did not succeed. Outcome for pypi = ${{ steps.publish-to-pypi.outcome }} outcome for test-pypi = ${{ steps.publish-to-test-pypi.outcome }}"
+          git reset --hard HEAD~1
+          git push origin HEAD --force
+          git branch -d releases/${NEW_TAG}
+          git push origin --delete releases/${NEW_TAG}
+          echo "reverted changes to main and removed release branch"
+
+      - name: If publish to pypi/test-pypi failed, delete the release and tag on github
+        if: ${{ ! (steps.publish-to-pypi.outcome == 'success' || steps.publish-to-test-pypi.outcome == 'success') }}
+        uses: dev-drprasad/delete-tag-and-release@v0.2.1
+        env:
+          GITHUB_TOKEN: ${{ secrets.AUTO_PROJECT_PAT }}
+        with:
+          tag_name: ${{ env.NEW_TAG }}
+
+      - name: If publish to pypi/test-pypi failed, exit with exit code 1
+        if: ${{ steps.publish-to-pypi.outcome != 'success' && steps.publish-to-test-pypi.outcome != 'success' }}
+        run: |
+          exit 1
+
+      #--------------------------------------------------------------
+      # merge changes back to develop
+      #--------------------------------------------------------------
+      - name: Merge changes back to develop
+        run: |
+          git checkout develop
+          git merge main
+          git push
\ No newline at end of file
diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml
new file mode 100644
index 0000000..9b56959
--- /dev/null
+++ b/.github/workflows/run-tests.yml
@@ -0,0 +1,96 @@
+name: Run Pytests
+
+on:
+  push:
+    paths:
+      - '**.py'
+      - '**.yaml'
+      - '**.yml'
+      - '**.toml'
+  pull_request:
+    types:
+      - opened
+      - synchronize
+      - reopened
+    paths:
+      - '**.py'
+      - '**.yaml'
+      - '**.yml'
+      - '**.toml'
+
+defaults:
+  run:
+    shell: bash
+
+env:
+  LANG: en_US.utf-8
+  LC_ALL: en_US.utf-8
+  PYTHON_VERSION: '3.10'
+  PROJECT_NAME: volttron-postgresql-historian
+  PGPASSWORD: 'postgres'
+
+jobs:
+
+  run-tests:
+    strategy:
+      matrix:
+        os: [ "ubuntu-22.04" ]
+        python: [ "3.10", "3.11" ]
+
+    runs-on: ${{ matrix.os }}
+
+    services:
+      # Label used to access the service container
+      postgres:
+        # Docker Hub image
+        image: postgres
+        # Provide the password for postgres
+        env:
+          POSTGRES_PASSWORD: postgres
+        ports:
+          # Maps tcp port 5432 on service container to the host
+          - 5432:5432
+
+    steps:
+      - run: echo "πŸŽ‰ The job was automatically triggered by a ${{ github.event_name }} event."
+      - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by GitHub!"
+      - run: echo "πŸ”Ž The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}."
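+      # Note: the postgres service container above is reachable at
+      # localhost:5432 via the port mapping, and the psql commands in the
+      # database-setup step authenticate using the PGPASSWORD env value.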
+
+      #----------------------------------------------
+      #       check-out repo and set-up python
+      #----------------------------------------------
+      - name: Checkout code
+        uses: actions/checkout@v3
+
+      - name: Set up Python ${{ matrix.python }}
+        id: setup-python
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python }}
+
+      #----------------------------------------------
+      #  -----  install & configure poetry  -----
+      #----------------------------------------------
+      - name: Install Poetry
+        uses: snok/install-poetry@v1
+        with:
+          virtualenvs-create: true
+          virtualenvs-in-project: true
+          installer-parallel: true
+
+      #------------------
+      # setup database
+      #----------------
+      - run: |
+          psql -h localhost -U postgres -c "CREATE DATABASE test_historian;"
+          psql -h localhost -U postgres -c "CREATE USER historian with encrypted password 'historian';"
+          psql -h localhost -U postgres -c "GRANT ALL PRIVILEGES on database test_historian to historian;"
+
+      #----------------------------------------------
+      #  install your root project, and run tests.
+      #----------------------------------------------
+      - name: Install library and run tests
+        run: |
+          poetry install --no-interaction
+          poetry add --group dev pytest-github-actions-annotate-failures
+          poetry run pytest tests/test_postgresql_historian_integration.py
diff --git a/README.md b/README.md
old mode 100644
new mode 100755
index f1f9136..b55b39b
--- a/README.md
+++ b/README.md
@@ -1,5 +1,9 @@
+[![Eclipse VOLTTRONβ„’](https://img.shields.io/badge/Eclipse%20VOLTTRON--red.svg)](https://volttron.readthedocs.io/en/latest/)
+![Python 3.10](https://img.shields.io/badge/python-3.10-blue.svg)
+![Python 3.11](https://img.shields.io/badge/python-3.11-blue.svg)
 [![Run Pytests](https://github.com/eclipse-volttron/volttron-postgresql-historian/actions/workflows/run-test.yml/badge.svg)](https://github.com/eclipse-volttron/volttron-postgresql-historian/actions/workflows/run-test.yml)
 [![pypi version](https://img.shields.io/pypi/v/volttron-postgresql-historian.svg)](https://pypi.org/project/volttron-postgresql-historian/)
+![Passing?](https://github.com/eclipse-volttron/volttron-postgresql-historian/actions/workflows/run-tests.yml/badge.svg)
 
 VOLTTRON historian agent that stores data into a PostgreSQL database
 
@@ -22,15 +26,12 @@ VOLTTRON historian agent that stores data into a PostgreSQL database
 
 PostgreSQL historian supports two configuration parameters
 
-    - connection - This is a mandatory parameter with type indicating the type of sql historian (i.e. postgresql) and
-      params containing the database access details
-
-    - tables_def - Optional parameter to provide custom table names for topics, data, and metadata.
+    - connection - This is a mandatory parameter with type indicating the type of sql historian (i.e. postgresql) and params containing the database access details
+    - tables_def - Optional parameter to provide custom table names for topics, data, and metadata.
 
 The configuration can be in a json or yaml formatted file. The following examples show minimal connection
-configurations for a psycopg2-based historian. Other options are available and are
-[documented here](https://www.psycopg.org/docs/module.html#psycopg2.connect) **Not all parameters have been tested,
-use at your own risk**.
+configurations for a psycopg2-based historian. Other options are available and are [documented here](https://www.psycopg.org/docs/module.html#psycopg2.connect).
+**Not all parameters have been tested, use at your own risk.**
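+
+For example, extra psycopg2/libpq options can be passed alongside the required
+connection details (a sketch only; `connect_timeout` and `sslmode` are standard
+psycopg2 connect arguments, but the values here are illustrative and untested):
+
+```json
+{
+    "connection": {
+        "type": "postgresql",
+        "params": {
+            "dbname": "volttron",
+            "host": "localhost",
+            "port": 5432,
+            "user": "historian",
+            "password": "historian",
+            "connect_timeout": 10,
+            "sslmode": "prefer"
+        }
+    }
+}
+```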
#### Local PostgreSQL Database @@ -109,7 +110,7 @@ add \'timescale_dialect: true\' to the connection params in the agent config as ## Requirements - - Python >= 3.8 + - Python >= 3.10 - psycopg2 library ## Installation @@ -136,14 +137,14 @@ add \'timescale_dialect: true\' to the connection params in the agent config as a user with appropriate permissions. This way the database user used by the historian need not have CREATE privileges Postgres historian expects two tables a. A topics tables that stores the list of unique topics and its metadata. The default name is "topics". If you use - a different name please specify it as part of "tables_def" configuration parameter in agent config. See (#Yaml-Format) + a different name please specify it as part of "tables_def" configuration parameter in agent config. See [example configuration](#yaml-format) b. A data table that stores the timeseries data and refers to the topic table using a topic id. The default name is "data". If you use a different name please specify it as part of "tables_def" configuration parameter in - agent config. See (#Yaml-Format) + agent config. See [example configuration](#yaml-format) Below are the sql statements to create database and tables Create Database - ``` + ``` CREATE DATABASE volttron ``` TOPICS tables: @@ -155,8 +156,7 @@ add \'timescale_dialect: true\' to the connection params in the agent config as UNIQUE (topic_name) ) ``` - - DATA table: + DATA table: ``` CREATE TABLE IF NOT EXISTS data ( ts TIMESTAMP NOT NULL, @@ -165,16 +165,16 @@ add \'timescale_dialect: true\' to the connection params in the agent config as UNIQUE (topic_id, ts) ) ``` - Optional timescale hypertable + Optional timescale hypertable ``` SELECT create_hypertable(data, 'ts', if_not_exists => true) ``` - Create index to speed up data access + Create index to speed up data access If using hypertables: ``` CREATE INDEX IF NOT EXISTS idx_data ON data (topic_id, ts) ``` - If not using hypertables: + If not using hypertables: ``` CREATE INDEX IF NOT EXISTS idx_data ON data (ts ASC) ``` @@ -190,7 +190,7 @@ add \'timescale_dialect: true\' to the connection params in the agent config as 4. Create an agent configuration file - Create an agent configuration with appropriate connection parameters as described in [Configurations section](#Configuration) + Create an agent configuration with appropriate connection parameters as described in the [Configurations section](#Configuration) 5. Install and start the volttron-postgresql-historian. diff --git a/dev/pyproject.toml b/dev/pyproject.toml deleted file mode 100644 index f045d72..0000000 --- a/dev/pyproject.toml +++ /dev/null @@ -1,55 +0,0 @@ -[tool.poetry] -name = "volttron-postgresql-historian" -version = "0.1.0-rc0" -description = "VOLTTRON historian agent that stores data in a PostgreSQL database. It extends the SQLHistorian class." 
-authors = ["VOLTTRON Team "] -license = "Apache License 2.0" -readme = "../README.md" -repository = "https://github.com/eclipse-volttron/volttron-postgresql-historian" -homepage = "https://github.com/eclipse-volttron/volttron-postgresql-historian" -keywords = [] -packages = [ { include = "historian", from = "../src" } ] - -[tool.poetry.dependencies] -python = ">=3.10,<4.0" -psycopg2-binary = "^2.9.5" - -[tool.poetry.group.dev.dependencies] -# formatting, quality, tests -pytest = "^6.2.5" -mock = "^4.0.3" -pre-commit = "^2.17.0" -yapf = "^0.32.0" -toml = "^0.10.2" -isort = "^5.10.1" -safety = "^1.10.3" -mypy = "^0.942" -coverage = "^6.3.2" -pytest-cov = "^3.0.0" -Sphinx = "^4.5.0" -sphinx-rtd-theme = "^1.0.0" -volttron = {path = "../../volttron-core", develop = true} -volttron-testing = {path = "../../volttron-testing", develop = true} -volttron-lib-base-historian = {path = "../../volttron-lib-base-historian", develop = true} -volttron-lib-sql-historian = {path = "../../volttron-lib-sql-historian", develop = true} - -[tool.yapfignore] -ignore_patterns = [ - ".venv/**", - ".pytest_cache/**", - "dist/**", - "docs/**" -] - -[tool.yapf] -based_on_style = "pep8" -spaces_before_comment = 4 -column_limit = 99 -split_before_logical_operator = true - -[tool.poetry.scripts] -volttron-postgresql-historian = "historian.sql.historian:main" - -[build-system] -requires = ["poetry-core>=1.0.0"] -build-backend = "poetry.core.masonry.api" diff --git a/pyproject.toml b/pyproject.toml index 7864eb7..1e42351 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "volttron-postgresql-historian" -version = "0.1.0-rc0" +version = "0.1.0" description = "VOLTTRON historian agent that stores data in a PostgreSQL database. It extends the SQLHistorian class." 
authors = ["VOLTTRON Team "] license = "Apache License 2.0" @@ -12,7 +12,7 @@ packages = [ { include = "historian", from = "src" } ] [tool.poetry.dependencies] python = ">=3.10,<4.0" -volttron-lib-sql-historian="^0.2.0rc0" +volttron-lib-sql-historian="^0.1.1a8" psycopg2-binary = "^2.9.5" [tool.poetry.group.dev.dependencies] @@ -27,9 +27,9 @@ safety = "^1.10.3" mypy = "^0.942" coverage = "^6.3.2" pytest-cov = "^3.0.0" -Sphinx = "^4.5.0" -sphinx-rtd-theme = "^1.0.0" -volttron-testing = "^0.4.0rc1" +Sphinx = "^6.0.0" +sphinx-rtd-theme = "^1.2.0" +volttron-testing = "^0.4.0rc3" [tool.yapfignore] ignore_patterns = [ @@ -49,5 +49,5 @@ split_before_logical_operator = true volttron-postgresql-historian = "historian.sql.historian:main" [build-system] -requires = ["poetry-core>=1.0.0"] +requires = ["poetry-core>=1.2.0"] build-backend = "poetry.core.masonry.api" diff --git a/src/historian/postgresql/postgresqlfuncts.py b/src/historian/postgresql/postgresqlfuncts.py index c4b61c3..5e8ba9e 100644 --- a/src/historian/postgresql/postgresqlfuncts.py +++ b/src/historian/postgresql/postgresqlfuncts.py @@ -51,18 +51,25 @@ class PostgreSqlFuncts(DbDriver): def __init__(self, connect_params, table_names): - if table_names: - self.data_table = table_names['data_table'] - self.topics_table = table_names['topics_table'] - self.meta_table = table_names['meta_table'] - self.agg_topics_table = table_names.get('agg_topics_table') - self.agg_meta_table = table_names.get('agg_meta_table') - connect_params = copy.deepcopy(connect_params) - if "timescale_dialect" in connect_params: - self.timescale_dialect = connect_params.get("timescale_dialect", False) - del connect_params["timescale_dialect"] - else: - self.timescale_dialect = False + try: + self.db_name = connect_params.get('dbname') + if not self.db_name: + raise ValueError("Connection parameter does not include the parameter 'db_name'") + if table_names: + self.data_table = table_names['data_table'] + self.topics_table = table_names['topics_table'] + self.meta_table = table_names['meta_table'] + self.agg_topics_table = table_names.get('agg_topics_table') + self.agg_meta_table = table_names.get('agg_meta_table') + connect_params = copy.deepcopy(connect_params) + if "timescale_dialect" in connect_params: + self.timescale_dialect = connect_params.get("timescale_dialect", False) + del connect_params["timescale_dialect"] + else: + self.timescale_dialect = False + _log.debug(f"init of postgres functs. connection params is {connect_params}") + except BaseException as e: + _log.error(f"Exception in init of postgres with {connect_params} : {e}") def connect(): connection = psycopg2.connect(**connect_params) @@ -154,7 +161,7 @@ def rollback(self): def setup_historian_tables(self): rows = self.select(f"""SELECT table_name FROM information_schema.tables - WHERE table_catalog = 'test_historian' and table_schema = 'public' + WHERE table_catalog = '{self.db_name}' and table_schema = 'public' AND table_name = '{self.data_table}'""") if rows: _log.debug("Found table {}. 
Historian table exists".format( @@ -165,6 +172,7 @@ def setup_historian_tables(self): # metadata is in topics table self.meta_table = self.topics_table else: + _log.debug("Creating topic and data tables") self.execute_stmt(SQL( 'CREATE TABLE IF NOT EXISTS {} (' 'ts TIMESTAMP NOT NULL, ' @@ -219,51 +227,55 @@ def setup_aggregate_historian_tables(self): def query(self, topic_ids, id_name_map, start=None, end=None, skip=0, agg_type=None, agg_period=None, count=None, order='FIRST_TO_LAST'): - if agg_type and agg_period: - table_name = agg_type + '_' + agg_period - value_col = 'agg_value' - else: - table_name = self.data_table - value_col = 'value_string' - - topic_id = Literal(0) - query = [SQL( - '''SELECT to_char(ts, 'YYYY-MM-DD"T"HH24:MI:SS.USOF:00'), ''' + value_col + ' \n' - 'FROM {}\n' - 'WHERE topic_id = {}' - ).format(Identifier(table_name), topic_id)] - if start and start.tzinfo != pytz.UTC: - start = start.astimezone(pytz.UTC) - if end and end.tzinfo != pytz.UTC: - end = end.astimezone(pytz.UTC) - if start and start == end: - query.append(SQL(' AND ts = {}').format(Literal(start))) - else: - if start: - query.append(SQL(' AND ts >= {}').format(Literal(start))) - if end: - query.append(SQL(' AND ts < {}').format(Literal(end))) - query.append(SQL('ORDER BY ts {}'.format( - 'DESC' if order == 'LAST_TO_FIRST' else 'ASC'))) - if skip or count: - query.append(SQL('LIMIT {} OFFSET {}').format( - Literal(None if not count or count < 0 else count), - Literal(None if not skip or skip < 0 else skip))) - query = SQL('\n').join(query) - values = {} - if value_col == 'agg_value': - for topic_id._wrapped in topic_ids: - name = id_name_map[topic_id.wrapped] - with self.select(query, fetch_all=False) as cursor: - values[name] = [(ts, value) - for ts, value in cursor] - else: - for topic_id._wrapped in topic_ids: - name = id_name_map[topic_id.wrapped] - with self.select(query, fetch_all=False) as cursor: - values[name] = [(ts, jsonapi.loads(value)) - for ts, value in cursor] - return values + try: + if agg_type and agg_period: + table_name = agg_type + '_' + agg_period + value_col = 'agg_value' + else: + table_name = self.data_table + value_col = 'value_string' + + topic_id = Literal(0) + query = [SQL( + '''SELECT to_char(ts, 'YYYY-MM-DD"T"HH24:MI:SS.USOF:00'), ''' + value_col + ' \n' + 'FROM {}\n' + 'WHERE topic_id = {}' + ).format(Identifier(table_name), topic_id)] + if start and start.tzinfo != pytz.UTC: + start = start.astimezone(pytz.UTC) + if end and end.tzinfo != pytz.UTC: + end = end.astimezone(pytz.UTC) + if start and start == end: + query.append(SQL(' AND ts = {}').format(Literal(start))) + else: + if start: + query.append(SQL(' AND ts >= {}').format(Literal(start))) + if end: + query.append(SQL(' AND ts < {}').format(Literal(end))) + query.append(SQL('ORDER BY ts {}'.format( + 'DESC' if order == 'LAST_TO_FIRST' else 'ASC'))) + if skip or count: + query.append(SQL('LIMIT {} OFFSET {}').format( + Literal(None if not count or count < 0 else count), + Literal(None if not skip or skip < 0 else skip))) + query = SQL('\n').join(query) + values = {} + if value_col == 'agg_value': + for topic_id._wrapped in topic_ids: + name = id_name_map[topic_id.wrapped] + with self.select(query, fetch_all=False) as cursor: + values[name] = [(ts, value) + for ts, value in cursor] + else: + for topic_id._wrapped in topic_ids: + name = id_name_map[topic_id.wrapped] + with self.select(query, fetch_all=False) as cursor: + values[name] = [(ts, jsonapi.loads(value)) + for ts, value in cursor] + _log.debug(f"Returning 
values: {values}")
+            return values
+        except BaseException as e:
+            _log.error(f"Got exception while querying: {e}")
+            # return an empty result rather than None so callers always get a dict
+            return {}
 
     def insert_topic(self, topic, **kwargs):
         meta = kwargs.get('metadata')
diff --git a/tests/conftest.py b/tests/conftest.py
index 7e07da1..2f9938c 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -22,11 +22,9 @@
 # ===----------------------------------------------------------------------===
 # }}}
-import os
+
 from pathlib import Path
-import shutil
 import sys
-import tempfile
 
 import pytest
 from volttrontesting.fixtures.volttron_platform_fixtures import volttron_instance
diff --git a/tests/test_postgresql_historian_integration.py b/tests/test_postgresql_historian_integration.py
new file mode 100644
index 0000000..1de149f
--- /dev/null
+++ b/tests/test_postgresql_historian_integration.py
@@ -0,0 +1,156 @@
+# -*- coding: utf-8 -*- {{{
+# ===----------------------------------------------------------------------===
+#
+#                 Installable Component of Eclipse VOLTTRON
+#
+# ===----------------------------------------------------------------------===
+#
+# Copyright 2022 Battelle Memorial Institute
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy
+# of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# ===----------------------------------------------------------------------===
+# }}}
+import os
+
+import gevent
+import pytest
+from pathlib import Path
+try:
+    import psycopg2
+    from psycopg2.sql import SQL, Identifier
+except ImportError:
+    pytest.skip(
+        "Required imports for testing are not installed; Run pip install psycopg2-binary before running tests",
+        allow_module_level=True,
+    )
+
+from historian.testing.integration_test_interface import HistorianTestInterface
+
+
+class TestPostgresqlIntegration(HistorianTestInterface):
+
+    @pytest.fixture(scope="module")
+    def historian(self, volttron_instance):
+        user = 'historian'
+        password = 'historian'
+        if os.environ.get('GITHUB_ACTIONS'):
+            user = 'postgres'
+            password = 'postgres'
+        historian_config = {
+            "connection": {
+                "type": "postgresql",
+                "params": {
+                    'dbname': 'test_historian',
+                    'port': 5432,
+                    'host': 'localhost',
+                    'user': user,
+                    'password': password
+                }
+            }
+        }
+        table_names = {
+            "table_prefix": "",
+            "data_table": "data",
+            "topics_table": "topics",
+            "meta_table": "meta"
+        }
+
+        historian_version = ">=4.0.0"
+        self.setup_db(historian_config["connection"]["params"], table_names, historian_version)
+        agent_path = Path(__file__).parents[1]
+        historian_uuid = volttron_instance.install_agent(
+            vip_identity='platform.historian',
+            agent_dir=agent_path,
+            config_file=historian_config,
+            start=True)
+        print("agent id: ", historian_uuid)
+        gevent.sleep(1)
+        yield "platform.historian", 6
+        if volttron_instance.is_running() and volttron_instance.is_agent_running(historian_uuid):
+            volttron_instance.stop_agent(historian_uuid)
+            volttron_instance.remove_agent(historian_uuid)
+        gevent.sleep(5)
+
+    def setup_db(self, connection_params, table_names, historian_version):
+        self.db_connection = psycopg2.connect(**connection_params)
+        self.db_connection.autocommit = True
+        try:
+            self.cleanup_tables(table_names.values(), drop_tables=True)
+        except Exception as exc:
+            print('Error truncating existing tables: {}'.format(exc))
+        if historian_version == "<4.0.0":
+            # test for backward compatibility
+            # explicitly create tables based on old schema - i.e separate topics and meta table - so that historian
+            # does not create tables with new schema on startup
+            print("Setting up for version <4.0.0")
+            cursor = self.db_connection.cursor()
+            cursor.execute(SQL(
+                'CREATE TABLE IF NOT EXISTS {} ('
+                'ts TIMESTAMP NOT NULL, '
+                'topic_id INTEGER NOT NULL, '
+                'value_string TEXT NOT NULL, '
+                'UNIQUE (topic_id, ts)'
+                ')').format(Identifier(table_names['data_table'])))
+            cursor.execute(SQL(
+                'CREATE INDEX IF NOT EXISTS {} ON {} (ts ASC)').format(
+                    Identifier('idx_' + table_names['data_table']),
+                    Identifier(table_names['data_table'])))
+            cursor.execute(SQL(
+                'CREATE TABLE IF NOT EXISTS {} ('
+                'topic_id SERIAL PRIMARY KEY NOT NULL, '
+                'topic_name VARCHAR(512) NOT NULL, '
+                'UNIQUE (topic_name)'
+                ')').format(Identifier(table_names['topics_table'])))
+            cursor.execute(SQL(
+                'CREATE TABLE IF NOT EXISTS {} ('
+                'topic_id INTEGER PRIMARY KEY NOT NULL, '
+                'metadata TEXT NOT NULL'
+                ')').format(Identifier(table_names['meta_table'])))
+            self.db_connection.commit()
+        gevent.sleep(5)
+
+    def cleanup_tables(self, truncate_tables, drop_tables=False):
+        cursor = self.db_connection.cursor()
+        if truncate_tables is None:
+            truncate_tables = self.select_all_postgresql_tables()
+
+        if drop_tables:
+            for table in truncate_tables:
+                if table:
+                    cursor.execute(SQL('DROP TABLE IF EXISTS {}').format(
+                        Identifier(table)))
+        else:
+            for table in truncate_tables:
+                if table:
+                    # SQL is imported from psycopg2.sql above; psycopg2.SQL does not exist
+                    cursor.execute(SQL('TRUNCATE TABLE {}').format(
+                        Identifier(table)))
+
+        self.db_connection.commit()
+        cursor.close()
+
+    def select_all_postgresql_tables(self):
+        cursor = self.db_connection.cursor()
+        tables = []
+        try:
+            cursor.execute("""SELECT table_name FROM information_schema.tables
+                           WHERE table_catalog = 'test_historian' and table_schema = 'public'""")
+            rows = cursor.fetchall()
+            print(f"table names {rows}")
+            tables = [columns[0] for columns in rows]
+        except Exception as e:
+            print("Error getting list of tables: {}".format(e))
+        finally:
+            if cursor:
+                cursor.close()
+        return tables
diff --git a/tests/test_postgresqlfuncts_unit.py b/tests/test_postgresqlfuncts_unit.py
new file mode 100644
index 0000000..d44e961
--- /dev/null
+++ b/tests/test_postgresqlfuncts_unit.py
@@ -0,0 +1,786 @@
+# -*- coding: utf-8 -*- {{{
+# ===----------------------------------------------------------------------===
+#
+#                 Installable Component of Eclipse VOLTTRON
+#
+# ===----------------------------------------------------------------------===
+#
+# Copyright 2022 Battelle Memorial Institute
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy
+# of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# ===----------------------------------------------------------------------===
+# }}}
+
+import datetime
+
+import itertools
+import os
+import logging
+import pytest
+from time import time
+
+
+try:
+    import psycopg2
+    from psycopg2.sql import SQL, Identifier
+except ImportError:
+    pytest.skip(
+        "Required imports for testing are not installed; Run pip install psycopg2-binary before running tests",
+        allow_module_level=True,
+    )
+from volttron.utils import jsonapi
+from historian.postgresql.postgresqlfuncts import PostgreSqlFuncts
+from volttrontesting.fixtures.docker_wrapper import create_container
+from volttrontesting.utils import get_rand_port
+
+IN_GITHUB_ACTIONS = os.getenv("GITHUB_ACTIONS") == "true"
+if IN_GITHUB_ACTIONS:
+    pytest.skip("skipping unit tests on github.", allow_module_level=True)
+
+
+logging.getLogger("urllib3.connectionpool").setLevel(logging.INFO)
+pytestmark = [pytest.mark.postgresqlfuncts, pytest.mark.dbutils, pytest.mark.unit]
+
+
+IMAGES = ["postgres:13"]
+if "CI" in os.environ:
+    IMAGES.extend(
+        ["postgres:12", "postgres:11"]
+    )
+
+ALLOW_CONNECTION_TIME = 10
+CONNECTION_HOST = "localhost"
+TEST_DATABASE = "test_historian"
+ROOT_USER = "postgres"
+ROOT_PASSWORD = "password"
+ENV_POSTGRESQL = {
+    "POSTGRES_USER": ROOT_USER,  # defining user not necessary but added to be explicit
+    "POSTGRES_PASSWORD": ROOT_PASSWORD,
+    "POSTGRES_DB": TEST_DATABASE,
+}
+DATA_TABLE = "data"
+TOPICS_TABLE = "topics"
+META_TABLE = "meta"
+AGG_TOPICS_TABLE = "aggregate_topics"
+AGG_META_TABLE = "aggregate_meta"
+
+
+def test_insert_meta_should_return_true(get_container_func):
+    container, sqlfuncts, connection_port, historian_version = get_container_func
+    if historian_version != "<4.0.0":
+        pytest.skip("insert_meta() is called by historian only for schema <4.0.0")
+    topic_id = "44"
+    metadata = "foobar44"
+    expected_data = (44, '"foobar44"')
+
+    res = sqlfuncts.insert_meta(topic_id, metadata)
+
+    assert res is True
+    assert get_data_in_table(connection_port, "meta")[0] == expected_data
+
+
+def test_update_meta_should_succeed(get_container_func):
+    container, sqlfuncts, connection_port, historian_version = get_container_func
+    metadata = {"units": "count"}
+    metadata_s = jsonapi.dumps(metadata)
+    topic = "foobar"
+
+    id = sqlfuncts.insert_topic(topic)
+    sqlfuncts.insert_meta(id, {"fdjlj": "XXXX"})
+    assert metadata_s not in get_data_in_table(connection_port, TOPICS_TABLE)[0]
+
+    res = sqlfuncts.update_meta(id, metadata)
+
+    expected_lt_4 = [(1, metadata_s)]
+    expected_gteq_4 = [(1, topic, metadata_s)]
+    assert res is True
+    if historian_version == "<4.0.0":
+        assert get_data_in_table(connection_port, META_TABLE) == expected_lt_4
+    else:
+        assert get_data_in_table(connection_port, TOPICS_TABLE) == expected_gteq_4
+
+
+def test_setup_historian_tables_should_create_tables(get_container_func):
+    container, sqlfuncts, connection_port, historian_version = get_container_func
+    # get_container initializes the db and sqlfuncts;
+    # to test setup, explicitly drop the tables and verify they get created correctly
+    drop_all_tables(connection_port)
+
+    tables_before_setup = get_tables(connection_port)
+    assert tables_before_setup == set()
+    expected_tables = set(["data", "topics"])
+    sqlfuncts.setup_historian_tables()
+    actual_tables = get_tables(connection_port)
+    assert actual_tables == expected_tables
+
+
+def test_setup_aggregate_historian_tables_should_create_aggregate_tables(get_container_func):
+    container, sqlfuncts, connection_port, historian_version = get_container_func
+    # 
get_container initializes db and sqlfuncts to test setup explicitly drop tables and see if tables get created + drop_all_tables(connection_port) + create_historian_tables(container, historian_version) + agg_topic_table = "aggregate_topics" + agg_meta_table = "aggregate_meta" + + original_tables = get_tables(connection_port) + assert agg_topic_table not in original_tables + assert agg_meta_table not in original_tables + + expected_agg_topic_fields = { + "agg_topic_id", + "agg_topic_name", + "agg_time_period", + "agg_type", + } + expected_agg_meta_fields = {"agg_topic_id", "metadata"} + + sqlfuncts.setup_aggregate_historian_tables() + + updated_tables = get_tables(connection_port) + assert agg_topic_table in updated_tables + assert agg_meta_table in updated_tables + assert ( + describe_table(connection_port, agg_topic_table) + == expected_agg_topic_fields + ) + assert ( + describe_table(connection_port, agg_meta_table) == expected_agg_meta_fields + ) + assert sqlfuncts.agg_topics_table == agg_topic_table + assert sqlfuncts.agg_meta_table == agg_meta_table + assert sqlfuncts.data_table == DATA_TABLE + assert sqlfuncts.topics_table == TOPICS_TABLE + if sqlfuncts.meta_table != TOPICS_TABLE: + assert sqlfuncts.meta_table == META_TABLE + + +@pytest.mark.parametrize( + "topic_ids, id_name_map, expected_values", + [ + ([42], {42: "topic42"}, {"topic42": []}), + ( + [43], + {43: "topic43"}, + {"topic43": [("2020-06-01T12:30:59.000000+00:00", [2, 3])]}, + ), + ], +) +def test_query_should_return_data(get_container_func, topic_ids, id_name_map, expected_values): + container, sqlfuncts, connection_port, historian_version = get_container_func + + query = f""" + INSERT INTO {DATA_TABLE} VALUES ('2020-06-01 12:30:59', 43, '[2,3]') + """ + seed_database(container, query) + actual_values = sqlfuncts.query(topic_ids, id_name_map) + assert actual_values == expected_values + + +def test_insert_topic_should_return_topic_id(get_container_func): + container, sqlfuncts, connection_port, historian_version = get_container_func + + topic = "football" + expected_topic_id = 1 + actual_topic_id = sqlfuncts.insert_topic(topic) + assert actual_topic_id == expected_topic_id + + +def test_insert_topic_and_meta_query_should_succeed(get_container_func): + container, sqlfuncts, connection_port, historian_version = get_container_func + if historian_version == "<4.0.0": + pytest.skip("Not relevant for historian schema before 4.0.0") + topic = "football" + metadata = {"units": "count"} + actual_id = sqlfuncts.insert_topic(topic, metadata=metadata) + + assert isinstance(actual_id, int) + result = get_data_in_table(connection_port, "topics")[0] + assert (actual_id, topic) == result[0:2] + assert metadata == jsonapi.loads(result[2]) + + +def test_insert_agg_topic_should_return_agg_topic_id(get_container_func): + container, sqlfuncts, connection_port, historian_version = get_container_func + + topic = "some_agg_topic" + agg_type = "AVG" + agg_time_period = "2019" + expected_data = (1, "some_agg_topic", "AVG", "2019") + + actual_id = sqlfuncts.insert_agg_topic( + topic, agg_type, agg_time_period + ) + + assert isinstance(actual_id, int) + assert get_data_in_table(connection_port, AGG_TOPICS_TABLE)[0] == expected_data + + +def test_insert_data_should_return_true(get_container_func): + container, sqlfuncts, connection_port, historian_version = get_container_func + + ts = "2001-09-11 08:46:00" + topic_id = "11" + data = "1wtc" + expected_data = [(datetime.datetime(2001, 9, 11, 8, 46), 11, '"1wtc"')] + + res = sqlfuncts.insert_data(ts, 
topic_id, data) + + assert res is True + assert get_data_in_table(connection_port, "data") == expected_data + + +def test_update_topic_should_return_true(get_container_func): + container, sqlfuncts, connection_port, historian_version = get_container_func + + topic = "football" + actual_id = sqlfuncts.insert_topic(topic) + assert isinstance(actual_id, int) + + result = sqlfuncts.update_topic("soccer", actual_id) + assert result is True + assert (actual_id, "soccer") == get_data_in_table(connection_port, "topics")[0][0:2] + + +def test_update_topic_and_metadata_should_succeed(get_container_func): + container, sqlfuncts, connection_port, historian_version = get_container_func + if historian_version == "<4.0.0": + pytest.skip("Not relevant for historian schema before 4.0.0") + topic = "football" + actual_id = sqlfuncts.insert_topic(topic) + + assert isinstance(actual_id, int) + + result = sqlfuncts.update_topic("soccer", actual_id, metadata={"test": "test value"}) + + assert result is True + assert (actual_id, "soccer", '{"test": "test value"}') == get_data_in_table(connection_port, "topics")[0] + + + +def test_get_aggregation_list_should_return_list(get_container_func): + container, sqlfuncts, connection_port, historian_version = get_container_func + + expected_list = [ + "AVG", + "MIN", + "MAX", + "COUNT", + "SUM", + "BIT_AND", + "BIT_OR", + "BOOL_AND", + "BOOL_OR", + "MEDIAN", + "STDDEV", + "STDDEV_POP", + "STDDEV_SAMP", + "VAR_POP", + "VAR_SAMP", + "VARIANCE", + ] + + assert sqlfuncts.get_aggregation_list() == expected_list + + +def test_insert_agg_topic_should_return_true(get_container_func): + container, sqlfuncts, connection_port, historian_version = get_container_func + + topic = "some_agg_topic" + agg_type = "AVG" + agg_time_period = "2019" + expected_data = (1, "some_agg_topic", "AVG", "2019") + + actual_id = sqlfuncts.insert_agg_topic( + topic, agg_type, agg_time_period + ) + + assert isinstance(actual_id, int) + assert get_data_in_table(connection_port, AGG_TOPICS_TABLE)[0] == expected_data + + +def test_update_agg_topic_should_return_true(get_container_func): + container, sqlfuncts, connection_port, historian_version = get_container_func + + topic = "cars" + agg_type = "SUM" + agg_time_period = "2100ZULU" + expected_data = (1, "cars", "SUM", "2100ZULU") + + actual_id = sqlfuncts.insert_agg_topic( + topic, agg_type, agg_time_period + ) + + assert isinstance(actual_id, int) + assert get_data_in_table(connection_port, AGG_TOPICS_TABLE)[0] == expected_data + + new_agg_topic_name = "boats" + expected_data = (1, "boats", "SUM", "2100ZULU") + + result = sqlfuncts.update_agg_topic(actual_id, new_agg_topic_name) + + assert result is True + assert get_data_in_table(connection_port, AGG_TOPICS_TABLE)[0] == expected_data + + +def test_insert_agg_meta_should_return_true(get_container_func): + container, sqlfuncts, connection_port, historian_version = get_container_func + + topic_id = 42 + # metadata must be in the following convention because aggregation methods, i.e. 
get_agg_topics, rely on metadata having a key called "configured_topics" + metadata = {"configured_topics": "meaning of life"} + expected_data = (42, '{"configured_topics": "meaning of life"}') + + result = sqlfuncts.insert_agg_meta(topic_id, metadata) + + assert result is True + assert get_data_in_table(connection_port, AGG_META_TABLE)[0] == expected_data + + +def test_get_topic_map_should_return_maps(get_container_func): + container, sqlfuncts, connection_port, historian_version = get_container_func + + query = """ + INSERT INTO topics (topic_name) + VALUES ('football'); + INSERT INTO topics (topic_name) + VALUES ('baseball'); + """ + seed_database(container, query) + expected = ( + {"baseball": 2, "football": 1}, + {"baseball": "baseball", "football": "football"}, + ) + + actual = sqlfuncts.get_topic_map() + + assert actual == expected + + +def test_get_topic_meta_map_should_return_maps(get_container_func): + container, sqlfuncts, connection_port, historian_version = get_container_func + if historian_version == "<4.0.0": + pytest.skip("method applied only to version >=4.0.0") + else: + query = """ + INSERT INTO topics (topic_name) + VALUES ('football'); + INSERT INTO topics (topic_name, metadata) + VALUES ('baseball', '{\\"meta\\":\\"value\\"}'); + """ + seed_database(container, query) + expected = {1: None, 2: {"meta": "value"}} + actual = sqlfuncts.get_topic_meta_map() + assert actual == expected + + +def test_get_agg_topics_should_return_list(get_container_func): + container, sqlfuncts, connection_port, historian_version = get_container_func + + topic = "some_agg_topic" + agg_type = "AVG" + agg_time_period = "2019" + topic_id = sqlfuncts.insert_agg_topic( + topic, agg_type, agg_time_period + ) + metadata = {"configured_topics": "meaning of life"} + sqlfuncts.insert_agg_meta(topic_id, metadata) + expected_list = [("some_agg_topic", "AVG", "2019", "meaning of life")] + + actual_list = sqlfuncts.get_agg_topics() + + assert actual_list == expected_list + + +def test_get_agg_topic_map_should_return_dict(get_container_func): + container, sqlfuncts, connection_port, historian_version = get_container_func + + query = f""" + INSERT INTO {AGG_TOPICS_TABLE} + (agg_topic_name, agg_type, agg_time_period) + VALUES ('topic_name', 'AVG', '2001'); + """ + seed_database(container, query) + expected = {("topic_name", "AVG", "2001"): 1} + + actual = sqlfuncts.get_agg_topic_map() + + assert actual == expected + + +@pytest.mark.parametrize( + "topic_1, topic_2, topic_3, topic_pattern, expected_result", + [ + ("'football'", "'foobar'", "'xzxzxccx'", "foo", {"football": 1, "foobar": 2}), + ("'football'", "'foobar'", "'xzxzxccx'", "ba", {"football": 1, "foobar": 2}), + ("'football'", "'foobar'", "'xzxzxccx'", "ccx", {"xzxzxccx": 3}), + ("'fotball'", "'foobar'", "'xzxzxccx'", "foo", {"foobar": 2}), + ("'football'", "'foooobar'", "'xzxzxccx'", "foooo", {"foooobar": 2}), + ( + "'FOOtball'", + "'ABCFOOoXYZ'", + "'XXXfOoOo'", + "foo", + {"FOOtball": 1, "ABCFOOoXYZ": 2, "XXXfOoOo": 3}, + ), + ], +) +def test_query_topics_by_pattern_should_return_matching_results( + get_container_func, + topic_1, + topic_2, + topic_3, + topic_pattern, + expected_result +): + container, sqlfuncts, connection_port, historian_version = get_container_func + + query = f""" + INSERT INTO {TOPICS_TABLE} (topic_name) + VALUES ({topic_1}); + INSERT INTO {TOPICS_TABLE} (topic_name) + VALUES ({topic_2}); + INSERT INTO {TOPICS_TABLE} (topic_name) + VALUES ({topic_3}); + """ + seed_database(container, query) + + actual_result = 
+
+
+def test_create_aggregate_store_should_succeed(get_container_func):
+    container, sqlfuncts, connection_port, historian_version = get_container_func
+
+    agg_type = "AVG"
+    agg_time_period = "1984"
+    expected_aggregate_table = "AVG_1984"
+    expected_fields = {"topics_list", "agg_value", "topic_id", "ts"}
+
+    sqlfuncts.create_aggregate_store(agg_type, agg_time_period)
+
+    assert expected_aggregate_table in get_tables(connection_port)
+    assert describe_table(connection_port, expected_aggregate_table) == expected_fields
+
+
+def test_insert_aggregate_stmt_should_succeed(get_container_func):
+    container, sqlfuncts, connection_port, historian_version = get_container_func
+
+    # Be aware that PostgreSQL automatically folds unquoted names to lower case;
+    # this is why the table created as AVG_1776 below is read back as avg_1776
+    # in the assertion at the end of this test.
+    # From https://www.postgresql.org/docs/current/sql-syntax-lexical.html:
+    # "Quoting an identifier also makes it case-sensitive, whereas unquoted names are always folded to lower case.
+    # For example, the identifiers FOO, foo, and "foo" are considered the same by PostgreSQL,
+    # but "Foo" and "FOO" are different from these three and each other.
+    # (The folding of unquoted names to lower case in PostgreSQL is incompatible with the SQL standard,
+    # which says that unquoted names should be folded to upper case.
+    # Thus, foo should be equivalent to "FOO" not "foo" according to the standard.
+    # If you want to write portable applications you are advised to always quote a particular name or never quote it.)"
+    query = """
+        CREATE TABLE AVG_1776 (
+        ts timestamp NOT NULL,
+        topic_id INTEGER NOT NULL,
+        agg_value DOUBLE PRECISION NOT NULL,
+        topics_list TEXT,
+        UNIQUE(ts, topic_id));
+        CREATE INDEX IF NOT EXISTS idx_avg_1776 ON avg_1776 (ts ASC);
+    """
+    seed_database(container, query)
+
+    agg_topic_id = 42
+    agg_type = "avg"
+    period = "1776"
+    ts = "2020-06-01 12:30:59"
+    data = 44.42
+    topic_ids = [12, 54, 65]
+    expected_data = (
+        datetime.datetime(2020, 6, 1, 12, 30, 59),
+        42,
+        44.42,
+        "[12, 54, 65]",
+    )
+
+    res = sqlfuncts.insert_aggregate(
+        agg_topic_id, agg_type, period, ts, data, topic_ids
+    )
+
+    assert res is True
+    assert get_data_in_table(connection_port, "avg_1776")[0] == expected_data
+
+
+def test_collect_aggregate_stmt_should_return_rows(get_container_func):
+    container, sqlfuncts, connection_port, historian_version = get_container_func
+
+    query = f"""
+        INSERT INTO {DATA_TABLE}
+        VALUES ('2020-06-01 12:30:59', 42, '2');
+        INSERT INTO {DATA_TABLE}
+        VALUES ('2020-06-01 12:31:59', 43, '8')
+    """
+    seed_database(container, query)
+
+    topic_ids = [42, 43]
+    agg_type = "avg"
+    expected_aggregate = (5.0, 2)
+
+    actual_aggregate = sqlfuncts.collect_aggregate(topic_ids, agg_type)
+
+    assert actual_aggregate == expected_aggregate
+
+
+def test_collect_aggregate_stmt_should_raise_value_error(get_container_func):
+    container, sqlfuncts, connection_port, historian_version = get_container_func
+
+    with pytest.raises(ValueError):
+        sqlfuncts.collect_aggregate("dfdfadfdadf", "Invalid agg type")
+
+
+def get_postgresqlfuncts(port):
+    connect_params = {
+        "dbname": TEST_DATABASE,
+        "user": ROOT_USER,
+        "password": ROOT_PASSWORD,
+        # use the module-level CONNECTION_HOST set by the fixture so that this
+        # also works when the tests themselves run inside a docker container
+        "host": CONNECTION_HOST,
+        "port": port,
+    }
+
+    table_names = {
+        "data_table": DATA_TABLE,
+        "topics_table": TOPICS_TABLE,
+        "meta_table": META_TABLE,
+        "agg_topics_table": AGG_TOPICS_TABLE,
+        "agg_meta_table": AGG_META_TABLE,
+    }
+
+    return PostgreSqlFuncts(connect_params, table_names)
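+
+
+# Example (illustrative): the helper above can also be used outside the
+# fixture, assuming a PostgreSQL server is already reachable with the
+# connection settings defined at the top of this module:
+#
+#     functs = get_postgresqlfuncts(5432)
+#     functs.setup_historian_tables()
+#     topic_id = functs.insert_topic("some/topic")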
+
+
+@pytest.fixture(scope="module",
+                params=itertools.product(
+                    IMAGES,
+                    [
+                        '<4.0.0',
+                        '>=4.0.0'
+                    ]))
+def get_container_func(request):
+    global CONNECTION_HOST
+    historian_version = request.param[1]
+    kwargs = {'env': ENV_POSTGRESQL}
+    # /.dockerenv exists when the tests themselves run inside a container; in
+    # that case reach the PostgreSQL container by hostname on the shared docker
+    # network instead of through a port published on the host
+    if os.path.exists("/.dockerenv"):
+        print("Running test within docker container.")
+        connection_port = 5432
+        CONNECTION_HOST = 'postgresql_test'
+        kwargs['hostname'] = CONNECTION_HOST
+    else:
+        ports_dict = ports_config()
+        kwargs['ports'] = ports_dict["ports"]
+        connection_port = ports_dict["port_on_host"]
+        CONNECTION_HOST = 'localhost'
+
+    with create_container(request.param[0], **kwargs) as container:
+        wait_for_connection(container, connection_port)
+        create_all_tables(container, historian_version)
+        postgresfuncts = get_postgresqlfuncts(connection_port)
+        postgresfuncts.setup_historian_tables()
+        yield container, postgresfuncts, connection_port, historian_version
+
+
+def ports_config():
+    port_on_host = get_rand_port(ip="5432")
+    return {"port_on_host": port_on_host, "ports": {"5432/tcp": port_on_host}}
+
+
+def create_all_tables(container, historian_version):
+    create_historian_tables(container, historian_version)
+    create_aggregate_tables(container, historian_version)
+
+
+def create_historian_tables(container, historian_version):
+    if historian_version == "<4.0.0":
+        query = f"""
+            CREATE TABLE IF NOT EXISTS {DATA_TABLE} (
+            ts TIMESTAMP NOT NULL,
+            topic_id INTEGER NOT NULL,
+            value_string TEXT NOT NULL,
+            UNIQUE (topic_id, ts));
+            CREATE TABLE IF NOT EXISTS {TOPICS_TABLE} (
+            topic_id SERIAL PRIMARY KEY NOT NULL,
+            topic_name VARCHAR(512) NOT NULL,
+            UNIQUE (topic_name));
+            CREATE TABLE IF NOT EXISTS {META_TABLE} (
+            topic_id INTEGER PRIMARY KEY NOT NULL,
+            metadata TEXT NOT NULL);
+        """
+    else:
+        # starting with the 4.0.0 schema, topic metadata lives in a nullable
+        # column on the topics table instead of in a separate meta table
+        query = f"""
+            CREATE TABLE IF NOT EXISTS {DATA_TABLE} (
+            ts TIMESTAMP NOT NULL,
+            topic_id INTEGER NOT NULL,
+            value_string TEXT NOT NULL,
+            UNIQUE (topic_id, ts));
+            CREATE TABLE IF NOT EXISTS {TOPICS_TABLE} (
+            topic_id SERIAL PRIMARY KEY NOT NULL,
+            topic_name VARCHAR(512) NOT NULL,
+            metadata TEXT,
+            UNIQUE (topic_name));
+        """
+    seed_database(container, query)
+
+
+def create_aggregate_tables(container, historian_version):
+    if historian_version == "<4.0.0":
+        query = f"""
+            CREATE TABLE IF NOT EXISTS {AGG_TOPICS_TABLE} (
+            agg_topic_id SERIAL PRIMARY KEY NOT NULL,
+            agg_topic_name VARCHAR(512) NOT NULL,
+            agg_type VARCHAR(512) NOT NULL,
+            agg_time_period VARCHAR(512) NOT NULL,
+            UNIQUE (agg_topic_name, agg_type, agg_time_period));
+            CREATE TABLE IF NOT EXISTS {AGG_META_TABLE} (
+            agg_topic_id INTEGER PRIMARY KEY NOT NULL,
+            metadata TEXT NOT NULL);
+        """
+    else:
+        query = f"""
+            CREATE TABLE IF NOT EXISTS {AGG_TOPICS_TABLE} (
+            agg_topic_id SERIAL PRIMARY KEY NOT NULL,
+            agg_topic_name VARCHAR(512) NOT NULL,
+            agg_type VARCHAR(20) NOT NULL,
+            agg_time_period VARCHAR(20) NOT NULL,
+            UNIQUE (agg_topic_name, agg_type, agg_time_period));
+            CREATE TABLE IF NOT EXISTS {AGG_META_TABLE} (
+            agg_topic_id INTEGER PRIMARY KEY NOT NULL,
+            metadata TEXT NOT NULL);
+        """
+    seed_database(container, query)
+
+
+def seed_database(container, query):
+    command = (
+        f'psql --username="{ROOT_USER}" --dbname="{TEST_DATABASE}" --command="{query}"'
+    )
+    r = container.exec_run(cmd=command, tty=True)
+    print(r)
+    # psql exits non-zero on any fatal error, not only with exit code 1
+    if r[0] != 0:
+        raise RuntimeError(
+            f"SQL query did not successfully complete on the container: \n {r}"
+        )
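+
+
+# Note on quoting: seed_database wraps the query in double quotes on the psql
+# command line, so any double quote inside the SQL itself has to be escaped,
+# as in the metadata INSERT used by test_get_topic_meta_map_should_return_maps:
+#
+#     VALUES ('baseball', '{\\"meta\\":\\"value\\"}');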
+
+
+def get_tables(port):
+    cnx, cursor = get_cnx_cursor(port)
+    # unlike MySQL, PostgreSQL has no "SHOW TABLES" shortcut,
+    # so we have to build the query ourselves from information_schema
+    query = SQL(
+        "SELECT table_name "
+        "FROM information_schema.tables "
+        "WHERE table_type = 'BASE TABLE' and "
+        "table_schema not in ('pg_catalog', 'information_schema')"
+    )
+    results = execute_statement(cnx, cursor, query)
+
+    return {t[0] for t in results}
+
+
+def describe_table(port, table):
+    cnx, cursor = get_cnx_cursor(port)
+    query = SQL(
+        "SELECT column_name "
+        "FROM information_schema.columns "
+        "WHERE table_name = %s"
+    )
+
+    results = execute_statement(cnx, cursor, query, args=[table])
+
+    return {t[0] for t in results}
+
+
+def get_data_in_table(port, table):
+    cnx, cursor = get_cnx_cursor(port)
+    query = SQL("SELECT * FROM {table_name}").format(table_name=Identifier(table))
+
+    results = execute_statement(cnx, cursor, query)
+
+    return results
+
+
+def execute_statement(cnx, cursor, query, args=None):
+    cursor.execute(query, vars=args)
+
+    results = cursor.fetchall()
+
+    cursor.close()
+    cnx.close()
+
+    return results
+
+
+def get_cnx_cursor(port):
+    connect_params = {
+        "database": TEST_DATABASE,
+        "user": ROOT_USER,
+        "password": ROOT_PASSWORD,
+        # use the module-level CONNECTION_HOST set by the fixture so that this
+        # also works when the tests themselves run inside a docker container
+        "host": CONNECTION_HOST,
+        "port": port,
+    }
+
+    cnx = psycopg2.connect(**connect_params)
+    cursor = cnx.cursor()
+
+    return cnx, cursor
+
+
+def wait_for_connection(container, port):
+    start_time = time()
+    while time() - start_time < ALLOW_CONNECTION_TIME:
+        # psql runs inside the container, so it connects on the container's
+        # internal port (5432) regardless of the port published to the host
+        command = f"psql --username={ROOT_USER} --dbname={TEST_DATABASE} --port=5432"
+        response = container.exec_run(command, environment=ENV_POSTGRESQL, tty=True)
+        # https://www.postgresql.org/docs/10/app-psql.html#id-1.9.4.18.7
+        # psql returns 0 to the shell if it finished normally,
+        # 1 if a fatal error of its own occurs (e.g. out of memory, file not found),
+        # 2 if the connection to the server went bad and the session was not interactive,
+        # and 3 if an error occurred in a script and the variable ON_ERROR_STOP was set.
+        exit_code = response[0]
+
+        if exit_code == 0:
+            return
+        elif exit_code == 1:
+            raise RuntimeError(response)
+        elif exit_code == 2:
+            # the server is not accepting connections yet; try again
+            continue
+        elif exit_code == 3:
+            raise RuntimeError(response)
+
+    # if the loop times out without a successful connection, return anyway and
+    # let the first real query fail if the server still is not up
+    return
+
+
+def drop_all_tables(port):
+    tables = get_tables(port)
+    cnx, cursor = get_cnx_cursor(port)
+    try:
+        for t in tables:
+            # quote the identifier so that mixed-case tables, such as the
+            # AVG_1984 table created by the aggregate tests, are dropped too
+            cursor.execute(SQL("DROP TABLE {}").format(Identifier(t)))
+        cnx.commit()
+    except Exception as e:
+        print(f"Error deleting tables {e}")
+    finally:
+        if cursor:
+            cursor.close()
+        cnx.close()
+
+
+@pytest.fixture(autouse=True)
+def cleanup_tables(get_container_func):
+    container, sqlfuncts, connection_port, historian_version = get_container_func
+    drop_all_tables(connection_port)
+    create_all_tables(container, historian_version)
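+
+
+# Running this module locally requires Docker, since get_container_func
+# provisions a throwaway PostgreSQL container for every image/schema-version
+# combination in its params. A typical invocation (the file path is an
+# assumption) would be:
+#
+#     pytest tests/test_postgresqlfuncts.py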