diff --git a/.github/workflows/build_and_test_with_resty_events.yml b/.github/workflows/build_and_test_with_resty_events.yml new file mode 100644 index 00000000..cb7abf07 --- /dev/null +++ b/.github/workflows/build_and_test_with_resty_events.yml @@ -0,0 +1,150 @@ +name: Build and test - with resty_events + +concurrency: + # for PR's cancel the running task, if another commit is pushed + group: ${{ github.workflow }} ${{ github.ref }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + +on: + pull_request: {} + workflow_dispatch: {} + push: + branches: + - main + - master + - release/** + +jobs: + build: + name: CI using lua-resty-events + runs-on: ubuntu-20.04 + strategy: + matrix: + openresty-version: [1.19.9.1, 1.21.4.1] + + steps: + - name: Update and install OS dependencies + run: | + sudo apt-get update && sudo apt-get install -y libssl-dev ssl-cert + sudo systemctl disable nginx + sudo systemctl stop nginx + + + - name: Set environment variables + env: + OPENRESTY_VER: ${{ matrix.openresty-version }} + RESTY_EVENTS_VER: 0.1.2 + LUAROCKS_VER: 3.9.0 + OPENSSL_VER: 1.1.1q + PCRE_VER: 8.45 + run: | + echo "INSTALL_ROOT=/home/runner/work/cache/install-root" >> $GITHUB_ENV + echo "DOWNLOAD_ROOT=/home/runner/work/cache/download-root" >> $GITHUB_ENV + echo "OPENRESTY=$OPENRESTY_VER" >> $GITHUB_ENV + echo "LUAROCKS=$LUAROCKS_VER" >> $GITHUB_ENV + echo "OPENSSL=$OPENSSL_VER" >> $GITHUB_ENV + echo "PCRE=$PCRE_VER" >> $GITHUB_ENV + echo "RESTY_EVENTS=$RESTY_EVENTS_VER" >> $GITHUB_ENV + echo "LD_LIBRARY_PATH=$HOME/install-root/lib:$LD_LIBRARY_PATH" >> $GITHUB_ENV + + - name: Checkout lua-resty-healthcheck + uses: actions/checkout@v3 + + - name: Lookup build cache + uses: actions/cache@v3 + id: cache-deps + with: + path: | + /home/runner/work/cache/install-root + /home/runner/work/cache/download-root + key: ${{ runner.os }}-${{ hashFiles('**/.github/workflows/build_and_test_with_resty_events.yml') }}-${{ matrix.openresty-version }} + + - name: Add to Path + if: steps.cache-deps.outputs.cache-hit != 'true' + run: echo "$INSTALL_ROOT/bin:$INSTALL_ROOT/nginx/sbin:$INSTALL_ROOT/luajit/bin:/usr/bin" >> $GITHUB_PATH + + - name: Build and install OpenSSL + if: steps.cache-deps.outputs.cache-hit != 'true' + run: | + curl -sSLO https://www.openssl.org/source/openssl-$OPENSSL.tar.gz + tar -xzf openssl-$OPENSSL.tar.gz + cd openssl-$OPENSSL + ./config -g shared -DPURIFY no-threads --prefix=$INSTALL_ROOT --openssldir=$INSTALL_ROOT no-unit-test + make + make install_sw + + - name: Checkout lua-resty-events + uses: actions/checkout@v3 + if: steps.cache-deps.outputs.cache-hit != 'true' + with: + repository: Kong/lua-resty-events + ref: refs/tags/0.1.0 + path: lua-resty-events + + - name: Build and install OpenResty + if: steps.cache-deps.outputs.cache-hit != true + run: | + curl -sSLO https://openresty.org/download/openresty-$OPENRESTY.tar.gz + tar -xzf openresty-$OPENRESTY.tar.gz + cd openresty-$OPENRESTY + ./configure \ + --prefix=$INSTALL_ROOT \ + --with-cc-opt='-I$INSTALL_ROOT/include' \ + --with-ld-opt='-L$INSTALL_ROOT/lib -Wl,-rpath,$INSTALL_ROOT/lib' \ + --with-pcre-jit \ + --with-http_ssl_module \ + --with-http_realip_module \ + --with-http_stub_status_module \ + --with-http_v2_module \ + --without-http_encrypted_session_module \ + --with-stream_realip_module \ + --with-stream_ssl_preread_module \ + --add-module=../lua-resty-events \ + --with-pcre + make + make install + make install LUA_LIBDIR=$INSTALL_ROOT/lualib + + - name: Install LuaRocks + if: steps.cache-deps.outputs.cache-hit != 
'true' + run: | + curl -sSLO https://luarocks.org/releases/luarocks-$LUAROCKS.tar.gz + tar -xzf luarocks-$LUAROCKS.tar.gz + cd luarocks-$LUAROCKS + ./configure \ + --prefix=$INSTALL_ROOT \ + --lua-suffix=jit \ + --with-lua=$INSTALL_ROOT/luajit \ + --with-lua-include=$INSTALL_ROOT/luajit/include/luajit-2.1 + make build + make install + + - name: Install manual dependencies + if: steps.cache-deps.outputs.cache-hit != 'true' + run: | + luarocks install luacheck + + - name: Install Test::NGINX + if: steps.cache-deps.outputs.cache-hit != 'true' + run: | + sudo apt-get install cpanminus + cpanm --notest --local-lib=$HOME/perl5 local::lib && eval $(perl -I $HOME/perl5/lib/perl5/ -Mlocal::lib) + cpanm --notest Test::Nginx + + - name: Install lua-resty-events + if: steps.cache-deps.outputs.cache-hit != 'true' + run: | + cd lua-resty-events + OPENRESTY_PREFIX=$INSTALL_ROOT PREFIX=$INSTALL_ROOT LUA_LIB_DIR=$INSTALL_ROOT/lualib make install + + - name: Install lua-resty-healthcheck + run: luarocks make + + - name: Run tests + env: + PATH: ${{ env.INSTALL_ROOT }}/bin:${{ env.INSTALL_ROOT }}/nginx/sbin:${{ env.INSTALL_ROOT }}/luajit/bin:/usr/bin + TEST_NGINX_BINARY: ${{ env.INSTALL_ROOT }}/nginx/sbin/nginx + run: | + eval `luarocks path` + eval $(perl -I $HOME/perl5/lib/perl5/ -Mlocal::lib) + TEST_NGINX_RANDOMIZE=1 prove -I. -r t/with_resty-events diff --git a/.github/workflows/build_and_test_with_worker_events.yml b/.github/workflows/build_and_test_with_worker_events.yml new file mode 100644 index 00000000..581c9265 --- /dev/null +++ b/.github/workflows/build_and_test_with_worker_events.yml @@ -0,0 +1,75 @@ +name: Build and test - with worker_events + +concurrency: + # for PR's cancel the running task, if another commit is pushed + group: ${{ github.workflow }} ${{ github.ref }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + +on: + pull_request: {} + workflow_dispatch: {} + push: + branches: + - main + - master + +jobs: + build: + name: CI using lua-resty-worker-events + runs-on: ubuntu-20.04 + strategy: + matrix: + openresty-version: [1.19.9.1, 1.21.4.1] + + steps: + - name: Update and install OS dependencies + run: | + sudo apt-get update && sudo apt-get install -y libssl-dev ssl-cert + sudo systemctl disable nginx + sudo systemctl stop nginx + + + - name: Set environment variables + env: + OPENRESTY_VER: ${{ matrix.openresty-version }} + run: | + echo "/usr/local/openresty/nginx/sbin" >> $GITHUB_PATH + + - name: Checkout lua-resty-healthcheck + uses: actions/checkout@v3 + + - name: Install OpenResty ${{ matrix.openresty-version }} + env: + OPENRESTY_VER: ${{ matrix.openresty-version }} + run: | + sudo apt-get -y install --no-install-recommends wget gnupg ca-certificates + wget -O - https://openresty.org/package/pubkey.gpg | sudo apt-key add - + echo "deb http://openresty.org/package/ubuntu $(lsb_release -sc) main" | sudo tee /etc/apt/sources.list.d/openresty.list + sudo apt-get update + sudo apt-get -y install openresty=$OPENRESTY_VER-1~focal1 + + - name: Install LuaRocks + run: sudo apt-get install -y luarocks + + - name: Install manual dependencies + run: | + sudo luarocks install luacheck + sudo luarocks install lua-resty-worker-events 1.0.0 + + - name: Install Test::NGINX + run: | + sudo apt-get install cpanminus + cpanm --notest --local-lib=$HOME/perl5 local::lib && eval $(perl -I $HOME/perl5/lib/perl5/ -Mlocal::lib) + cpanm --notest Test::Nginx + + - name: Checkout lua-resty-healthcheck + uses: actions/checkout@v3 + + - name: Install lua-resty-healthcheck + run: sudo 
luarocks make + + - name: Run tests + run: | + eval `luarocks path` + eval $(perl -I $HOME/perl5/lib/perl5/ -Mlocal::lib) + TEST_NGINX_RANDOMIZE=1 prove -I. -r t/with_worker-events diff --git a/.github/workflows/latest_os.yml b/.github/workflows/latest_os.yml deleted file mode 100644 index b4e5e5e4..00000000 --- a/.github/workflows/latest_os.yml +++ /dev/null @@ -1,189 +0,0 @@ -name: Build and test for Ubuntu latest - -on: [push, pull_request] - -jobs: - build: - name: Build and install dependencies - runs-on: ubuntu-latest - strategy: - matrix: - openresty-version: [1.21.4.1, 1.19.9.1] - luarocks-version: [3.8.0] - - steps: - - name: Update and install OS dependencies - run: sudo apt-get update && sudo apt-get install -y libssl-dev ssl-cert - - - name: Set environment variables - env: - LUAROCKS_VER: ${{ matrix.luarocks-version }} - OPENRESTY_VER: ${{ matrix.openresty-version }} - run: | - echo "DOWNLOAD_PATH=$HOME/download-root" >> $GITHUB_ENV - export DOWNLOAD_PATH=$HOME/download-root - echo "INSTALL_PATH=$HOME/install-root" >> $GITHUB_ENV - export INSTALL_PATH=$HOME/install-root - echo "LUAROCKS_VER=$LUAROCKS_VER" >> $GITHUB_ENV - echo "OPENRESTY_VER=$OPENRESTY_VER" >> $GITHUB_ENV - export LUAROCKS_PREFIX=$INSTALL_PATH/luarocks-$LUAROCKS_VER - echo "LUAROCKS_PREFIX=$LUAROCKS_PREFIX" >> $GITHUB_ENV - export OPENRESTY_PREFIX=$INSTALL_PATH/openresty-$OPENRESTY_VER - echo "OPENRESTY_PREFIX=$OPENRESTY_PREFIX" >> $GITHUB_ENV - echo "PATH=$DOWNLOAD_PATH:$LUAROCKS_PREFIX/bin:$OPENRESTY_PREFIX/nginx/sbin:$DOWNLOAD_PATH/cpanm:$PATH" >> $GITHUB_ENV - - - name: Checkout lua-resty-healthcheck - uses: actions/checkout@v2 - - - name: Lookup build cache - uses: actions/cache@v2 - id: cache-deps - with: - path: | - ${{ env.INSTALL_PATH }} - ~/perl5 - key: ${{ runner.os }}-${{ matrix.openresty-version }}-${{ hashFiles('.github/workflows/latest_os.yml') }} - - - name: Create needed paths - if: steps.cache-deps.outputs.cache-hit != 'true' - run: | - mkdir -p $DOWNLOAD_PATH - mkdir -p $INSTALL_PATH - - - name: Build and install OpenResty ${{ matrix.openresty-version }} - if: steps.cache-deps.outputs.cache-hit != 'true' - run: | - if [ ! -d $INSTALL_PATH/openresty-$OPENRESTY_VER ]; - then - pushd $DOWNLOAD_PATH - echo "Downloading from http://openresty.org/download/openresty-$OPENRESTY_VER.tar.gz" - wget -O $DOWNLOAD_PATH/openresty-$OPENRESTY_VER.tar.gz http://openresty.org/download/openresty-$OPENRESTY_VER.tar.gz - echo "tar -zxf $DOWNLOAD_PATH/openresty-$OPENRESTY_VER.tar.gz" - tar -zxf $DOWNLOAD_PATH/openresty-$OPENRESTY_VER.tar.gz - echo "result: $?" - pushd openresty-$OPENRESTY_VER - ./configure --prefix=$OPENRESTY_PREFIX - make - make install - popd - popd - fi - - - name: Build and install LuaRocks ${{ matrix.luarocks-version }} - if: steps.cache-deps.outputs.cache-hit != 'true' - run: | - if [ ! 
-d $INSTALL_PATH/luarocks-$LUAROCKS_VER ]; - then - pushd $DOWNLOAD_PATH - echo "Downloading from https://luarocks.github.io/luarocks/releases/luarocks-$LUAROCKS_VER.tar.gz" - wget -O $DOWNLOAD_PATH/luarocks-$LUAROCKS_VER.tar.gz https://luarocks.github.io/luarocks/releases/luarocks-$LUAROCKS_VER.tar.gz - tar -zxf $DOWNLOAD_PATH/luarocks-$LUAROCKS_VER.tar.gz - pushd luarocks-$LUAROCKS_VER - ./configure --prefix=$LUAROCKS_PREFIX --with-lua=$OPENRESTY_PREFIX/luajit --with-lua-include=$OPENRESTY_PREFIX/luajit/include/luajit-2.1 --lua-suffix=jit - make build - make install - popd - luarocks install luacheck - popd - fi - - - name: Install Test::NGINX - if: steps.cache-deps.outputs.cache-hit != 'true' - run: | - if [ ! -f $DOWNLOAD_PATH/cpanm ]; - then - wget -O $DOWNLOAD_PATH/cpanm https://cpanmin.us/ - chmod +x $DOWNLOAD_PATH/cpanm - cpanm --notest --local-lib=$HOME/perl5 local::lib && eval $(perl -I $HOME/perl5/lib/perl5/ -Mlocal::lib) - cpanm --notest Test::Nginx - fi - - lint: - name: Static code analysis - runs-on: ubuntu-latest - needs: build - strategy: - matrix: - openresty-version: [1.21.4.1, 1.19.9.1] - luarocks-version: [3.8.0] - steps: - - name: Checkout lua-resty-healthcheck - uses: actions/checkout@v2 - - - name: Set environment variables - env: - LUAROCKS_VER: ${{ matrix.luarocks-version }} - OPENRESTY_VER: ${{ matrix.openresty-version }} - run: | - echo "DOWNLOAD_PATH=$HOME/download-root" >> $GITHUB_ENV - export DOWNLOAD_PATH=$HOME/download-root - echo "INSTALL_PATH=$HOME/install-root" >> $GITHUB_ENV - export INSTALL_PATH=$HOME/install-root - echo "LUAROCKS_VER=$LUAROCKS_VER" >> $GITHUB_ENV - echo "OPENRESTY_VER=$OPENRESTY_VER" >> $GITHUB_ENV - export LUAROCKS_PREFIX=$INSTALL_PATH/luarocks-$LUAROCKS_VER - echo "LUAROCKS_PREFIX=$LUAROCKS_PREFIX" >> $GITHUB_ENV - export OPENRESTY_PREFIX=$INSTALL_PATH/openresty-$OPENRESTY_VER - echo "OPENRESTY_PREFIX=$OPENRESTY_PREFIX" >> $GITHUB_ENV - echo "PATH=$DOWNLOAD_PATH:$LUAROCKS_PREFIX/bin:$OPENRESTY_PREFIX/nginx/sbin:$DOWNLOAD_PATH/cpanm:$PATH" >> $GITHUB_ENV - - - name: Lookup build cache - uses: actions/cache@v2 - id: cache-deps - with: - path: | - ${{ env.INSTALL_PATH }} - ~/perl5 - key: ${{ runner.os }}-${{ matrix.openresty-version }}-${{ hashFiles('.github/workflows/latest_os.yml') }} - - - name: Lint code - run: | - eval `luarocks path` - luacheck lib - - install-and-test: - name: Test lua-resty-healthcheck - runs-on: ubuntu-latest - needs: build - strategy: - matrix: - openresty-version: [1.21.4.1, 1.19.9.1] - luarocks-version: [3.8.0] - steps: - - name: Checkout lua-resty-healthcheck - uses: actions/checkout@v2 - - - name: Set environment variables - env: - LUAROCKS_VER: ${{ matrix.luarocks-version }} - OPENRESTY_VER: ${{ matrix.openresty-version }} - run: | - echo "DOWNLOAD_PATH=$HOME/download-root" >> $GITHUB_ENV - export DOWNLOAD_PATH=$HOME/download-root - echo "INSTALL_PATH=$HOME/install-root" >> $GITHUB_ENV - export INSTALL_PATH=$HOME/install-root - echo "LUAROCKS_VER=$LUAROCKS_VER" >> $GITHUB_ENV - echo "OPENRESTY_VER=$OPENRESTY_VER" >> $GITHUB_ENV - export LUAROCKS_PREFIX=$INSTALL_PATH/luarocks-$LUAROCKS_VER - echo "LUAROCKS_PREFIX=$LUAROCKS_PREFIX" >> $GITHUB_ENV - export OPENRESTY_PREFIX=$INSTALL_PATH/openresty-$OPENRESTY_VER - echo "OPENRESTY_PREFIX=$OPENRESTY_PREFIX" >> $GITHUB_ENV - echo "PATH=$DOWNLOAD_PATH:$LUAROCKS_PREFIX/bin:$OPENRESTY_PREFIX/nginx/sbin:$DOWNLOAD_PATH/cpanm:$PATH" >> $GITHUB_ENV - - - name: Lookup build cache - uses: actions/cache@v2 - id: cache-deps - with: - path: | - ${{ env.INSTALL_PATH }} - 
~/perl5 - key: ${{ runner.os }}-${{ matrix.openresty-version }}-${{ hashFiles('.github/workflows/latest_os.yml') }} - - - name: Install lua-resty-healthcheck - run: luarocks make - - - name: Run tests - run: | - eval `luarocks path` - eval $(perl -I $HOME/perl5/lib/perl5/ -Mlocal::lib) - TEST_NGINX_RANDOMIZE=1 prove -I. -r t diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index fad9f649..1c21b062 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,5 +1,10 @@ name: Lint +concurrency: + # for PR's cancel the running task, if another commit is pushed + group: ${{ github.workflow }} ${{ github.ref }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + on: pull_request: {} workflow_dispatch: {} @@ -37,7 +42,7 @@ jobs: - name: Lua Check if: steps.changed-files.outputs.any_changed == 'true' - uses: Kong/public-shared-actions/code-check-actions/lua-lint@33449c46c6766a3d3c8f167cc383381225862b36 + uses: Kong/public-shared-actions/code-check-actions/lua-lint@c03e30a36e8a2dde5cbd463229a96aaad7ccad24 with: additional_args: '--no-default-config --config .luacheckrc' files: ${{ steps.changed-files.outputs.all_changed_files }} diff --git a/.github/workflows/old_os.yml b/.github/workflows/old_os.yml deleted file mode 100644 index c93e8102..00000000 --- a/.github/workflows/old_os.yml +++ /dev/null @@ -1,189 +0,0 @@ -name: Build and test for Ubuntu 20.04 - -on: [push, pull_request] - -jobs: - build: - name: Build and install dependencies - runs-on: ubuntu-20.04 - strategy: - matrix: - openresty-version: [1.21.4.1, 1.19.9.1] - luarocks-version: [3.8.0] - - steps: - - name: Update and install OS dependencies - run: sudo apt-get update && sudo apt-get install -y libssl-dev ssl-cert - - - name: Set environment variables - env: - LUAROCKS_VER: ${{ matrix.luarocks-version }} - OPENRESTY_VER: ${{ matrix.openresty-version }} - run: | - echo "DOWNLOAD_PATH=$HOME/download-root" >> $GITHUB_ENV - export DOWNLOAD_PATH=$HOME/download-root - echo "INSTALL_PATH=$HOME/install-root" >> $GITHUB_ENV - export INSTALL_PATH=$HOME/install-root - echo "LUAROCKS_VER=$LUAROCKS_VER" >> $GITHUB_ENV - echo "OPENRESTY_VER=$OPENRESTY_VER" >> $GITHUB_ENV - export LUAROCKS_PREFIX=$INSTALL_PATH/luarocks-$LUAROCKS_VER - echo "LUAROCKS_PREFIX=$LUAROCKS_PREFIX" >> $GITHUB_ENV - export OPENRESTY_PREFIX=$INSTALL_PATH/openresty-$OPENRESTY_VER - echo "OPENRESTY_PREFIX=$OPENRESTY_PREFIX" >> $GITHUB_ENV - echo "PATH=$DOWNLOAD_PATH:$LUAROCKS_PREFIX/bin:$OPENRESTY_PREFIX/nginx/sbin:$DOWNLOAD_PATH/cpanm:$PATH" >> $GITHUB_ENV - - - name: Checkout lua-resty-healthcheck - uses: actions/checkout@v2 - - - name: Lookup build cache - uses: actions/cache@v2 - id: cache-deps - with: - path: | - ${{ env.INSTALL_PATH }} - ~/perl5 - key: ${{ runner.os }}-${{ matrix.openresty-version }}-${{ hashFiles('.github/workflows/old_os.yml') }} - - - name: Create needed paths - if: steps.cache-deps.outputs.cache-hit != 'true' - run: | - mkdir -p $DOWNLOAD_PATH - mkdir -p $INSTALL_PATH - - - name: Build and install OpenResty ${{ matrix.openresty-version }} - if: steps.cache-deps.outputs.cache-hit != 'true' - run: | - if [ ! 
-d $INSTALL_PATH/openresty-$OPENRESTY_VER ]; - then - pushd $DOWNLOAD_PATH - echo "Downloading from http://openresty.org/download/openresty-$OPENRESTY_VER.tar.gz" - wget -O $DOWNLOAD_PATH/openresty-$OPENRESTY_VER.tar.gz http://openresty.org/download/openresty-$OPENRESTY_VER.tar.gz - echo "tar -zxf $DOWNLOAD_PATH/openresty-$OPENRESTY_VER.tar.gz" - tar -zxf $DOWNLOAD_PATH/openresty-$OPENRESTY_VER.tar.gz - echo "result: $?" - pushd openresty-$OPENRESTY_VER - ./configure --prefix=$OPENRESTY_PREFIX - make - make install - popd - popd - fi - - - name: Build and install LuaRocks ${{ matrix.luarocks-version }} - if: steps.cache-deps.outputs.cache-hit != 'true' - run: | - if [ ! -d $INSTALL_PATH/luarocks-$LUAROCKS_VER ]; - then - pushd $DOWNLOAD_PATH - echo "Downloading from https://luarocks.github.io/luarocks/releases/luarocks-$LUAROCKS_VER.tar.gz" - wget -O $DOWNLOAD_PATH/luarocks-$LUAROCKS_VER.tar.gz https://luarocks.github.io/luarocks/releases/luarocks-$LUAROCKS_VER.tar.gz - tar -zxf $DOWNLOAD_PATH/luarocks-$LUAROCKS_VER.tar.gz - pushd luarocks-$LUAROCKS_VER - ./configure --prefix=$LUAROCKS_PREFIX --with-lua=$OPENRESTY_PREFIX/luajit --with-lua-include=$OPENRESTY_PREFIX/luajit/include/luajit-2.1 --lua-suffix=jit - make build - make install - popd - luarocks install luacheck - popd - fi - - - name: Install Test::NGINX - if: steps.cache-deps.outputs.cache-hit != 'true' - run: | - if [ ! -f $DOWNLOAD_PATH/cpanm ]; - then - wget -O $DOWNLOAD_PATH/cpanm https://cpanmin.us/ - chmod +x $DOWNLOAD_PATH/cpanm - cpanm --notest --local-lib=$HOME/perl5 local::lib && eval $(perl -I $HOME/perl5/lib/perl5/ -Mlocal::lib) - cpanm --notest Test::Nginx - fi - - lint: - name: Static code analysis - runs-on: ubuntu-20.04 - needs: build - strategy: - matrix: - openresty-version: [1.21.4.1, 1.19.9.1] - luarocks-version: [3.8.0] - steps: - - name: Checkout lua-resty-healthcheck - uses: actions/checkout@v2 - - - name: Set environment variables - env: - LUAROCKS_VER: ${{ matrix.luarocks-version }} - OPENRESTY_VER: ${{ matrix.openresty-version }} - run: | - echo "DOWNLOAD_PATH=$HOME/download-root" >> $GITHUB_ENV - export DOWNLOAD_PATH=$HOME/download-root - echo "INSTALL_PATH=$HOME/install-root" >> $GITHUB_ENV - export INSTALL_PATH=$HOME/install-root - echo "LUAROCKS_VER=$LUAROCKS_VER" >> $GITHUB_ENV - echo "OPENRESTY_VER=$OPENRESTY_VER" >> $GITHUB_ENV - export LUAROCKS_PREFIX=$INSTALL_PATH/luarocks-$LUAROCKS_VER - echo "LUAROCKS_PREFIX=$LUAROCKS_PREFIX" >> $GITHUB_ENV - export OPENRESTY_PREFIX=$INSTALL_PATH/openresty-$OPENRESTY_VER - echo "OPENRESTY_PREFIX=$OPENRESTY_PREFIX" >> $GITHUB_ENV - echo "PATH=$DOWNLOAD_PATH:$LUAROCKS_PREFIX/bin:$OPENRESTY_PREFIX/nginx/sbin:$DOWNLOAD_PATH/cpanm:$PATH" >> $GITHUB_ENV - - - name: Lookup build cache - uses: actions/cache@v2 - id: cache-deps - with: - path: | - ${{ env.INSTALL_PATH }} - ~/perl5 - key: ${{ runner.os }}-${{ matrix.openresty-version }}-${{ hashFiles('.github/workflows/old_os.yml') }} - - - name: Lint code - run: | - eval `luarocks path` - luacheck lib - - install-and-test: - name: Test lua-resty-healthcheck - runs-on: ubuntu-20.04 - needs: build - strategy: - matrix: - openresty-version: [1.21.4.1, 1.19.9.1] - luarocks-version: [3.8.0] - steps: - - name: Checkout lua-resty-healthcheck - uses: actions/checkout@v2 - - - name: Set environment variables - env: - LUAROCKS_VER: ${{ matrix.luarocks-version }} - OPENRESTY_VER: ${{ matrix.openresty-version }} - run: | - echo "DOWNLOAD_PATH=$HOME/download-root" >> $GITHUB_ENV - export DOWNLOAD_PATH=$HOME/download-root - echo 
"INSTALL_PATH=$HOME/install-root" >> $GITHUB_ENV - export INSTALL_PATH=$HOME/install-root - echo "LUAROCKS_VER=$LUAROCKS_VER" >> $GITHUB_ENV - echo "OPENRESTY_VER=$OPENRESTY_VER" >> $GITHUB_ENV - export LUAROCKS_PREFIX=$INSTALL_PATH/luarocks-$LUAROCKS_VER - echo "LUAROCKS_PREFIX=$LUAROCKS_PREFIX" >> $GITHUB_ENV - export OPENRESTY_PREFIX=$INSTALL_PATH/openresty-$OPENRESTY_VER - echo "OPENRESTY_PREFIX=$OPENRESTY_PREFIX" >> $GITHUB_ENV - echo "PATH=$DOWNLOAD_PATH:$LUAROCKS_PREFIX/bin:$OPENRESTY_PREFIX/nginx/sbin:$DOWNLOAD_PATH/cpanm:$PATH" >> $GITHUB_ENV - - - name: Lookup build cache - uses: actions/cache@v2 - id: cache-deps - with: - path: | - ${{ env.INSTALL_PATH }} - ~/perl5 - key: ${{ runner.os }}-${{ matrix.openresty-version }}-${{ hashFiles('.github/workflows/old_os.yml') }} - - - name: Install lua-resty-healthcheck - run: luarocks make - - - name: Run tests - run: | - eval `luarocks path` - eval $(perl -I $HOME/perl5/lib/perl5/ -Mlocal::lib) - TEST_NGINX_RANDOMIZE=1 prove -I. -r t diff --git a/.github/workflows/sast.yml b/.github/workflows/sast.yml index 17738a65..b5fe5dc5 100644 --- a/.github/workflows/sast.yml +++ b/.github/workflows/sast.yml @@ -1,5 +1,10 @@ name: SAST +concurrency: + # for PR's cancel the running task, if another commit is pushed + group: ${{ github.workflow }} ${{ github.ref }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + on: pull_request: paths: @@ -28,4 +33,4 @@ jobs: steps: - uses: actions/checkout@v3 - - uses: Kong/public-shared-actions/security-actions/semgrep@33449c46c6766a3d3c8f167cc383381225862b36 + - uses: Kong/public-shared-actions/security-actions/semgrep@c03e30a36e8a2dde5cbd463229a96aaad7ccad24 diff --git a/README.md b/README.md index 9eb784ff..53c54bb0 100644 --- a/README.md +++ b/README.md @@ -88,20 +88,6 @@ programmatic API using functions such as `checker:report_http_status(host, port, See the [online LDoc documentation](http://kong.github.io/lua-resty-healthcheck) for the complete API. -## Async behaviour - -Since this library heavily uses the SHM to share data between workers, it must -use locks. The locks themselves need access to `ngx.sleep` which is not available -in all contexts. Most notably not during startup; `init` and `init_worker`. - -The library will try and acquire the lock and update, but if it fails it will -schedule an async update (timer with delay 0). - -One workaround for this in the initial phases would be to replace `ngx.sleep` with -a version that does a blocking sleep in `init`/`init_worker`. This will enable -the usage of locks in those phases. - - ## History Versioning is strictly based on [Semantic Versioning](https://semver.org/) @@ -117,33 +103,43 @@ Versioning is strictly based on [Semantic Versioning](https://semver.org/) * push commit and tag * upload rock to luarocks: `luarocks upload rockspecs/[name] --api-key=abc` +### 3.0.0 (12-Oct-2023) + +* Perf: optimize by localizing some functions [#92](https://github.com/Kong/lua-resty-healthcheck/pull/92) (backport) +* Fix: Generate fresh default http_statuses within new() [#83](https://github.com/Kong/lua-resty-healthcheck/pull/83) (backport) + ### 2.0.0 (22-Sep-2020) -* BREAKING: fallback for deprecated top-level field `type` is now removed +**Note:** +Changes in this version has been discarded from current & future development. +Below you can see it's changelog but be aware that these changes might not be present in `3.y.z` unless they are explicitly stated in `3.y.z`, `1.6.3` or previous releases. 
Read more at: [release 3.0.0 (#142)](https://github.com/Kong/lua-resty-healthcheck/pull/142) and [chore(*): realign master branch to 3.0.0 release (#144)](https://github.com/Kong/lua-resty-healthcheck/pull/144) + +> * BREAKING: fallback for deprecated top-level field `type` is now removed (deprecated since `0.5.0`) [#56](https://github.com/Kong/lua-resty-healthcheck/pull/56) -* BREAKING: Bump `lua-resty-worker-events` dependency to `2.0.0`. This makes +> * BREAKING: Bump `lua-resty-worker-events` dependency to `2.0.0`. This makes a lot of the APIs in this library asynchronous as the worker events `post` and `post_local` won't anymore call `poll` on a running worker automatically, for more information, see: https://github.com/Kong/lua-resty-worker-events#200-16-september-2020 -* BREAKING: tcp_failures can no longer be 0 on http(s) checks (unless http(s)_failures +> * BREAKING: tcp_failures can no longer be 0 on http(s) checks (unless http(s)_failures are also set to 0) [#55](https://github.com/Kong/lua-resty-healthcheck/pull/55) -* feature: Added support for https_sni [#49](https://github.com/Kong/lua-resty-healthcheck/pull/49) -* fix: properly log line numbers by using tail calls [#29](https://github.com/Kong/lua-resty-healthcheck/pull/29) -* fix: when not providing a hostname, use IP [#48](https://github.com/Kong/lua-resty-healthcheck/pull/48) -* fix: makefile; make install -* feature: added a status version field [#54](https://github.com/Kong/lua-resty-healthcheck/pull/54) -* feature: add headers for probe request [#54](https://github.com/Kong/lua-resty-healthcheck/pull/54) -* fix: exit early when reloading during a probe [#47](https://github.com/Kong/lua-resty-healthcheck/pull/47) -* fix: prevent target-list from being nil, due to async behaviour [#44](https://github.com/Kong/lua-resty-healthcheck/pull/44) -* fix: replace timer and node-wide locks with resty-timer, to prevent interval +> * feature: Added support for https_sni [#49](https://github.com/Kong/lua-resty-healthcheck/pull/49) +> * fix: properly log line numbers by using tail calls [#29](https://github.com/Kong/lua-resty-healthcheck/pull/29) +> * fix: when not providing a hostname, use IP [#48](https://github.com/Kong/lua-resty-healthcheck/pull/48) +> * fix: makefile; make install +> * feature: added a status version field [#54](https://github.com/Kong/lua-resty-healthcheck/pull/54) +> * feature: add headers for probe request [#54](https://github.com/Kong/lua-resty-healthcheck/pull/54) +> * fix: exit early when reloading during a probe [#47](https://github.com/Kong/lua-resty-healthcheck/pull/47) +> * fix: prevent target-list from being nil, due to async behaviour [#44](https://github.com/Kong/lua-resty-healthcheck/pull/44) +> * fix: replace timer and node-wide locks with resty-timer, to prevent interval skips [#59](https://github.com/Kong/lua-resty-healthcheck/pull/59) -* change: added additional logging on posting events [#25](https://github.com/Kong/lua-resty-healthcheck/issues/25) -* fix: do not run out of timers during init/init_worker when adding a vast +> * change: added additional logging on posting events [#25](https://github.com/Kong/lua-resty-healthcheck/issues/25) +> * fix: do not run out of timers during init/init_worker when adding a vast amount of targets [#57](https://github.com/Kong/lua-resty-healthcheck/pull/57) -* fix: do not call on the module table, but use a method for locks. Also in +> * fix: do not call on the module table, but use a method for locks. 
Also in [#57](https://github.com/Kong/lua-resty-healthcheck/pull/57) + ### 1.6.3 (06-Sep-2023) * Feature: Added support for https_sni [#49](https://github.com/Kong/lua-resty-healthcheck/pull/49) (backport) diff --git a/docs/index.html b/docs/index.html index 6492afd6..ec07accb 100644 --- a/docs/index.html +++ b/docs/index.html @@ -27,8 +27,10 @@
[Generated LDoc documentation: the hunks for docs/index.html and for docs/topics/readme.md.html (index 84eafdbe..9895d8a6)
regenerate the rendered API reference, moving the footer from "generated by LDoc 1.4.6 / Last updated 2020-09-22 15:00:30"
to "generated by LDoc 1.5.0 / Last updated 2023-09-06 09:49:32". The regenerated pages add reference entries for
run_locked (self, key, fn, ...) and checker:delayed_clear (delay), drop the "Async behaviour" topic and the repeated
"NOTE: in non-yieldable contexts, this will be executed async" notes, rename the documented checks.active.hheaders
option to checks.active.headers, update the copyright line to "Copyright 2017-2022 Kong Inc.", and refresh the bundled
README/changelog text through release 1.6.2 (17-Nov-2022).]
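The README hunk above keeps the description of passive health checks as explicit notifications through the programmatic API (`checker:report_http_status(host, port, status)` and friends), and the regenerated reference summarized above documents the full ip/port/hostname/status/check-type parameter lists. A minimal, non-authoritative sketch of such a passive report from a log phase; the target address values are illustrative, and `checker` is assumed to be an anchored object created elsewhere with `healthcheck.new()`:

    -- log_by_lua_block: feed the upstream response status back into the
    -- healthchecker as a passive check result. The ip/port/hostname values
    -- below are illustrative; `checker` is the anchored healthchecker object.
    local ip, port, hostname = "127.0.0.1", 8080, "example.com"

    local ok, err = checker:report_http_status(ip, port, hostname, ngx.status, "passive")
    if not ok then
      ngx.log(ngx.ERR, "failed to report health status: ", err)
    end

Because log is a non-yieldable phase, anything in the library that does need a lock goes through the run_locked helper documented above: it uses a zero lock timeout there and may re-schedule the locked function in a timer, returning "scheduled".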
        diff --git a/lib/resty/healthcheck.lua b/lib/resty/healthcheck.lua index 776f3df1..6fc84cc9 100644 --- a/lib/resty/healthcheck.lua +++ b/lib/resty/healthcheck.lua @@ -20,38 +20,44 @@ -- - Events will be raised in every worker, see [lua-resty-worker-events](https://github.com/Kong/lua-resty-worker-events) -- for details. -- --- @copyright 2017-2020 Kong Inc. +-- @copyright 2017-2023 Kong Inc. -- @author Hisham Muhammad, Thijs Schreijer -- @license Apache 2.0 -local bit = require("bit") -local cjson = require("cjson.safe").new() -local resty_timer = require("resty.timer") -local ssl = require("ngx.ssl") -local worker_events = require("resty.worker.events") --- local resty_lock = require("resty.lock") -- required later in the file" - local ERR = ngx.ERR local WARN = ngx.WARN local DEBUG = ngx.DEBUG local ngx_log = ngx.log -local re_find = ngx.re.find -local ngx_worker_exiting = ngx.worker.exiting -local get_phase = ngx.get_phase - local tostring = tostring local ipairs = ipairs +local table_insert = table.insert +local table_remove = table.remove +local table_concat = table.concat +local string_format = string.format +local ssl = require("ngx.ssl") +local resty_timer = require "resty.timer" +local bit = require("bit") +local re_find = ngx.re.find +local ngx_now = ngx.now +local ngx_worker_id = ngx.worker.id +local ngx_worker_pid = ngx.worker.pid local pcall = pcall +local get_phase = ngx.get_phase local type = type local assert = assert -local table_remove = table.remove -local table_concat = table.concat -local string_format = string.format + +local RESTY_EVENTS_VER = [[^0\.1\.\d+$]] +local RESTY_WORKER_EVENTS_VER = "0.3.3" + local new_tab local nkeys local is_array +local codec + + +local TESTING = _G.__TESTING_HEALTHCHECKER or false do local ok @@ -86,8 +92,38 @@ do return true end end + + ok, codec = pcall(require, "string.buffer") + if not ok then + codec = require("cjson.safe").new() + end end + +local worker_events +--- This function loads the worker events module received as arg. It will throw +-- error() if it is not possible to load the module. +local function load_events_module(self) + if self.events_module == "resty.worker.events" then + worker_events = require("resty.worker.events") + assert(worker_events, "could not load lua-resty-worker-events") + assert(worker_events._VERSION == RESTY_WORKER_EVENTS_VER, + "unsupported lua-resty-worker-events version") + + elseif self.events_module == "resty.events" then + worker_events = require("resty.events.compat") + local version_match = ngx.re.match(worker_events._VERSION, RESTY_EVENTS_VER, "o") + assert(version_match, "unsupported lua-resty-events version") + + else + error("unknown events module") + end + + assert(worker_events.configured(), "please configure the '" .. + self.events_module .. "' module before using 'lua-resty-healthcheck'") +end + + -- constants local EVENT_SOURCE_PREFIX = "lua-resty-healthcheck" local LOG_PREFIX = "[healthcheck] " @@ -98,6 +134,17 @@ local EMPTY = setmetatable({},{ end }) +--- timer constants +-- evaluate active checks every 0.1s +local CHECK_INTERVAL = 0.1 +-- use a 10% jitter to start each worker timer +local CHECK_JITTER = CHECK_INTERVAL * 0.1 +-- lock valid period: the worker which acquires the lock owns it for 15 times +-- the check interval. 
If it does not update the shm during this period, we +-- consider that it is not able to continue checking (the worker probably was killed) +local LOCK_PERIOD = CHECK_INTERVAL * 15 +-- interval between stale targets cleanup +local CLEANUP_INTERVAL = CHECK_INTERVAL * 25 -- Counters: a 32-bit shm integer can hold up to four 8-bit counters. local CTR_SUCCESS = 0x00000001 @@ -177,57 +224,37 @@ end -- Some color for demo purposes local use_color = false local id = function(x) return x end -local worker_color = use_color and function(str) return ("\027["..tostring(31 + ngx.worker.pid() % 5).."m"..str.."\027[0m") end or id +local worker_color = use_color and function(str) return ("\027["..tostring(31 + ngx_worker_pid() % 5).."m"..str.."\027[0m") end or id -- Debug function local function dump(...) print(require("pl.pretty").write({...})) end -- luacheck: ignore 211 --- cache timers in "init", "init_worker" phases so we use only a single timer --- and do not run the risk of exhausting them for large sets --- see https://github.com/Kong/lua-resty-healthcheck/issues/40 --- Below we'll temporarily use a patched version of ngx.timer.at, until we're --- past the init and init_worker phases, after which we'll return to the regular --- ngx.timer.at implementation -local ngx_timer_at do - local callback_list = {} +local _M = {} - local function handler(premature) - if premature then - return - end +-- checker objects (weak) table +local hcs = setmetatable({}, { + __mode = "v", +}) - local list = callback_list - callback_list = {} +local active_check_timer +local last_cleanup_check - for _, args in ipairs(list) do - local ok, err = pcall(args[1], ngx_worker_exiting(), unpack(args, 2, args.n)) - if not ok then - ngx_log(ERR, "timer failure: ", err) - end - end - end +-- serialize a table to a string +local serialize = codec.encode - ngx_timer_at = function(...) - local phase = get_phase() - if phase ~= "init" and phase ~= "init_worker" then - -- we're past init/init_worker, so replace this temp function with the - -- real-deal again, so from here on we run regular timers. - ngx_timer_at = ngx.timer.at - return ngx.timer.at(...) - end - local n = #callback_list - callback_list[n+1] = { n = select("#", ...), ... } - if n == 0 then - -- first one, so schedule the actual timer - return ngx.timer.at(0, handler) - end - return true - end +-- deserialize a string to a table +local deserialize = codec.decode + +local function key_for(key_prefix, ip, port, hostname) + return string_format("%s:%s:%s%s", key_prefix, ip, port, hostname and ":" .. hostname or "") end +-- resty.lock timeout when yieldable +local LOCK_TIMEOUT = 5 + local run_locked do -- resty_lock is restricted to this scope in order to keep sensitive @@ -247,23 +274,31 @@ do timer = true, } - local function run_in_timer(premature, fn, ...) - if not premature then - fn(...) + local function run_in_timer(premature, self, key, fn, ...) + if premature then + return end - end - local function schedule(fn, ...) - return ngx_timer_at(0, run_in_timer, fn, ...) + local ok, err = run_locked(self, key, fn, ...) + if not ok then + self:log(ERR, "locked function for key '", key, "' failed in timer: ", err) + end end - -- timeout when yieldable - local timeout = 5 + local function schedule(self, key, fn, ...) + local ok, err = ngx.timer.at(0, run_in_timer, self, key, fn, ...) + if not ok then + return nil, "failed scheduling locked function for key '" .. key .. + "', " .. 
err + end + + return "scheduled" + end -- resty.lock consumes these options immediately, so this table can be reused local opts = { - exptime = 10, -- timeout after which lock is released anyway - timeout = timeout, -- max wait time to acquire lock + exptime = 10, -- timeout after which lock is released anyway + timeout = LOCK_TIMEOUT, -- max wait time to acquire lock } --- @@ -279,8 +314,7 @@ do -- attempt to sleep/yield -- 2. If acquiring the lock fails due to a timeout, `run_locked` -- (this function) is re-scheduled to run in a timer. In this case, - -- the function returns `"scheduled"` instead of the return value of - -- the locked function + -- the function returns `"scheduled"` -- -- @param self The checker object -- @param key the key/identifier to acquire a lock for @@ -302,7 +336,7 @@ do local yield = yieldable[get_phase()] if yield then - opts.timeout = timeout + opts.timeout = LOCK_TIMEOUT else -- if yielding is not possible in the current phase, use a zero timeout -- so that resty.lock will return `nil, "timeout"` immediately instead of @@ -321,12 +355,7 @@ do if not elapsed and err == "timeout" and not yield then -- yielding is not possible in the current phase, so retry in a timer - local ok, terr = schedule(run_locked, self, key, fn, ...) - if not ok then - return nil, terr - end - - return "scheduled" + return schedule(self, key, fn, ...) elseif not elapsed then return nil, "failed acquiring lock for '" .. key .. "', " .. err @@ -341,36 +370,12 @@ do end if not pok then - return nil, perr - else - return perr, res + return nil, "locked function threw an exception: " .. tostring(perr) end - end -end - - - -local _M = {} - --- TODO: improve serialization speed --- serialize a table to a string -local function serialize(t) - return cjson.encode(t) -end - - --- deserialize a string to a table -local function deserialize(s) - return cjson.decode(s) -end - - -local function key_for(key_prefix, ip, port, hostname) - return string_format("%s:%s:%s%s", key_prefix, ip, port, hostname and ":" .. hostname or "") + return perr, res + end end - - local checker = {} @@ -407,8 +412,8 @@ end --- Run the given function holding a lock on the target list. -- @param self The checker object -- @param fn The function to execute --- @return The results of the function; "scheduled" if the function was --- scheduled in a timer, or nil and an error message in case of failure +-- @return The results of the function; or nil and an error message +-- in case it fails locking. local function locking_target_list(self, fn) local ok, err = run_locked(self, self.TARGET_LIST_LOCK, with_target_list, self, fn) @@ -429,8 +434,6 @@ end --- Add a target to the healthchecker. -- When the ip + port + hostname combination already exists, it will simply -- return success (without updating `is_healthy` status). --- --- *NOTE*: in non-yieldable contexts, this will be executed async. -- @param ip IP address of the target to check. -- @param port the port to check against. 
-- @param hostname (optional) hostname to set as the host header in the HTTP @@ -443,6 +446,7 @@ end function checker:add_target(ip, port, hostname, is_healthy, hostheader) ip = tostring(assert(ip, "no ip address provided")) port = assert(tonumber(port), "no port number provided") + hostname = hostname or ip if is_healthy == nil then is_healthy = true end @@ -450,13 +454,21 @@ function checker:add_target(ip, port, hostname, is_healthy, hostheader) local internal_health = is_healthy and "healthy" or "unhealthy" local ok, err = locking_target_list(self, function(target_list) + local found = false -- check whether we already have this target for _, target in ipairs(target_list) do - if target.ip == ip and target.port == port and target.hostname == hostname then - self:log(DEBUG, "adding an existing target: ", hostname or "", " ", ip, - ":", port, " (ignoring)") - return false + if target.ip == ip and target.port == port and target.hostname == (hostname) then + if target.purge_time == nil then + self:log(DEBUG, "adding an existing target: ", hostname or "", " ", ip, + ":", port, " (ignoring)") + return false + end + target.purge_time = nil + found = true + internal_health = self:get_target_status(ip, port, hostname) and + "healthy" or "unhealthy" + break end end @@ -470,12 +482,14 @@ function checker:add_target(ip, port, hostname, is_healthy, hostheader) end -- target does not exist, go add it - target_list[#target_list + 1] = { - ip = ip, - port = port, - hostname = hostname, - hostheader = hostheader, - } + if not found then + target_list[#target_list + 1] = { + ip = ip, + port = port, + hostname = hostname, + hostheader = hostheader, + } + end target_list = serialize(target_list) ok, err = self.shm:set(self.TARGET_LIST, target_list) @@ -484,7 +498,9 @@ function checker:add_target(ip, port, hostname, is_healthy, hostheader) end -- raise event for our newly added target - self:raise_event(self.events[internal_health], ip, port, hostname) + if not found then + self:raise_event(self.events[internal_health], ip, port, hostname) + end return true end) @@ -518,8 +534,6 @@ end --- Remove a target from the healthchecker. -- The target not existing is not considered an error. --- --- *NOTE*: in non-yieldable contexts, this will be executed async. -- @param ip IP address of the target being checked. -- @param port the port being checked against. -- @param hostname (optional) hostname of the target being checked. @@ -566,8 +580,6 @@ end --- Clear all healthcheck data. --- --- *NOTE*: in non-yieldable contexts, this will be executed async. -- @return `true` on success, or `nil + error` on failure. function checker:clear() @@ -599,6 +611,32 @@ function checker:clear() end +--- Clear all healthcheck data after a period of time. +-- Useful for keeping target status between configuration reloads. +-- @param delay delay in seconds before purging target state. +-- @return `true` on success, or `nil + error` on failure. +function checker:delayed_clear(delay) + assert(tonumber(delay), "no delay provided") + + return locking_target_list(self, function(target_list) + local purge_time = ngx_now() + delay + + -- add purge time to all targets + for _, target in ipairs(target_list) do + target.purge_time = purge_time + end + + target_list = serialize(target_list) + local ok, err = self.shm:set(self.TARGET_LIST, target_list) + if not ok then + return nil, "failed to store target_list in shm: " .. err + end + + return true + end) +end + + --- Get the current status of the target. 
-- @param ip IP address of the target being checked. -- @param port the port being checked against. @@ -629,7 +667,7 @@ end -- @param port Target port -- @param hostname Target hostname -- @param fn The function to execute --- @return The results of the function; or "scheduled" in case it fails locking and +-- @return The results of the function; or true in case it fails locking and -- will retry asynchronously; or nil+err in case it fails to retry. local function locking_target(self, ip, port, hostname, fn) local key = key_for(self.TARGET_LOCK, ip, port, hostname) @@ -656,8 +694,6 @@ end -- Increment the healthy or unhealthy counter. If the threshold of occurrences -- is reached, it changes the status of the target in the shm and posts an -- event. --- --- *NOTE*: in non-yieldable contexts, this will be executed async. -- @param self The checker object -- @param health_report "healthy" for the success counter that drives a target -- towards the healthy state; "unhealthy" for the failure counter. @@ -743,8 +779,6 @@ end -- If `unhealthy.tcp_failures` (for TCP failures) or `unhealthy.http_failures` -- is set to zero in the configuration, this function is a no-op -- and returns `true`. --- --- *NOTE*: in non-yieldable contexts, this will be executed async. -- @param ip IP address of the target being checked. -- @param port the port being checked against. -- @param hostname (optional) hostname of the target being checked. @@ -772,8 +806,6 @@ end -- required to make a target "rise". -- If `healthy.successes` is set to zero in the configuration, -- this function is a no-op and returns `true`. --- --- *NOTE*: in non-yieldable contexts, this will be executed async. -- @param ip IP address of the target being checked. -- @param port the port being checked against. -- @param hostname (optional) hostname of the target being checked. @@ -795,8 +827,6 @@ end -- or `unhealthy.http_failures` (fur unhealthy HTTP status codes) -- is set to zero in the configuration, this function is a no-op -- and returns `true`. --- --- *NOTE*: in non-yieldable contexts, this will be executed async. -- @param ip IP address of the target being checked. -- @param port the port being checked against. -- @param hostname (optional) hostname of the target being checked. @@ -830,8 +860,6 @@ end --- Report a failure on TCP level. -- If `unhealthy.tcp_failures` is set to zero in the configuration, -- this function is a no-op and returns `true`. --- --- *NOTE*: in non-yieldable contexts, this will be executed async. -- @param ip IP address of the target being checked. -- @param port the port being checked against. -- @param hostname hostname of the target being checked. @@ -855,8 +883,6 @@ end --- Report a timeout failure. -- If `unhealthy.timeouts` is set to zero in the configuration, -- this function is a no-op and returns `true`. --- --- *NOTE*: in non-yieldable contexts, this will be executed async. -- @param ip IP address of the target being checked. -- @param port the port being checked against. -- @param hostname (optional) hostname of the target being checked. @@ -872,8 +898,6 @@ end --- Sets the current status of all targets with the given hostname and port. --- --- *NOTE*: in non-yieldable contexts, this will be executed async. -- @param hostname hostname being checked. -- @param port the port being checked against -- @param is_healthy boolean: `true` for healthy, `false` for unhealthy @@ -900,9 +924,7 @@ end --- Sets the current status of the target. --- This will set the status and clear its counters. 
--- --- *NOTE*: in non-yieldable contexts, this will be executed async. +-- This will immediately set the status and clear its counters. -- @param ip IP address of the target being checked -- @param port the port being checked against -- @param hostname (optional) hostname of the target being checked. @@ -1053,7 +1075,7 @@ function checker:run_single_check(ip, port, hostname, hostheader) local bytes bytes, err = sock:send(request) if not bytes then - self:log(ERR, "failed to send http request to '", hostname or "", " (", ip, ":", port, ")': ", err) + self:log(ERR, "failed to send http request to '", hostname, " (", ip, ":", port, ")': ", err) if err == "timeout" then sock:close() -- timeout errors do not close the socket. return self:report_timeout(ip, port, hostname, "active") @@ -1064,7 +1086,7 @@ function checker:run_single_check(ip, port, hostname, hostheader) local status_line status_line, err = sock:receive() if not status_line then - self:log(ERR, "failed to receive status line from '", hostname or "", " (",ip, ":", port, ")': ", err) + self:log(ERR, "failed to receive status line from '", hostname, " (",ip, ":", port, ")': ", err) if err == "timeout" then sock:close() -- timeout errors do not close the socket. return self:report_timeout(ip, port, hostname, "active") @@ -1079,13 +1101,13 @@ function checker:run_single_check(ip, port, hostname, hostheader) if from then status = tonumber(status_line:sub(from, to)) else - self:log(ERR, "bad status line from '", hostname or "", " (", ip, ":", port, ")': ", status_line) + self:log(ERR, "bad status line from '", hostname, " (", ip, ":", port, ")': ", status_line) -- note: 'status' will be reported as 'nil' end sock:close() - self:log(DEBUG, "Reporting '", hostname or "", " (", ip, ":", port, ")' (got HTTP ", status, ")") + self:log(DEBUG, "Reporting '", hostname, " (", ip, ":", port, ")' (got HTTP ", status, ")") return self:report_http_status(ip, port, hostname, status, "active") end @@ -1093,11 +1115,7 @@ end -- executes a work package (a list of checks) sequentially function checker:run_work_package(work_package) for _, work_item in ipairs(work_package) do - if ngx_worker_exiting() then - self:log(DEBUG, "worker exting, skip check") - break - end - self:log(DEBUG, "Checking ", work_item.hostname or "", " ", + self:log(DEBUG, "Checking ", work_item.hostname, " ", work_item.hostheader and "(host header: ".. work_item.hostheader .. ")" or "", work_item.ip, ":", work_item.port, " (currently ", work_item.debug_health, ")") @@ -1146,12 +1164,51 @@ end -- results of the checks. +-- @return `true` on success, or false if the lock was not acquired, or `nil + error` +-- in case of errors +local function get_periodic_lock(shm, key) + local my_pid = ngx_worker_pid() + local checker_pid = shm:get(key) + + if checker_pid == nil then + -- no worker is checking, try to acquire the lock + local ok, err = shm:add(key, my_pid, LOCK_PERIOD) + if not ok then + if err == "exists" then + -- another worker got the lock before + return false + end + ngx_log(ERR, "failed to add key '", key, "': ", err) + return nil, err + end + elseif checker_pid ~= my_pid then + -- another worker is checking + return false + end + + return true +end + + +-- touch the shm to refresh the valid period +local function renew_periodic_lock(shm, key) + local my_pid = ngx_worker_pid() + + local _, err = shm:set(key, my_pid, LOCK_PERIOD) + if err then + ngx_log(ERR, "failed to update key '", key, "': ", err) + end +end + + --- Active health check callback function. 
-- @param self the checker object this timer runs on -- @param health_mode either "healthy" or "unhealthy" to indicate what check local function checker_callback(self, health_mode) + if self.checker_callback_count then + self.checker_callback_count = self.checker_callback_count + 1 + end - -- create a list of targets to check, here we can still do this atomically local list_to_check = {} local targets, err = fetch_target_list(self) if not targets then @@ -1180,8 +1237,19 @@ local function checker_callback(self, health_mode) if not list_to_check[1] then self:log(DEBUG, "checking ", health_mode, " targets: nothing to do") else - self:log(DEBUG, "checking ", health_mode, " targets: #", #list_to_check) - self:active_check_targets(list_to_check) + local timer = resty_timer({ + interval = 0, + recurring = false, + immediate = false, + detached = true, + expire = function() + self:log(DEBUG, "checking ", health_mode, " targets: #", #list_to_check) + self:active_check_targets(list_to_check) + end, + }) + if timer == nil then + self:log(ERR, "failed to create timer to check ", health_mode) + end end end @@ -1193,7 +1261,7 @@ function checker:event_handler(event_name, ip, port, hostname) if event_name == self.events.remove then if target_found then -- remove hash part - self.targets[target_found.ip][target_found.port][target_found.hostname or target_found.ip] = nil + self.targets[target_found.ip][target_found.port][target_found.hostname] = nil if not next(self.targets[target_found.ip][target_found.port]) then -- no more hostnames on this port, so delete it self.targets[target_found.ip][target_found.port] = nil @@ -1211,7 +1279,7 @@ function checker:event_handler(event_name, ip, port, hostname) end end self:log(DEBUG, "event: target '", hostname or "", " (", ip, ":", port, - "' removed") + ")' removed") else self:log(WARN, "event: trying to remove an unknown target '", @@ -1225,25 +1293,19 @@ function checker:event_handler(event_name, ip, port, hostname) then if not target_found then -- it is a new target, must add it first - target_found = { ip = ip, port = port, hostname = hostname } - self.targets[ip] = self.targets[ip] or {} - self.targets[ip][port] = self.targets[ip][port] or {} - self.targets[ip][port][hostname or ip] = target_found + target_found = { ip = ip, port = port, hostname = hostname or ip } + self.targets[target_found.ip] = self.targets[target_found.ip] or {} + self.targets[target_found.ip][target_found.port] = self.targets[target_found.ip][target_found.port] or {} + self.targets[target_found.ip][target_found.port][target_found.hostname] = target_found self.targets[#self.targets + 1] = target_found self:log(DEBUG, "event: target added '", hostname or "", "(", ip, ":", port, ")'") end do - local from_status = target_found.internal_health - local to_status = event_name - local from = from_status == "healthy" or from_status == "mostly_healthy" - local to = to_status == "healthy" or to_status == "mostly_healthy" - - if from ~= to then - self.status_ver = self.status_ver + 1 - end - - self:log(DEBUG, "event: target status '", hostname or "", "(", ip, ":", - port, ")' from '", from, "' to '", to, "', ver: ", self.status_ver) + local from = target_found.internal_health + local to = event_name + self:log(DEBUG, "event: target status '", hostname or "", "(", ip, ":", port, + ")' from '", from == "healthy" or from == "mostly_healthy", + "' to '", to == "healthy" or to == "mostly_healthy", "'") end target_found.internal_health = event_name @@ -1266,17 +1328,14 @@ end -- Log a message specific to 
this checker -- @param level standard ngx log level constant function checker:log(level, ...) - return ngx_log(level, self.LOG_PREFIX, ...) + ngx_log(level, worker_color(self.LOG_PREFIX), ...) end -- Raises an event for a target status change. function checker:raise_event(event_name, ip, port, hostname) local target = { ip = ip, port = port, hostname = hostname } - local ok, err = worker_events.post(self.EVENT_SOURCE, event_name, target) - if not ok then - self:log(ERR, "failed to post event '", event_name, "' with: ", err) - end + worker_events.post(self.EVENT_SOURCE, event_name, target) end @@ -1285,15 +1344,10 @@ end -- after the current timers have expired they will be marked as stopped. -- @return `true` function checker:stop() - if self.active_healthy_timer then - self.active_healthy_timer:cancel() - self.active_healthy_timer = nil - end - if self.active_unhealthy_timer then - self.active_unhealthy_timer:cancel() - self.active_unhealthy_timer = nil - end - self:log(DEBUG, "timers stopped") + self.checks.active.healthy.active = false + self.checks.active.unhealthy.active = false + worker_events.unregister(self.ev_callback, self.EVENT_SOURCE) + self:log(DEBUG, "healthchecker stopped") return true end @@ -1301,34 +1355,21 @@ end --- Start the background health checks. -- @return `true`, or `nil + error`. function checker:start() - if self.active_healthy_timer or self.active_unhealthy_timer then - return nil, "cannot start, timers are still running" + if self.checks.active.healthy.interval > 0 then + self.checks.active.healthy.active = true + -- the first active check happens only after `interval` + self.checks.active.healthy.last_run = ngx_now() end - for _, health_mode in ipairs({ "healthy", "unhealthy" }) do - if self.checks.active[health_mode].interval > 0 then - local timer, err = resty_timer({ - interval = self.checks.active[health_mode].interval, - recurring = true, - immediate = true, - detached = false, - expire = checker_callback, - cancel = nil, - shm_name = self.shm_name, - key_name = self.PERIODIC_LOCK .. health_mode, - sub_interval = math.min(self.checks.active[health_mode].interval, 0.5), - }, self, health_mode) - if not timer then - return nil, "failed to create '" .. health_mode .. "' timer: " .. err - end - self["active_" .. health_mode .. "_timer"] = timer - end + if self.checks.active.unhealthy.interval > 0 then + self.checks.active.unhealthy.active = true + self.checks.active.unhealthy.last_run = ngx_now() end worker_events.unregister(self.ev_callback, self.EVENT_SOURCE) -- ensure we never double subscribe worker_events.register_weak(self.ev_callback, self.EVENT_SOURCE) - self:log(DEBUG, "timers started") + self:log(DEBUG, "active check flagged as active") return true end @@ -1384,12 +1425,13 @@ local function fill_in_settings(opts, defaults, ctx) return obj end + local function get_defaults() return { name = NO_DEFAULT, shm_name = NO_DEFAULT, type = NO_DEFAULT, - status_ver = 0, + events_module = "resty.worker.events", checks = { active = { type = "http", @@ -1459,9 +1501,6 @@ end -- -- *NOTE*: the returned `checker` object must be anchored, if not it will be -- removed by Lua's garbage collector and the healthchecks will cease to run. --- --- *NOTE*: in non-yieldable contexts, the initial loading of the target --- statusses will be executed async. -- @param opts table with checker options. 
Options are: -- -- * `name`: name of the health checker @@ -1498,13 +1537,24 @@ end -- @return checker object, or `nil + error` function _M.new(opts) - assert(worker_events.configured(), "please configure the " .. - "'lua-resty-worker-events' module before using 'lua-resty-healthcheck'") + opts = opts or {} + local active_type = (((opts or EMPTY).checks or EMPTY).active or EMPTY).type + local passive_type = (((opts or EMPTY).checks or EMPTY).passive or EMPTY).type - -- create a new defaults table within new() as defaults table will be modified by to_set function later + -- create a new defaults table within new() as defaults table will be modified by to_set function later local defaults = get_defaults() local self = fill_in_settings(opts, defaults) + load_events_module(self) + + -- If using deprecated self.type, that takes precedence over + -- a default value. TODO: remove this in a future version + if self.type then + self.checks.active.type = active_type or self.type + self.checks.passive.type = passive_type or self.type + check_valid_type("type", self.type) + end + assert(self.checks.active.healthy.successes < 255, "checks.active.healthy.successes must be at most 254") assert(self.checks.active.unhealthy.tcp_failures < 255, "checks.active.unhealthy.tcp_failures must be at most 254") assert(self.checks.active.unhealthy.http_failures < 255, "checks.active.unhealthy.http_failures must be at most 254") @@ -1514,24 +1564,9 @@ function _M.new(opts) assert(self.checks.passive.unhealthy.http_failures < 255, "checks.passive.unhealthy.http_failures must be at most 254") assert(self.checks.passive.unhealthy.timeouts < 255, "checks.passive.unhealthy.timeouts must be at most 254") - -- since counter types are independent (tcp failure does not also increment http failure) - -- a TCP threshold of 0 is not allowed for enabled http checks. - -- It would make tcp failures go unnoticed because the http failure counter is not - -- incremented and a tcp threshold of 0 means disabled, and hence it would never trip. - -- See https://github.com/Kong/lua-resty-healthcheck/issues/30 - if self.checks.passive.type == "http" or self.checks.passive.type == "https" then - if self.checks.passive.unhealthy.http_failures > 0 then - assert(self.checks.passive.unhealthy.tcp_failures > 0, "self.checks.passive.unhealthy.tcp_failures must be >0 for http(s) checks with http_failures >0") - end - end - if self.checks.active.type == "http" or self.checks.active.type == "https" then - if self.checks.active.unhealthy.http_failures > 0 then - assert(self.checks.active.unhealthy.tcp_failures > 0, "self.checks.active.unhealthy.tcp_failures must be > 0 for http(s) checks with http_failures >0") - end - end - if opts.test then self.test_get_counter = test_get_counter + self.checker_callback_count = 0 end assert(self.name, "required option 'name' is missing") @@ -1560,7 +1595,7 @@ function _M.new(opts) end -- other properties - self.targets = {} -- list of targets, initially loaded, maintained by events + self.targets = nil -- list of targets, initially loaded, maintained by events self.events = nil -- hash table with supported events (prevent magic strings) self.ev_callback = nil -- callback closure per checker instance @@ -1582,10 +1617,10 @@ function _M.new(opts) self.TARGET_LIST = SHM_PREFIX .. self.name .. ":target_list" self.TARGET_LIST_LOCK = SHM_PREFIX .. self.name .. ":target_list_lock" self.TARGET_LOCK = SHM_PREFIX .. self.name .. ":target_lock" - self.PERIODIC_LOCK = SHM_PREFIX .. self.name .. 
":period_lock:" + self.PERIODIC_LOCK = SHM_PREFIX .. ":period_lock:" -- prepare constants self.EVENT_SOURCE = EVENT_SOURCE_PREFIX .. " [" .. self.name .. "]" - self.LOG_PREFIX = worker_color(LOG_PREFIX .. "(" .. self.name .. ") ") + self.LOG_PREFIX = LOG_PREFIX .. "(" .. self.name .. ") " -- register for events, and directly after load initial target list -- order is important! @@ -1605,7 +1640,7 @@ function _M.new(opts) -- fill-in the hash part for easy lookup self.targets[target.ip] = self.targets[target.ip] or {} self.targets[target.ip][target.port] = self.targets[target.ip][target.port] or {} - self.targets[target.ip][target.port][target.hostname or target.ip] = target + self.targets[target.ip][target.port][target.hostname] = target end return true @@ -1620,23 +1655,113 @@ function _M.new(opts) -- just a wrapper to be able to access `self` as a closure return self:event_handler(event, data.ip, data.port, data.hostname) end - worker_events.register_weak(self.ev_callback, self.EVENT_SOURCE) -- handle events to sync up in case there was a change by another worker - worker_events.poll() + worker_events:poll() end - -- start timers + -- turn on active health check local ok, err = self:start() if not ok then self:stop() return nil, err end + -- if active checker is not running, start it + if active_check_timer == nil then + + self:log(DEBUG, "worker ", ngx_worker_id(), " (pid: ", ngx_worker_pid(), ") ", + "starting active check timer") + local shm, key = self.shm, self.PERIODIC_LOCK + last_cleanup_check = ngx_now() + active_check_timer, err = resty_timer({ + recurring = true, + interval = CHECK_INTERVAL, + jitter = CHECK_JITTER, + detached = false, + expire = function() + + if get_periodic_lock(shm, key) then + active_check_timer.interval = CHECK_INTERVAL + renew_periodic_lock(shm, key) + else + active_check_timer.interval = CHECK_INTERVAL * 10 + return + end + + local cur_time = ngx_now() + for _, checker_obj in pairs(hcs) do + + if (last_cleanup_check + CLEANUP_INTERVAL) < cur_time then + -- clear targets marked for delayed removal + locking_target_list(checker_obj, function(target_list) + local removed_targets = {} + local index = 1 + while index <= #target_list do + local target = target_list[index] + if target.purge_time and target.purge_time <= cur_time then + table_insert(removed_targets, target) + table_remove(target_list, index) + else + index = index + 1 + end + end + + if #removed_targets > 0 then + target_list = serialize(target_list) + + local ok, err = shm:set(checker_obj.TARGET_LIST, target_list) + if not ok then + return nil, "failed to store target_list in shm: " .. 
err + end + + for _, target in ipairs(removed_targets) do + clear_target_data_from_shm(checker_obj, target.ip, target.port, target.hostname) + checker_obj:raise_event(checker_obj.events.remove, target.ip, target.port, target.hostname) + end + end + end) + + last_cleanup_check = cur_time + end + + if checker_obj.checks.active.healthy.active and + (checker_obj.checks.active.healthy.last_run + + checker_obj.checks.active.healthy.interval <= cur_time) + then + checker_obj.checks.active.healthy.last_run = cur_time + checker_callback(checker_obj, "healthy") + end + + if checker_obj.checks.active.unhealthy.active and + (checker_obj.checks.active.unhealthy.last_run + + checker_obj.checks.active.unhealthy.interval <= cur_time) + then + checker_obj.checks.active.unhealthy.last_run = cur_time + checker_callback(checker_obj, "unhealthy") + end + end + end, + }) + if not active_check_timer then + self:log(ERR, "Could not start active check timer: ", err) + end + end + + table.insert(hcs, self) + -- TODO: push entire config in debug level logs self:log(DEBUG, "Healthchecker started!") return self end +if TESTING then + checker._run_locked = run_locked + checker._set_lock_timeout = function(t) + LOCK_TIMEOUT = t + end +end + + return _M diff --git a/lua-resty-healthcheck-2.0.0-1.src.rock b/lua-resty-healthcheck-2.0.0-1.src.rock deleted file mode 100644 index 56053445..00000000 Binary files a/lua-resty-healthcheck-2.0.0-1.src.rock and /dev/null differ diff --git a/lua-resty-healthcheck-scm-1.rockspec b/lua-resty-healthcheck-scm-1.rockspec index 610a44b3..309958b2 100644 --- a/lua-resty-healthcheck-scm-1.rockspec +++ b/lua-resty-healthcheck-scm-1.rockspec @@ -1,8 +1,7 @@ package = "lua-resty-healthcheck" version = "scm-1" source = { - url = "git://github.com/kong/lua-resty-healthcheck", - branch = "master", + url = "git://github.com/Kong/lua-resty-healthcheck", } description = { summary = "Healthchecks for OpenResty to check upstream service status", @@ -15,8 +14,7 @@ description = { homepage = "https://github.com/Kong/lua-resty-healthcheck" } dependencies = { - "lua-resty-worker-events ~> 2", - "penlight ~> 1.7", + "penlight >= 1.9.2", "lua-resty-timer ~> 1", } build = { diff --git a/rockspecs/lua-resty-healthcheck-1.4.0-1.rockspec b/rockspecs/lua-resty-healthcheck-1.4.0-1.rockspec new file mode 100644 index 00000000..811af0be --- /dev/null +++ b/rockspecs/lua-resty-healthcheck-1.4.0-1.rockspec @@ -0,0 +1,27 @@ +package = "lua-resty-healthcheck" +version = "1.4.0-1" +source = { + url = "https://github.com/Kong/lua-resty-healthcheck/archive/1.4.0.tar.gz", + dir = "lua-resty-healthcheck-1.4.0" +} +description = { + summary = "Healthchecks for OpenResty to check upstream service status", + detailed = [[ + lua-resty-healthcheck is a module that can check upstream service + availability by sending requests and validating responses at timed + intervals. 
+ ]], + homepage = "https://github.com/Kong/lua-resty-healthcheck", + license = "Apache 2.0" +} +dependencies = { + "lua-resty-worker-events == 1.0.0", + "penlight >= 1.9.2", + "lua-resty-timer ~> 1", +} +build = { + type = "builtin", + modules = { + ["resty.healthcheck"] = "lib/resty/healthcheck.lua", + } +} diff --git a/rockspecs/lua-resty-healthcheck-1.4.1-1.rockspec b/rockspecs/lua-resty-healthcheck-1.4.1-1.rockspec new file mode 100644 index 00000000..f1b1d349 --- /dev/null +++ b/rockspecs/lua-resty-healthcheck-1.4.1-1.rockspec @@ -0,0 +1,27 @@ +package = "lua-resty-healthcheck" +version = "1.4.1-1" +source = { + url = "https://github.com/Kong/lua-resty-healthcheck/archive/1.4.1.tar.gz", + dir = "lua-resty-healthcheck-1.4.1" +} +description = { + summary = "Healthchecks for OpenResty to check upstream service status", + detailed = [[ + lua-resty-healthcheck is a module that can check upstream service + availability by sending requests and validating responses at timed + intervals. + ]], + homepage = "https://github.com/Kong/lua-resty-healthcheck", + license = "Apache 2.0" +} +dependencies = { + "lua-resty-worker-events == 1.0.0", + "penlight >= 1.9.2", + "lua-resty-timer ~> 1", +} +build = { + type = "builtin", + modules = { + ["resty.healthcheck"] = "lib/resty/healthcheck.lua", + } +} diff --git a/rockspecs/lua-resty-healthcheck-1.4.2-1.rockspec b/rockspecs/lua-resty-healthcheck-1.4.2-1.rockspec new file mode 100644 index 00000000..b700ee4d --- /dev/null +++ b/rockspecs/lua-resty-healthcheck-1.4.2-1.rockspec @@ -0,0 +1,27 @@ +package = "lua-resty-healthcheck" +version = "1.4.2-1" +source = { + url = "https://github.com/Kong/lua-resty-healthcheck/archive/1.4.2.tar.gz", + dir = "lua-resty-healthcheck-1.4.2" +} +description = { + summary = "Healthchecks for OpenResty to check upstream service status", + detailed = [[ + lua-resty-healthcheck is a module that can check upstream service + availability by sending requests and validating responses at timed + intervals. + ]], + homepage = "https://github.com/Kong/lua-resty-healthcheck", + license = "Apache 2.0" +} +dependencies = { + "lua-resty-worker-events == 1.0.0", + "penlight >= 1.9.2", + "lua-resty-timer ~> 1", +} +build = { + type = "builtin", + modules = { + ["resty.healthcheck"] = "lib/resty/healthcheck.lua", + } +} diff --git a/rockspecs/lua-resty-healthcheck-1.5.0-1.rockspec b/rockspecs/lua-resty-healthcheck-1.5.0-1.rockspec new file mode 100644 index 00000000..e34c92f0 --- /dev/null +++ b/rockspecs/lua-resty-healthcheck-1.5.0-1.rockspec @@ -0,0 +1,27 @@ +package = "lua-resty-healthcheck" +version = "1.5.0-1" +source = { + url = "https://github.com/Kong/lua-resty-healthcheck/archive/1.5.0.tar.gz", + dir = "lua-resty-healthcheck-1.5.0" +} +description = { + summary = "Healthchecks for OpenResty to check upstream service status", + detailed = [[ + lua-resty-healthcheck is a module that can check upstream service + availability by sending requests and validating responses at timed + intervals. 
+ ]], + license = "Apache 2.0", + homepage = "https://github.com/Kong/lua-resty-healthcheck" +} +dependencies = { + "lua-resty-worker-events == 1.0.0", + "penlight >= 1.9.2", + "lua-resty-timer ~> 1", +} +build = { + type = "builtin", + modules = { + ["resty.healthcheck"] = "lib/resty/healthcheck.lua", + } +} diff --git a/rockspecs/lua-resty-healthcheck-1.5.1-1.rockspec b/rockspecs/lua-resty-healthcheck-1.5.1-1.rockspec new file mode 100644 index 00000000..8e2f129f --- /dev/null +++ b/rockspecs/lua-resty-healthcheck-1.5.1-1.rockspec @@ -0,0 +1,27 @@ +package = "lua-resty-healthcheck" +version = "1.5.1-1" +source = { + url = "https://github.com/Kong/lua-resty-healthcheck/archive/1.5.1.tar.gz", + dir = "lua-resty-healthcheck-1.5.1" +} +description = { + summary = "Healthchecks for OpenResty to check upstream service status", + detailed = [[ + lua-resty-healthcheck is a module that can check upstream service + availability by sending requests and validating responses at timed + intervals. + ]], + license = "Apache 2.0", + homepage = "https://github.com/Kong/lua-resty-healthcheck" +} +dependencies = { + "lua-resty-worker-events == 1.0.0", + "penlight >= 1.9.2", + "lua-resty-timer ~> 1", +} +build = { + type = "builtin", + modules = { + ["resty.healthcheck"] = "lib/resty/healthcheck.lua", + } +} diff --git a/rockspecs/lua-resty-healthcheck-1.6.0-1.rockspec b/rockspecs/lua-resty-healthcheck-1.6.0-1.rockspec new file mode 100644 index 00000000..de59e27a --- /dev/null +++ b/rockspecs/lua-resty-healthcheck-1.6.0-1.rockspec @@ -0,0 +1,26 @@ +package = "lua-resty-healthcheck" +version = "1.6.0-1" +source = { + url = "https://github.com/Kong/lua-resty-healthcheck/archive/1.6.0.tar.gz", + dir = "lua-resty-healthcheck-1.6.0" +} +description = { + summary = "Healthchecks for OpenResty to check upstream service status", + detailed = [[ + lua-resty-healthcheck is a module that can check upstream service + availability by sending requests and validating responses at timed + intervals. + ]], + license = "Apache 2.0", + homepage = "https://github.com/Kong/lua-resty-healthcheck" +} +dependencies = { + "penlight >= 1.9.2", + "lua-resty-timer ~> 1", +} +build = { + type = "builtin", + modules = { + ["resty.healthcheck"] = "lib/resty/healthcheck.lua", + } +} diff --git a/rockspecs/lua-resty-healthcheck-1.6.1-1.rockspec b/rockspecs/lua-resty-healthcheck-1.6.1-1.rockspec new file mode 100644 index 00000000..eeebdde0 --- /dev/null +++ b/rockspecs/lua-resty-healthcheck-1.6.1-1.rockspec @@ -0,0 +1,26 @@ +package = "lua-resty-healthcheck" +version = "1.6.1-1" +source = { + url = "https://github.com/Kong/lua-resty-healthcheck/archive/1.6.1.tar.gz", + dir = "lua-resty-healthcheck-1.6.1" +} +description = { + summary = "Healthchecks for OpenResty to check upstream service status", + detailed = [[ + lua-resty-healthcheck is a module that can check upstream service + availability by sending requests and validating responses at timed + intervals. 
+ ]], + license = "Apache 2.0", + homepage = "https://github.com/Kong/lua-resty-healthcheck" +} +dependencies = { + "penlight >= 1.9.2", + "lua-resty-timer ~> 1", +} +build = { + type = "builtin", + modules = { + ["resty.healthcheck"] = "lib/resty/healthcheck.lua", + } +} diff --git a/rockspecs/lua-resty-healthcheck-1.6.2-1.rockspec b/rockspecs/lua-resty-healthcheck-1.6.2-1.rockspec new file mode 100644 index 00000000..afadf2a5 --- /dev/null +++ b/rockspecs/lua-resty-healthcheck-1.6.2-1.rockspec @@ -0,0 +1,26 @@ +package = "lua-resty-healthcheck" +version = "1.6.2-1" +source = { + url = "https://github.com/Kong/lua-resty-healthcheck/archive/1.6.2.tar.gz", + dir = "lua-resty-healthcheck-1.6.2" +} +description = { + summary = "Healthchecks for OpenResty to check upstream service status", + detailed = [[ + lua-resty-healthcheck is a module that can check upstream service + availability by sending requests and validating responses at timed + intervals. + ]], + license = "Apache 2.0", + homepage = "https://github.com/Kong/lua-resty-healthcheck" +} +dependencies = { + "penlight >= 1.9.2", + "lua-resty-timer ~> 1", +} +build = { + type = "builtin", + modules = { + ["resty.healthcheck"] = "lib/resty/healthcheck.lua", + } +} diff --git a/rockspecs/lua-resty-healthcheck-1.6.3-1.rockspec b/rockspecs/lua-resty-healthcheck-1.6.3-1.rockspec new file mode 100644 index 00000000..a900062a --- /dev/null +++ b/rockspecs/lua-resty-healthcheck-1.6.3-1.rockspec @@ -0,0 +1,26 @@ +package = "lua-resty-healthcheck" +version = "1.6.3-1" +source = { + url = "git+https://github.com/Kong/lua-resty-healthcheck.git", + tag = "1.6.3" +} +description = { + summary = "Healthchecks for OpenResty to check upstream service status", + detailed = [[ + lua-resty-healthcheck is a module that can check upstream service + availability by sending requests and validating responses at timed + intervals. + ]], + license = "Apache 2.0", + homepage = "https://github.com/Kong/lua-resty-healthcheck" +} +dependencies = { + "penlight >= 1.9.2", + "lua-resty-timer ~> 1", +} +build = { + type = "builtin", + modules = { + ["resty.healthcheck"] = "lib/resty/healthcheck.lua", + } +} diff --git a/rockspecs/lua-resty-healthcheck-3.0.0-1.rockspec b/rockspecs/lua-resty-healthcheck-3.0.0-1.rockspec new file mode 100644 index 00000000..2eae7acf --- /dev/null +++ b/rockspecs/lua-resty-healthcheck-3.0.0-1.rockspec @@ -0,0 +1,26 @@ +package = "lua-resty-healthcheck" +version = "3.0.0-1" +source = { + url = "git+https://github.com/Kong/lua-resty-healthcheck.git", + tag = "3.0.0" +} +description = { + summary = "Healthchecks for OpenResty to check upstream service status", + detailed = [[ + lua-resty-healthcheck is a module that can check upstream service + availability by sending requests and validating responses at timed + intervals. 
+ ]], + license = "Apache 2.0", + homepage = "https://github.com/Kong/lua-resty-healthcheck" +} +dependencies = { + "penlight >= 1.9.2", + "lua-resty-timer ~> 1", +} +build = { + type = "builtin", + modules = { + ["resty.healthcheck"] = "lib/resty/healthcheck.lua", + } +} diff --git a/t/19-status-ver.t b/t/19-status-ver.t deleted file mode 100644 index d6e9da39..00000000 --- a/t/19-status-ver.t +++ /dev/null @@ -1,65 +0,0 @@ -use Test::Nginx::Socket::Lua 'no_plan'; -use Cwd qw(cwd); - -workers(1); -master_on(); - -my $pwd = cwd(); - -our $HttpConfig = qq{ - lua_package_path "$pwd/lib/?.lua;;"; - lua_shared_dict test_shm 8m; - lua_shared_dict my_worker_events 8m; - - init_worker_by_lua_block { - local we = require "resty.worker.events" - assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) - ngx.timer.at(0, function() - local healthcheck = require("resty.healthcheck") - local checker = healthcheck.new({ - name = "testing", - shm_name = "test_shm", - checks = { - active = { - healthy = { - interval = 0.1 - }, - unhealthy = { - interval = 0.1 - } - } - } - }) - ngx.sleep(0) - we.poll() - local ok, err = checker:add_target("127.0.0.1", 11111) - if not ok then - error(err) - end - ngx.sleep(0) - we.poll() - end) - } -}; - -run_tests(); - -__DATA__ - -=== TEST 1: add_target() adds an unhealthy target ---- http_config eval: $::HttpConfig ---- config - location = /t { - content_by_lua_block { - ngx.say(true) - ngx.sleep(0.3) -- wait twice the interval - } - } ---- request -GET /t ---- response_body -true ---- error_log -checking unhealthy targets: nothing to do -checking unhealthy targets: #1 -from 'true' to 'false', ver: 2 diff --git a/t/with_resty-events/00-new.t b/t/with_resty-events/00-new.t new file mode 100644 index 00000000..03bc1f97 --- /dev/null +++ b/t/with_resty-events/00-new.t @@ -0,0 +1,229 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * (blocks() * 3) - 2; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + +=== TEST 1: new() requires worker_events to be configured +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local ok, err = pcall(healthcheck.new, { + events_module = "resty.events", + }) + ngx.log(ngx.ERR, err) + } + } +--- request +GET /t +--- response_body + +--- error_log +please configure + +=== TEST 2: new() requires 'name' +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ unique_timeout = 5, broker_id = 0, listening = "unix:" .. ngx.config.prefix() .. 
"worker_events.sock" })) + local healthcheck = require("resty.healthcheck") + local ok, err = pcall(healthcheck.new, { + events_module = "resty.events", + shm_name = "test_shm", + }) + ngx.log(ngx.ERR, err) + } + } +--- request +GET /t +--- response_body + +--- error_log +required option 'name' is missing + +=== TEST 3: new() fails with invalid shm +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ unique_timeout = 5, broker_id = 0, listening = "unix:" .. ngx.config.prefix() .. "worker_events.sock" })) + local healthcheck = require("resty.healthcheck") + local ok, err = pcall(healthcheck.new, { + name = "testing", + shm_name = "invalid_shm", + events_module = "resty.events", + }) + ngx.log(ngx.ERR, err) + } + } +--- request +GET /t +--- response_body + +--- error_log +no shm found by name + +=== TEST 4: new() initializes with default config +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ unique_timeout = 5, broker_id = 0, listening = "unix:" .. ngx.config.prefix() .. "worker_events.sock" })) + local healthcheck = require("resty.healthcheck") + local ok, err = pcall(healthcheck.new, { + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + }) + } + } +--- request +GET /t +--- response_body + +--- error_log +Healthchecker started! + +=== TEST 5: new() only accepts http or tcp types +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ unique_timeout = 5, broker_id = 0, listening = "unix:" .. ngx.config.prefix() .. "worker_events.sock" })) + local healthcheck = require("resty.healthcheck") + local ok, err = pcall(healthcheck.new, { + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "http", + }) + ngx.say(ok) + local ok, err = pcall(healthcheck.new, { + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "tcp", + }) + ngx.say(ok) + local ok, err = pcall(healthcheck.new, { + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "get lost", + }) + ngx.say(ok) + } + } +--- request +GET /t +--- response_body +true +true +false + +=== TEST 6: new() deals with bad inputs +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ unique_timeout = 5, broker_id = 0, listening = "unix:" .. ngx.config.prefix() .. 
"worker_events.sock" })) + local healthcheck = require("resty.healthcheck") + + -- tests for failure + local tests = { + { active = { timeout = -1 }}, + { active = { timeout = 1e+42 }}, + { active = { concurrency = -1 }}, + { active = { concurrency = 1e42 }}, + { active = { healthy = { interval = -1 }}}, + { active = { healthy = { interval = 1e42 }}}, + { active = { healthy = { successes = -1 }}}, + { active = { healthy = { successes = 1e42 }}}, + { active = { unhealthy = { interval = -1 }}}, + { active = { unhealthy = { interval = 1e42 }}}, + { active = { unhealthy = { tcp_failures = -1 }}}, + { active = { unhealthy = { tcp_failures = 1e42 }}}, + { active = { unhealthy = { timeouts = -1 }}}, + { active = { unhealthy = { timeouts = 1e42 }}}, + { active = { unhealthy = { http_failures = -1 }}}, + { active = { unhealthy = { http_failures = 1e42 }}}, + { passive = { healthy = { successes = -1 }}}, + { passive = { healthy = { successes = 1e42 }}}, + { passive = { unhealthy = { tcp_failures = -1 }}}, + { passive = { unhealthy = { tcp_failures = 1e42 }}}, + { passive = { unhealthy = { timeouts = -1 }}}, + { passive = { unhealthy = { timeouts = 1e42 }}}, + { passive = { unhealthy = { http_failures = -1 }}}, + { passive = { unhealthy = { http_failures = 1e42 }}}, + } + for _, test in ipairs(tests) do + local ok, err = pcall(healthcheck.new, { + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "http", + checks = test, + }) + ngx.say(ok) + end + } + } +--- request +GET /t +--- response_body +false +false +false +false +false +false +false +false +false +false +false +false +false +false +false +false +false +false +false +false +false +false +false +false diff --git a/t/with_resty-events/01-start-stop.t b/t/with_resty-events/01-start-stop.t new file mode 100644 index 00000000..152355f7 --- /dev/null +++ b/t/with_resty-events/01-start-stop.t @@ -0,0 +1,182 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * (blocks() * 3) + 1; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + +=== TEST 1: start() can start after stop() +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + }) + local ok, err = checker:stop() + ngx.sleep(0.2) -- wait twice the interval + local ok, err = checker:start() + ngx.say(ok) + } + } +--- request +GET /t +--- response_body +true +--- no_error_log +[error] + + +=== TEST 3: start() is a no-op if active intervals are 0 +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = 
"testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + healthy = { + interval = 0 + }, + unhealthy = { + interval = 0 + } + } + } + }) + local ok, err = checker:start() + ngx.say(ok) + local ok, err = checker:start() + ngx.say(ok) + local ok, err = checker:start() + ngx.say(ok) + } + } +--- request +GET /t +--- response_body +true +true +true +--- no_error_log +[error] + +=== TEST 4: stop() stops health checks +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + }) + local ok, err = checker:stop() + ngx.say(ok) + } + } +--- request +GET /t +--- response_body +true +--- no_error_log +[error] +checking + +=== TEST 5: start() restarts health checks +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + }) + local ok, err = checker:stop() + ngx.say(ok) + ngx.sleep(1) -- active healthchecks might take up to 1s to start + local ok, err = checker:start() + ngx.say(ok) + ngx.sleep(0.2) -- wait twice the interval + } + } +--- request +GET /t +--- response_body +true +true +--- error_log +checking diff --git a/t/with_resty-events/02-add_target.t b/t/with_resty-events/02-add_target.t new file mode 100644 index 00000000..0815f613 --- /dev/null +++ b/t/with_resty-events/02-add_target.t @@ -0,0 +1,183 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * (blocks() * 4) + 3; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + +=== TEST 1: add_target() adds an unhealthy target +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start + local ok, err = checker:add_target("127.0.0.1", 11111, nil, false) + ngx.say(ok) + ngx.sleep(0.5) + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking healthy targets: nothing to do +checking unhealthy targets: #1 + +--- no_error_log +checking healthy targets: #1 + + + +=== TEST 2: add_target() adds a healthy target +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2112; + 
location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start + local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) + ngx.say(ok) + ngx.sleep(0.2) -- wait twice the interval + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking unhealthy targets: nothing to do +checking healthy targets: #1 + +--- no_error_log +checking unhealthy targets: #1 + + + +=== TEST 3: calling add_target() repeatedly does not change status +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2113; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, + successes = 1, + }, + unhealthy = { + interval = 0.1, + tcp_failures = 1, + http_failures = 1, + } + } + } + }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start + local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) + local ok, err = checker:add_target("127.0.0.1", 2113, nil, false) + ngx.say(ok) + ngx.sleep(0.2) -- wait twice the interval + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking unhealthy targets: nothing to do +checking healthy targets: #1 + +--- no_error_log +checking unhealthy targets: #1 diff --git a/t/with_resty-events/03-get_target_status.t b/t/with_resty-events/03-get_target_status.t new file mode 100644 index 00000000..f85e7d3a --- /dev/null +++ b/t/with_resty-events/03-get_target_status.t @@ -0,0 +1,106 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * (blocks() * 4); + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + +=== TEST 1: get_target_status() reports proper status +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2115; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 1, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 1, + http_failures = 1, + } + }, + passive = { + healthy = { + successes = 1, + }, + unhealthy = 
{ + tcp_failures = 1, + http_failures = 1, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2115, nil, true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2115)) -- true + + checker:report_tcp_failure("127.0.0.1", 2115) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2115)) -- false + + checker:report_success("127.0.0.1", 2115) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2115)) -- true + } + } +--- request +GET /t +--- response_body +true +false +true +--- no_error_log +checking healthy targets: #1 +checking unhealthy targets: #1 diff --git a/t/with_resty-events/04-report_success.t b/t/with_resty-events/04-report_success.t new file mode 100644 index 00000000..06952980 --- /dev/null +++ b/t/with_resty-events/04-report_success.t @@ -0,0 +1,316 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * 28; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + + + +=== TEST 1: report_success() recovers HTTP active + passive +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2116; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 3, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2116, nil, false) + local ok, err = checker:add_target("127.0.0.1", 2118, nil, false) + ngx.sleep(0.01) + checker:report_success("127.0.0.1", 2116, nil, "active") + checker:report_success("127.0.0.1", 2118, nil, "passive") + checker:report_success("127.0.0.1", 2116, nil, "active") + checker:report_success("127.0.0.1", 2118, nil, "passive") + checker:report_success("127.0.0.1", 2116, nil, "active") + checker:report_success("127.0.0.1", 2118, nil, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2116)) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2118)) -- true + } + } +--- request +GET /t +--- response_body +true +true +--- error_log +healthy SUCCESS increment (1/3) for '(127.0.0.1:2116)' +healthy SUCCESS increment (2/3) for '(127.0.0.1:2116)' +healthy SUCCESS increment (3/3) for '(127.0.0.1:2116)' +event: target status '(127.0.0.1:2116)' from 'false' to 'true' +healthy SUCCESS increment (1/3) for '(127.0.0.1:2116)' +healthy SUCCESS increment (2/3) for '(127.0.0.1:2116)' +healthy SUCCESS increment (3/3) for '(127.0.0.1:2116)' +event: target status '(127.0.0.1:2118)' 
from 'false' to 'true' + + +=== TEST 2: report_success() recovers TCP active + passive +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2116; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "tcp", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 3, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2116, nil, false) + local ok, err = checker:add_target("127.0.0.1", 2118, nil, false) + ngx.sleep(0.01) + checker:report_success("127.0.0.1", 2116, nil, "active") + checker:report_success("127.0.0.1", 2118, nil, "passive") + checker:report_success("127.0.0.1", 2116, nil, "active") + checker:report_success("127.0.0.1", 2118, nil, "passive") + checker:report_success("127.0.0.1", 2116, nil, "active") + checker:report_success("127.0.0.1", 2118, nil, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2116)) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2118)) -- true + } + } +--- request +GET /t +--- response_body +true +true +--- error_log +healthy SUCCESS increment (1/3) for '(127.0.0.1:2116)' +healthy SUCCESS increment (2/3) for '(127.0.0.1:2116)' +healthy SUCCESS increment (3/3) for '(127.0.0.1:2116)' +event: target status '(127.0.0.1:2116)' from 'false' to 'true' +healthy SUCCESS increment (1/3) for '(127.0.0.1:2116)' +healthy SUCCESS increment (2/3) for '(127.0.0.1:2116)' +healthy SUCCESS increment (3/3) for '(127.0.0.1:2116)' +event: target status '(127.0.0.1:2118)' from 'false' to 'true' + +=== TEST 3: report_success() is a nop when active.healthy.successes == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2116; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "tcp", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 0, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 3, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2116, nil, false) + ngx.sleep(0.01) + checker:report_success("127.0.0.1", 2116, nil, "active") + checker:report_success("127.0.0.1", 2116, nil, "active") + checker:report_success("127.0.0.1", 2116, nil, "active") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2116)) -- false + } + } +--- request +GET /t +--- response_body +false +--- no_error_log +healthy SUCCESS increment +event: target status '127.0.0.1 (127.0.0.1:2116)' from 'false' to 'true' + + + +=== TEST 4: report_success() is a nop when passive.healthy.successes == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2118; + location = /status { + return 200; + } + } +} +--- config + 
location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "tcp", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0, -- we don't want active checks + successes = 0, + }, + unhealthy = { + interval = 0, -- we don't want active checks + tcp_failures = 3, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 0, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2118, nil, false) + ngx.sleep(0.01) + checker:report_success("127.0.0.1", 2118, nil, "passive") + checker:report_success("127.0.0.1", 2118, nil, "passive") + checker:report_success("127.0.0.1", 2118, nil, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2118, nil)) -- false + } + } +--- request +GET /t +--- response_body +false +--- no_error_log +healthy SUCCESS increment +event: target status '(127.0.0.1:2118)' from 'false' to 'true' diff --git a/t/with_resty-events/05-report_failure.t b/t/with_resty-events/05-report_failure.t new file mode 100644 index 00000000..229c20a0 --- /dev/null +++ b/t/with_resty-events/05-report_failure.t @@ -0,0 +1,261 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * 26; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + + + +=== TEST 1: report_failure() fails HTTP active + passive +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2117; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 2, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2117, nil, true) + local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) + ngx.sleep(0.01) + checker:report_failure("127.0.0.1", 2117, nil, "active") + checker:report_failure("127.0.0.1", 2113, nil, "passive") + checker:report_failure("127.0.0.1", 2117, nil, "active") + checker:report_failure("127.0.0.1", 2113, nil, "passive") + checker:report_failure("127.0.0.1", 2117, nil, "active") + checker:report_failure("127.0.0.1", 2113, nil, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2117, nil)) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2113, nil)) -- false + } + } +--- 
request +GET /t +--- response_body +false +false +--- error_log +unhealthy HTTP increment (1/3) for '(127.0.0.1:2117)' +unhealthy HTTP increment (2/3) for '(127.0.0.1:2117)' +unhealthy HTTP increment (3/3) for '(127.0.0.1:2117)' +event: target status '(127.0.0.1:2117)' from 'true' to 'false' +unhealthy HTTP increment (1/3) for '(127.0.0.1:2113)' +unhealthy HTTP increment (2/3) for '(127.0.0.1:2113)' +unhealthy HTTP increment (3/3) for '(127.0.0.1:2113)' +event: target status '(127.0.0.1:2113)' from 'true' to 'false' + + +=== TEST 2: report_failure() fails TCP active + passive +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2117; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "tcp", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 2, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2117, nil, true) + local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) + ngx.sleep(0.01) + checker:report_failure("127.0.0.1", 2117, nil, "active") + checker:report_failure("127.0.0.1", 2113, nil, "passive") + checker:report_failure("127.0.0.1", 2117, nil, "active") + checker:report_failure("127.0.0.1", 2113, nil, "passive") + checker:report_failure("127.0.0.1", 2117, nil, "active") + checker:report_failure("127.0.0.1", 2113, nil, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2117, nil)) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2113, nil)) -- false + } + } +--- request +GET /t +--- response_body +false +false +--- error_log +unhealthy TCP increment (1/2) for '(127.0.0.1:2117)' +unhealthy TCP increment (2/2) for '(127.0.0.1:2117)' +event: target status '(127.0.0.1:2117)' from 'true' to 'false' +unhealthy TCP increment (1/2) for '(127.0.0.1:2113)' +unhealthy TCP increment (2/2) for '(127.0.0.1:2113)' +event: target status '(127.0.0.1:2113)' from 'true' to 'false' + + +=== TEST 3: report_failure() is a nop when failure counters == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2117; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "tcp", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 0, + http_failures = 0, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 0, + http_failures = 0, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2117, nil, true) + local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) + ngx.sleep(0.01) + checker:report_failure("127.0.0.1", 2117, nil, "active") + checker:report_failure("127.0.0.1", 2113, nil, "passive") + checker:report_failure("127.0.0.1", 2117, nil, "active") + 
checker:report_failure("127.0.0.1", 2113, nil, "passive") + checker:report_failure("127.0.0.1", 2117, nil, "active") + checker:report_failure("127.0.0.1", 2113, nil, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2117, nil)) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2113, nil)) -- true + } + } +--- request +GET /t +--- response_body +true +true +--- no_error_log +unhealthy TCP increment (1/2) for '(127.0.0.1:2117)' +unhealthy TCP increment (2/2) for '(127.0.0.1:2117)' +event: target status '(127.0.0.1:2117)' from 'true' to 'false' +unhealthy TCP increment (1/2) for '(127.0.0.1:2113)' +unhealthy TCP increment (2/2) for '(127.0.0.1:2113)' +event: target status '(127.0.0.1:2113)' from 'true' to 'false' diff --git a/t/with_resty-events/06-report_http_status.t b/t/with_resty-events/06-report_http_status.t new file mode 100644 index 00000000..6686682b --- /dev/null +++ b/t/with_resty-events/06-report_http_status.t @@ -0,0 +1,499 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * 41; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + + + +=== TEST 1: report_http_status() failures active + passive +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2119; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 2, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2119, nil, true) + local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) + ngx.sleep(0.01) + checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") + checker:report_http_status("127.0.0.1", 2113, nil, 500, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") + checker:report_http_status("127.0.0.1", 2113, nil, 500, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") + checker:report_http_status("127.0.0.1", 2113, nil, 500, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2113)) -- false + } + } +--- request +GET /t +--- response_body +false +false +--- error_log +unhealthy HTTP increment (1/3) for '(127.0.0.1:2119)' +unhealthy HTTP increment (2/3) for '(127.0.0.1:2119)' +unhealthy HTTP increment (3/3) for '(127.0.0.1:2119)' +event: target status '(127.0.0.1:2119)' from 'true' to 'false' 
+unhealthy HTTP increment (1/3) for '(127.0.0.1:2113)' +unhealthy HTTP increment (2/3) for '(127.0.0.1:2113)' +unhealthy HTTP increment (3/3) for '(127.0.0.1:2113)' +event: target status '(127.0.0.1:2113)' from 'true' to 'false' + + + +=== TEST 2: report_http_status() successes active + passive +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2119; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 4, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 2, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 4, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2119, nil, false) + local ok, err = checker:add_target("127.0.0.1", 2113, nil, false) + ngx.sleep(0.01) + checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") + checker:report_http_status("127.0.0.1", 2113, nil, 200, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") + checker:report_http_status("127.0.0.1", 2113, nil, 200, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") + checker:report_http_status("127.0.0.1", 2113, nil, 200, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") + checker:report_http_status("127.0.0.1", 2113, nil, 200, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2113)) -- true + } + } +--- request +GET /t +--- response_body +true +true +--- error_log +healthy SUCCESS increment (1/4) for '(127.0.0.1:2119)' +healthy SUCCESS increment (2/4) for '(127.0.0.1:2119)' +healthy SUCCESS increment (3/4) for '(127.0.0.1:2119)' +healthy SUCCESS increment (4/4) for '(127.0.0.1:2119)' +event: target status '(127.0.0.1:2119)' from 'false' to 'true' +healthy SUCCESS increment (1/4) for '(127.0.0.1:2113)' +healthy SUCCESS increment (2/4) for '(127.0.0.1:2113)' +healthy SUCCESS increment (3/4) for '(127.0.0.1:2113)' +healthy SUCCESS increment (4/4) for '(127.0.0.1:2113)' +event: target status '(127.0.0.1:2113)' from 'false' to 'true' + + +=== TEST 3: report_http_status() with success is a nop when passive.healthy.successes == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2119; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 4, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 2, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 0, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2119, nil, false) + ngx.sleep(0.01) + checker:report_http_status("127.0.0.1", 2119, nil, 200, "passive") + checker:report_http_status("127.0.0.1", 
2119, nil, 200, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 200, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 200, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2119, nil)) -- false + } + } +--- request +GET /t +--- response_body +false +--- no_error_log +healthy SUCCESS increment +event: target status '127.0.0.1 (127.0.0.1:2119)' from 'false' to 'true' + + +=== TEST 4: report_http_status() with success is a nop when active.healthy.successes == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2119; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 0, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 2, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 4, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2119, nil, false) + ngx.sleep(0.01) + checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") + checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") + checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") + checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2119, nil)) -- false + } + } +--- request +GET /t +--- response_body +false +--- no_error_log +healthy SUCCESS increment +event: target status '127.0.0.1 (127.0.0.1:2119)' from 'false' to 'true' + + +=== TEST 5: report_http_status() with failure is a nop when passive.unhealthy.http_failures == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2119; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 4, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 2, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 4, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 0, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2119, nil, true) + ngx.sleep(0.01) + checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- true + } + } +--- request +GET /t +--- response_body +true +--- no_error_log +unhealthy HTTP increment +event: target status '127.0.0.1 (127.0.0.1:2119)' from 'true' to 'false' + + +=== TEST 6: report_http_status() with failure is a nop when active.unhealthy.http_failures == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2119; + location = /status { + return 200; + } + } +} +--- config + 
location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 4, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 2, + http_failures = 0, + } + }, + passive = { + healthy = { + successes = 4, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2119, nil, true) + ngx.sleep(0.01) + checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2119, nil)) -- true + } + } +--- request +GET /t +--- response_body +true +--- no_error_log +unhealthy HTTP increment +event: target status '(127.0.0.1:2119)' from 'true' to 'false' + + +=== TEST 7: report_http_status() must work in log phase +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + ngx.say("OK") + } + log_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "http", + checks = { + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 3, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2119, nil, true) + ngx.sleep(0.01) + checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") + checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") + ngx.sleep(0.01) + checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") + } + } +--- request +GET /t +--- response_body +OK +--- no_error_log +failed to acquire lock: API disabled in the context of log_by_lua diff --git a/t/with_resty-events/07-report_tcp_failure.t b/t/with_resty-events/07-report_tcp_failure.t new file mode 100644 index 00000000..f6dd4898 --- /dev/null +++ b/t/with_resty-events/07-report_tcp_failure.t @@ -0,0 +1,242 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * 18; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + + + +=== TEST 1: report_tcp_failure() active + passive +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2120; + location = /status { + return 200; + } + } +} +--- config + 
location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 3, + http_failures = 5, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 5, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2120, nil, true) + local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) + ngx.sleep(0.01) + checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") + checker:report_tcp_failure("127.0.0.1", 2113, nil, nil, "passive") + checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") + checker:report_tcp_failure("127.0.0.1", 2113, nil, nil, "passive") + checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") + checker:report_tcp_failure("127.0.0.1", 2113, nil, nil, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2120)) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2113)) -- false + } + } +--- request +GET /t +--- response_body +false +false +--- error_log +unhealthy TCP increment (1/3) for '(127.0.0.1:2120)' +unhealthy TCP increment (2/3) for '(127.0.0.1:2120)' +unhealthy TCP increment (3/3) for '(127.0.0.1:2120)' +event: target status '(127.0.0.1:2120)' from 'true' to 'false' +unhealthy TCP increment (1/3) for '(127.0.0.1:2113)' +unhealthy TCP increment (2/3) for '(127.0.0.1:2113)' +unhealthy TCP increment (3/3) for '(127.0.0.1:2113)' +event: target status '(127.0.0.1:2113)' from 'true' to 'false' + + +=== TEST 2: report_tcp_failure() for active is a nop when active.unhealthy.tcp_failures == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2120; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 0, + http_failures = 5, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 5, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2120, nil, true) + ngx.sleep(0.01) + checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") + checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") + checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2120)) -- true + } + } +--- request +GET /t +--- response_body +true +--- no_error_log +unhealthy TCP increment +event: target status '(127.0.0.1:2120)' from 'true' to 'false' + + + +=== TEST 3: report_tcp_failure() for passive is a nop when passive.unhealthy.tcp_failures == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2120; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = 
healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 3, + http_failures = 5, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 0, + http_failures = 5, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2120, nil, true) + ngx.sleep(0.01) + checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "passive") + checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "passive") + checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2120)) -- true + } + } +--- request +GET /t +--- response_body +true +--- no_error_log +unhealthy TCP increment +event: target status '(127.0.0.1:2120)' from 'true' to 'false' diff --git a/t/with_resty-events/08-report_timeout.t b/t/with_resty-events/08-report_timeout.t new file mode 100644 index 00000000..b418baa5 --- /dev/null +++ b/t/with_resty-events/08-report_timeout.t @@ -0,0 +1,244 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * 16; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + + + +=== TEST 1: report_timeout() active + passive +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2122; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 3, + http_failures = 5, + timeouts = 2, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 5, + timeouts = 2, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2122, nil, true) + local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) + ngx.sleep(0.01) + checker:report_timeout("127.0.0.1", 2122, nil, "active") + checker:report_timeout("127.0.0.1", 2113, nil, "passive") + checker:report_timeout("127.0.0.1", 2122, nil, "active") + checker:report_timeout("127.0.0.1", 2113, nil, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2122)) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2113)) -- false + } + } +--- request +GET /t +--- response_body +false +false +--- error_log +unhealthy TIMEOUT increment (1/2) for '(127.0.0.1:2122)' +unhealthy TIMEOUT increment (2/2) for '(127.0.0.1:2122)' +event: target 
status '(127.0.0.1:2122)' from 'true' to 'false' +unhealthy TIMEOUT increment (1/2) for '(127.0.0.1:2113)' +unhealthy TIMEOUT increment (2/2) for '(127.0.0.1:2113)' +event: target status '(127.0.0.1:2113)' from 'true' to 'false' + + +=== TEST 2: report_timeout() for active is a nop when active.unhealthy.timeouts == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2122; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 3, + http_failures = 5, + timeouts = 0, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 5, + timeouts = 2, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2122, nil, true) + ngx.sleep(0.01) + checker:report_timeout("127.0.0.1", 2122, nil, "active") + checker:report_timeout("127.0.0.1", 2122, nil, "active") + checker:report_timeout("127.0.0.1", 2122, nil, "active") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2122)) -- true + } + } +--- request +GET /t +--- response_body +true +--- no_error_log +unhealthy TCP increment +event: target status '(127.0.0.1:2122)' from 'true' to 'false' + + + +=== TEST 3: report_timeout() for passive is a nop when passive.unhealthy.timeouts == 0 +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2122; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 3, + http_failures = 5, + timeouts = 2, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 5, + timeouts = 0, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2122, nil, true) + ngx.sleep(0.01) + checker:report_timeout("127.0.0.1", 2122, nil, "passive") + checker:report_timeout("127.0.0.1", 2122, nil, "passive") + checker:report_timeout("127.0.0.1", 2122, nil, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2122)) -- true + } + } +--- request +GET /t +--- response_body +true +--- no_error_log +unhealthy TCP increment +event: target status '(127.0.0.1:2122)' from 'true' to 'false' diff --git a/t/with_resty-events/09-active_probes.t b/t/with_resty-events/09-active_probes.t new file mode 100644 index 00000000..1a708205 --- /dev/null +++ b/t/with_resty-events/09-active_probes.t @@ -0,0 +1,536 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * 59; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + 
broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + + + +=== TEST 1: active probes, http node failing +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2114; + location = /status { + return 500; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, + successes = 3, + }, + unhealthy = { + interval = 0.1, + http_failures = 3, + } + }, + } + }) + ngx.sleep(2) -- active healthchecks might take some time to start + local ok, err = checker:add_target("127.0.0.1", 2114, nil, true) + ngx.sleep(0.6) -- wait for 6x the check interval + ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- false + } + } +--- request +GET /t +--- response_body +false +--- error_log +checking unhealthy targets: nothing to do +unhealthy HTTP increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'true' to 'false' +checking healthy targets: nothing to do + + + +=== TEST 2: active probes, http node recovering +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2114; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, + successes = 3, + }, + unhealthy = { + interval = 0.1, + http_failures = 3, + } + }, + } + }) + local ok, err = checker:add_target("127.0.0.1", 2114, nil, false) + ngx.sleep(1) -- active healthchecks might take up to 1s to start + ngx.sleep(0.6) -- wait for 6x the check interval + ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- true + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking healthy targets: nothing to do +healthy SUCCESS increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +healthy SUCCESS increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +healthy SUCCESS increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'false' to 'true' +checking unhealthy targets: nothing to do + +=== TEST 3: active probes, custom http status (regression test for pre-filled defaults) +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2114; + location = /status { + return 500; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, + successes = 3, + }, + unhealthy = { + interval = 0.1, + http_failures = 3, + http_statuses = { 429 }, + } + }, + } + }) + local ok, err = 
checker:add_target("127.0.0.1", 2114, nil, true) + ngx.sleep(0.6) -- wait for 6x the check interval + ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- true + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking unhealthy targets: nothing to do +--- no_error_log +checking healthy targets: nothing to do +unhealthy HTTP increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'true' to 'false' + + +=== TEST 4: active probes, custom http status, node failing +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2114; + location = /status { + return 401; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, + successes = 3, + }, + unhealthy = { + interval = 0.1, + http_failures = 3, + http_statuses = { 401 }, + } + }, + } + }) + ngx.sleep(2) -- active healthchecks might take some time to start + local ok, err = checker:add_target("127.0.0.1", 2114, nil, true) + ngx.sleep(0.6) -- wait for 6x the check interval + ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- false + } + } +--- request +GET /t +--- response_body +false +--- error_log +checking unhealthy targets: nothing to do +unhealthy HTTP increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'true' to 'false' +checking healthy targets: nothing to do + + + +=== TEST 5: active probes, host is correctly set +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2114; + location = /status { + content_by_lua_block { + if ngx.req.get_headers()["Host"] == "example.com" then + ngx.exit(200) + else + ngx.exit(500) + end + } + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, + successes = 1, + }, + unhealthy = { + interval = 0.1, + http_failures = 1, + } + }, + } + }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start + local ok, err = checker:add_target("127.0.0.1", 2114, "example.com", false) + ngx.sleep(0.2) -- wait for 2x the check interval + ngx.say(checker:get_target_status("127.0.0.1", 2114, "example.com")) -- true + } + } +--- request +GET /t +--- response_body +true +--- error_log +event: target status 'example.com(127.0.0.1:2114)' from 'false' to 'true' +checking unhealthy targets: #1 + + +=== TEST 6: active probes, tcp node failing +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "tcp", + checks = { + active = { + healthy = { + interval = 0.1, + successes = 3, + }, + unhealthy = { + interval = 0.1, + tcp_failures = 3, + } + }, + } + }) + ngx.sleep(1) -- 
active healthchecks might take up to 1s to start + -- Note: no http server configured, so port 2114 remains unanswered + local ok, err = checker:add_target("127.0.0.1", 2114, nil, true) + ngx.sleep(0.6) -- wait for 6x the check interval + ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- false + } + } +--- request +GET /t +--- response_body +false +--- error_log +checking unhealthy targets: nothing to do +unhealthy TCP increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy TCP increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy TCP increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'true' to 'false' +checking healthy targets: nothing to do + + + +=== TEST 7: active probes, tcp node recovering +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2114; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "tcp", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, + successes = 3, + }, + unhealthy = { + interval = 0.1, + tcp_failures = 3, + } + }, + } + }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start + local ok, err = checker:add_target("127.0.0.1", 2114, nil, false) + ngx.sleep(0.6) -- wait for 6x the check interval + ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- true + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking healthy targets: nothing to do +healthy SUCCESS increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +healthy SUCCESS increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +healthy SUCCESS increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'false' to 'true' +checking unhealthy targets: nothing to do + + + +=== TEST 8: active probes, custom Host header is correctly set +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2114; + location = /status { + content_by_lua_block { + if ngx.req.get_headers()["Host"] == "custom-host.test" then + ngx.exit(200) + else + ngx.exit(500) + end + } + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, + successes = 1, + }, + unhealthy = { + interval = 0.1, + http_failures = 1, + } + }, + } + }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start + local ok, err = checker:add_target("127.0.0.1", 2114, "example.com", false, "custom-host.test") + ngx.sleep(0.3) -- wait for 3x the check interval + ngx.say(checker:get_target_status("127.0.0.1", 2114, "example.com")) -- true + } + } +--- request +GET /t +--- response_body +true +--- error_log +event: target status 'example.com(127.0.0.1:2114)' from 'false' to 'true' +checking unhealthy targets: nothing to do + + + +=== TEST 9: active probes, interval is respected +--- http_config eval +qq{ + $::HttpConfig + + # ignore lua tcp socket read timed out + lua_socket_log_errors off; + + server { + listen 2114; + location = /status { + access_by_lua_block { + ngx.sleep(0.3) + ngx.exit(200) + } + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = 
require("resty.healthcheck") + local checker = healthcheck.new({ + test = true, + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 1, + successes = 1, + }, + unhealthy = { + interval = 1, + http_failures = 1, + } + }, + } + }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start + local ok, err = checker:add_target("127.0.0.1", 2114, nil, true) + ngx.sleep(1) -- wait for the check interval + -- checker callback should not be called more than 5 times + if checker.checker_callback_count < 5 then + ngx.say("OK") + else + ngx.say("BAD") + end + } + } +--- request +GET /t +--- response_body +OK +--- no_error_log +[error] diff --git a/t/with_resty-events/10-garbagecollect.t_disabled b/t/with_resty-events/10-garbagecollect.t_disabled new file mode 100644 index 00000000..885e5a48 --- /dev/null +++ b/t/with_resty-events/10-garbagecollect.t_disabled @@ -0,0 +1,105 @@ +# This test is disabled +# +# We need to understand if it is a false-positive or lua-resty-healthcheck is +# actually leaking the event module memory when deleting a checker instance. +# +# Please rename this test if a solution is found or remove it if it is a +# false-positive. + +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * (blocks() * 3); + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + + + +=== TEST 1: garbage collect the checker object +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2121; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local dump = function(...) 
ngx.log(ngx.DEBUG,"\027[31m\n", require("pl.pretty").write({...}),"\027[0m") end + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, + successes = 3, + }, + unhealthy = { + interval = 0.1, + http_failures = 3, + } + }, + } + }) + assert(checker:add_target("127.0.0.1", 2121, nil, true)) + local weak_table = setmetatable({ checker },{ + __mode = "v", + }) + checker = nil -- now only anchored in weak table above + collectgarbage() + collectgarbage() + collectgarbage() + collectgarbage() + ngx.sleep(0.5) -- leave room for timers to run (they shouldn't, but we want to be sure) + ngx.say(#weak_table) -- after GC, should be 0 length + } + } +--- request +GET /t +--- response_body +0 +--- no_error_log +checking healthy targets: #1 diff --git a/t/with_resty-events/11-clear.t b/t/with_resty-events/11-clear.t new file mode 100644 index 00000000..6fce9327 --- /dev/null +++ b/t/with_resty-events/11-clear.t @@ -0,0 +1,298 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * 27; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + +=== TEST 1: clear() clears the list, new checkers never see it +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local config = { + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + } + local checker1 = healthcheck.new(config) + for i = 1, 10 do + checker1:add_target("127.0.0.1", 10000 + i, nil, false) + end + ngx.sleep(0.2) -- wait twice the interval + checker1:clear() + + local checker2 = healthcheck.new(config) + + ngx.say(true) + } + } +--- request +GET /t +--- response_body +true + +--- error_log +initial target list (0 targets) + +--- no_error_log +initial target list (1 targets) +initial target list (2 targets) +initial target list (3 targets) +initial target list (4 targets) +initial target list (5 targets) +initial target list (6 targets) +initial target list (7 targets) +initial target list (8 targets) +initial target list (9 targets) +initial target list (10 targets) +initial target list (11 targets) + +=== TEST 2: clear() clears the list, other checkers get notified and clear too +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local config = { + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + } + local checker1 = healthcheck.new(config) + local checker2 = 
healthcheck.new(config) + for i = 1, 10 do + checker1:add_target("127.0.0.1", 20000 + i, nil, false) + end + checker2:clear() + ngx.sleep(1) + ngx.say(true) + } + } +--- request +GET /t +--- response_body +true + +--- error_log +checking unhealthy targets: nothing to do + +--- no_error_log +checking unhealthy targets: #10 + +=== TEST 3: clear() resets counters +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 21120; + location = /status { + return 503; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local config = { + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.2, + }, + unhealthy = { + interval = 0.2, + http_failures = 3, + } + } + } + } + local checker1 = healthcheck.new(config) + checker1:add_target("127.0.0.1", 21120, nil, true) + ngx.sleep(0.5) -- wait 2.5x the interval + checker1:clear() + checker1:add_target("127.0.0.1", 21120, nil, true) + ngx.sleep(0.3) -- wait 1.5x the interval + ngx.say(true) + } + } +--- request +GET /t +--- response_body +true + +--- error_log +unhealthy HTTP increment (1/3) for '127.0.0.1(127.0.0.1:21120)' +unhealthy HTTP increment (2/3) for '127.0.0.1(127.0.0.1:21120)' +--- no_error_log +unhealthy HTTP increment (3/3) for '(127.0.0.1:21120)' + + +=== TEST 4: delayed_clear() clears the list, after interval new checkers don't see it +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local config = { + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + } + local checker1 = healthcheck.new(config) + for i = 1, 10 do + checker1:add_target("127.0.0.1", 10000 + i, nil, false) + end + ngx.sleep(0.2) -- wait twice the interval + ngx.say(checker1:get_target_status("127.0.0.1", 10001)) + checker1:delayed_clear(0.2) + + local checker2 = healthcheck.new(config) + ngx.say(checker2:get_target_status("127.0.0.1", 10001)) + ngx.sleep(2.6) -- wait while the targets are cleared + local status, err = checker2:get_target_status("127.0.0.1", 10001) + if status ~= nil then + ngx.say(status) + else + ngx.say(err) + end + } + } +--- request +GET /t +--- response_body +false +false +target not found + +=== TEST 5: delayed_clear() would clear tgt list, but adding again keeps the previous status +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local config = { + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + } + local checker1 = healthcheck.new(config) + checker1:add_target("127.0.0.1", 10001, nil, false) + checker1:add_target("127.0.0.1", 10002, nil, false) + checker1:add_target("127.0.0.1", 10003, nil, false) + ngx.sleep(0.2) -- wait twice the interval + ngx.say(checker1:get_target_status("127.0.0.1", 10002)) + checker1:delayed_clear(0.2) + + local checker2 = healthcheck.new(config) + checker2:add_target("127.0.0.1", 10002, nil, true) + ngx.say(checker2:get_target_status("127.0.0.1", 10002)) + ngx.sleep(2.6) -- wait while the targets would be cleared + local status, err = checker2:get_target_status("127.0.0.1", 10001) + if status ~= 
nil then + ngx.say(status) + else + ngx.say(err) + end + status, err = checker2:get_target_status("127.0.0.1", 10002) + if status ~= nil then + ngx.say(status) + else + ngx.say(err) + end + status, err = checker2:get_target_status("127.0.0.1", 10003) + if status ~= nil then + ngx.say(status) + else + ngx.say(err) + end + } + } +--- request +GET /t +--- response_body +false +false +target not found +false +target not found diff --git a/t/with_resty-events/12-set_target_status.t b/t/with_resty-events/12-set_target_status.t new file mode 100644 index 00000000..9576f7d0 --- /dev/null +++ b/t/with_resty-events/12-set_target_status.t @@ -0,0 +1,207 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * blocks() * 2; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + +=== TEST 1: set_target_status() updates a status +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + }) + local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true + checker:set_target_status("127.0.0.1", 2112, nil, false) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- false + checker:set_target_status("127.0.0.1", 2112, nil, true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true + } + } +--- request +GET /t +--- response_body +true +false +true + + +=== TEST 2: set_target_status() restores node after passive check disables it +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + passive = { + unhealthy = { + tcp_failures = 2, + http_failures = 2, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true + checker:report_http_status("127.0.0.1", 2112, nil, 500) + checker:report_http_status("127.0.0.1", 2112, nil, 500) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- false + checker:set_target_status("127.0.0.1", 2112, nil, true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true + } + } +--- request +GET /t +--- response_body +true +false +true + + +=== TEST 3: set_target_status() resets the failure counters +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = 
"resty.events", + checks = { + passive = { + healthy = { + successes = 2, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 2, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true + checker:report_http_status("127.0.0.1", 2112, nil, 500) + checker:set_target_status("127.0.0.1", 2112, nil, true) + checker:report_http_status("127.0.0.1", 2112, nil, 500) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true + checker:report_http_status("127.0.0.1", 2112, nil, 500) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- false + } + } +--- request +GET /t +--- response_body +true +true +false + + +=== TEST 3: set_target_status() resets the success counters +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + passive = { + healthy = { + successes = 2, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 2, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) + ngx.sleep(0.01) + checker:set_target_status("127.0.0.1", 2112, nil, false) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- false + checker:report_http_status("127.0.0.1", 2112, nil, 200) + checker:set_target_status("127.0.0.1", 2112, nil, false) + checker:report_http_status("127.0.0.1", 2112, nil, 200) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- false + checker:report_http_status("127.0.0.1", 2112, nil, 200) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true + } + } +--- request +GET /t +--- response_body +false +false +true diff --git a/t/with_resty-events/13-integration.t_disabled b/t/with_resty-events/13-integration.t_disabled new file mode 100644 index 00000000..0e7b9274 --- /dev/null +++ b/t/with_resty-events/13-integration.t_disabled @@ -0,0 +1,207 @@ +# This test is disabled +# +# All the test steps used here take longer than the request timeout because of +# all the ngx.sleep needed to synchronize the events. Running them invididually +# seem to work, so the solution is to split the integration test into smaller +# tests. 
+ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * 2; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + + + +=== TEST 1: ensure counters work properly +--- http_config eval +qq{ + $::HttpConfig +} +--- config eval +qq{ + location = /t { + content_by_lua_block { + local host = "127.0.0.1" + local port = 2112 + + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + test = true, + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0, + successes = 4, + }, + unhealthy = { + interval = 0, + tcp_failures = 2, + http_failures = 0, + } + }, + passive = { + healthy = { + successes = 2, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 2, + timeouts = 2, + } + } + } + }) + + local ok, err = checker:add_target(host, port, nil, true) + + -- S = successes counter + -- F = http_failures counter + -- T = tcp_failures counter + -- O = timeouts counter + + local cases = {} + + local function incr(idxs, i, max) + idxs[i] = idxs[i] + 1 + if idxs[i] > max and i > 1 then + idxs[i] = 1 + incr(idxs, i - 1, max) + end + end + + local function add_cases(cases, len, m) + local idxs = {} + for i = 1, len do + idxs[i] = 1 + end + local word = {} + for _ = 1, (#m) ^ len do + for c = 1, len do + word[c] = m[idxs[c]] + end + table.insert(cases, table.concat(word)) + incr(idxs, len, #m) + end + end + + local m = { "S", "F", "T", "O" } + + -- There are 324 (3*3*3*3*4) possible internal states + -- to the above healthcheck configuration where all limits are set to 2. + -- We need at least five events (4*4*4*4) to be able + -- to exercise all of them + for i = 1, 5 do + add_cases(cases, i, m) + end + + -- Brute-force test all combinations of health events up to 5 events + -- and compares the results given by the library with a simple simulation + -- that implements the specified behavior. 
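+ -- For example (illustrating the enumeration above): add_cases(cases, 2, m)
+ -- appends the 4^2 = 16 two-event words "SS", "SF", "ST", "SO", "FS", ..., "OO",
+ -- so building cases for lengths 1 through 5 enumerates
+ -- 4 + 16 + 64 + 256 + 1024 = 1364 event sequences in total.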
+ local function run_test_case(case) + assert(checker:set_target_status(host, port, nil, true)) + ngx.sleep(0.002) + local i = 1 + local s, f, t, o = 0, 0, 0, 0 + local mode = true + for c in case:gmatch(".") do + if c == "S" then + checker:report_http_status(host, port, nil, 200, "passive") + ngx.sleep(0.002) + s = s + 1 + f, t, o = 0, 0, 0 + if s == 2 then + mode = true + end + elseif c == "F" then + checker:report_http_status(host, port, nil, 500, "passive") + ngx.sleep(0.002) + f = f + 1 + s = 0 + if f == 2 then + mode = false + end + elseif c == "T" then + checker:report_tcp_failure(host, port, nil, "read", "passive") + ngx.sleep(0.002) + t = t + 1 + s = 0 + if t == 2 then + mode = false + end + elseif c == "O" then + checker:report_timeout(host, port, nil, "passive") + ngx.sleep(0.002) + o = o + 1 + s = 0 + if o == 2 then + mode = false + end + end + + --local ctr, state = checker:test_get_counter(host, port, nil) + --ngx.say(case, ": ", c, " ", string.format("%08x", ctr), " ", state) + --ngx.log(ngx.DEBUG, case, ": ", c, " ", string.format("%08x", ctr), " ", state) + + if checker:get_target_status(host, port, nil) ~= mode then + ngx.say("failed: ", case, " step ", i, " expected ", mode) + return false + end + i = i + 1 + end + return true + end + + for _, case in ipairs(cases) do + ngx.log(ngx.ERR, "Case: ", case) + run_test_case(case) + end + ngx.say("all ok!") + } + } +} +--- request +GET /t +--- response_body +all ok! +--- error_log +--- no_error_log diff --git a/t/with_resty-events/14-tls_active_probes.t b/t/with_resty-events/14-tls_active_probes.t new file mode 100644 index 00000000..d9e44902 --- /dev/null +++ b/t/with_resty-events/14-tls_active_probes.t @@ -0,0 +1,155 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => blocks() * 2; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + + + +=== TEST 1: active probes, valid https +--- http_config eval: $::HttpConfig +--- config + location = /t { + lua_ssl_trusted_certificate /etc/ssl/certs/ca-certificates.crt; + lua_ssl_verify_depth 2; + content_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ unique_timeout = 5, broker_id = 0, listening = "unix:" .. ngx.config.prefix() .. 
"worker_events.sock" })) + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + type = "https", + http_path = "/", + healthy = { + interval = 2, + successes = 2, + }, + unhealthy = { + interval = 2, + tcp_failures = 2, + } + }, + } + }) + local ok, err = checker:add_target("104.154.89.105", 443, "badssl.com", false) + ngx.sleep(8) -- wait for 4x the check interval + ngx.say(checker:get_target_status("104.154.89.105", 443, "badssl.com")) -- true + } + } +--- request +GET /t +--- response_body +true +--- timeout +15 + +=== TEST 2: active probes, invalid cert +--- http_config eval: $::HttpConfig +--- config + location = /t { + lua_ssl_trusted_certificate /etc/ssl/certs/ca-certificates.crt; + lua_ssl_verify_depth 2; + content_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ unique_timeout = 5, broker_id = 0, listening = "unix:" .. ngx.config.prefix() .. "worker_events.sock" })) + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + type = "https", + http_path = "/", + healthy = { + interval = 2, + successes = 2, + }, + unhealthy = { + interval = 2, + tcp_failures = 2, + } + }, + } + }) + local ok, err = checker:add_target("104.154.89.105", 443, "wrong.host.badssl.com", true) + ngx.sleep(8) -- wait for 4x the check interval + ngx.say(checker:get_target_status("104.154.89.105", 443, "wrong.host.badssl.com")) -- false + } + } +--- request +GET /t +--- response_body +false +--- timeout +15 + +=== TEST 3: active probes, accept invalid cert when disabling check +--- http_config eval: $::HttpConfig +--- config + location = /t { + lua_ssl_trusted_certificate /etc/ssl/certs/ca-certificates.crt; + lua_ssl_verify_depth 2; + content_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ unique_timeout = 5, broker_id = 0, listening = "unix:" .. ngx.config.prefix() .. 
"worker_events.sock" })) + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + type = "https", + https_verify_certificate = false, + http_path = "/", + healthy = { + interval = 2, + successes = 2, + }, + unhealthy = { + interval = 2, + tcp_failures = 2, + } + }, + } + }) + local ok, err = checker:add_target("104.154.89.105", 443, "wrong.host.badssl.com", false) + ngx.sleep(8) -- wait for 4x the check interval + ngx.say(checker:get_target_status("104.154.89.105", 443, "wrong.host.badssl.com")) -- true + } + } +--- request +GET /t +--- response_body +true +--- timeout +15 diff --git a/t/with_resty-events/15-get_virtualhost_target_status.t b/t/with_resty-events/15-get_virtualhost_target_status.t new file mode 100644 index 00000000..2921a397 --- /dev/null +++ b/t/with_resty-events/15-get_virtualhost_target_status.t @@ -0,0 +1,322 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * (blocks() * 5) + 2; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + +=== TEST 1: get_target_status() reports proper status for virtualhosts +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 1, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 1, + http_failures = 1, + } + }, + passive = { + healthy = { + successes = 1, + }, + unhealthy = { + tcp_failures = 1, + http_failures = 2, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2115, "ahostname", true) + local ok, err = checker:add_target("127.0.0.1", 2115, "otherhostname", true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2115, "ahostname")) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2115, "otherhostname")) -- true + checker:report_http_status("127.0.0.1", 2115, "otherhostname", 500, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2115, "otherhostname")) -- true + checker:report_http_status("127.0.0.1", 2115, "otherhostname", 500, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2115, "otherhostname")) -- false + checker:report_success("127.0.0.1", 2115, "otherhostname") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2115, "otherhostname")) -- true + checker:report_tcp_failure("127.0.0.1", 2115, "otherhostname") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2115, "otherhostname")) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2115, "ahostname")) -- true + 
local _, err = checker:get_target_status("127.0.0.1", 2115) + ngx.say(err) -- target not found + } + } +--- request +GET /t +--- response_body +true +true +true +false +true +false +true +target not found +--- error_log +unhealthy HTTP increment (1/2) for 'otherhostname(127.0.0.1:2115)' +unhealthy HTTP increment (2/2) for 'otherhostname(127.0.0.1:2115)' +event: target status 'otherhostname(127.0.0.1:2115)' from 'true' to 'false' + + + +=== TEST 2: get_target_status() reports proper status for mixed targets (with/without hostname) +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 1, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 1, + http_failures = 1, + } + }, + passive = { + healthy = { + successes = 1, + }, + unhealthy = { + tcp_failures = 1, + http_failures = 1, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2116, "ahostname", true) + local ok, err = checker:add_target("127.0.0.1", 2116, nil, true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2116, "ahostname")) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2116)) -- true + checker:report_http_status("127.0.0.1", 2116, nil, 500, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2116, "ahostname")) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2116)) -- false + } + } +--- request +GET /t +--- response_body +true +true +true +false +--- error_log +unhealthy HTTP increment (1/1) for '(127.0.0.1:2116)' +event: target status '(127.0.0.1:2116)' from 'true' to 'false' + + + +=== TEST 3: active probe for virtualhosts listening on same port:ip combination +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2117; + server_name healthyserver; + location = /status { + return 200; + } + } + server { + listen 2117; + server_name unhealthyserver; + location = /status { + return 500; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1, + successes = 3, + }, + unhealthy = { + interval = 0.1, + http_failures = 3, + } + }, + } + }) + local ok, err = checker:add_target("127.0.0.1", 2117, "healthyserver", true) + local ok, err = checker:add_target("127.0.0.1", 2117, "unhealthyserver", true) + ngx.sleep(0.6) -- wait for 6x the check interval + ngx.say(checker:get_target_status("127.0.0.1", 2117, "healthyserver")) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2117, "unhealthyserver")) -- false + local _, err = checker:get_target_status("127.0.0.1", 2117) + ngx.say(err) -- target not found + } + } +--- request +GET /t +--- response_body +true +false +target not found +--- error_log +checking unhealthy targets: nothing to do +unhealthy HTTP increment (1/3) for 'unhealthyserver(127.0.0.1:2117)' +unhealthy HTTP increment (2/3) for 'unhealthyserver(127.0.0.1:2117)' +unhealthy HTTP increment (3/3) for 'unhealthyserver(127.0.0.1:2117)' +event: target status 'unhealthyserver(127.0.0.1:2117)' from 'true' to 'false' + + + 
+=== TEST 4: get_target_status() reports proper status for same target with and without hostname +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 1, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 1, + http_failures = 1, + } + }, + passive = { + healthy = { + successes = 1, + }, + unhealthy = { + tcp_failures = 1, + http_failures = 1, + } + } + } + }) + local ok, err = checker:add_target("127.0.0.1", 2118, "127.0.0.1", true) + local ok, err = checker:add_target("127.0.0.1", 2119, nil, true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2118, "127.0.0.1")) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2118)) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2119, "127.0.0.1")) -- true + checker:report_http_status("127.0.0.1", 2118, nil, 500, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2118, "127.0.0.1")) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- true + ngx.say(checker:get_target_status("127.0.0.1", 2118)) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2119, "127.0.0.1")) -- true + checker:report_http_status("127.0.0.1", 2119, "127.0.0.1", 500, "passive") + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2118, "127.0.0.1")) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2118)) -- false + ngx.say(checker:get_target_status("127.0.0.1", 2119, "127.0.0.1")) -- false + } + } +--- request +GET /t +--- response_body +true +true +true +true +false +true +false +true +false +false +false +false +--- error_log +unhealthy HTTP increment (1/1) for '(127.0.0.1:2118)' +event: target status '(127.0.0.1:2118)' from 'true' to 'false' +unhealthy HTTP increment (1/1) for '127.0.0.1(127.0.0.1:2119)' +event: target status '127.0.0.1(127.0.0.1:2119)' from 'true' to 'false' diff --git a/t/with_resty-events/16-set_all_target_statuses_for_hostname.t b/t/with_resty-events/16-set_all_target_statuses_for_hostname.t new file mode 100644 index 00000000..1385e611 --- /dev/null +++ b/t/with_resty-events/16-set_all_target_statuses_for_hostname.t @@ -0,0 +1,233 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * blocks() * 2; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + +=== TEST 1: set_all_target_statuses_for_hostname() updates statuses +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local 
healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + }) + checker:add_target("127.0.0.1", 2112, "rush", true) + checker:add_target("127.0.0.2", 2112, "rush", true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true + ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true + checker:set_all_target_statuses_for_hostname("rush", 2112, false) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- false + ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- false + checker:set_all_target_statuses_for_hostname("rush", 2112, true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true + ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true + } + } +--- request +GET /t +--- response_body +true +true +false +false +true +true + + +=== TEST 2: set_all_target_statuses_for_hostname() restores node after passive check disables it +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + passive = { + unhealthy = { + tcp_failures = 2, + http_failures = 2, + } + } + } + }) + checker:add_target("127.0.0.1", 2112, "rush", true) + checker:add_target("127.0.0.2", 2112, "rush", true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true + ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true + checker:report_http_status("127.0.0.1", 2112, "rush", 500) + checker:report_http_status("127.0.0.1", 2112, "rush", 500) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- false + checker:set_all_target_statuses_for_hostname("rush", 2112, true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true + ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true + } + } +--- request +GET /t +--- response_body +true +true +false +true +true + + +=== TEST 3: set_all_target_statuses_for_hostname() resets failure counters +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", +events_module = "resty.events", + checks = { + passive = { + healthy = { + successes = 2, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 2, + } + } + } + }) + checker:add_target("127.0.0.1", 2112, "rush", true) + checker:add_target("127.0.0.2", 2112, "rush", true) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true + ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true + checker:report_http_status("127.0.0.1", 2112, "rush", 500) + checker:set_all_target_statuses_for_hostname("rush", 2112, true) + checker:report_http_status("127.0.0.1", 2112, "rush", 500) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true + ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true + checker:report_http_status("127.0.0.1", 2112, "rush", 500) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- false + ngx.say(checker:get_target_status("127.0.0.2", 2112, 
"rush")) -- true + } + } +--- request +GET /t +--- response_body +true +true +true +true +false +true + + +=== TEST 4: set_target_status() resets the success counters +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + passive = { + healthy = { + successes = 2, + }, + unhealthy = { + tcp_failures = 2, + http_failures = 2, + } + } + } + }) + checker:add_target("127.0.0.1", 2112, "rush", true) + checker:add_target("127.0.0.2", 2112, "rush", true) + ngx.sleep(0.01) + checker:set_all_target_statuses_for_hostname("rush", 2112, false) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- false + ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- false + checker:report_http_status("127.0.0.1", 2112, "rush", 200) + checker:set_all_target_statuses_for_hostname("rush", 2112, false) + checker:report_http_status("127.0.0.1", 2112, "rush", 200) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- false + ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- false + checker:report_http_status("127.0.0.1", 2112, "rush", 200) + ngx.sleep(0.01) + ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true + ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- false + } + } +--- request +GET /t +--- response_body +false +false +false +false +true +false diff --git a/t/with_resty-events/17-mtls.t b/t/with_resty-events/17-mtls.t new file mode 100644 index 00000000..c0d0afc3 --- /dev/null +++ b/t/with_resty-events/17-mtls.t @@ -0,0 +1,145 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * 4; + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + +=== TEST 1: configure a MTLS probe +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local pl_file = require "pl.file" + local cert = pl_file.read("t/with_resty-events/util/cert.pem", true) + local key = pl_file.read("t/with_resty-events/util/key.pem", true) + + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing_mtls", + shm_name = "test_shm", + events_module = "resty.events", + type = "http", + ssl_cert = cert, + ssl_key = key, + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 3, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 3, + } + } + } + }) + ngx.say(checker ~= nil) -- true + } + } +--- request +GET /t +--- response_body +true + + +=== TEST 2: 
configure a MTLS probe with parsed cert/key +--- http_config eval +qq{ + $::HttpConfig +} +--- config + location = /t { + content_by_lua_block { + local pl_file = require "pl.file" + local ssl = require "ngx.ssl" + local cert = ssl.parse_pem_cert(pl_file.read("t/with_resty-events/util/cert.pem", true)) + local key = ssl.parse_pem_priv_key(pl_file.read("t/with_resty-events/util/key.pem", true)) + + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing_mtls", + shm_name = "test_shm", +events_module = "resty.events", + type = "http", + ssl_cert = cert, + ssl_key = key, + checks = { + active = { + http_path = "/status", + healthy = { + interval = 999, -- we don't want active checks + successes = 3, + }, + unhealthy = { + interval = 999, -- we don't want active checks + tcp_failures = 3, + http_failures = 3, + } + }, + passive = { + healthy = { + successes = 3, + }, + unhealthy = { + tcp_failures = 3, + http_failures = 3, + } + } + } + }) + ngx.say(checker ~= nil) -- true + } + } +--- request +GET /t +--- response_body +true diff --git a/t/with_resty-events/18-req-headers.t b/t/with_resty-events/18-req-headers.t new file mode 100644 index 00000000..7fd69c33 --- /dev/null +++ b/t/with_resty-events/18-req-headers.t @@ -0,0 +1,280 @@ +use Test::Nginx::Socket::Lua 'no_plan'; +use Cwd qw(cwd); + +workers(1); + +my $pwd = cwd(); +$ENV{TEST_NGINX_SERVROOT} = server_root(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + + init_worker_by_lua_block { + local we = require "resty.events.compat" + assert(we.configure({ + unique_timeout = 5, + broker_id = 0, + listening = "unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock" + })) + assert(we.configured()) + } + + server { + server_name kong_worker_events; + listen unix:$ENV{TEST_NGINX_SERVROOT}/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +}; + +run_tests(); + +__DATA__ + +=== TEST 1: headers: {"User-Agent: curl/7.29.0"} +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2112; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1 + }, + headers = {"User-Agent: curl/7.29.0"} + } + } + }) + ngx.sleep(0.2) -- wait twice the interval + local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) + ngx.say(ok) + ngx.sleep(0.2) -- wait twice the interval + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking healthy targets: nothing to do +checking healthy targets: #1 +GET /status HTTP/1.0 +User-Agent: curl/7.29.0 +Host: 127.0.0.1 + + + +=== TEST 2: headers: {"User-Agent: curl"} +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2112; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1 + }, + headers = {"User-Agent: curl"} + } + } + }) + ngx.sleep(0.2) -- wait twice the interval + local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) + 
ngx.say(ok) + ngx.sleep(0.2) -- wait twice the interval + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking healthy targets: nothing to do +checking healthy targets: #1 +GET /status HTTP/1.0 +User-Agent: curl +Host: 127.0.0.1 + + +=== TEST 3: headers: { ["User-Agent"] = "curl" } +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2112; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1 + }, + headers = { ["User-Agent"] = "curl" } + } + } + }) + ngx.sleep(0.2) -- wait twice the interval + local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) + ngx.say(ok) + ngx.sleep(0.2) -- wait twice the interval + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking healthy targets: nothing to do +checking healthy targets: #1 +GET /status HTTP/1.0 +User-Agent: curl +Host: 127.0.0.1 + + + +=== TEST 4: headers: { ["User-Agent"] = {"curl"} } +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2112; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1 + }, + headers = { ["User-Agent"] = {"curl"} } + } + } + }) + ngx.sleep(0.2) -- wait twice the interval + local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) + ngx.say(ok) + ngx.sleep(0.2) -- wait twice the interval + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking healthy targets: nothing to do +checking healthy targets: #1 +GET /status HTTP/1.0 +User-Agent: curl +Host: 127.0.0.1 + + + +=== TEST 5: headers: { ["User-Agent"] = {"curl", "nginx"} } +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2112; + location = /status { + return 200; + } + } +} +--- config + location = /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + name = "testing", + shm_name = "test_shm", + events_module = "resty.events", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 0.1 + }, + headers = { ["User-Agent"] = {"curl", "nginx"} } + } + } + }) + ngx.sleep(0.2) -- wait twice the interval + local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) + ngx.say(ok) + ngx.sleep(0.2) -- wait twice the interval + } + } +--- request +GET /t +--- response_body +true +--- error_log +checking healthy targets: nothing to do +checking healthy targets: #1 +GET /status HTTP/1.0 +User-Agent: curl +User-Agent: nginx +Host: 127.0.0.1 diff --git a/t/util/cert.pem b/t/with_resty-events/util/cert.pem similarity index 100% rename from t/util/cert.pem rename to t/with_resty-events/util/cert.pem diff --git a/t/util/key.pem b/t/with_resty-events/util/key.pem similarity index 100% rename from t/util/key.pem rename to t/with_resty-events/util/key.pem diff --git a/t/util/reindex b/t/with_resty-events/util/reindex similarity index 100% rename from t/util/reindex rename to t/with_resty-events/util/reindex diff --git a/t/00-new.t b/t/with_worker-events/00-new.t similarity index 91% rename 
from t/00-new.t rename to t/with_worker-events/00-new.t index 95a41ba7..d01274f4 100644 --- a/t/00-new.t +++ b/t/with_worker-events/00-new.t @@ -119,7 +119,7 @@ GET /t --- error_log Healthchecker started! -=== TEST 6: new() only accepts http(s) or tcp types +=== TEST 6: new() only accepts http or tcp types --- http_config eval: $::HttpConfig --- config location = /t { @@ -130,41 +130,19 @@ Healthchecker started! local ok, err = pcall(healthcheck.new, { name = "testing", shm_name = "test_shm", - checks = { - active = { - type = "http", - }, - } + type = "http", }) ngx.say(ok) local ok, err = pcall(healthcheck.new, { name = "testing", shm_name = "test_shm", - checks = { - active = { - type = "https", - }, - } + type = "tcp", }) ngx.say(ok) local ok, err = pcall(healthcheck.new, { name = "testing", shm_name = "test_shm", - checks = { - active = { - type = "tcp", - }, - } - }) - ngx.say(ok) - local ok, err = pcall(healthcheck.new, { - name = "testing", - shm_name = "test_shm", - checks = { - active = { - type = "get lost", - }, - } + type = "get lost", }) ngx.say(ok) } @@ -174,7 +152,6 @@ GET /t --- response_body true true -true false === TEST 7: new() deals with bad inputs @@ -310,4 +287,4 @@ GET /t true true nil -true \ No newline at end of file +true diff --git a/t/01-start-stop.t b/t/with_worker-events/01-start-stop.t similarity index 69% rename from t/01-start-stop.t rename to t/with_worker-events/01-start-stop.t index 88b59de3..6ca00f9d 100644 --- a/t/01-start-stop.t +++ b/t/with_worker-events/01-start-stop.t @@ -43,52 +43,15 @@ __DATA__ ngx.sleep(0.2) -- wait twice the interval local ok, err = checker:start() ngx.say(ok) - ngx.say( - (checker.active_healthy_timer and 1 or 0) + - (checker.active_unhealthy_timer and 1 or 0) - ) } } --- request GET /t --- response_body true -2 --- no_error_log [error] -=== TEST 2: start() cannot start a second time using active health checks ---- http_config eval: $::HttpConfig ---- config - location = /t { - content_by_lua_block { - local we = require "resty.worker.events" - assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) - local healthcheck = require("resty.healthcheck") - local checker = healthcheck.new({ - name = "testing", - shm_name = "test_shm", - checks = { - active = { - healthy = { - interval = 0.1 - }, - unhealthy = { - interval = 0.1 - } - } - } - }) - local ok, err = checker:start() - ngx.say(err) - } - } ---- request -GET /t ---- response_body -cannot start, timers are still running ---- no_error_log -[error] === TEST 3: start() is a no-op if active intervals are 0 --- http_config eval: $::HttpConfig @@ -118,10 +81,6 @@ cannot start, timers are still running ngx.say(ok) local ok, err = checker:start() ngx.say(ok) - ngx.say( - (checker.active_healthy_timer and 1 or 0) + - (checker.active_unhealthy_timer and 1 or 0) - ) } } --- request @@ -130,7 +89,6 @@ GET /t true true true -0 --- no_error_log [error] @@ -158,18 +116,12 @@ true }) local ok, err = checker:stop() ngx.say(ok) - ngx.sleep(0.2) -- wait twice the interval - ngx.say( - (checker.active_healthy_timer and 1 or 0) + - (checker.active_unhealthy_timer and 1 or 0) - ) } } --- request GET /t --- response_body true -0 --- no_error_log [error] checking @@ -198,17 +150,9 @@ checking }) local ok, err = checker:stop() ngx.say(ok) - ngx.sleep(0.2) -- wait twice the interval - ngx.say( - (checker.active_healthy_timer and 1 or 0) + - (checker.active_unhealthy_timer and 1 or 0) - ) + ngx.sleep(1) -- active healthchecks might take up to 1s to start local ok, err = checker:start() 
ngx.say(ok) - ngx.say( - (checker.active_healthy_timer and 1 or 0) + - (checker.active_unhealthy_timer and 1 or 0) - ) ngx.sleep(0.2) -- wait twice the interval } } @@ -216,8 +160,6 @@ checking GET /t --- response_body true -0 true -2 --- error_log checking diff --git a/t/02-add_target.t b/t/with_worker-events/02-add_target.t similarity index 60% rename from t/02-add_target.t rename to t/with_worker-events/02-add_target.t index 376a0bad..2f8fffd1 100644 --- a/t/02-add_target.t +++ b/t/with_worker-events/02-add_target.t @@ -3,7 +3,7 @@ use Cwd qw(cwd); workers(1); -plan tests => repeat_each() * (blocks() * 4) + 5; +plan tests => repeat_each() * (blocks() * 4) + 3; my $pwd = cwd(); @@ -39,10 +39,10 @@ __DATA__ } } }) - ngx.sleep(0.2) -- wait twice the interval + ngx.sleep(1) -- active healthchecks might take up to 1s to start local ok, err = checker:add_target("127.0.0.1", 11111, nil, false) ngx.say(ok) - ngx.sleep(0.2) -- wait twice the interval + ngx.sleep(0.5) } } --- request @@ -51,7 +51,6 @@ GET /t true --- error_log checking healthy targets: nothing to do -checking unhealthy targets: nothing to do checking unhealthy targets: #1 --- no_error_log @@ -92,7 +91,7 @@ qq{ } } }) - ngx.sleep(0.2) -- wait twice the interval + ngx.sleep(1) -- active healthchecks might take up to 1s to start local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) ngx.say(ok) ngx.sleep(0.2) -- wait twice the interval @@ -103,7 +102,6 @@ GET /t --- response_body true --- error_log -checking healthy targets: nothing to do checking unhealthy targets: nothing to do checking healthy targets: #1 @@ -148,7 +146,7 @@ qq{ } } }) - ngx.sleep(0.2) -- wait twice the interval + ngx.sleep(1) -- active healthchecks might take up to 1s to start local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) local ok, err = checker:add_target("127.0.0.1", 2113, nil, false) ngx.say(ok) @@ -160,95 +158,8 @@ GET /t --- response_body true --- error_log -checking healthy targets: nothing to do checking unhealthy targets: nothing to do checking healthy targets: #1 --- no_error_log checking unhealthy targets: #1 - - - -=== TEST 4: calling add_target() repeatedly does not exhaust timers ---- http_config eval -qq{ - $::HttpConfig - - server { - listen 2113; - location = /status { - return 200; - } - } - lua_max_pending_timers 100; - - init_worker_by_lua_block { - --error("erreur") - local resty_lock = require ("resty.lock") - local we = require "resty.worker.events" - assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) - local healthcheck = require("resty.healthcheck") - local checker = healthcheck.new({ - name = "testing", - shm_name = "test_shm", - checks = { - active = { - http_path = "/status", - healthy = { - interval = 0.1, - successes = 1, - }, - unhealthy = { - interval = 0.1, - tcp_failures = 1, - http_failures = 1, - } - } - } - }) - - -- lock the key, so adding targets will fallback on timers - local lock = assert(resty_lock:new(checker.shm_name, { - exptime = 10, -- timeout after which lock is released anyway - timeout = 5, -- max wait time to acquire lock - })) - assert(lock:lock(checker.TARGET_LIST_LOCK)) - - local addr = { - 127, 0, 0, 1 - } - -- add 10000 check, exhausting timers... 
- for i = 0, 150 do - addr[4] = addr[4] + 1 - if addr[4] > 255 then - addr[4] = 1 - addr[3] = addr[3] + 1 - if addr[3] > 255 then - addr[3] = 1 - addr[2] = addr[2] + 1 - if addr[2] > 255 then - addr[2] = 1 - addr[1] = addr[1] + 1 - end - end - end - local ok, err = assert(checker:add_target(table.concat(addr, "."), 2113, nil, true)) - end - } - -} - ---- config - location = /t { - content_by_lua_block { - ngx.say(true) - ngx.exit(200) - } - } - ---- request -GET /t ---- response_body -true ---- no_error_log -too many pending timers diff --git a/t/03-get_target_status.t b/t/with_worker-events/03-get_target_status.t similarity index 91% rename from t/03-get_target_status.t rename to t/with_worker-events/03-get_target_status.t index 695d2d3b..d7e0b4d5 100644 --- a/t/03-get_target_status.t +++ b/t/with_worker-events/03-get_target_status.t @@ -3,7 +3,7 @@ use Cwd qw(cwd); workers(1); -plan tests => repeat_each() * (blocks() * 4) + 2; +plan tests => repeat_each() * (blocks() * 4); my $pwd = cwd(); @@ -63,17 +63,11 @@ qq{ } }) ngx.sleep(0.1) -- wait for initial timers to run once - local ok, err = checker:add_target("127.0.0.1", 2115, nil, true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2115)) -- true - checker:report_tcp_failure("127.0.0.1", 2115) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2115)) -- false - checker:report_success("127.0.0.1", 2115) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2115)) -- true } } @@ -83,9 +77,6 @@ GET /t true false true ---- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do --- no_error_log checking healthy targets: #1 checking unhealthy targets: #1 diff --git a/t/04-report_success.t b/t/with_worker-events/04-report_success.t similarity index 96% rename from t/04-report_success.t rename to t/with_worker-events/04-report_success.t index 0d71fe10..8fb5fb6e 100644 --- a/t/04-report_success.t +++ b/t/with_worker-events/04-report_success.t @@ -3,7 +3,7 @@ use Cwd qw(cwd); workers(1); -plan tests => repeat_each() * 32; +plan tests => repeat_each() * 28; my $pwd = cwd(); @@ -68,14 +68,12 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2116, nil, false) local ok, err = checker:add_target("127.0.0.1", 2118, nil, false) - we.poll() checker:report_success("127.0.0.1", 2116, nil, "active") checker:report_success("127.0.0.1", 2118, nil, "passive") checker:report_success("127.0.0.1", 2116, nil, "active") checker:report_success("127.0.0.1", 2118, nil, "passive") checker:report_success("127.0.0.1", 2116, nil, "active") checker:report_success("127.0.0.1", 2118, nil, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2116)) -- true ngx.say(checker:get_target_status("127.0.0.1", 2118)) -- true } @@ -86,8 +84,6 @@ GET /t true true --- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do healthy SUCCESS increment (1/3) for '(127.0.0.1:2116)' healthy SUCCESS increment (2/3) for '(127.0.0.1:2116)' healthy SUCCESS increment (3/3) for '(127.0.0.1:2116)' @@ -147,14 +143,12 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2116, nil, false) local ok, err = checker:add_target("127.0.0.1", 2118, nil, false) - we.poll() checker:report_success("127.0.0.1", 2116, nil, "active") checker:report_success("127.0.0.1", 2118, nil, "passive") checker:report_success("127.0.0.1", 2116, nil, "active") checker:report_success("127.0.0.1", 
2118, nil, "passive") checker:report_success("127.0.0.1", 2116, nil, "active") checker:report_success("127.0.0.1", 2118, nil, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2116)) -- true ngx.say(checker:get_target_status("127.0.0.1", 2118)) -- true } @@ -165,8 +159,6 @@ GET /t true true --- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do healthy SUCCESS increment (1/3) for '(127.0.0.1:2116)' healthy SUCCESS increment (2/3) for '(127.0.0.1:2116)' healthy SUCCESS increment (3/3) for '(127.0.0.1:2116)' @@ -224,11 +216,9 @@ qq{ }) ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2116, nil, false) - we.poll() checker:report_success("127.0.0.1", 2116, nil, "active") checker:report_success("127.0.0.1", 2116, nil, "active") checker:report_success("127.0.0.1", 2116, nil, "active") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2116)) -- false } } @@ -290,11 +280,9 @@ qq{ }) ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2118, nil, false) - we.poll() checker:report_success("127.0.0.1", 2118, nil, "passive") checker:report_success("127.0.0.1", 2118, nil, "passive") checker:report_success("127.0.0.1", 2118, nil, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2118, nil)) -- false } } diff --git a/t/05-report_failure.t b/t/with_worker-events/05-report_failure.t similarity index 92% rename from t/05-report_failure.t rename to t/with_worker-events/05-report_failure.t index e3227dda..47f8c7e9 100644 --- a/t/05-report_failure.t +++ b/t/with_worker-events/05-report_failure.t @@ -3,7 +3,7 @@ use Cwd qw(cwd); workers(1); -plan tests => repeat_each() * (blocks() * 11) - 1; +plan tests => repeat_each() * 26; my $pwd = cwd(); @@ -40,9 +40,9 @@ qq{ local checker = healthcheck.new({ name = "testing", shm_name = "test_shm", + type = "http", checks = { active = { - type = "http", http_path = "/status", healthy = { interval = 999, -- we don't want active checks @@ -55,7 +55,6 @@ qq{ } }, passive = { - type = "http", healthy = { successes = 3, }, @@ -69,14 +68,12 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2117, nil, true) local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) - we.poll() checker:report_failure("127.0.0.1", 2117, nil, "active") checker:report_failure("127.0.0.1", 2113, nil, "passive") checker:report_failure("127.0.0.1", 2117, nil, "active") checker:report_failure("127.0.0.1", 2113, nil, "passive") checker:report_failure("127.0.0.1", 2117, nil, "active") checker:report_failure("127.0.0.1", 2113, nil, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2117, nil)) -- false ngx.say(checker:get_target_status("127.0.0.1", 2113, nil)) -- false } @@ -87,8 +84,6 @@ GET /t false false --- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do unhealthy HTTP increment (1/3) for '(127.0.0.1:2117)' unhealthy HTTP increment (2/3) for '(127.0.0.1:2117)' unhealthy HTTP increment (3/3) for '(127.0.0.1:2117)' @@ -120,9 +115,9 @@ qq{ local checker = healthcheck.new({ name = "testing", shm_name = "test_shm", + type = "tcp", checks = { active = { - type = "tcp", http_path = "/status", healthy = { interval = 999, -- we don't want active checks @@ -135,7 +130,6 @@ qq{ } }, passive = { - type = "tcp", healthy = { successes = 3, }, @@ -149,14 +143,12 @@ qq{ ngx.sleep(0.1) -- wait for 
initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2117, nil, true) local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) - we.poll() checker:report_failure("127.0.0.1", 2117, nil, "active") checker:report_failure("127.0.0.1", 2113, nil, "passive") checker:report_failure("127.0.0.1", 2117, nil, "active") checker:report_failure("127.0.0.1", 2113, nil, "passive") checker:report_failure("127.0.0.1", 2117, nil, "active") checker:report_failure("127.0.0.1", 2113, nil, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2117, nil)) -- false ngx.say(checker:get_target_status("127.0.0.1", 2113, nil)) -- false } @@ -167,8 +159,6 @@ GET /t false false --- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do unhealthy TCP increment (1/2) for '(127.0.0.1:2117)' unhealthy TCP increment (2/2) for '(127.0.0.1:2117)' event: target status '(127.0.0.1:2117)' from 'true' to 'false' @@ -198,9 +188,9 @@ qq{ local checker = healthcheck.new({ name = "testing", shm_name = "test_shm", + type = "tcp", checks = { active = { - type = "tcp", http_path = "/status", healthy = { interval = 999, -- we don't want active checks @@ -213,7 +203,6 @@ qq{ } }, passive = { - type = "tcp", healthy = { successes = 3, }, @@ -227,14 +216,12 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2117, nil, true) local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) - we.poll() checker:report_failure("127.0.0.1", 2117, nil, "active") checker:report_failure("127.0.0.1", 2113, nil, "passive") checker:report_failure("127.0.0.1", 2117, nil, "active") checker:report_failure("127.0.0.1", 2113, nil, "passive") checker:report_failure("127.0.0.1", 2117, nil, "active") checker:report_failure("127.0.0.1", 2113, nil, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2117, nil)) -- true ngx.say(checker:get_target_status("127.0.0.1", 2113, nil)) -- true } @@ -244,9 +231,6 @@ GET /t --- response_body true true ---- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do --- no_error_log unhealthy TCP increment (1/2) for '(127.0.0.1:2117)' unhealthy TCP increment (2/2) for '(127.0.0.1:2117)' diff --git a/t/06-report_http_status.t b/t/with_worker-events/06-report_http_status.t similarity index 95% rename from t/06-report_http_status.t rename to t/with_worker-events/06-report_http_status.t index 235e5c71..0d2c1cdd 100644 --- a/t/06-report_http_status.t +++ b/t/with_worker-events/06-report_http_status.t @@ -3,7 +3,7 @@ use Cwd qw(cwd); workers(1); -plan tests => repeat_each() * 53; +plan tests => repeat_each() * 41; my $pwd = cwd(); @@ -68,14 +68,12 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2119, nil, true) local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) - we.poll() checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") checker:report_http_status("127.0.0.1", 2113, nil, 500, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") checker:report_http_status("127.0.0.1", 2113, nil, 500, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") checker:report_http_status("127.0.0.1", 2113, nil, 500, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- false ngx.say(checker:get_target_status("127.0.0.1", 2113)) -- false } @@ -86,8 +84,6 @@ GET /t false false --- error_log -checking 
healthy targets: nothing to do -checking unhealthy targets: nothing to do unhealthy HTTP increment (1/3) for '(127.0.0.1:2119)' unhealthy HTTP increment (2/3) for '(127.0.0.1:2119)' unhealthy HTTP increment (3/3) for '(127.0.0.1:2119)' @@ -148,7 +144,6 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2119, nil, false) local ok, err = checker:add_target("127.0.0.1", 2113, nil, false) - we.poll() checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") checker:report_http_status("127.0.0.1", 2113, nil, 200, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") @@ -157,7 +152,6 @@ qq{ checker:report_http_status("127.0.0.1", 2113, nil, 200, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") checker:report_http_status("127.0.0.1", 2113, nil, 200, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- true ngx.say(checker:get_target_status("127.0.0.1", 2113)) -- true } @@ -168,8 +162,6 @@ GET /t true true --- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do healthy SUCCESS increment (1/4) for '(127.0.0.1:2119)' healthy SUCCESS increment (2/4) for '(127.0.0.1:2119)' healthy SUCCESS increment (3/4) for '(127.0.0.1:2119)' @@ -230,12 +222,10 @@ qq{ }) ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2119, nil, false) - we.poll() checker:report_http_status("127.0.0.1", 2119, nil, 200, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 200, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 200, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 200, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2119, nil)) -- false } } @@ -243,9 +233,6 @@ qq{ GET /t --- response_body false ---- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do --- no_error_log healthy SUCCESS increment event: target status '127.0.0.1 (127.0.0.1:2119)' from 'false' to 'true' @@ -299,12 +286,10 @@ qq{ }) ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2119, nil, false) - we.poll() checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") checker:report_http_status("127.0.0.1", 2119, nil, 200, "active") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2119, nil)) -- false } } @@ -312,9 +297,6 @@ qq{ GET /t --- response_body false ---- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do --- no_error_log healthy SUCCESS increment event: target status '127.0.0.1 (127.0.0.1:2119)' from 'false' to 'true' @@ -368,12 +350,10 @@ qq{ }) ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2119, nil, true) - we.poll() checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- true } } @@ -381,9 +361,6 @@ qq{ GET /t --- response_body true ---- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do --- 
no_error_log unhealthy HTTP increment event: target status '127.0.0.1 (127.0.0.1:2119)' from 'true' to 'false' @@ -437,12 +414,10 @@ qq{ }) ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2119, nil, true) - we.poll() checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") checker:report_http_status("127.0.0.1", 2119, nil, 500, "active") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2119, nil)) -- true } } @@ -450,9 +425,6 @@ qq{ GET /t --- response_body true ---- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do --- no_error_log unhealthy HTTP increment event: target status '(127.0.0.1:2119)' from 'true' to 'false' @@ -489,7 +461,6 @@ qq{ } }) local ok, err = checker:add_target("127.0.0.1", 2119, nil, true) - we.poll() checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") checker:report_http_status("127.0.0.1", 2119, nil, 500, "passive") diff --git a/t/07-report_tcp_failure.t b/t/with_worker-events/07-report_tcp_failure.t similarity index 93% rename from t/07-report_tcp_failure.t rename to t/with_worker-events/07-report_tcp_failure.t index be82e72e..9e4e0e7c 100644 --- a/t/07-report_tcp_failure.t +++ b/t/with_worker-events/07-report_tcp_failure.t @@ -3,7 +3,7 @@ use Cwd qw(cwd); workers(1); -plan tests => repeat_each() * 24; +plan tests => repeat_each() * 18; my $pwd = cwd(); @@ -68,14 +68,12 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2120, nil, true) local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) - we.poll() checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") checker:report_tcp_failure("127.0.0.1", 2113, nil, nil, "passive") checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") checker:report_tcp_failure("127.0.0.1", 2113, nil, nil, "passive") checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") checker:report_tcp_failure("127.0.0.1", 2113, nil, nil, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2120)) -- false ngx.say(checker:get_target_status("127.0.0.1", 2113)) -- false } @@ -86,8 +84,6 @@ GET /t false false --- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do unhealthy TCP increment (1/3) for '(127.0.0.1:2120)' unhealthy TCP increment (2/3) for '(127.0.0.1:2120)' unhealthy TCP increment (3/3) for '(127.0.0.1:2120)' @@ -130,7 +126,7 @@ qq{ unhealthy = { interval = 999, -- we don't want active checks tcp_failures = 0, - http_failures = 0, + http_failures = 5, } }, passive = { @@ -146,11 +142,9 @@ qq{ }) ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2120, nil, true) - we.poll() checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "active") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2120)) -- true } } @@ -158,9 +152,6 @@ qq{ GET /t --- response_body true ---- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do --- no_error_log unhealthy TCP increment event: target status '(127.0.0.1:2120)' from 'true' to 'false' @@ -208,18 
+199,16 @@ qq{ }, unhealthy = { tcp_failures = 0, - http_failures = 0, + http_failures = 5, } } } }) ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2120, nil, true) - we.poll() checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "passive") checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "passive") checker:report_tcp_failure("127.0.0.1", 2120, nil, nil, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2120)) -- true } } @@ -227,9 +216,6 @@ qq{ GET /t --- response_body true ---- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do --- no_error_log unhealthy TCP increment event: target status '(127.0.0.1:2120)' from 'true' to 'false' diff --git a/t/08-report_timeout.t b/t/with_worker-events/08-report_timeout.t similarity index 94% rename from t/08-report_timeout.t rename to t/with_worker-events/08-report_timeout.t index 317e245f..fb61ea18 100644 --- a/t/08-report_timeout.t +++ b/t/with_worker-events/08-report_timeout.t @@ -3,7 +3,7 @@ use Cwd qw(cwd); workers(1); -plan tests => repeat_each() * 22; +plan tests => repeat_each() * 16; my $pwd = cwd(); @@ -70,12 +70,10 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2122, nil, true) local ok, err = checker:add_target("127.0.0.1", 2113, nil, true) - we.poll() checker:report_timeout("127.0.0.1", 2122, nil, "active") checker:report_timeout("127.0.0.1", 2113, nil, "passive") checker:report_timeout("127.0.0.1", 2122, nil, "active") checker:report_timeout("127.0.0.1", 2113, nil, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2122)) -- false ngx.say(checker:get_target_status("127.0.0.1", 2113)) -- false } @@ -86,8 +84,6 @@ GET /t false false --- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do unhealthy TIMEOUT increment (1/2) for '(127.0.0.1:2122)' unhealthy TIMEOUT increment (2/2) for '(127.0.0.1:2122)' event: target status '(127.0.0.1:2122)' from 'true' to 'false' @@ -146,11 +142,9 @@ qq{ }) ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2122, nil, true) - we.poll() checker:report_timeout("127.0.0.1", 2122, nil, "active") checker:report_timeout("127.0.0.1", 2122, nil, "active") checker:report_timeout("127.0.0.1", 2122, nil, "active") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2122)) -- true } } @@ -158,9 +152,6 @@ qq{ GET /t --- response_body true ---- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do --- no_error_log unhealthy TCP increment event: target status '(127.0.0.1:2122)' from 'true' to 'false' @@ -217,11 +208,9 @@ qq{ }) ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2122, nil, true) - we.poll() checker:report_timeout("127.0.0.1", 2122, nil, "passive") checker:report_timeout("127.0.0.1", 2122, nil, "passive") checker:report_timeout("127.0.0.1", 2122, nil, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2122)) -- true } } @@ -229,9 +218,6 @@ qq{ GET /t --- response_body true ---- error_log -checking healthy targets: nothing to do -checking unhealthy targets: nothing to do --- no_error_log unhealthy TCP increment event: target status '(127.0.0.1:2122)' from 'true' to 'false' diff --git a/t/09-active_probes.t b/t/with_worker-events/09-active_probes.t similarity index 73% rename from 
t/09-active_probes.t rename to t/with_worker-events/09-active_probes.t index 599f65d7..dd68faf4 100644 --- a/t/09-active_probes.t +++ b/t/with_worker-events/09-active_probes.t @@ -3,7 +3,7 @@ use Cwd qw(cwd); workers(1); -plan tests => repeat_each() * 56; +plan tests => repeat_each() * 59; my $pwd = cwd(); @@ -55,9 +55,9 @@ qq{ }, } }) + ngx.sleep(2) -- active healthchecks might take some time to start local ok, err = checker:add_target("127.0.0.1", 2114, nil, true) - we.poll() - ngx.sleep(0.5) -- wait for 5x the check interval + ngx.sleep(0.6) -- wait for 6x the check interval ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- false } } @@ -67,10 +67,10 @@ GET /t false --- error_log checking unhealthy targets: nothing to do -unhealthy HTTP increment (1/3) for '(127.0.0.1:2114)' -unhealthy HTTP increment (2/3) for '(127.0.0.1:2114)' -unhealthy HTTP increment (3/3) for '(127.0.0.1:2114)' -event: target status '(127.0.0.1:2114)' from 'true' to 'false' +unhealthy HTTP increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'true' to 'false' checking healthy targets: nothing to do @@ -112,8 +112,8 @@ qq{ } }) local ok, err = checker:add_target("127.0.0.1", 2114, nil, false) - we.poll() - ngx.sleep(0.5) -- wait for 5x the check interval + ngx.sleep(1) -- active healthchecks might take up to 1s to start + ngx.sleep(0.6) -- wait for 6x the check interval ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- true } } @@ -123,10 +123,10 @@ GET /t true --- error_log checking healthy targets: nothing to do -healthy SUCCESS increment (1/3) for '(127.0.0.1:2114)' -healthy SUCCESS increment (2/3) for '(127.0.0.1:2114)' -healthy SUCCESS increment (3/3) for '(127.0.0.1:2114)' -event: target status '(127.0.0.1:2114)' from 'false' to 'true' +healthy SUCCESS increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +healthy SUCCESS increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +healthy SUCCESS increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'false' to 'true' checking unhealthy targets: nothing to do === TEST 3: active probes, custom http status (regression test for pre-filled defaults) @@ -167,8 +167,7 @@ qq{ } }) local ok, err = checker:add_target("127.0.0.1", 2114, nil, true) - we.poll() - ngx.sleep(0.5) -- wait for 5x the check interval + ngx.sleep(0.6) -- wait for 6x the check interval ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- true } } @@ -180,10 +179,10 @@ true checking unhealthy targets: nothing to do --- no_error_log checking healthy targets: nothing to do -unhealthy HTTP increment (1/3) for '(127.0.0.1:2114)' -unhealthy HTTP increment (2/3) for '(127.0.0.1:2114)' -unhealthy HTTP increment (3/3) for '(127.0.0.1:2114)' -event: target status '(127.0.0.1:2114)' from 'true' to 'false' +unhealthy HTTP increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'true' to 'false' === TEST 4: active probes, custom http status, node failing @@ -223,9 +222,9 @@ qq{ }, } }) + ngx.sleep(2) -- active healthchecks might take some time to start local ok, err = checker:add_target("127.0.0.1", 2114, nil, true) - we.poll() - ngx.sleep(0.5) -- wait for 5x the check interval + ngx.sleep(0.6) -- wait for 6x the 
check interval ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- false } } @@ -235,10 +234,10 @@ GET /t false --- error_log checking unhealthy targets: nothing to do -unhealthy HTTP increment (1/3) for '(127.0.0.1:2114)' -unhealthy HTTP increment (2/3) for '(127.0.0.1:2114)' -unhealthy HTTP increment (3/3) for '(127.0.0.1:2114)' -event: target status '(127.0.0.1:2114)' from 'true' to 'false' +unhealthy HTTP increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy HTTP increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'true' to 'false' checking healthy targets: nothing to do @@ -285,9 +284,9 @@ qq{ }, } }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start local ok, err = checker:add_target("127.0.0.1", 2114, "example.com", false) - we.poll() - ngx.sleep(0.3) -- wait for 3x the check interval + ngx.sleep(0.2) -- wait for 2x the check interval ngx.say(checker:get_target_status("127.0.0.1", 2114, "example.com")) -- true } } @@ -297,7 +296,7 @@ GET /t true --- error_log event: target status 'example.com(127.0.0.1:2114)' from 'false' to 'true' -checking unhealthy targets: nothing to do +checking unhealthy targets: #1 === TEST 6: active probes, tcp node failing @@ -328,10 +327,10 @@ qq{ }, } }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start -- Note: no http server configured, so port 2114 remains unanswered local ok, err = checker:add_target("127.0.0.1", 2114, nil, true) - we.poll() - ngx.sleep(0.5) -- wait for 5x the check interval + ngx.sleep(0.6) -- wait for 6x the check interval ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- false } } @@ -341,10 +340,10 @@ GET /t false --- error_log checking unhealthy targets: nothing to do -unhealthy TCP increment (1/3) for '(127.0.0.1:2114)' -unhealthy TCP increment (2/3) for '(127.0.0.1:2114)' -unhealthy TCP increment (3/3) for '(127.0.0.1:2114)' -event: target status '(127.0.0.1:2114)' from 'true' to 'false' +unhealthy TCP increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy TCP increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +unhealthy TCP increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'true' to 'false' checking healthy targets: nothing to do @@ -385,9 +384,9 @@ qq{ }, } }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start local ok, err = checker:add_target("127.0.0.1", 2114, nil, false) - we.poll() - ngx.sleep(0.5) -- wait for 5x the check interval + ngx.sleep(0.6) -- wait for 6x the check interval ngx.say(checker:get_target_status("127.0.0.1", 2114)) -- true } } @@ -397,10 +396,10 @@ GET /t true --- error_log checking healthy targets: nothing to do -healthy SUCCESS increment (1/3) for '(127.0.0.1:2114)' -healthy SUCCESS increment (2/3) for '(127.0.0.1:2114)' -healthy SUCCESS increment (3/3) for '(127.0.0.1:2114)' -event: target status '(127.0.0.1:2114)' from 'false' to 'true' +healthy SUCCESS increment (1/3) for '127.0.0.1(127.0.0.1:2114)' +healthy SUCCESS increment (2/3) for '127.0.0.1(127.0.0.1:2114)' +healthy SUCCESS increment (3/3) for '127.0.0.1(127.0.0.1:2114)' +event: target status '127.0.0.1(127.0.0.1:2114)' from 'false' to 'true' checking unhealthy targets: nothing to do @@ -447,8 +446,8 @@ qq{ }, } }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start local ok, err = checker:add_target("127.0.0.1", 2114, "example.com", false, "custom-host.test") - we.poll() ngx.sleep(0.3) -- wait 
for 3x the check interval ngx.say(checker:get_target_status("127.0.0.1", 2114, "example.com")) -- true } @@ -462,3 +461,61 @@ event: target status 'example.com(127.0.0.1:2114)' from 'false' to 'true' checking unhealthy targets: nothing to do + +=== TEST 9: active probes, interval is respected +--- http_config eval +qq{ + $::HttpConfig + + server { + listen 2114; + location = /status { + access_by_lua_block { + ngx.sleep(0.3) + ngx.exit(200) + } + } + } +} +--- config + location = /t { + content_by_lua_block { + local we = require "resty.worker.events" + assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) + local healthcheck = require("resty.healthcheck") + local checker = healthcheck.new({ + test = true, + name = "testing", + shm_name = "test_shm", + type = "http", + checks = { + active = { + http_path = "/status", + healthy = { + interval = 1, + successes = 1, + }, + unhealthy = { + interval = 1, + http_failures = 1, + } + }, + } + }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start + local ok, err = checker:add_target("127.0.0.1", 2114, nil, true) + ngx.sleep(1) -- wait for the check interval + -- checker callback should not be called more than 5 times + if checker.checker_callback_count < 5 then + ngx.say("OK") + else + ngx.say("BAD") + end + } + } +--- request +GET /t +--- response_body +OK +--- no_error_log +[error] diff --git a/t/10-garbagecollect.t b/t/with_worker-events/10-garbagecollect.t similarity index 100% rename from t/10-garbagecollect.t rename to t/with_worker-events/10-garbagecollect.t diff --git a/t/11-clear.t b/t/with_worker-events/11-clear.t similarity index 53% rename from t/11-clear.t rename to t/with_worker-events/11-clear.t index 70f1407f..0ddb02d5 100644 --- a/t/11-clear.t +++ b/t/with_worker-events/11-clear.t @@ -3,7 +3,7 @@ use Cwd qw(cwd); workers(1); -plan tests => repeat_each() * 23; +plan tests => repeat_each() * 27; my $pwd = cwd(); @@ -100,7 +100,7 @@ initial target list (11 targets) checker1:add_target("127.0.0.1", 20000 + i, nil, false) end checker2:clear() - ngx.sleep(0.2) -- wait twice the interval + ngx.sleep(1) ngx.say(true) } } @@ -151,7 +151,7 @@ qq{ } local checker1 = healthcheck.new(config) checker1:add_target("127.0.0.1", 21120, nil, true) - ngx.sleep(0.3) -- wait 1.5x the interval + ngx.sleep(0.5) -- wait 2.5x the interval checker1:clear() checker1:add_target("127.0.0.1", 21120, nil, true) ngx.sleep(0.3) -- wait 1.5x the interval @@ -164,7 +164,119 @@ GET /t true --- error_log -unhealthy HTTP increment (1/3) for '(127.0.0.1:21120)' -unhealthy HTTP increment (2/3) for '(127.0.0.1:21120)' +unhealthy HTTP increment (1/3) for '127.0.0.1(127.0.0.1:21120)' +unhealthy HTTP increment (2/3) for '127.0.0.1(127.0.0.1:21120)' --- no_error_log unhealthy HTTP increment (3/3) for '(127.0.0.1:21120)' + + +=== TEST 4: delayed_clear() clears the list, after interval new checkers don't see it +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local we = require "resty.worker.events" + assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) + local healthcheck = require("resty.healthcheck") + local config = { + name = "testing", + shm_name = "test_shm", + checks = { + active = { + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + } + local checker1 = healthcheck.new(config) + for i = 1, 10 do + checker1:add_target("127.0.0.1", 10000 + i, nil, false) + end + ngx.sleep(0.2) -- wait twice the interval + ngx.say(checker1:get_target_status("127.0.0.1", 
10001)) + checker1:delayed_clear(0.2) + + local checker2 = healthcheck.new(config) + ngx.say(checker2:get_target_status("127.0.0.1", 10001)) + ngx.sleep(2.6) -- wait while the targets are cleared + local status, err = checker2:get_target_status("127.0.0.1", 10001) + if status ~= nil then + ngx.say(status) + else + ngx.say(err) + end + } + } +--- request +GET /t +--- response_body +false +false +target not found + +=== TEST 5: delayed_clear() would clear tgt list, but adding again keeps the previous status +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local we = require "resty.worker.events" + assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) + local healthcheck = require("resty.healthcheck") + local config = { + name = "testing", + shm_name = "test_shm", + checks = { + active = { + healthy = { + interval = 0.1 + }, + unhealthy = { + interval = 0.1 + } + } + } + } + local checker1 = healthcheck.new(config) + checker1:add_target("127.0.0.1", 10001, nil, false) + checker1:add_target("127.0.0.1", 10002, nil, false) + checker1:add_target("127.0.0.1", 10003, nil, false) + ngx.sleep(0.2) -- wait twice the interval + ngx.say(checker1:get_target_status("127.0.0.1", 10002)) + checker1:delayed_clear(0.2) + + local checker2 = healthcheck.new(config) + checker2:add_target("127.0.0.1", 10002, nil, true) + ngx.say(checker2:get_target_status("127.0.0.1", 10002)) + ngx.sleep(2.6) -- wait while the targets would be cleared + local status, err = checker2:get_target_status("127.0.0.1", 10001) + if status ~= nil then + ngx.say(status) + else + ngx.say(err) + end + status, err = checker2:get_target_status("127.0.0.1", 10002) + if status ~= nil then + ngx.say(status) + else + ngx.say(err) + end + status, err = checker2:get_target_status("127.0.0.1", 10003) + if status ~= nil then + ngx.say(status) + else + ngx.say(err) + end + } + } +--- request +GET /t +--- response_body +false +false +target not found +false +target not found diff --git a/t/12-set_target_status.t b/t/with_worker-events/12-set_target_status.t similarity index 95% rename from t/12-set_target_status.t rename to t/with_worker-events/12-set_target_status.t index 83364fe1..ff69adc0 100644 --- a/t/12-set_target_status.t +++ b/t/with_worker-events/12-set_target_status.t @@ -33,17 +33,11 @@ qq{ shm_name = "test_shm", }) ngx.sleep(0.1) -- wait for initial timers to run once - local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true - checker:set_target_status("127.0.0.1", 2112, nil, false) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- false - checker:set_target_status("127.0.0.1", 2112, nil, true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true } } @@ -77,18 +71,12 @@ qq{ } }) ngx.sleep(0.1) -- wait for initial timers to run once - local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true - checker:report_http_status("127.0.0.1", 2112, nil, 500) checker:report_http_status("127.0.0.1", 2112, nil, 500) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- false - checker:set_target_status("127.0.0.1", 2112, nil, true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true } } @@ -125,19 +113,13 @@ qq{ } }) ngx.sleep(0.1) -- wait for initial timers to run once - local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) - we.poll() 
ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true - checker:report_http_status("127.0.0.1", 2112, nil, 500) checker:set_target_status("127.0.0.1", 2112, nil, true) checker:report_http_status("127.0.0.1", 2112, nil, 500) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true - checker:report_http_status("127.0.0.1", 2112, nil, 500) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- false } } @@ -174,21 +156,14 @@ qq{ } }) ngx.sleep(0.1) -- wait for initial timers to run once - local ok, err = checker:add_target("127.0.0.1", 2112, nil, true) - we.poll() checker:set_target_status("127.0.0.1", 2112, nil, false) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- false - checker:report_http_status("127.0.0.1", 2112, nil, 200) checker:set_target_status("127.0.0.1", 2112, nil, false) checker:report_http_status("127.0.0.1", 2112, nil, 200) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- false - checker:report_http_status("127.0.0.1", 2112, nil, 200) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112)) -- true } } diff --git a/t/13-integration.t b/t/with_worker-events/13-integration.t similarity index 98% rename from t/13-integration.t rename to t/with_worker-events/13-integration.t index d650bea4..bc3549f3 100644 --- a/t/13-integration.t +++ b/t/with_worker-events/13-integration.t @@ -67,8 +67,6 @@ qq{ local ok, err = checker:add_target(host, port, nil, true) - we.poll() - -- S = successes counter -- F = http_failures counter -- T = tcp_failures counter @@ -114,9 +112,6 @@ qq{ -- that implements the specified behavior. local function run_test_case(case) assert(checker:set_target_status(host, port, nil, true)) - - we.poll() - local i = 1 local s, f, t, o = 0, 0, 0, 0 local mode = true @@ -155,8 +150,6 @@ qq{ --ngx.say(case, ": ", c, " ", string.format("%08x", ctr), " ", state) --ngx.log(ngx.DEBUG, case, ": ", c, " ", string.format("%08x", ctr), " ", state) - we.poll() - if checker:get_target_status(host, port, nil) ~= mode then ngx.say("failed: ", case, " step ", i, " expected ", mode) return false diff --git a/t/14-tls_active_probes.t b/t/with_worker-events/14-tls_active_probes.t similarity index 100% rename from t/14-tls_active_probes.t rename to t/with_worker-events/14-tls_active_probes.t diff --git a/t/15-get_virtualhost_target_status.t b/t/with_worker-events/15-get_virtualhost_target_status.t similarity index 97% rename from t/15-get_virtualhost_target_status.t rename to t/with_worker-events/15-get_virtualhost_target_status.t index f3222453..9bfbc92c 100644 --- a/t/15-get_virtualhost_target_status.t +++ b/t/with_worker-events/15-get_virtualhost_target_status.t @@ -58,20 +58,15 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2115, "ahostname", true) local ok, err = checker:add_target("127.0.0.1", 2115, "otherhostname", true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2115, "ahostname")) -- true ngx.say(checker:get_target_status("127.0.0.1", 2115, "otherhostname")) -- true checker:report_http_status("127.0.0.1", 2115, "otherhostname", 500, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2115, "otherhostname")) -- true checker:report_http_status("127.0.0.1", 2115, "otherhostname", 500, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2115, "otherhostname")) -- false checker:report_success("127.0.0.1", 2115, "otherhostname") - we.poll() 
ngx.say(checker:get_target_status("127.0.0.1", 2115, "otherhostname")) -- true checker:report_tcp_failure("127.0.0.1", 2115, "otherhostname") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2115, "otherhostname")) -- false ngx.say(checker:get_target_status("127.0.0.1", 2115, "ahostname")) -- true local _, err = checker:get_target_status("127.0.0.1", 2115) @@ -137,11 +132,9 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2116, "ahostname", true) local ok, err = checker:add_target("127.0.0.1", 2116, nil, true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2116, "ahostname")) -- true ngx.say(checker:get_target_status("127.0.0.1", 2116)) -- true checker:report_http_status("127.0.0.1", 2116, nil, 500, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2116, "ahostname")) -- true ngx.say(checker:get_target_status("127.0.0.1", 2116)) -- false } @@ -202,10 +195,10 @@ qq{ }, } }) + ngx.sleep(1) -- active healthchecks might take up to 1s to start local ok, err = checker:add_target("127.0.0.1", 2117, "healthyserver", true) local ok, err = checker:add_target("127.0.0.1", 2117, "unhealthyserver", true) - we.poll() - ngx.sleep(0.5) -- wait for 5x the check interval + ngx.sleep(0.6) -- wait for 6x the check interval ngx.say(checker:get_target_status("127.0.0.1", 2117, "healthyserver")) -- true ngx.say(checker:get_target_status("127.0.0.1", 2117, "unhealthyserver")) -- false local _, err = checker:get_target_status("127.0.0.1", 2117) @@ -268,19 +261,16 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once local ok, err = checker:add_target("127.0.0.1", 2118, "127.0.0.1", true) local ok, err = checker:add_target("127.0.0.1", 2119, nil, true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2118, "127.0.0.1")) -- true ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- true ngx.say(checker:get_target_status("127.0.0.1", 2118)) -- true ngx.say(checker:get_target_status("127.0.0.1", 2119, "127.0.0.1")) -- true checker:report_http_status("127.0.0.1", 2118, nil, 500, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2118, "127.0.0.1")) -- false ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- true ngx.say(checker:get_target_status("127.0.0.1", 2118)) -- false ngx.say(checker:get_target_status("127.0.0.1", 2119, "127.0.0.1")) -- true checker:report_http_status("127.0.0.1", 2119, "127.0.0.1", 500, "passive") - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2118, "127.0.0.1")) -- false ngx.say(checker:get_target_status("127.0.0.1", 2119)) -- false ngx.say(checker:get_target_status("127.0.0.1", 2118)) -- false diff --git a/t/16-set_all_target_statuses_for_hostname.t b/t/with_worker-events/16-set_all_target_statuses_for_hostname.t similarity index 96% rename from t/16-set_all_target_statuses_for_hostname.t rename to t/with_worker-events/16-set_all_target_statuses_for_hostname.t index 6600dced..ffa9256f 100644 --- a/t/16-set_all_target_statuses_for_hostname.t +++ b/t/with_worker-events/16-set_all_target_statuses_for_hostname.t @@ -35,15 +35,12 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once checker:add_target("127.0.0.1", 2112, "rush", true) checker:add_target("127.0.0.2", 2112, "rush", true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true checker:set_all_target_statuses_for_hostname("rush", 2112, false) - we.poll() 
ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- false ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- false checker:set_all_target_statuses_for_hostname("rush", 2112, true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true } @@ -83,15 +80,12 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once checker:add_target("127.0.0.1", 2112, "rush", true) checker:add_target("127.0.0.2", 2112, "rush", true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true checker:report_http_status("127.0.0.1", 2112, "rush", 500) checker:report_http_status("127.0.0.1", 2112, "rush", 500) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- false checker:set_all_target_statuses_for_hostname("rush", 2112, true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true } @@ -133,17 +127,14 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once checker:add_target("127.0.0.1", 2112, "rush", true) checker:add_target("127.0.0.2", 2112, "rush", true) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true checker:report_http_status("127.0.0.1", 2112, "rush", 500) checker:set_all_target_statuses_for_hostname("rush", 2112, true) checker:report_http_status("127.0.0.1", 2112, "rush", 500) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true checker:report_http_status("127.0.0.1", 2112, "rush", 500) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- false ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- true } @@ -186,19 +177,15 @@ qq{ ngx.sleep(0.1) -- wait for initial timers to run once checker:add_target("127.0.0.1", 2112, "rush", true) checker:add_target("127.0.0.2", 2112, "rush", true) - we.poll() checker:set_all_target_statuses_for_hostname("rush", 2112, false) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- false ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- false checker:report_http_status("127.0.0.1", 2112, "rush", 200) checker:set_all_target_statuses_for_hostname("rush", 2112, false) checker:report_http_status("127.0.0.1", 2112, "rush", 200) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- false ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- false checker:report_http_status("127.0.0.1", 2112, "rush", 200) - we.poll() ngx.say(checker:get_target_status("127.0.0.1", 2112, "rush")) -- true ngx.say(checker:get_target_status("127.0.0.2", 2112, "rush")) -- false } diff --git a/t/17-mtls.t b/t/with_worker-events/17-mtls.t similarity index 90% rename from t/17-mtls.t rename to t/with_worker-events/17-mtls.t index 21166d64..5883cc9d 100644 --- a/t/17-mtls.t +++ b/t/with_worker-events/17-mtls.t @@ -29,8 +29,8 @@ qq{ assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) local pl_file = require "pl.file" - local cert = pl_file.read("t/util/cert.pem", true) - local key = pl_file.read("t/util/key.pem", true) + local cert = pl_file.read("t/with_worker-events/util/cert.pem", true) + local key = 
pl_file.read("t/with_worker-events/util/key.pem", true) local healthcheck = require("resty.healthcheck") local checker = healthcheck.new({ @@ -85,8 +85,8 @@ qq{ local pl_file = require "pl.file" local ssl = require "ngx.ssl" - local cert = ssl.parse_pem_cert(pl_file.read("t/util/cert.pem", true)) - local key = ssl.parse_pem_priv_key(pl_file.read("t/util/key.pem", true)) + local cert = ssl.parse_pem_cert(pl_file.read("t/with_worker-events/util/cert.pem", true)) + local key = ssl.parse_pem_priv_key(pl_file.read("t/with_worker-events/util/key.pem", true)) local healthcheck = require("resty.healthcheck") local checker = healthcheck.new({ diff --git a/t/20-req-headers.t b/t/with_worker-events/18-req-headers.t similarity index 100% rename from t/20-req-headers.t rename to t/with_worker-events/18-req-headers.t diff --git a/t/18-event_handler.t b/t/with_worker-events/20-event_handler.t similarity index 100% rename from t/18-event_handler.t rename to t/with_worker-events/20-event_handler.t diff --git a/t/with_worker-events/21-run_locked.t b/t/with_worker-events/21-run_locked.t new file mode 100644 index 00000000..aae19330 --- /dev/null +++ b/t/with_worker-events/21-run_locked.t @@ -0,0 +1,347 @@ +use Test::Nginx::Socket::Lua; +use Cwd qw(cwd); + +workers(1); + +plan tests => repeat_each() * (blocks() * 3) + 1; + +my $pwd = cwd(); + +our $HttpConfig = qq{ + lua_package_path "$pwd/lib/?.lua;;"; + lua_shared_dict test_shm 8m; + lua_shared_dict my_worker_events 8m; + + init_worker_by_lua_block { + local we = require "resty.worker.events" + + assert(we.configure{ shm = "my_worker_events", interval = 0.1 }) + + _G.__TESTING_HEALTHCHECKER = true + + local healthcheck = require("resty.healthcheck") + + _G.checker = assert(healthcheck.new({ + name = "testing", + shm_name = "test_shm", + checks = { + active = { + healthy = { + interval = 0, + }, + unhealthy = { + interval = 0, + } + } + } + })) + + checker._set_lock_timeout(1) + } +}; + +run_tests(); + +__DATA__ + +=== TEST 1: run_locked() runs a function immediately and returns its result +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local checker = _G.checker + + local flag = false + local ok, err = checker:_run_locked("key", function() + flag = true + return "OK" + end) + + ngx.say(ok) + ngx.say(err) + ngx.say(flag) + } + } +--- request +GET /t +--- response_body +OK +nil +true +--- no_error_log +[error] + + + +=== TEST 2: run_locked() can run a function immediately in an non-yieldable phase if no lock is held +--- http_config eval: $::HttpConfig +--- config + location = /t { + set_by_lua_block $test { + local checker = _G.checker + local value + local ok, err = checker:_run_locked("key", function() + value = "SET" + return "OK" + end) + + if not ok then + ngx.log(ngx.ERR, "run_locked failed: ", err) + return + end + + ngx.ctx.ok = ok + return value + } + + content_by_lua_block { + ngx.say(ngx.ctx.ok) + ngx.say(ngx.var.test) + } + } +--- request +GET /t +--- response_body +OK +SET +--- no_error_log +[error] + + + +=== TEST 3: run_locked() schedules a function in a timer if a lock cannot be acquired during a non-yieldable phase +--- http_config eval: $::HttpConfig +--- config + location = /t { + set_by_lua_block $test { + local checker = _G.checker + + local key = "my_lock_key" + + local resty_lock = require "resty.lock" + local lock = assert(resty_lock:new(checker.shm_name)) + assert(lock:lock(key)) + ngx.ctx.lock = lock + + local t = {} + ngx.ctx.t = t + + local ok, err = checker:_run_locked(key, function() 
+ t.flag = true + t.phase = ngx.get_phase() + return true + end) + + assert(err == nil, "expected no error") + assert(ok == "scheduled", "expected the function to be scheduled") + } + + content_by_lua_block { + assert(ngx.ctx.lock:unlock()) + + local t = ngx.ctx.t + + for i = 1, 10 do + if t.flag then + break + end + ngx.sleep(0.25) + end + + ngx.say(t.phase or "none") + ngx.say(t.flag or "timeout") + } + } +--- request +GET /t +--- response_body +timer +true +--- no_error_log +[error] + + + +=== TEST 4: run_locked() doesn't schedule a function in a timer during a yieldable phase +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local checker = _G.checker + + local key = "my_lock_key" + + local resty_lock = require "resty.lock" + local lock = assert(resty_lock:new(checker.shm_name)) + assert(lock:lock(key)) + + local flag = false + local ok, err = checker:_run_locked(key, function() + flag = true + return true + end) + + ngx.say(ok) + ngx.say(err) + ngx.say(flag) + } + } +--- request +GET /t +--- response_body +nil +failed acquiring lock for 'my_lock_key', timeout +false +--- no_error_log +[error] + + + +=== TEST 5: run_locked() handles function exceptions +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local checker = _G.checker + + local ok, err = checker:_run_locked("key", function() + error("oh no!") + return true + end) + + -- remove "content_by_lua(nginx.conf:)" context and such from + -- the error string so that our test is a little more stable + err = err:gsub(" content_by_lua[^ ]+", "") + + ngx.say(ok) + ngx.say(err) + } + } +--- request +GET /t +--- response_body +nil +locked function threw an exception: oh no! +--- no_error_log +[error] + + + +=== TEST 6: run_locked() returns errors from the locked function +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local checker = _G.checker + + local ok, err = checker:_run_locked("key", function() + return nil, "I've failed you" + end) + + ngx.say(ok) + ngx.say(err) + } + } +--- request +GET /t +--- response_body +nil +I've failed you +--- no_error_log +[error] + + + +=== TEST 7: run_locked() logs errors/exceptions from scheduled functions +--- http_config eval: $::HttpConfig +--- config + location = /t { + set_by_lua_block $test { + local checker = _G.checker + + local key = "my_lock_key" + + local resty_lock = require "resty.lock" + local lock = assert(resty_lock:new(checker.shm_name)) + assert(lock:lock(key)) + ngx.ctx.lock = lock + + local t = { count = 0 } + ngx.ctx.t = t + + local ok, err = checker:_run_locked(key, function() + t.count = t.count + 1 + error("LOCK EXCEPTION") + end) + + assert(err == nil, "expected no error") + assert(ok == "scheduled", "expected the function to be scheduled") + + local ok, err = checker:_run_locked(key, function() + t.count = t.count + 1 + return nil, "LOCK ERROR" + end) + + assert(err == nil, "expected no error") + assert(ok == "scheduled", "expected the function to be scheduled") + + local ok, err = checker:_run_locked(key, function() + t.count = t.count + 1 + return true + end) + + assert(err == nil, "expected no error") + assert(ok == "scheduled", "expected the function to be scheduled") + } + + content_by_lua_block { + assert(ngx.ctx.lock:unlock()) + + local t = ngx.ctx.t + + for i = 1, 10 do + if t.count >= 3 then + break + end + ngx.sleep(0.25) + end + + ngx.say(t.count) + } + } +--- request +GET /t +--- response_body +3 +--- error_log +LOCK ERROR +LOCK 
EXCEPTION + + + +=== TEST 8: run_locked() passes any/all arguments to the locked function +--- http_config eval: $::HttpConfig +--- config + location = /t { + content_by_lua_block { + local checker = _G.checker + + local sum = 0 + local ok, err = checker:_run_locked("key", function(a, b, c) + sum = sum + a + b + c + return true + end, 1, 2, 3) + + ngx.say(ok) + ngx.say(err) + ngx.say(sum) + } + } +--- request +GET /t +--- response_body +true +nil +6 +--- no_error_log +[error] diff --git a/t/with_worker-events/util/cert.pem b/t/with_worker-events/util/cert.pem new file mode 100644 index 00000000..2df6a75a --- /dev/null +++ b/t/with_worker-events/util/cert.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDCTCCAfGgAwIBAgIUWWntedJ1yLAJE2baK/Mg06osmGAwDQYJKoZIhvcNAQEL +BQAwFDESMBAGA1UECgwJS29uZyBJbmMuMB4XDTIwMDQyMzIwMjcwMFoXDTMwMDQy +MTIwMjcwMFowFDESMBAGA1UECgwJS29uZyBJbmMuMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAvVBrEH34MzwKlkBapiNyXr9huSShuojy+7i/01BSFng3 +1TiejXJ3pEjykZqt7ENkZ6+BTYUdb9klK221yXiSyX71x97O0WHHuhH/m4XwGiIH +YPBHdg+ExdMRflXgwtlW3of2hTWxkPkPQDPhoSQVMc5DkU7EOgrTxkv1rUWVAed4 +gSK4IT2AkhKwOSkewZANj2bnK5Evf71ACyJd7IQbJAIYoKBwRJAUXJMA7XAreIB+ +nEr9whNYTklhB4aEa2wtOQuiQubIMJzdOryEX5nufH+tL4p1QKhRPFAqqtJ2Czgw +YZY/v9IrThl19r0nL7FIvxFDNIMeOamJxDLQqsh9NwIDAQABo1MwUTAdBgNVHQ4E +FgQU9t6YAdQ5mOXeqvptN5l3yYZGibEwHwYDVR0jBBgwFoAU9t6YAdQ5mOXeqvpt +N5l3yYZGibEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAhi83 +aXsfJGqr9Zb1guWxbI8uKoG6o88ptXjV2c6dJnxXag0A/Rj+bX2bcPkN2kvQksNl +MBUQlniOydZfsBUAoC0V7yyGUv9eO2RIeFnnNpRXNu+n+Kg2bvgvu8BKNNNOASZv ++Vmzvo9lbfhS9MNAxYk9eTiPNUZ3zn2RfFyT6YWWJbRjk//EAlchyud3XGug9/hw +c05dtzWEYT8GdzMd+Y1/2kR5r/CapSj7GEqL5T3+zDIfjbhTokV7WBrw6og2avoZ +vzrF8xWucry5/2mKQbRxMyCtKYUKTcoLzF4HrNQCETm0n9qUODrHER7Wit9fQFZX +1GEA3BkX2tsbIVVaig== +-----END CERTIFICATE----- diff --git a/t/with_worker-events/util/key.pem b/t/with_worker-events/util/key.pem new file mode 100644 index 00000000..ae945f44 --- /dev/null +++ b/t/with_worker-events/util/key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC9UGsQffgzPAqW +QFqmI3Jev2G5JKG6iPL7uL/TUFIWeDfVOJ6NcnekSPKRmq3sQ2Rnr4FNhR1v2SUr +bbXJeJLJfvXH3s7RYce6Ef+bhfAaIgdg8Ed2D4TF0xF+VeDC2Vbeh/aFNbGQ+Q9A +M+GhJBUxzkORTsQ6CtPGS/WtRZUB53iBIrghPYCSErA5KR7BkA2PZucrkS9/vUAL +Il3shBskAhigoHBEkBRckwDtcCt4gH6cSv3CE1hOSWEHhoRrbC05C6JC5sgwnN06 +vIRfme58f60vinVAqFE8UCqq0nYLODBhlj+/0itOGXX2vScvsUi/EUM0gx45qYnE +MtCqyH03AgMBAAECggEAA1hWa/Yt2onnDfyZHXJm5PGwwlq5WNhuorADA7LZoHgD +VIspkgpBvu9jCduX0yLltUdOm5YMjRtjIr9PhP3SaikKIrv3H5AAvXLv90mIko2j +X70fJiDkEbLHDlpqHEdG16vDWVs3hf5AnLvN8tD2ZujkHL8tjHEAiPJyptsh5OSw +XaltCD67U940XXJ89x0zFZ/3RoRk78wX3ELz7/dY0cMnslMavON+LYTq9hQZyVmm +nOhZICWerKjax4t5f9PZ/zM6IhEVrUhw2WrC31tgRo+ITCIA/nkKid8vNhkiLVdw +jTyAYDLgYW7K8/zVrzmV9TOr3CaZHLQxnF/LMpIEAQKBgQDjnA/G4g2mDD7lsqU1 +N3it87v2VBnZPFNW6L17Qig+2BDTXg1kadFBlp8qtEJI+H5axVSmzsrlmATJVhUK +iYOQwiEsQnt4tGmWZI268NAIUtv0TX0i9yscsezmvGABMcyBCF7ZwFhUfhy0pn1t +kzmbYN4AjYdcisCnSusoMD92NwKBgQDU7YVNuieMIZCIuSxG61N1+ZyX3Ul5l6KU +m1xw1PZvugqXnQlOLV/4Iaz86Vvlt2aDqTWO/iv4LU7ixNdhRtxFIU/b2a8DzDOw +ijhzMGRJqJOdi1NfciiIWHyrjRmGbhCgm784vqV7qbQomiIsjgnDvjoZkossZMiJ +63vs7huxAQKBgQDiQjT8w6JFuk6cD+Zi7G2unmfvCtNXO7ys3Fffu3g+YJL5SrmN +ZBN8W7qFvQNXfo48tYTc/Rx8941qh4QLIYAD2rcXRE9xQgbkVbj+aHykiZnVVWJb +69CTidux0vist1BPxH5lf+tOsr7eZdKxpnTRnI2Thx1URSoWI0d4f93WKQKBgBXn +kW0bl3HtCgdmtU1ebCmY0ik1VJezp8AN84aQAgIga3KJbymhtVu7ayZhg1iwc1Vc +FOxu7WsMji75/QY+2e4qrSJ61GxZl3+z2HbRJaAGPZlZeew5vD26jKjBTTztGbzM 
+CPH3euKr5KLAqH9Y5VxDt4pl7vdULuUxWoBXRnYBAoGAHIFMYiCdXETtrFHKVTzc +vm4P24PnsNHoDTGMXPeRYRKF2+3VEJrwp1Q3fue4Go4zFB8I6nhNVIbh4dIHxFab +hyxZvGWGUgRvTvD4VYn/YHVoSf2/xNZ0r/S2LKomp+jwoWKfukbCoDjAOWvnK5iD +o41Tn0yhzBdnrYguKznGR3g= +-----END PRIVATE KEY----- diff --git a/t/with_worker-events/util/reindex b/t/with_worker-events/util/reindex new file mode 100755 index 00000000..77ae5484 --- /dev/null +++ b/t/with_worker-events/util/reindex @@ -0,0 +1,27 @@ +#!/usr/bin/env lua + +if not arg[1] then + io.stderr:write("Usage: "..arg[0].." t/*.t\n") + os.exit(1) +end + +for _, name in ipairs(arg) do + local i = 1 + local fd = io.open(name, "r") + if fd then + local new = name.."~" + local out = io.open(new, "w") + for line in fd:lines() do + local test, n, desc = line:match("^(===%s*TEST%s*)(%d+)(.*)$") + if test then + out:write(test .. tostring(i) .. desc .. "\n") + i = i + 1 + else + out:write(line .. "\n") + end + end + out:close() + fd:close() + os.execute("mv " .. new .. " " .. name) + end +end

    checker:clear () Clear all healthcheck data.
    checker:delayed_clear (delay) Clear all healthcheck data after a period of time.
    checker:get_target_status (ip, port, hostname) Get the current status of the target.
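
The three entries above are API index fragments for the checker methods exercised by the relocated tests. For orientation, the following is a condensed, non-authoritative sketch of how `delayed_clear()` and `get_target_status()` interact, adapted from TEST 4 of t/with_worker-events/11-clear.t in the diff above; it assumes the usual test fixtures (the `test_shm` and `my_worker_events` shared dicts and a configured `resty.worker.events` instance) are already in place.

```lua
-- Condensed sketch based on TEST 4 of 11-clear.t; not a verbatim excerpt.
local healthcheck = require "resty.healthcheck"

local config = {
  name = "testing",
  shm_name = "test_shm",
  checks = {
    active = {
      healthy   = { interval = 0.1 },
      unhealthy = { interval = 0.1 },
    },
  },
}

local checker1 = healthcheck.new(config)
checker1:add_target("127.0.0.1", 10001, nil, false)       -- register as unhealthy
ngx.sleep(0.2)                                            -- let the initial timers run
ngx.say(checker1:get_target_status("127.0.0.1", 10001))   -- false

checker1:delayed_clear(0.2)                               -- clear, but only after 0.2s

local checker2 = healthcheck.new(config)                  -- a new checker still sees the target
ngx.say(checker2:get_target_status("127.0.0.1", 10001))   -- false

ngx.sleep(2.6)                                            -- wait for the delayed clear to land
local status, err = checker2:get_target_status("127.0.0.1", 10001)
if status ~= nil then
  ngx.say(status)
else
  ngx.say(err)                                            -- "target not found"
end
```

As TEST 5 in the same file shows, re-adding a target before the delay expires cancels its removal and preserves its previous status, whereas a plain `clear()` drops all targets immediately.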