Skip to content

Commit

Permalink
Merge remote-tracking branch 'origin/main' into feat/read-replicas
Browse files Browse the repository at this point in the history
# Conflicts:
#	VERSION
#	lib/supavisor.ex
#	lib/supavisor/client_handler.ex
  • Loading branch information
abc3 committed Sep 27, 2023
2 parents 83fea47 + 29e5f8b commit 3688a93
Show file tree
Hide file tree
Showing 12 changed files with 128 additions and 31 deletions.
46 changes: 46 additions & 0 deletions .github/workflows/prod.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
# Builds a Mix release on pushes to the `release` branch and publishes the
# resulting tarball to the production S3 bucket (picked up by region hosts).
name: Publish upgrade artifacts to production

on:
  push:
    branches:
      - release

env:
  # ERTS is provided on the target hosts, so it is excluded from the release.
  INCLUDE_ERTS: false
  MIX_ENV: prod

jobs:
  publish:
    runs-on: ubuntu-latest
    permissions:
      contents: write
      packages: write
      # id-token is required for the OIDC-based AWS role assumption below.
      id-token: write
    steps:
      - uses: actions/checkout@v4
      - name: Setup elixir
        id: beam
        uses: erlef/setup-beam@v1
        with:
          otp-version: 25.3.2 # Define the OTP version [required]
          elixir-version: 1.14.5 # Define the elixir version [required]
          version-type: strict
      - name: Cache Mix
        uses: actions/cache@v3
        with:
          path: deps
          # Cache key is derived from mix.lock so dependency bumps invalidate it.
          key: ${{ runner.os }}-mix-${{ hashFiles(format('{0}{1}', github.workspace, '/mix.lock')) }}
          restore-keys: |
            ${{ runner.os }}-mix-
      - name: Install dependencies
        run: mix deps.get
      - name: Make release
        run: mix release supavisor
      - name: Create tarball
        # Tarball name embeds target regions, the VERSION file contents, and a
        # unix timestamp so successive builds never collide.
        run: cd _build/prod/rel/ && tar -czvf ${{ secrets.TARBALL_REGIONS_PROD }}_supavisor_v$(cat ../../../VERSION)_$(date "+%s").tar.gz supavisor
      - name: configure aws credentials - production
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: ${{ secrets.PROD_AWS_ROLE }}
          aws-region: "us-east-1"
      - name: Deploy to S3
        shell: bash
        # Sync only the generated tarballs; everything else under rel/ is excluded.
        run: aws s3 sync ./_build/prod/rel/ ${{ secrets.TARBALLS_PATH_PROD }} --exclude '*' --include '*tar.gz'
6 changes: 3 additions & 3 deletions .github/workflows/stage.yml
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ jobs:
packages: write
id-token: write
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Setup elixir
id: beam
uses: erlef/setup-beam@v1
Expand All @@ -24,7 +24,7 @@ jobs:
elixir-version: 1.14.5 # Define the elixir version [required]
version-type: strict
- name: Cache Mix
uses: actions/cache@v1
uses: actions/cache@v3
with:
path: deps
key: ${{ runner.os }}-mix-${{ hashFiles(format('{0}{1}', github.workspace, '/mix.lock')) }}
Expand All @@ -37,7 +37,7 @@ jobs:
- name: Create tarball
run: cd _build/prod/rel/ && tar -czvf ${{ secrets.TARBALL_REGIONS_STAGE }}_supavisor_v$(cat ../../../VERSION)_$(date "+%s").tar.gz supavisor
- name: configure aws credentials - staging
uses: aws-actions/configure-aws-credentials@v1
uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: ${{ secrets.DEV_AWS_ROLE }}
aws-region: "us-east-1"
Expand Down
6 changes: 3 additions & 3 deletions .github/workflows/staging_linter.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,15 +11,15 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Setup elixir
id: beam
uses: erlef/setup-beam@v1
with:
otp-version: 25.x # Define the OTP version [required]
elixir-version: 1.14.x # Define the elixir version [required]
- name: Cache Mix
uses: actions/cache@v1
uses: actions/cache@v3
with:
path: deps
key: ${{ runner.os }}-mix-${{ hashFiles(format('{0}{1}', github.workspace, '/mix.lock')) }}
Expand All @@ -37,7 +37,7 @@ jobs:
- name: Credo checks
run: mix credo --strict --mute-exit-status
- name: Retrieve PLT Cache
uses: actions/cache@v1
uses: actions/cache@v3
id: plt-cache
with:
path: priv/plts
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/version_updated.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ jobs:
name: Bump version
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Verify Versions Updated
uses: tj-actions/changed-files@v35
id: verify_changed_files
Expand Down
2 changes: 1 addition & 1 deletion VERSION
Original file line number Diff line number Diff line change
@@ -1 +1 @@
0.9.5
0.9.7
15 changes: 9 additions & 6 deletions lib/supavisor.ex
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ defmodule Supavisor do
@type secrets :: {:password | :auth_query, fun()}
@type mode :: :transaction | :session
@type id :: {{:single | :cluster, String.t()}, String.t(), mode}
@type subscribe_opts :: %{workers: workers, ps: list, idle_timeout: integer}

@registry Supavisor.Registry.Tenants

Expand Down Expand Up @@ -49,18 +50,18 @@ defmodule Supavisor do
end
end

@spec subscribe_local(pid, id) :: {:ok, workers, iodata()} | {:error, any()}
def subscribe_local(pid, id) do
@spec subscribe_local(pid, id) :: {:ok, subscribe_opts} | {:error, any()}
def(subscribe_local(pid, id)) do
with {:ok, workers} <- get_local_workers(id),
{:ok, ps} <- Manager.subscribe(workers.manager, pid) do
{:ok, workers, ps}
{:ok, ps, idle_timeout} <- Manager.subscribe(workers.manager, pid) do
{:ok, %{workers: workers, ps: ps, idle_timeout: idle_timeout}}
else
error ->
error
end
end

@spec subscribe(pid, id, pid) :: {:ok, workers, iodata()} | {:error, any()}
@spec subscribe(pid, id, pid) :: {:ok, subscribe_opts} | {:error, any()}
def subscribe(sup, id, pid \\ self()) do
dest_node = node(sup)

Expand Down Expand Up @@ -216,6 +217,7 @@ defmodule Supavisor do
ip_version: ip_ver,
default_pool_size: def_pool_size,
default_max_clients: def_max_clients,
client_idle_timeout: client_idle_timeout,
replica_type: replica_type,
users: [
%{
Expand Down Expand Up @@ -258,7 +260,8 @@ defmodule Supavisor do
pool_size: pool_size,
mode: mode,
default_parameter_status: ps,
max_clients: max_clients
max_clients: max_clients,
client_idle_timeout: client_idle_timeout
}
end

Expand Down
2 changes: 1 addition & 1 deletion lib/supavisor/application.ex
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ defmodule Supavisor.Application do
%{
max_connections: String.to_integer(System.get_env("MAX_CONNECTIONS") || "25000"),
num_acceptors: String.to_integer(System.get_env("NUM_ACCEPTORS") || "100"),
socket_opts: [port: port]
socket_opts: [port: port, keepalive: true]
},
Supavisor.ClientHandler,
%{mode: mode}
Expand Down
57 changes: 46 additions & 11 deletions lib/supavisor/client_handler.ex
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@ defmodule Supavisor.ClientHandler do
proxy_type: nil,
mode: opts.mode,
stats: %{},
idle_timeout: 0,
last_query: nil
}

Expand Down Expand Up @@ -193,17 +194,17 @@ defmodule Supavisor.ClientHandler do
Logger.debug("Subscribe to tenant #{inspect(data.id)}")

with {:ok, sup} <- Supavisor.start(data.id, data.auth_secrets),
{:ok, workers, ps} <- Supavisor.subscribe(sup, data.id) do
Process.monitor(workers.manager)
data = Map.merge(data, workers)
{:ok, opts} <- Supavisor.subscribe(sup, data.id) do
Process.monitor(opts.workers.manager)
data = Map.merge(data, opts.workers)
db_pid = db_checkout(nil, :on_connect, data)
data = %{data | db_pid: db_pid}
data = %{data | db_pid: db_pid, idle_timeout: opts.idle_timeout}

next =
if ps == [] do
if opts.ps == [] do
{:timeout, 10_000, :wait_ps}
else
{:next_event, :internal, {:greetings, ps}}
{:next_event, :internal, {:greetings, opts.ps}}
end

{:keep_state, data, next}
Expand All @@ -222,7 +223,12 @@ defmodule Supavisor.ClientHandler do

def handle_event(:internal, {:greetings, ps}, _, %{sock: sock} = data) do
:ok = sock_send(sock, Server.greetings(ps))
{:next_state, :idle, data}

if data.idle_timeout > 0 do
{:next_state, :idle, data, idle_check(data.idle_timeout)}
else
{:next_state, :idle, data}
end
end

def handle_event(:timeout, :subscribe, _, _) do
Expand All @@ -236,10 +242,27 @@ defmodule Supavisor.ClientHandler do
{:keep_state_and_data, {:next_event, :internal, {:greetings, ps}}}
end

# ignore termination messages
def handle_event(:info, {proto, _, <<?X, 4::32>>}, _, _) when proto in [:tcp, :ssl] do
def handle_event(:timeout, :idle_terminate, _, data) do
Logger.warning("Terminate an idle connection by #{data.idle_timeout} timeout")
{:stop, :normal, data}
end

# handle Terminate message
def handle_event(:info, {proto, _, <<?X, 4::32>>}, :idle, data) when proto in [:tcp, :ssl] do
Logger.debug("Receive termination")
:keep_state_and_data
{:stop, :normal, data}
end

# handle Sync message
def handle_event(:info, {proto, _, <<?S, 4::32>>}, :idle, data) when proto in [:tcp, :ssl] do
Logger.debug("Receive sync")
:ok = sock_send(data.sock, Server.ready_for_query())

if data.idle_timeout > 0 do
{:keep_state_and_data, idle_check(data.idle_timeout)}
else
:keep_state_and_data
end
end

# incoming query with a single pool
Expand Down Expand Up @@ -365,7 +388,14 @@ defmodule Supavisor.ClientHandler do
{_, stats} = Telem.network_usage(:client, data.sock, data.id, data.stats)
Telem.client_query_time(data.query_start, data.id)
reply = {:reply, from, sock_send(data.sock, bin)}
{:next_state, :idle, %{data | db_pid: db_pid, stats: stats, last_query: nil}, reply}
actions =
if data.idle_timeout > 0 do
[reply, idle_check(data.idle_timeout)]
else
reply
end

{:next_state, :idle, %{data | db_pid: db_pid, stats: stats}, actions}

:continue ->
Logger.debug("Client is not ready")
Expand Down Expand Up @@ -725,4 +755,9 @@ defmodule Supavisor.ClientHandler do
end

def try_get_sni(_), do: nil

# Builds a :gen_statem timeout action that fires :idle_terminate after
# `timeout` ms of client inactivity; handled by the :idle_terminate
# handle_event clause, which stops the connection.
@spec idle_check(non_neg_integer) :: {:timeout, non_neg_integer, :idle_terminate}
defp idle_check(timeout) do
  {:timeout, timeout, :idle_terminate}
end
end
9 changes: 5 additions & 4 deletions lib/supavisor/manager.ex
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ defmodule Supavisor.Manager do
GenServer.start_link(__MODULE__, args, name: name)
end

@spec subscribe(pid, pid) :: {:ok, iodata() | []} | {:error, :max_clients_reached}
@spec subscribe(pid, pid) :: {:ok, iodata() | [], integer} | {:error, :max_clients_reached}
def subscribe(manager, pid) do
GenServer.call(manager, {:subscribe, pid})
end
Expand Down Expand Up @@ -45,7 +45,8 @@ defmodule Supavisor.Manager do
parameter_status: [],
wait_ps: [],
default_parameter_status: args.default_parameter_status,
max_clients: args.max_clients
max_clients: args.max_clients,
idle_timeout: args.client_idle_timeout
}

{{type, tenant}, user, _mode} = args.id
Expand All @@ -66,10 +67,10 @@ defmodule Supavisor.Manager do

case state.parameter_status do
[] ->
{{:ok, []}, update_in(state.wait_ps, &[pid | &1])}
{{:ok, [], state.idle_timeout}, update_in(state.wait_ps, &[pid | &1])}

ps ->
{{:ok, ps}, state}
{{:ok, ps, state.idle_timeout}, state}
end
else
{{:error, :max_clients_reached}, state}
Expand Down
4 changes: 3 additions & 1 deletion lib/supavisor/tenants/tenant.ex
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ defmodule Supavisor.Tenants.Tenant do
field(:default_pool_size, :integer, default: 15)
field(:sni_hostname, :string)
field(:default_max_clients, :integer, default: 1000)
field(:client_idle_timeout, :integer, default: 0)

has_many(:users, User,
foreign_key: :tenant_external_id,
Expand Down Expand Up @@ -55,7 +56,8 @@ defmodule Supavisor.Tenants.Tenant do
:auth_query,
:default_pool_size,
:sni_hostname,
:default_max_clients
:default_max_clients,
:client_idle_timeout
])
|> check_constraint(:upstream_ssl, name: :upstream_constraints, prefix: "_supavisor")
|> check_constraint(:upstream_verify, name: :upstream_constraints, prefix: "_supavisor")
Expand Down
1 change: 1 addition & 0 deletions lib/supavisor_web/views/tenant_view.ex
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ defmodule SupavisorWeb.TenantView do
auth_query: tenant.auth_query,
sni_hostname: tenant.sni_hostname,
default_max_clients: tenant.default_max_clients,
client_idle_timeout: tenant.client_idle_timeout,
users: render_many(tenant.users, UserView, "user.json")
}
end
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
defmodule Supavisor.Repo.Migrations.AddClientIdleTimeout do
  @moduledoc false
  use Ecto.Migration

  # Adds the per-tenant `client_idle_timeout` column (milliseconds).
  # A value of 0 (the default) means idle clients are never disconnected.
  def change do
    alter table("tenants", prefix: "_supavisor") do
      add :client_idle_timeout, :integer, null: false, default: 0
    end
  end
end

0 comments on commit 3688a93

Please sign in to comment.