Understand the latency between nodes across the Realtime cluster.
-
- <%= for {_pair, p} <- @pings do %>
-
-
From: <%= p.from_region %> - <%= p.from_node %>
-
To: <%= p.region %> - <%= p.node %>
-
<%= p.latency %> ms
-
<%= p.timestamp %>
-
- <% end %>
+
+
+
From: <%= p.payload.from_region %> - <%= p.payload.from_node %>
+
To: <%= p.payload.region %> - <%= p.payload.node %>
+
<%= p.payload.latency %> ms
+
<%= p.payload.timestamp %>
+
diff --git a/lib/realtime_web/open_api_schemas.ex b/lib/realtime_web/open_api_schemas.ex
index d5fa9dbb0..67d73cd2b 100644
--- a/lib/realtime_web/open_api_schemas.ex
+++ b/lib/realtime_web/open_api_schemas.ex
@@ -111,6 +111,16 @@ defmodule RealtimeWeb.OpenApiSchemas do
type: :number,
description: "Maximum payload size in KB"
},
+ max_client_presence_events_per_window: %Schema{
+ type: :number,
+ description: "Maximum client presence events (overrides environment default when set)",
+ nullable: true
+ },
+ client_presence_window_ms: %Schema{
+ type: :number,
+ description: "Client presence rate limit window in milliseconds (overrides environment default when set)",
+ nullable: true
+ },
extensions: %Schema{
type: :array,
items: %Schema{
@@ -214,6 +224,16 @@ defmodule RealtimeWeb.OpenApiSchemas do
}
}
},
+ max_client_presence_events_per_window: %Schema{
+ type: :number,
+ description: "Maximum client presence events (overrides environment default when set)",
+ nullable: true
+ },
+ client_presence_window_ms: %Schema{
+ type: :number,
+ description: "Client presence rate limit window in milliseconds (overrides environment default when set)",
+ nullable: true
+ },
inserted_at: %Schema{type: :string, format: "date-time", description: "Insert timestamp"},
extensions: %Schema{
type: :array,
@@ -313,18 +333,25 @@ defmodule RealtimeWeb.OpenApiSchemas do
type: :boolean,
description: "Indicates if Realtime has an active connection to the tenant database"
},
+ replication_connected: %Schema{
+ type: :boolean,
+ description: "Indicates if Realtime has an active replication connection for broadcast changes"
+ },
connected_cluster: %Schema{
type: :integer,
description: "The count of currently connected clients for a tenant on the Realtime cluster"
}
},
required: [
- :external_id,
- :jwt_secret
+ :healthy,
+ :db_connected,
+ :replication_connected,
+ :connected_cluster
],
example: %{
healthy: true,
db_connected: true,
+ replication_connected: true,
connected_cluster: 10
}
})
diff --git a/lib/realtime_web/plugs/assign_tenant.ex b/lib/realtime_web/plugs/assign_tenant.ex
index 69b52e8ab..b60d3e28a 100644
--- a/lib/realtime_web/plugs/assign_tenant.ex
+++ b/lib/realtime_web/plugs/assign_tenant.ex
@@ -20,7 +20,7 @@ defmodule RealtimeWeb.Plugs.AssignTenant do
def call(%Plug.Conn{host: host} = conn, _opts) do
with {:ok, external_id} <- Database.get_external_id(host),
- %Tenant{} = tenant <- Api.get_tenant_by_external_id(external_id) do
+ %Tenant{} = tenant <- Api.get_tenant_by_external_id(external_id, use_replica?: true) do
Logger.metadata(external_id: external_id, project: external_id)
OpenTelemetry.Tracer.set_attributes(external_id: external_id)
diff --git a/lib/realtime_web/plugs/auth_tenant.ex b/lib/realtime_web/plugs/auth_tenant.ex
index 11bf2e0bc..23c0581a8 100644
--- a/lib/realtime_web/plugs/auth_tenant.ex
+++ b/lib/realtime_web/plugs/auth_tenant.ex
@@ -42,6 +42,9 @@ defmodule RealtimeWeb.AuthTenant do
[] ->
nil
+ [""] ->
+ nil
+
[value | _] ->
[bearer, token] = value |> String.split(" ")
bearer = String.downcase(bearer)
diff --git a/lib/realtime_web/router.ex b/lib/realtime_web/router.ex
index 1e368f6d2..f5d7c29c7 100644
--- a/lib/realtime_web/router.ex
+++ b/lib/realtime_web/router.ex
@@ -76,6 +76,7 @@ defmodule RealtimeWeb.Router do
pipe_through(:metrics)
get("/", MetricsController, :index)
+ get("/:region", MetricsController, :region)
end
scope "/api" do
@@ -89,6 +90,7 @@ defmodule RealtimeWeb.Router do
resources("/tenants", TenantController, param: "tenant_id", except: [:edit, :new])
post("/tenants/:tenant_id/reload", TenantController, :reload)
+ post("/tenants/:tenant_id/shutdown", TenantController, :shutdown)
get("/tenants/:tenant_id/health", TenantController, :health)
end
diff --git a/lib/realtime_web/socket/user_broadcast.ex b/lib/realtime_web/socket/user_broadcast.ex
new file mode 100644
index 000000000..7caba33ce
--- /dev/null
+++ b/lib/realtime_web/socket/user_broadcast.ex
@@ -0,0 +1,39 @@
+defmodule RealtimeWeb.Socket.UserBroadcast do
+ @moduledoc """
+ Defines a message sent from pubsub to channels and vice-versa.
+
+ The message format requires the following keys:
+
+ * `:topic` - The string topic or topic:subtopic pair namespace, for example "messages", "messages:123"
+ * `:user_event`- The string user event name, for example "my-event"
+ * `:user_payload_encoding`- :json or :binary
+ * `:user_payload` - The actual message payload
+
+  Optionally, `:metadata` - a map to be JSON encoded
+ """
+
+ alias Phoenix.Socket.Broadcast
+
+ @type t :: %__MODULE__{}
+ defstruct topic: nil, user_event: nil, user_payload: nil, user_payload_encoding: nil, metadata: nil
+
+ @spec convert_to_json_broadcast(t) :: {:ok, Broadcast.t()} | {:error, String.t()}
+ def convert_to_json_broadcast(%__MODULE__{user_payload_encoding: :json} = user_broadcast) do
+ payload = %{
+ "event" => user_broadcast.user_event,
+ "payload" => Jason.Fragment.new(user_broadcast.user_payload),
+ "type" => "broadcast"
+ }
+
+ payload =
+ if user_broadcast.metadata do
+ Map.put(payload, "meta", user_broadcast.metadata)
+ else
+ payload
+ end
+
+ {:ok, %Broadcast{event: "broadcast", payload: payload, topic: user_broadcast.topic}}
+ end
+
+ def convert_to_json_broadcast(%__MODULE__{}), do: {:error, "User payload encoding is not JSON"}
+end
diff --git a/lib/realtime_web/socket/v2_serializer.ex b/lib/realtime_web/socket/v2_serializer.ex
new file mode 100644
index 000000000..ff50dab5d
--- /dev/null
+++ b/lib/realtime_web/socket/v2_serializer.ex
@@ -0,0 +1,232 @@
+defmodule RealtimeWeb.Socket.V2Serializer do
+ @moduledoc """
+ Custom serializer that is a superset of Phoenix's V2 JSONSerializer
+ that handles user broadcast and user broadcast push
+ """
+
+ @behaviour Phoenix.Socket.Serializer
+
+ @push 0
+ @reply 1
+ @broadcast 2
+ @user_broadcast_push 3
+ @user_broadcast 4
+
+ alias Phoenix.Socket.{Message, Reply, Broadcast}
+ alias RealtimeWeb.Socket.UserBroadcast
+
+ @impl true
+ def fastlane!(%UserBroadcast{} = msg) do
+ metadata =
+ if msg.metadata do
+ Phoenix.json_library().encode!(msg.metadata)
+ else
+ msg.metadata
+ end
+
+ topic_size = byte_size!(msg.topic, :topic, 255)
+ user_event_size = byte_size!(msg.user_event, :user_event, 255)
+ metadata_size = byte_size!(metadata, :metadata, 255)
+ user_payload_encoding = if msg.user_payload_encoding == :json, do: 1, else: 0
+
+ bin = <<
+ @user_broadcast::size(8),
+ topic_size::size(8),
+ user_event_size::size(8),
+ metadata_size::size(8),
+ user_payload_encoding::size(8),
+ msg.topic::binary-size(topic_size),
+ msg.user_event::binary-size(user_event_size),
+ metadata || <<>>::binary-size(metadata_size),
+ msg.user_payload::binary
+ >>
+
+ {:socket_push, :binary, bin}
+ end
+
+ def fastlane!(%Broadcast{payload: {:binary, data}} = msg) do
+ topic_size = byte_size!(msg.topic, :topic, 255)
+ event_size = byte_size!(msg.event, :event, 255)
+
+ bin = <<
+ @broadcast::size(8),
+ topic_size::size(8),
+ event_size::size(8),
+ msg.topic::binary-size(topic_size),
+ msg.event::binary-size(event_size),
+ data::binary
+ >>
+
+ {:socket_push, :binary, bin}
+ end
+
+ def fastlane!(%Broadcast{payload: %{}} = msg) do
+ data = Phoenix.json_library().encode_to_iodata!([nil, nil, msg.topic, msg.event, msg.payload])
+ {:socket_push, :text, data}
+ end
+
+ def fastlane!(%Broadcast{payload: invalid}) do
+ raise ArgumentError, "expected broadcasted payload to be a map, got: #{inspect(invalid)}"
+ end
+
+ @impl true
+ def encode!(%Reply{payload: {:binary, data}} = reply) do
+ status = to_string(reply.status)
+ join_ref = to_string(reply.join_ref)
+ ref = to_string(reply.ref)
+ join_ref_size = byte_size!(join_ref, :join_ref, 255)
+ ref_size = byte_size!(ref, :ref, 255)
+ topic_size = byte_size!(reply.topic, :topic, 255)
+ status_size = byte_size!(status, :status, 255)
+
+ bin = <<
+ @reply::size(8),
+ join_ref_size::size(8),
+ ref_size::size(8),
+ topic_size::size(8),
+ status_size::size(8),
+ join_ref::binary-size(join_ref_size),
+ ref::binary-size(ref_size),
+ reply.topic::binary-size(topic_size),
+ status::binary-size(status_size),
+ data::binary
+ >>
+
+ {:socket_push, :binary, bin}
+ end
+
+ def encode!(%Reply{} = reply) do
+ data = [
+ reply.join_ref,
+ reply.ref,
+ reply.topic,
+ "phx_reply",
+ %{status: reply.status, response: reply.payload}
+ ]
+
+ {:socket_push, :text, Phoenix.json_library().encode_to_iodata!(data)}
+ end
+
+ def encode!(%Message{payload: {:binary, data}} = msg) do
+ join_ref = to_string(msg.join_ref)
+ join_ref_size = byte_size!(join_ref, :join_ref, 255)
+ topic_size = byte_size!(msg.topic, :topic, 255)
+ event_size = byte_size!(msg.event, :event, 255)
+
+ bin = <<
+ @push::size(8),
+ join_ref_size::size(8),
+ topic_size::size(8),
+ event_size::size(8),
+ join_ref::binary-size(join_ref_size),
+ msg.topic::binary-size(topic_size),
+ msg.event::binary-size(event_size),
+ data::binary
+ >>
+
+ {:socket_push, :binary, bin}
+ end
+
+ def encode!(%Message{payload: %{}} = msg) do
+ data = [msg.join_ref, msg.ref, msg.topic, msg.event, msg.payload]
+ {:socket_push, :text, Phoenix.json_library().encode_to_iodata!(data)}
+ end
+
+ def encode!(%Message{payload: invalid}) do
+ raise ArgumentError, "expected payload to be a map, got: #{inspect(invalid)}"
+ end
+
+ @impl true
+ def decode!(raw_message, opts) do
+ case Keyword.fetch(opts, :opcode) do
+ {:ok, :text} -> decode_text(raw_message)
+ {:ok, :binary} -> decode_binary(raw_message)
+ end
+ end
+
+ defp decode_text(raw_message) do
+ [join_ref, ref, topic, event, payload | _] = Phoenix.json_library().decode!(raw_message)
+
+ %Message{
+ topic: topic,
+ event: event,
+ payload: payload,
+ ref: ref,
+ join_ref: join_ref
+ }
+ end
+
+ defp decode_binary(<<
+ @push::size(8),
+ join_ref_size::size(8),
+ ref_size::size(8),
+ topic_size::size(8),
+ event_size::size(8),
+ join_ref::binary-size(join_ref_size),
+ ref::binary-size(ref_size),
+ topic::binary-size(topic_size),
+ event::binary-size(event_size),
+ data::binary
+ >>) do
+ %Message{
+ topic: topic,
+ event: event,
+ payload: {:binary, data},
+ ref: ref,
+ join_ref: join_ref
+ }
+ end
+
+ defp decode_binary(<<
+ @user_broadcast_push::size(8),
+ join_ref_size::size(8),
+ ref_size::size(8),
+ topic_size::size(8),
+ user_event_size::size(8),
+ metadata_size::size(8),
+ user_payload_encoding::size(8),
+ join_ref::binary-size(join_ref_size),
+ ref::binary-size(ref_size),
+ topic::binary-size(topic_size),
+ user_event::binary-size(user_event_size),
+ metadata::binary-size(metadata_size),
+ user_payload::binary
+ >>) do
+ user_payload_encoding = if user_payload_encoding == 0, do: :binary, else: :json
+
+ metadata =
+ if metadata_size > 0 do
+ Phoenix.json_library().decode!(metadata)
+ else
+ %{}
+ end
+
+    # Encoding as Message because that's how Phoenix.Socket and Channel.Server expect things to show up
+ # Here we abuse the payload field to carry a tuple of (user_event, user payload encoding, user payload, metadata)
+ %Message{
+ topic: topic,
+ event: "broadcast",
+ payload: {user_event, user_payload_encoding, user_payload, metadata},
+ ref: ref,
+ join_ref: join_ref
+ }
+ end
+
+ defp byte_size!(nil, _kind, _max), do: 0
+
+ defp byte_size!(bin, kind, max) do
+ case byte_size(bin) do
+ size when size <= max ->
+ size
+
+ oversized ->
+ raise ArgumentError, """
+ unable to convert #{kind} to binary.
+
+ #{inspect(bin)}
+
+ must be less than or equal to #{max} bytes, but is #{oversized} bytes.
+ """
+ end
+ end
+end
diff --git a/lib/realtime_web/tenant_broadcaster.ex b/lib/realtime_web/tenant_broadcaster.ex
index ee8646614..b1b878b5d 100644
--- a/lib/realtime_web/tenant_broadcaster.ex
+++ b/lib/realtime_web/tenant_broadcaster.ex
@@ -5,11 +5,49 @@ defmodule RealtimeWeb.TenantBroadcaster do
alias Phoenix.PubSub
- @spec pubsub_broadcast(tenant_id :: String.t(), PubSub.topic(), PubSub.message(), PubSub.dispatcher()) :: :ok
- def pubsub_broadcast(tenant_id, topic, message, dispatcher) do
- collect_payload_size(tenant_id, message)
+ @type message_type :: :broadcast | :presence | :postgres_changes
- Realtime.GenRpc.multicast(PubSub, :local_broadcast, [Realtime.PubSub, topic, message, dispatcher], key: topic)
+ @spec pubsub_direct_broadcast(
+ node :: node(),
+ tenant_id :: String.t(),
+ PubSub.topic(),
+ PubSub.message(),
+ PubSub.dispatcher(),
+ message_type
+ ) ::
+ :ok
+ def pubsub_direct_broadcast(node, tenant_id, topic, message, dispatcher, message_type) do
+ collect_payload_size(tenant_id, message, message_type)
+
+ do_direct_broadcast(node, topic, message, dispatcher)
+
+ :ok
+ end
+
+ # Remote
+ defp do_direct_broadcast(node, topic, message, dispatcher) when node != node() do
+ if pubsub_adapter() == :gen_rpc do
+ PubSub.direct_broadcast(node, Realtime.PubSub, topic, message, dispatcher)
+ else
+ Realtime.GenRpc.cast(node, PubSub, :local_broadcast, [Realtime.PubSub, topic, message, dispatcher], key: topic)
+ end
+ end
+
+ # Local
+ defp do_direct_broadcast(_node, topic, message, dispatcher) do
+ PubSub.local_broadcast(Realtime.PubSub, topic, message, dispatcher)
+ end
+
+ @spec pubsub_broadcast(tenant_id :: String.t(), PubSub.topic(), PubSub.message(), PubSub.dispatcher(), message_type) ::
+ :ok
+ def pubsub_broadcast(tenant_id, topic, message, dispatcher, message_type) do
+ collect_payload_size(tenant_id, message, message_type)
+
+ if pubsub_adapter() == :gen_rpc do
+ PubSub.broadcast(Realtime.PubSub, topic, message, dispatcher)
+ else
+ Realtime.GenRpc.multicast(PubSub, :local_broadcast, [Realtime.PubSub, topic, message, dispatcher], key: topic)
+ end
:ok
end
@@ -19,30 +57,41 @@ defmodule RealtimeWeb.TenantBroadcaster do
from :: pid,
PubSub.topic(),
PubSub.message(),
- PubSub.dispatcher()
+ PubSub.dispatcher(),
+ message_type
) ::
:ok
- def pubsub_broadcast_from(tenant_id, from, topic, message, dispatcher) do
- collect_payload_size(tenant_id, message)
+ def pubsub_broadcast_from(tenant_id, from, topic, message, dispatcher, message_type) do
+ collect_payload_size(tenant_id, message, message_type)
- Realtime.GenRpc.multicast(
- PubSub,
- :local_broadcast_from,
- [Realtime.PubSub, from, topic, message, dispatcher],
- key: topic
- )
+ if pubsub_adapter() == :gen_rpc do
+ PubSub.broadcast_from(Realtime.PubSub, from, topic, message, dispatcher)
+ else
+ Realtime.GenRpc.multicast(
+ PubSub,
+ :local_broadcast_from,
+ [Realtime.PubSub, from, topic, message, dispatcher],
+ key: topic
+ )
+ end
:ok
end
@payload_size_event [:realtime, :tenants, :payload, :size]
- defp collect_payload_size(tenant_id, payload) when is_struct(payload) do
+ @spec collect_payload_size(tenant_id :: String.t(), payload :: term, message_type :: message_type) :: :ok
+ def collect_payload_size(tenant_id, payload, message_type) when is_struct(payload) do
# Extracting from struct so the __struct__ bit is not calculated as part of the payload
- collect_payload_size(tenant_id, Map.from_struct(payload))
+ collect_payload_size(tenant_id, Map.from_struct(payload), message_type)
end
- defp collect_payload_size(tenant_id, payload) do
- :telemetry.execute(@payload_size_event, %{size: :erlang.external_size(payload)}, %{tenant: tenant_id})
+ def collect_payload_size(tenant_id, payload, message_type) do
+ :telemetry.execute(@payload_size_event, %{size: :erlang.external_size(payload)}, %{
+ tenant: tenant_id,
+ message_type: message_type
+ })
end
+
+ defp pubsub_adapter, do: Application.fetch_env!(:realtime, :pubsub_adapter)
end
diff --git a/lib/realtime_web/views/tenant_view.ex b/lib/realtime_web/views/tenant_view.ex
index a74428f7d..2f74ed082 100644
--- a/lib/realtime_web/views/tenant_view.ex
+++ b/lib/realtime_web/views/tenant_view.ex
@@ -30,7 +30,9 @@ defmodule RealtimeWeb.TenantView do
Map.drop(settings, ["db_password"])
end)
end),
- private_only: tenant.private_only
+ private_only: tenant.private_only,
+ max_client_presence_events_per_window: tenant.max_client_presence_events_per_window,
+ client_presence_window_ms: tenant.client_presence_window_ms
}
end
end
diff --git a/mix.exs b/mix.exs
index d0f8a267b..dd0732de4 100644
--- a/mix.exs
+++ b/mix.exs
@@ -4,8 +4,8 @@ defmodule Realtime.MixProject do
def project do
[
app: :realtime,
- version: "2.46.2",
- elixir: "~> 1.17.3",
+ version: "2.77.0",
+ elixir: "~> 1.18",
elixirc_paths: elixirc_paths(Mix.env()),
start_permanent: Mix.env() == :prod,
aliases: aliases(),
@@ -53,7 +53,7 @@ defmodule Realtime.MixProject do
# Type `mix help deps` for examples and options.
defp deps do
[
- {:phoenix, "~> 1.7.0"},
+ {:phoenix, override: true, github: "supabase/phoenix", branch: "feat/presence-custom-dispatcher-1.7.19"},
{:phoenix_ecto, "~> 4.4.0"},
{:ecto_sql, "~> 3.11"},
{:ecto_psql_extras, "~> 0.8"},
@@ -65,7 +65,7 @@ defmodule Realtime.MixProject do
{:phoenix_view, "~> 2.0"},
{:esbuild, "~> 0.4", runtime: Mix.env() == :dev},
{:tailwind, "~> 0.1", runtime: Mix.env() == :dev},
- {:telemetry_metrics, "~> 0.6"},
+ {:telemetry_metrics, "~> 1.0"},
{:telemetry_poller, "~> 1.0"},
{:gettext, "~> 0.19"},
{:jason, "~> 1.3"},
@@ -73,13 +73,17 @@ defmodule Realtime.MixProject do
{:libcluster, "~> 3.3"},
{:libcluster_postgres, "~> 0.2"},
{:uuid, "~> 1.1"},
- {:prom_ex, "~> 1.8"},
+ {:prom_ex, "~> 1.10"},
+ # prom_ex depends on peep ~> 3.0 but there is no issue using peep ~> 4.0
+ # https://github.com/akoutmos/prom_ex/pull/270
+ {:peep, "~> 4.3", override: true},
{:joken, "~> 2.5.0"},
{:ex_json_schema, "~> 0.7"},
{:recon, "~> 2.5"},
{:mint, "~> 1.4"},
{:logflare_logger_backend, "~> 0.11"},
{:syn, "~> 3.3"},
+ {:beacon, path: "./beacon"},
{:cachex, "~> 4.0"},
{:open_api_spex, "~> 3.16"},
{:corsica, "~> 2.0"},
@@ -90,7 +94,8 @@ defmodule Realtime.MixProject do
{:opentelemetry_phoenix, "~> 2.0"},
{:opentelemetry_cowboy, "~> 1.0"},
{:opentelemetry_ecto, "~> 1.2"},
- {:gen_rpc, git: "https://github.com/supabase/gen_rpc.git", ref: "d161cf263c661a534eaabf80aac7a34484dac772"},
+ {:gen_rpc, git: "https://github.com/supabase/gen_rpc.git", ref: "5382a0f2689a4cb8838873a2173928281dbe5002"},
+ {:req, "~> 0.5"},
{:mimic, "~> 1.0", only: :test},
{:floki, ">= 0.30.0", only: :test},
{:mint_web_socket, "~> 1.0", only: :test},
@@ -102,7 +107,6 @@ defmodule Realtime.MixProject do
{:credo, "~> 1.7", only: [:dev, :test], runtime: false},
{:dialyxir, "~> 1.4", only: :dev, runtime: false},
{:poolboy, "~> 1.5", only: :test},
- {:req, "~> 0.5", only: :test},
{:mix_test_watch, "~> 1.0", only: [:dev, :test], runtime: false}
]
end
@@ -121,10 +125,15 @@ defmodule Realtime.MixProject do
test: [
"cmd epmd -daemon",
"ecto.create --quiet",
- "run priv/repo/seeds_before_migration.exs",
"ecto.migrate --migrations-path=priv/repo/migrations",
"test"
],
+ "test.partitioned": [
+ "cmd epmd -daemon",
+ "ecto.create --quiet",
+ "ecto.migrate --migrations-path=priv/repo/migrations",
+ "test --partitions 4"
+ ],
"assets.deploy": ["esbuild default --minify", "tailwind default --minify", "phx.digest"]
]
end
diff --git a/mix.lock b/mix.lock
index 76eb0d980..b9000570f 100644
--- a/mix.lock
+++ b/mix.lock
@@ -3,39 +3,39 @@
"benchee": {:hex, :benchee, "1.1.0", "f3a43817209a92a1fade36ef36b86e1052627fd8934a8b937ac9ab3a76c43062", [:mix], [{:deep_merge, "~> 1.0", [hex: :deep_merge, repo: "hexpm", optional: false]}, {:statistex, "~> 1.0", [hex: :statistex, repo: "hexpm", optional: false]}], "hexpm", "7da57d545003165a012b587077f6ba90b89210fd88074ce3c60ce239eb5e6d93"},
"bertex": {:hex, :bertex, "1.3.0", "0ad0df9159b5110d9d2b6654f72fbf42a54884ef43b6b651e6224c0af30ba3cb", [:mix], [], "hexpm", "0a5d5e478bb5764b7b7bae37cae1ca491200e58b089df121a2fe1c223d8ee57a"},
"bunt": {:hex, :bunt, "1.0.0", "081c2c665f086849e6d57900292b3a161727ab40431219529f13c4ddcf3e7a44", [:mix], [], "hexpm", "dc5f86aa08a5f6fa6b8096f0735c4e76d54ae5c9fa2c143e5a1fc7c1cd9bb6b5"},
- "cachex": {:hex, :cachex, "4.0.3", "95e88c3ef4d37990948eaecccefe40b4ce4a778e0d7ade29081e6b7a89309ee2", [:mix], [{:eternal, "~> 1.2", [hex: :eternal, repo: "hexpm", optional: false]}, {:ex_hash_ring, "~> 6.0", [hex: :ex_hash_ring, repo: "hexpm", optional: false]}, {:jumper, "~> 1.0", [hex: :jumper, repo: "hexpm", optional: false]}, {:sleeplocks, "~> 1.1", [hex: :sleeplocks, repo: "hexpm", optional: false]}, {:unsafe, "~> 1.0", [hex: :unsafe, repo: "hexpm", optional: false]}], "hexpm", "d5d632da7f162f8a190f1c39b712c0ebc9cf0007c4e2029d44eddc8041b52d55"},
- "castore": {:hex, :castore, "1.0.11", "4bbd584741601eb658007339ea730b082cc61f3554cf2e8f39bf693a11b49073", [:mix], [], "hexpm", "e03990b4db988df56262852f20de0f659871c35154691427a5047f4967a16a62"},
+ "cachex": {:hex, :cachex, "4.1.1", "574c5cd28473db313a0a76aac8c945fe44191659538ca6a1e8946ec300b1a19f", [:mix], [{:eternal, "~> 1.2", [hex: :eternal, repo: "hexpm", optional: false]}, {:ex_hash_ring, "~> 6.0", [hex: :ex_hash_ring, repo: "hexpm", optional: false]}, {:jumper, "~> 1.0", [hex: :jumper, repo: "hexpm", optional: false]}, {:sleeplocks, "~> 1.1", [hex: :sleeplocks, repo: "hexpm", optional: false]}, {:unsafe, "~> 1.0", [hex: :unsafe, repo: "hexpm", optional: false]}], "hexpm", "d6b7449ff98d6bb92dda58bd4fc3189cae9f99e7042054d669596f56dc503cd8"},
+ "castore": {:hex, :castore, "1.0.15", "8aa930c890fe18b6fe0a0cff27b27d0d4d231867897bd23ea772dee561f032a3", [:mix], [], "hexpm", "96ce4c69d7d5d7a0761420ef743e2f4096253931a3ba69e5ff8ef1844fe446d3"},
"chatterbox": {:hex, :ts_chatterbox, "0.15.1", "5cac4d15dd7ad61fc3c4415ce4826fc563d4643dee897a558ec4ea0b1c835c9c", [:rebar3], [{:hpack, "~> 0.3.0", [hex: :hpack_erl, repo: "hexpm", optional: false]}], "hexpm", "4f75b91451338bc0da5f52f3480fa6ef6e3a2aeecfc33686d6b3d0a0948f31aa"},
"corsica": {:hex, :corsica, "2.1.3", "dccd094ffce38178acead9ae743180cdaffa388f35f0461ba1e8151d32e190e6", [:mix], [{:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "616c08f61a345780c2cf662ff226816f04d8868e12054e68963e95285b5be8bc"},
- "cowboy": {:hex, :cowboy, "2.12.0", "f276d521a1ff88b2b9b4c54d0e753da6c66dd7be6c9fca3d9418b561828a3731", [:make, :rebar3], [{:cowlib, "2.13.0", [hex: :cowlib, repo: "hexpm", optional: false]}, {:ranch, "1.8.0", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "8a7abe6d183372ceb21caa2709bec928ab2b72e18a3911aa1771639bef82651e"},
+ "cowboy": {:hex, :cowboy, "2.14.2", "4008be1df6ade45e4f2a4e9e2d22b36d0b5aba4e20b0a0d7049e28d124e34847", [:make, :rebar3], [{:cowlib, ">= 2.16.0 and < 3.0.0", [hex: :cowlib, repo: "hexpm", optional: false]}, {:ranch, ">= 1.8.0 and < 3.0.0", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "569081da046e7b41b5df36aa359be71a0c8874e5b9cff6f747073fc57baf1ab9"},
"cowboy_telemetry": {:hex, :cowboy_telemetry, "0.4.0", "f239f68b588efa7707abce16a84d0d2acf3a0f50571f8bb7f56a15865aae820c", [:rebar3], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7d98bac1ee4565d31b62d59f8823dfd8356a169e7fcbb83831b8a5397404c9de"},
- "cowlib": {:hex, :cowlib, "2.13.0", "db8f7505d8332d98ef50a3ef34b34c1afddec7506e4ee4dd4a3a266285d282ca", [:make, :rebar3], [], "hexpm", "e1e1284dc3fc030a64b1ad0d8382ae7e99da46c3246b815318a4b848873800a4"},
- "credo": {:hex, :credo, "1.7.11", "d3e805f7ddf6c9c854fd36f089649d7cf6ba74c42bc3795d587814e3c9847102", [:mix], [{:bunt, "~> 0.2.1 or ~> 1.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "56826b4306843253a66e47ae45e98e7d284ee1f95d53d1612bb483f88a8cf219"},
+ "cowlib": {:hex, :cowlib, "2.16.0", "54592074ebbbb92ee4746c8a8846e5605052f29309d3a873468d76cdf932076f", [:make, :rebar3], [], "hexpm", "7f478d80d66b747344f0ea7708c187645cfcc08b11aa424632f78e25bf05db51"},
+ "credo": {:hex, :credo, "1.7.13", "126a0697df6b7b71cd18c81bc92335297839a806b6f62b61d417500d1070ff4e", [:mix], [{:bunt, "~> 0.2.1 or ~> 1.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "47641e6d2bbff1e241e87695b29f617f1a8f912adea34296fb10ecc3d7e9e84f"},
"ctx": {:hex, :ctx, "0.6.0", "8ff88b70e6400c4df90142e7f130625b82086077a45364a78d208ed3ed53c7fe", [:rebar3], [], "hexpm", "a14ed2d1b67723dbebbe423b28d7615eb0bdcba6ff28f2d1f1b0a7e1d4aa5fc2"},
- "db_connection": {:hex, :db_connection, "2.8.0", "64fd82cfa6d8e25ec6660cea73e92a4cbc6a18b31343910427b702838c4b33b2", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "008399dae5eee1bf5caa6e86d204dcb44242c82b1ed5e22c881f2c34da201b15"},
+ "db_connection": {:hex, :db_connection, "2.8.1", "9abdc1e68c34c6163f6fb96a96532272d13ad7ca45262156ae8b7ec6d9dc4bec", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "a61a3d489b239d76f326e03b98794fb8e45168396c925ef25feb405ed09da8fd"},
"decimal": {:hex, :decimal, "2.3.0", "3ad6255aa77b4a3c4f818171b12d237500e63525c2fd056699967a3e7ea20f62", [:mix], [], "hexpm", "a4d66355cb29cb47c3cf30e71329e58361cfcb37c34235ef3bf1d7bf3773aeac"},
"deep_merge": {:hex, :deep_merge, "1.0.0", "b4aa1a0d1acac393bdf38b2291af38cb1d4a52806cf7a4906f718e1feb5ee961", [:mix], [], "hexpm", "ce708e5f094b9cd4e8f2be4f00d2f4250c4095be93f8cd6d018c753894885430"},
- "dialyxir": {:hex, :dialyxir, "1.4.5", "ca1571ac18e0f88d4ab245f0b60fa31ff1b12cbae2b11bd25d207f865e8ae78a", [:mix], [{:erlex, ">= 0.2.7", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "b0fb08bb8107c750db5c0b324fa2df5ceaa0f9307690ee3c1f6ba5b9eb5d35c3"},
- "ecto": {:hex, :ecto, "3.13.2", "7d0c0863f3fc8d71d17fc3ad3b9424beae13f02712ad84191a826c7169484f01", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "669d9291370513ff56e7b7e7081b7af3283d02e046cf3d403053c557894a0b3e"},
+ "dialyxir": {:hex, :dialyxir, "1.4.6", "7cca478334bf8307e968664343cbdb432ee95b4b68a9cba95bdabb0ad5bdfd9a", [:mix], [{:erlex, ">= 0.2.7", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "8cf5615c5cd4c2da6c501faae642839c8405b49f8aa057ad4ae401cb808ef64d"},
+ "ecto": {:hex, :ecto, "3.13.3", "6a983f0917f8bdc7a89e96f2bf013f220503a0da5d8623224ba987515b3f0d80", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "1927db768f53a88843ff25b6ba7946599a8ca8a055f69ad8058a1432a399af94"},
"ecto_psql_extras": {:hex, :ecto_psql_extras, "0.8.8", "aa02529c97f69aed5722899f5dc6360128735a92dd169f23c5d50b1f7fdede08", [:mix], [{:ecto_sql, "~> 3.7", [hex: :ecto_sql, repo: "hexpm", optional: false]}, {:postgrex, "> 0.16.0", [hex: :postgrex, repo: "hexpm", optional: false]}, {:table_rex, "~> 3.1.1 or ~> 4.0", [hex: :table_rex, repo: "hexpm", optional: false]}], "hexpm", "04c63d92b141723ad6fed2e60a4b461ca00b3594d16df47bbc48f1f4534f2c49"},
"ecto_sql": {:hex, :ecto_sql, "3.13.2", "a07d2461d84107b3d037097c822ffdd36ed69d1cf7c0f70e12a3d1decf04e2e1", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.13.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.7", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.19 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "539274ab0ecf1a0078a6a72ef3465629e4d6018a3028095dc90f60a19c371717"},
"erlex": {:hex, :erlex, "0.2.7", "810e8725f96ab74d17aac676e748627a07bc87eb950d2b83acd29dc047a30595", [:mix], [], "hexpm", "3ed95f79d1a844c3f6bf0cea61e0d5612a42ce56da9c03f01df538685365efb0"},
- "esbuild": {:hex, :esbuild, "0.8.2", "5f379dfa383ef482b738e7771daf238b2d1cfb0222bef9d3b20d4c8f06c7a7ac", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}, {:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "558a8a08ed78eb820efbfda1de196569d8bfa9b51e8371a1934fbb31345feda7"},
+ "esbuild": {:hex, :esbuild, "0.10.0", "b0aa3388a1c23e727c5a3e7427c932d89ee791746b0081bbe56103e9ef3d291f", [:mix], [{:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "468489cda427b974a7cc9f03ace55368a83e1a7be12fba7e30969af78e5f8c70"},
"eternal": {:hex, :eternal, "1.2.2", "d1641c86368de99375b98d183042dd6c2b234262b8d08dfd72b9eeaafc2a1abd", [:mix], [], "hexpm", "2c9fe32b9c3726703ba5e1d43a1d255a4f3f2d8f8f9bc19f094c7cb1a7a9e782"},
"ex_hash_ring": {:hex, :ex_hash_ring, "6.0.4", "bef9d2d796afbbe25ab5b5a7ed746e06b99c76604f558113c273466d52fa6d6b", [:mix], [], "hexpm", "89adabf31f7d3dfaa36802ce598ce918e9b5b33bae8909ac1a4d052e1e567d18"},
- "ex_json_schema": {:hex, :ex_json_schema, "0.10.2", "7c4b8c1481fdeb1741e2ce66223976edfb9bccebc8014f6aec35d4efe964fb71", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}], "hexpm", "37f43be60f8407659d4d0155a7e45e7f406dab1f827051d3d35858a709baf6a6"},
- "excoveralls": {:hex, :excoveralls, "0.18.3", "bca47a24d69a3179951f51f1db6d3ed63bca9017f476fe520eb78602d45f7756", [:mix], [{:castore, "~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "746f404fcd09d5029f1b211739afb8fb8575d775b21f6a3908e7ce3e640724c6"},
+ "ex_json_schema": {:hex, :ex_json_schema, "0.11.1", "b593f92937a095f66054bb318681397dfe7304e7d2b6b1a7534ea3aa40024f8c", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}], "hexpm", "32d651a575a6ce2fd613f140b0fef8dd0acc7cf8e8bcd29a3a1be5c945700dd5"},
+ "excoveralls": {:hex, :excoveralls, "0.18.5", "e229d0a65982613332ec30f07940038fe451a2e5b29bce2a5022165f0c9b157e", [:mix], [{:castore, "~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "523fe8a15603f86d64852aab2abe8ddbd78e68579c8525ae765facc5eae01562"},
"expo": {:hex, :expo, "1.1.0", "f7b9ed7fb5745ebe1eeedf3d6f29226c5dd52897ac67c0f8af62a07e661e5c75", [:mix], [], "hexpm", "fbadf93f4700fb44c331362177bdca9eeb8097e8b0ef525c9cc501cb9917c960"},
- "file_system": {:hex, :file_system, "1.1.0", "08d232062284546c6c34426997dd7ef6ec9f8bbd090eb91780283c9016840e8f", [:mix], [], "hexpm", "bfcf81244f416871f2a2e15c1b515287faa5db9c6bcf290222206d120b3d43f6"},
- "finch": {:hex, :finch, "0.19.0", "c644641491ea854fc5c1bbaef36bfc764e3f08e7185e1f084e35e0672241b76d", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.6.2 or ~> 1.7", [hex: :mint, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.4 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 1.1", [hex: :nimble_pool, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "fc5324ce209125d1e2fa0fcd2634601c52a787aff1cd33ee833664a5af4ea2b6"},
- "floki": {:hex, :floki, "0.37.0", "b83e0280bbc6372f2a403b2848013650b16640cd2470aea6701f0632223d719e", [:mix], [], "hexpm", "516a0c15a69f78c47dc8e0b9b3724b29608aa6619379f91b1ffa47109b5d0dd3"},
- "gen_rpc": {:git, "https://github.com/supabase/gen_rpc.git", "d161cf263c661a534eaabf80aac7a34484dac772", [ref: "d161cf263c661a534eaabf80aac7a34484dac772"]},
+ "file_system": {:hex, :file_system, "1.1.1", "31864f4685b0148f25bd3fbef2b1228457c0c89024ad67f7a81a3ffbc0bbad3a", [:mix], [], "hexpm", "7a15ff97dfe526aeefb090a7a9d3d03aa907e100e262a0f8f7746b78f8f87a5d"},
+ "finch": {:hex, :finch, "0.20.0", "5330aefb6b010f424dcbbc4615d914e9e3deae40095e73ab0c1bb0968933cadf", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.6.2 or ~> 1.7", [hex: :mint, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.4 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 1.1", [hex: :nimble_pool, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "2658131a74d051aabfcba936093c903b8e89da9a1b63e430bee62045fa9b2ee2"},
+ "floki": {:hex, :floki, "0.38.0", "62b642386fa3f2f90713f6e231da0fa3256e41ef1089f83b6ceac7a3fd3abf33", [:mix], [], "hexpm", "a5943ee91e93fb2d635b612caf5508e36d37548e84928463ef9dd986f0d1abd9"},
+ "gen_rpc": {:git, "https://github.com/supabase/gen_rpc.git", "5382a0f2689a4cb8838873a2173928281dbe5002", [ref: "5382a0f2689a4cb8838873a2173928281dbe5002"]},
"gettext": {:hex, :gettext, "0.26.2", "5978aa7b21fada6deabf1f6341ddba50bc69c999e812211903b169799208f2a8", [:mix], [{:expo, "~> 0.5.1 or ~> 1.0", [hex: :expo, repo: "hexpm", optional: false]}], "hexpm", "aa978504bcf76511efdc22d580ba08e2279caab1066b76bb9aa81c4a1e0a32a5"},
"gproc": {:hex, :gproc, "0.9.1", "f1df0364423539cf0b80e8201c8b1839e229e5f9b3ccb944c5834626998f5b8c", [:rebar3], [], "hexpm", "905088e32e72127ed9466f0bac0d8e65704ca5e73ee5a62cb073c3117916d507"},
"grpcbox": {:hex, :grpcbox, "0.17.1", "6e040ab3ef16fe699ffb513b0ef8e2e896da7b18931a1ef817143037c454bcce", [:rebar3], [{:acceptor_pool, "~> 1.0.0", [hex: :acceptor_pool, repo: "hexpm", optional: false]}, {:chatterbox, "~> 0.15.1", [hex: :ts_chatterbox, repo: "hexpm", optional: false]}, {:ctx, "~> 0.6.0", [hex: :ctx, repo: "hexpm", optional: false]}, {:gproc, "~> 0.9.1", [hex: :gproc, repo: "hexpm", optional: false]}], "hexpm", "4a3b5d7111daabc569dc9cbd9b202a3237d81c80bf97212fbc676832cb0ceb17"},
- "ham": {:hex, :ham, "0.3.0", "7cd031b4a55fba219c11553e7b13ba73bd86eab4034518445eff1e038cb9a44d", [:mix], [], "hexpm", "7d6c6b73d7a6a83233876cc1b06a4d9b5de05562b228effda4532f9a49852bf6"},
+ "ham": {:hex, :ham, "0.3.2", "02ae195f49970ef667faf9d01bc454fb80909a83d6c775bcac724ca567aeb7b3", [:mix], [], "hexpm", "b71cc684c0e5a3d32b5f94b186770551509e93a9ae44ca1c1a313700f2f6a69a"},
"hpack": {:hex, :hpack_erl, "0.3.0", "2461899cc4ab6a0ef8e970c1661c5fc6a52d3c25580bc6dd204f84ce94669926", [:rebar3], [], "hexpm", "d6137d7079169d8c485c6962dfe261af5b9ef60fbc557344511c1e65e3d95fb0"},
- "hpax": {:hex, :hpax, "1.0.2", "762df951b0c399ff67cc57c3995ec3cf46d696e41f0bba17da0518d94acd4aac", [:mix], [], "hexpm", "2f09b4c1074e0abd846747329eaa26d535be0eb3d189fa69d812bfb8bfefd32f"},
+ "hpax": {:hex, :hpax, "1.0.3", "ed67ef51ad4df91e75cc6a1494f851850c0bd98ebc0be6e81b026e765ee535aa", [:mix], [], "hexpm", "8eab6e1cfa8d5918c2ce4ba43588e894af35dbd8e91e6e55c817bca5847df34a"},
"jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"},
"joken": {:hex, :joken, "2.5.0", "09be497d804b8115eb6f07615cef2e60c2a1008fb89dc0aef0d4c4b4609b99aa", [:mix], [{:jose, "~> 1.11.2", [hex: :jose, repo: "hexpm", optional: false]}], "hexpm", "22b25c89617c5ed8ca7b31026340a25ea0f9ca7160f9706b79be9ed81fdf74e7"},
"jose": {:hex, :jose, "1.11.10", "a903f5227417bd2a08c8a00a0cbcc458118be84480955e8d251297a425723f83", [:mix, :rebar3], [], "hexpm", "0d6cd36ff8ba174db29148fc112b5842186b68a90ce9fc2b3ec3afe76593e614"},
@@ -45,65 +45,66 @@
"logflare_api_client": {:hex, :logflare_api_client, "0.3.5", "c427ebf65a8402d68b056d4a5ef3e1eb3b90c0ad1d0de97d1fe23807e0c1b113", [:mix], [{:bertex, "~> 1.3", [hex: :bertex, repo: "hexpm", optional: false]}, {:finch, "~> 0.10", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, ">= 1.0.0", [hex: :jason, repo: "hexpm", optional: false]}, {:tesla, "~> 1.0", [hex: :tesla, repo: "hexpm", optional: false]}], "hexpm", "16d29abcb80c4f72745cdf943379da02a201504813c3aa12b4d4acb0302b7723"},
"logflare_etso": {:hex, :logflare_etso, "1.1.2", "040bd3e482aaf0ed20080743b7562242ec5079fd88a6f9c8ce5d8298818292e9", [:mix], [{:ecto, "~> 3.8", [hex: :ecto, repo: "hexpm", optional: false]}], "hexpm", "ab96be42900730a49b132891f43a9be1d52e4ad3ee9ed9cb92565c5f87345117"},
"logflare_logger_backend": {:hex, :logflare_logger_backend, "0.11.4", "3a5df94e764b7c8ee4bd7b875a480a34a27807128d8459aa59ea63b2b38bddc7", [:mix], [{:bertex, "~> 1.3", [hex: :bertex, repo: "hexpm", optional: false]}, {:logflare_api_client, "~> 0.3.5", [hex: :logflare_api_client, repo: "hexpm", optional: false]}, {:logflare_etso, "~> 1.1.2", [hex: :logflare_etso, repo: "hexpm", optional: false]}, {:typed_struct, "~> 0.3.0", [hex: :typed_struct, repo: "hexpm", optional: false]}], "hexpm", "00998d81b3c481ad93d2bf25e66d1ddb1a01ad77d994e2c1a7638c6da94755c5"},
- "mime": {:hex, :mime, "2.0.6", "8f18486773d9b15f95f4f4f1e39b710045fa1de891fada4516559967276e4dc2", [:mix], [], "hexpm", "c9945363a6b26d747389aac3643f8e0e09d30499a138ad64fe8fd1d13d9b153e"},
+ "mime": {:hex, :mime, "2.0.7", "b8d739037be7cd402aee1ba0306edfdef982687ee7e9859bee6198c1e7e2f128", [:mix], [], "hexpm", "6171188e399ee16023ffc5b76ce445eb6d9672e2e241d2df6050f3c771e80ccd"},
"mimic": {:hex, :mimic, "1.12.0", "34c9d1fb8e756df09ca5f96861d273f2bb01063df1a6a51a4c101f9ad7f07a9c", [:mix], [{:ham, "~> 0.2", [hex: :ham, repo: "hexpm", optional: false]}], "hexpm", "eaa43d495d6f3bc8099b28886e05a1b09a2a6be083f6385c3abc17599e5e2c43"},
- "mint": {:hex, :mint, "1.6.2", "af6d97a4051eee4f05b5500671d47c3a67dac7386045d87a904126fd4bbcea2e", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:hpax, "~> 0.1.1 or ~> 0.2.0 or ~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}], "hexpm", "5ee441dffc1892f1ae59127f74afe8fd82fda6587794278d924e4d90ea3d63f9"},
+ "mint": {:hex, :mint, "1.7.1", "113fdb2b2f3b59e47c7955971854641c61f378549d73e829e1768de90fc1abf1", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:hpax, "~> 0.1.1 or ~> 0.2.0 or ~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}], "hexpm", "fceba0a4d0f24301ddee3024ae116df1c3f4bb7a563a731f45fdfeb9d39a231b"},
"mint_web_socket": {:hex, :mint_web_socket, "1.0.4", "0b539116dbb3d3f861cdf5e15e269a933cb501c113a14db7001a3157d96ffafd", [:mix], [{:mint, ">= 1.4.1 and < 2.0.0-0", [hex: :mint, repo: "hexpm", optional: false]}], "hexpm", "027d4c5529c45a4ba0ce27a01c0f35f284a5468519c045ca15f43decb360a991"},
- "mix_audit": {:hex, :mix_audit, "2.1.4", "0a23d5b07350cdd69001c13882a4f5fb9f90fbd4cbf2ebc190a2ee0d187ea3e9", [:make, :mix], [{:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:yaml_elixir, "~> 2.11", [hex: :yaml_elixir, repo: "hexpm", optional: false]}], "hexpm", "fd807653cc8c1cada2911129c7eb9e985e3cc76ebf26f4dd628bb25bbcaa7099"},
+ "mix_audit": {:hex, :mix_audit, "2.1.5", "c0f77cee6b4ef9d97e37772359a187a166c7a1e0e08b50edf5bf6959dfe5a016", [:make, :mix], [{:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:yaml_elixir, "~> 2.11", [hex: :yaml_elixir, repo: "hexpm", optional: false]}], "hexpm", "87f9298e21da32f697af535475860dc1d3617a010e0b418d2ec6142bc8b42d69"},
"mix_test_watch": {:hex, :mix_test_watch, "1.3.0", "2ffc9f72b0d1f4ecf0ce97b044e0e3c607c3b4dc21d6228365e8bc7c2856dc77", [:mix], [{:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}], "hexpm", "f9e5edca976857ffac78632e635750d158df14ee2d6185a15013844af7570ffe"},
"nimble_options": {:hex, :nimble_options, "1.1.1", "e3a492d54d85fc3fd7c5baf411d9d2852922f66e69476317787a7b2bb000a61b", [:mix], [], "hexpm", "821b2470ca9442c4b6984882fe9bb0389371b8ddec4d45a9504f00a66f650b44"},
"nimble_pool": {:hex, :nimble_pool, "1.1.0", "bf9c29fbdcba3564a8b800d1eeb5a3c58f36e1e11d7b7fb2e084a643f645f06b", [:mix], [], "hexpm", "af2e4e6b34197db81f7aad230c1118eac993acc0dae6bc83bac0126d4ae0813a"},
- "observer_cli": {:hex, :observer_cli, "1.8.1", "edfe0c0f983631961599326f239f6e99750aba7387515002b1284dcfe7fcd6d2", [:mix, :rebar3], [{:recon, "~> 2.5.6", [hex: :recon, repo: "hexpm", optional: false]}], "hexpm", "a3cd6300dd8290ade93d688fbd79c872e393b01256309dd7a653feb13c434fb4"},
+ "observer_cli": {:hex, :observer_cli, "1.8.4", "09030c04d2480499037ba33d801c6e02adba4e7244a05e05b984b5a82843be71", [:mix, :rebar3], [{:recon, "~> 2.5.6", [hex: :recon, repo: "hexpm", optional: false]}], "hexpm", "0fcd71ac723bcd2d91266d99b3c3ccd9465c71c9f392d900cea8effdc1a1485c"},
"octo_fetch": {:hex, :octo_fetch, "0.4.0", "074b5ecbc08be10b05b27e9db08bc20a3060142769436242702931c418695b19", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~> 1.1", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}], "hexpm", "cf8be6f40cd519d7000bb4e84adcf661c32e59369ca2827c4e20042eda7a7fc6"},
- "open_api_spex": {:hex, :open_api_spex, "3.21.2", "6a704f3777761feeb5657340250d6d7332c545755116ca98f33d4b875777e1e5", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:plug, "~> 1.7", [hex: :plug, repo: "hexpm", optional: false]}, {:poison, "~> 3.0 or ~> 4.0 or ~> 5.0 or ~> 6.0", [hex: :poison, repo: "hexpm", optional: true]}, {:ymlr, "~> 2.0 or ~> 3.0 or ~> 4.0 or ~> 5.0", [hex: :ymlr, repo: "hexpm", optional: true]}], "hexpm", "f42ae6ed668b895ebba3e02773cfb4b41050df26f803f2ef634c72a7687dc387"},
- "opentelemetry": {:hex, :opentelemetry, "1.5.0", "7dda6551edfc3050ea4b0b40c0d2570423d6372b97e9c60793263ef62c53c3c2", [:rebar3], [{:opentelemetry_api, "~> 1.4", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}], "hexpm", "cdf4f51d17b592fc592b9a75f86a6f808c23044ba7cf7b9534debbcc5c23b0ee"},
- "opentelemetry_api": {:hex, :opentelemetry_api, "1.4.0", "63ca1742f92f00059298f478048dfb826f4b20d49534493d6919a0db39b6db04", [:mix, :rebar3], [], "hexpm", "3dfbbfaa2c2ed3121c5c483162836c4f9027def469c41578af5ef32589fcfc58"},
+ "open_api_spex": {:hex, :open_api_spex, "3.22.0", "fbf90dc82681dc042a4ee79853c8e989efbba73d9e87439085daf849bbf8bc20", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:plug, "~> 1.7", [hex: :plug, repo: "hexpm", optional: false]}, {:poison, "~> 3.0 or ~> 4.0 or ~> 5.0 or ~> 6.0", [hex: :poison, repo: "hexpm", optional: true]}, {:ymlr, "~> 2.0 or ~> 3.0 or ~> 4.0 or ~> 5.0", [hex: :ymlr, repo: "hexpm", optional: true]}], "hexpm", "dd751ddbdd709bb4a5313e9a24530da6e66594773c7242a0c2592cbd9f589063"},
+ "opentelemetry": {:hex, :opentelemetry, "1.6.0", "0954dbe12f490ee7b126c9e924cf60141b1238a02dfc700907eadde4dcc20460", [:rebar3], [{:opentelemetry_api, "~> 1.4.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}], "hexpm", "5fd0123d65d2649f10e478e7444927cd9fbdffcaeb8c1c2fcae3d486d18c5e62"},
+ "opentelemetry_api": {:hex, :opentelemetry_api, "1.4.1", "e071429a37441a0fe9097eeea0ff921ebadce8eba8e1ce297b05a43c7a0d121f", [:mix, :rebar3], [], "hexpm", "39bdb6ad740bc13b16215cb9f233d66796bbae897f3bf6eb77abb712e87c3c26"},
"opentelemetry_cowboy": {:hex, :opentelemetry_cowboy, "1.0.0", "786c7cde66a2493323c79d2c94e679ff501d459a9b403d8b60b9bef116333117", [:rebar3], [{:cowboy_telemetry, "~> 0.4", [hex: :cowboy_telemetry, repo: "hexpm", optional: false]}, {:opentelemetry_api, "~> 1.4", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:opentelemetry_semantic_conventions, "~> 1.27", [hex: :opentelemetry_semantic_conventions, repo: "hexpm", optional: false]}, {:opentelemetry_telemetry, "~> 1.1", [hex: :opentelemetry_telemetry, repo: "hexpm", optional: false]}, {:otel_http, "~> 0.2", [hex: :otel_http, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.1", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7575716eaccacd0eddc3e7e61403aecb5d0a6397183987d6049094aeb0b87a7c"},
"opentelemetry_ecto": {:hex, :opentelemetry_ecto, "1.2.0", "2382cb47ddc231f953d3b8263ed029d87fbf217915a1da82f49159d122b64865", [:mix], [{:opentelemetry_api, "~> 1.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:opentelemetry_process_propagator, "~> 0.2", [hex: :opentelemetry_process_propagator, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "70dfa2e79932e86f209df00e36c980b17a32f82d175f0068bf7ef9a96cf080cf"},
- "opentelemetry_exporter": {:hex, :opentelemetry_exporter, "1.8.0", "5d546123230771ef4174e37bedfd77e3374913304cd6ea3ca82a2add49cd5d56", [:rebar3], [{:grpcbox, ">= 0.0.0", [hex: :grpcbox, repo: "hexpm", optional: false]}, {:opentelemetry, "~> 1.5.0", [hex: :opentelemetry, repo: "hexpm", optional: false]}, {:opentelemetry_api, "~> 1.4.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:tls_certificate_check, "~> 1.18", [hex: :tls_certificate_check, repo: "hexpm", optional: false]}], "hexpm", "a1f9f271f8d3b02b81462a6bfef7075fd8457fdb06adff5d2537df5e2264d9af"},
+ "opentelemetry_exporter": {:hex, :opentelemetry_exporter, "1.9.0", "e344bf5e3dab2815fe381b0cac172c06cfc29ecf792c5d74cbbd2b3184af359c", [:rebar3], [{:grpcbox, ">= 0.0.0", [hex: :grpcbox, repo: "hexpm", optional: false]}, {:opentelemetry, "~> 1.6.0", [hex: :opentelemetry, repo: "hexpm", optional: false]}, {:opentelemetry_api, "~> 1.4.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:tls_certificate_check, "~> 1.18", [hex: :tls_certificate_check, repo: "hexpm", optional: false]}], "hexpm", "2030a59e33afff6aaeba847d865c8db5dc3873db87a9257df2ca03cafd9e0478"},
"opentelemetry_phoenix": {:hex, :opentelemetry_phoenix, "2.0.1", "c664cdef205738cffcd409b33599439a4ffb2035ef6e21a77927ac1da90463cb", [:mix], [{:nimble_options, "~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:opentelemetry_api, "~> 1.4", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:opentelemetry_process_propagator, "~> 0.3", [hex: :opentelemetry_process_propagator, repo: "hexpm", optional: false]}, {:opentelemetry_semantic_conventions, "~> 1.27", [hex: :opentelemetry_semantic_conventions, repo: "hexpm", optional: false]}, {:opentelemetry_telemetry, "~> 1.1", [hex: :opentelemetry_telemetry, repo: "hexpm", optional: false]}, {:otel_http, "~> 0.2", [hex: :otel_http, repo: "hexpm", optional: false]}, {:plug, ">= 1.11.0", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "a24fdccdfa6b890c8892c6366beab4a15a27ec0c692b0f77ec2a862e7b235f6e"},
"opentelemetry_process_propagator": {:hex, :opentelemetry_process_propagator, "0.3.0", "ef5b2059403a1e2b2d2c65914e6962e56371570b8c3ab5323d7a8d3444fb7f84", [:mix, :rebar3], [{:opentelemetry_api, "~> 1.0", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}], "hexpm", "7243cb6de1523c473cba5b1aefa3f85e1ff8cc75d08f367104c1e11919c8c029"},
"opentelemetry_semantic_conventions": {:hex, :opentelemetry_semantic_conventions, "1.27.0", "acd0194a94a1e57d63da982ee9f4a9f88834ae0b31b0bd850815fe9be4bbb45f", [:mix, :rebar3], [], "hexpm", "9681ccaa24fd3d810b4461581717661fd85ff7019b082c2dff89c7d5b1fc2864"},
"opentelemetry_telemetry": {:hex, :opentelemetry_telemetry, "1.1.2", "410ab4d76b0921f42dbccbe5a7c831b8125282850be649ee1f70050d3961118a", [:mix, :rebar3], [{:opentelemetry_api, "~> 1.3", [hex: :opentelemetry_api, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.1", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "641ab469deb181957ac6d59bce6e1321d5fe2a56df444fc9c19afcad623ab253"},
"otel_http": {:hex, :otel_http, "0.2.0", "b17385986c7f1b862f5d577f72614ecaa29de40392b7618869999326b9a61d8a", [:rebar3], [], "hexpm", "f2beadf922c8cfeb0965488dd736c95cc6ea8b9efce89466b3904d317d7cc717"},
- "phoenix": {:hex, :phoenix, "1.7.19", "36617efe5afbd821099a8b994ff4618a340a5bfb25531a1802c4d4c634017a57", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 2.1", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.7", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:plug_crypto, "~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:websock_adapter, "~> 0.5.3", [hex: :websock_adapter, repo: "hexpm", optional: false]}], "hexpm", "ba4dc14458278773f905f8ae6c2ec743d52c3a35b6b353733f64f02dfe096cd6"},
+ "peep": {:hex, :peep, "4.3.1", "5157b7ed02d1fa90af2f67768230084c8bc82ec1513e6982e46d6fb1ec5f957f", [:mix], [{:nimble_options, "~> 1.1", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:plug, "~> 1.16", [hex: :plug, repo: "hexpm", optional: true]}, {:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "5e96cca0c194a1ed8b0a8b109fa2244a3bfb23acf2b45c01434007ffb67859fe"},
+ "phoenix": {:git, "https://github.com/supabase/phoenix.git", "7b884cc0cc1a49ad2bc272acda2e622b3e11c139", [branch: "feat/presence-custom-dispatcher-1.7.19"]},
"phoenix_ecto": {:hex, :phoenix_ecto, "4.4.3", "86e9878f833829c3f66da03d75254c155d91d72a201eb56ae83482328dc7ca93", [:mix], [{:ecto, "~> 3.5", [hex: :ecto, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:plug, "~> 1.9", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "d36c401206f3011fefd63d04e8ef626ec8791975d9d107f9a0817d426f61ac07"},
"phoenix_html": {:hex, :phoenix_html, "3.3.4", "42a09fc443bbc1da37e372a5c8e6755d046f22b9b11343bf885067357da21cb3", [:mix], [{:plug, "~> 1.5", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "0249d3abec3714aff3415e7ee3d9786cb325be3151e6c4b3021502c585bf53fb"},
- "phoenix_live_dashboard": {:hex, :phoenix_live_dashboard, "0.8.6", "7b1f0327f54c9eb69845fd09a77accf922f488c549a7e7b8618775eb603a62c7", [:mix], [{:ecto, "~> 3.6.2 or ~> 3.7", [hex: :ecto, repo: "hexpm", optional: true]}, {:ecto_mysql_extras, "~> 0.5", [hex: :ecto_mysql_extras, repo: "hexpm", optional: true]}, {:ecto_psql_extras, "~> 0.7", [hex: :ecto_psql_extras, repo: "hexpm", optional: true]}, {:ecto_sqlite3_extras, "~> 1.1.7 or ~> 1.2.0", [hex: :ecto_sqlite3_extras, repo: "hexpm", optional: true]}, {:mime, "~> 1.6 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:phoenix_live_view, "~> 0.19 or ~> 1.0", [hex: :phoenix_live_view, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6 or ~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "1681ab813ec26ca6915beb3414aa138f298e17721dc6a2bde9e6eb8a62360ff6"},
- "phoenix_live_reload": {:hex, :phoenix_live_reload, "1.5.3", "f2161c207fda0e4fb55165f650f7f8db23f02b29e3bff00ff7ef161d6ac1f09d", [:mix], [{:file_system, "~> 0.3 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:phoenix, "~> 1.4", [hex: :phoenix, repo: "hexpm", optional: false]}], "hexpm", "b4ec9cd73cb01ff1bd1cac92e045d13e7030330b74164297d1aee3907b54803c"},
+ "phoenix_live_dashboard": {:hex, :phoenix_live_dashboard, "0.8.7", "405880012cb4b706f26dd1c6349125bfc903fb9e44d1ea668adaf4e04d4884b7", [:mix], [{:ecto, "~> 3.6.2 or ~> 3.7", [hex: :ecto, repo: "hexpm", optional: true]}, {:ecto_mysql_extras, "~> 0.5", [hex: :ecto_mysql_extras, repo: "hexpm", optional: true]}, {:ecto_psql_extras, "~> 0.7", [hex: :ecto_psql_extras, repo: "hexpm", optional: true]}, {:ecto_sqlite3_extras, "~> 1.1.7 or ~> 1.2.0", [hex: :ecto_sqlite3_extras, repo: "hexpm", optional: true]}, {:mime, "~> 1.6 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:phoenix_live_view, "~> 0.19 or ~> 1.0", [hex: :phoenix_live_view, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6 or ~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "3a8625cab39ec261d48a13b7468dc619c0ede099601b084e343968309bd4d7d7"},
+ "phoenix_live_reload": {:hex, :phoenix_live_reload, "1.6.1", "05df733a09887a005ed0d69a7fc619d376aea2730bf64ce52ac51ce716cc1ef0", [:mix], [{:file_system, "~> 0.2.10 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:phoenix, "~> 1.4", [hex: :phoenix, repo: "hexpm", optional: false]}], "hexpm", "74273843d5a6e4fef0bbc17599f33e3ec63f08e69215623a0cd91eea4288e5a0"},
"phoenix_live_view": {:hex, :phoenix_live_view, "0.20.17", "f396bbdaf4ba227b82251eb75ac0afa6b3da5e509bc0d030206374237dfc9450", [:mix], [{:floki, "~> 0.36", [hex: :floki, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix, "~> 1.6.15 or ~> 1.7.0", [hex: :phoenix, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 3.3 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.15", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.2 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "a61d741ffb78c85fdbca0de084da6a48f8ceb5261a79165b5a0b59e5f65ce98b"},
"phoenix_pubsub": {:hex, :phoenix_pubsub, "2.1.3", "3168d78ba41835aecad272d5e8cd51aa87a7ac9eb836eabc42f6e57538e3731d", [:mix], [], "hexpm", "bba06bc1dcfd8cb086759f0edc94a8ba2bc8896d5331a1e2c2902bf8e36ee502"},
"phoenix_template": {:hex, :phoenix_template, "1.0.4", "e2092c132f3b5e5b2d49c96695342eb36d0ed514c5b252a77048d5969330d639", [:mix], [{:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}], "hexpm", "2c0c81f0e5c6753faf5cca2f229c9709919aba34fab866d3bc05060c9c444206"},
"phoenix_view": {:hex, :phoenix_view, "2.0.4", "b45c9d9cf15b3a1af5fb555c674b525391b6a1fe975f040fb4d913397b31abf4", [:mix], [{:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}], "hexpm", "4e992022ce14f31fe57335db27a28154afcc94e9983266835bb3040243eb620b"},
- "plug": {:hex, :plug, "1.16.1", "40c74619c12f82736d2214557dedec2e9762029b2438d6d175c5074c933edc9d", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "a13ff6b9006b03d7e33874945b2755253841b238c34071ed85b0e86057f8cddc"},
- "plug_cowboy": {:hex, :plug_cowboy, "2.7.2", "fdadb973799ae691bf9ecad99125b16625b1c6039999da5fe544d99218e662e4", [:mix], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:cowboy_telemetry, "~> 0.3", [hex: :cowboy_telemetry, repo: "hexpm", optional: false]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "245d8a11ee2306094840c000e8816f0cbed69a23fc0ac2bcf8d7835ae019bb2f"},
- "plug_crypto": {:hex, :plug_crypto, "2.1.0", "f44309c2b06d249c27c8d3f65cfe08158ade08418cf540fd4f72d4d6863abb7b", [:mix], [], "hexpm", "131216a4b030b8f8ce0f26038bc4421ae60e4bb95c5cf5395e1421437824c4fa"},
+ "plug": {:hex, :plug, "1.18.1", "5067f26f7745b7e31bc3368bc1a2b818b9779faa959b49c934c17730efc911cf", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "57a57db70df2b422b564437d2d33cf8d33cd16339c1edb190cd11b1a3a546cc2"},
+ "plug_cowboy": {:hex, :plug_cowboy, "2.7.4", "729c752d17cf364e2b8da5bdb34fb5804f56251e88bb602aff48ae0bd8673d11", [:mix], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:cowboy_telemetry, "~> 0.3", [hex: :cowboy_telemetry, repo: "hexpm", optional: false]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "9b85632bd7012615bae0a5d70084deb1b25d2bcbb32cab82d1e9a1e023168aa3"},
+ "plug_crypto": {:hex, :plug_crypto, "2.1.1", "19bda8184399cb24afa10be734f84a16ea0a2bc65054e23a62bb10f06bc89491", [:mix], [], "hexpm", "6470bce6ffe41c8bd497612ffde1a7e4af67f36a15eea5f921af71cf3e11247c"},
"poolboy": {:hex, :poolboy, "1.5.2", "392b007a1693a64540cead79830443abf5762f5d30cf50bc95cb2c1aaafa006b", [:rebar3], [], "hexpm", "dad79704ce5440f3d5a3681c8590b9dc25d1a561e8f5a9c995281012860901e3"},
- "postgres_replication": {:git, "https://github.com/filipecabaco/postgres_replication.git", "69129221f0263aa13faa5fbb8af97c28aeb4f71c", []},
+ "postgres_replication": {:git, "https://github.com/filipecabaco/postgres_replication.git", "3b0700ee38a1dddaf7936c5793d6f35431fee2cd", []},
"postgrex": {:hex, :postgrex, "0.20.0", "363ed03ab4757f6bc47942eff7720640795eb557e1935951c1626f0d303a3aed", [:mix], [{:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "d36ef8b36f323d29505314f704e21a1a038e2dc387c6409ee0cd24144e187c0f"},
- "prom_ex": {:hex, :prom_ex, "1.9.0", "63e6dda6c05cdeec1f26c48443dcc38ffd2118b3665ae8d2bd0e5b79f2aea03e", [:mix], [{:absinthe, ">= 1.6.0", [hex: :absinthe, repo: "hexpm", optional: true]}, {:broadway, ">= 1.0.2", [hex: :broadway, repo: "hexpm", optional: true]}, {:ecto, ">= 3.5.0", [hex: :ecto, repo: "hexpm", optional: true]}, {:finch, "~> 0.15", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.2", [hex: :jason, repo: "hexpm", optional: false]}, {:oban, ">= 2.4.0", [hex: :oban, repo: "hexpm", optional: true]}, {:octo_fetch, "~> 0.3", [hex: :octo_fetch, repo: "hexpm", optional: false]}, {:phoenix, ">= 1.5.0", [hex: :phoenix, repo: "hexpm", optional: true]}, {:phoenix_live_view, ">= 0.14.0", [hex: :phoenix_live_view, repo: "hexpm", optional: true]}, {:plug, ">= 1.12.1", [hex: :plug, repo: "hexpm", optional: true]}, {:plug_cowboy, "~> 2.5", [hex: :plug_cowboy, repo: "hexpm", optional: false]}, {:telemetry, ">= 1.0.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}, {:telemetry_metrics_prometheus_core, "~> 1.0", [hex: :telemetry_metrics_prometheus_core, repo: "hexpm", optional: false]}, {:telemetry_poller, "~> 1.0", [hex: :telemetry_poller, repo: "hexpm", optional: false]}], "hexpm", "01f3d4f69ec93068219e686cc65e58a29c42bea5429a8ff4e2121f19db178ee6"},
- "ranch": {:hex, :ranch, "1.8.0", "8c7a100a139fd57f17327b6413e4167ac559fbc04ca7448e9be9057311597a1d", [:make, :rebar3], [], "hexpm", "49fbcfd3682fab1f5d109351b61257676da1a2fdbe295904176d5e521a2ddfe5"},
+ "prom_ex": {:hex, :prom_ex, "1.11.0", "1f6d67f2dead92224cb4f59beb3e4d319257c5728d9638b4a5e8ceb51a4f9c7e", [:mix], [{:absinthe, ">= 1.7.0", [hex: :absinthe, repo: "hexpm", optional: true]}, {:broadway, ">= 1.1.0", [hex: :broadway, repo: "hexpm", optional: true]}, {:ecto, ">= 3.11.0", [hex: :ecto, repo: "hexpm", optional: true]}, {:finch, "~> 0.18", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:oban, ">= 2.10.0", [hex: :oban, repo: "hexpm", optional: true]}, {:octo_fetch, "~> 0.4", [hex: :octo_fetch, repo: "hexpm", optional: false]}, {:peep, "~> 3.0", [hex: :peep, repo: "hexpm", optional: false]}, {:phoenix, ">= 1.7.0", [hex: :phoenix, repo: "hexpm", optional: true]}, {:phoenix_live_view, ">= 0.20.0", [hex: :phoenix_live_view, repo: "hexpm", optional: true]}, {:plug, ">= 1.16.0", [hex: :plug, repo: "hexpm", optional: true]}, {:plug_cowboy, ">= 2.6.0", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:telemetry, ">= 1.0.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}, {:telemetry_metrics_prometheus_core, "~> 1.2", [hex: :telemetry_metrics_prometheus_core, repo: "hexpm", optional: false]}, {:telemetry_poller, "~> 1.1", [hex: :telemetry_poller, repo: "hexpm", optional: false]}], "hexpm", "76b074bc3730f0802978a7eb5c7091a65473eaaf07e99ec9e933138dcc327805"},
+ "ranch": {:hex, :ranch, "2.2.0", "25528f82bc8d7c6152c57666ca99ec716510fe0925cb188172f41ce93117b1b0", [:make, :rebar3], [], "hexpm", "fa0b99a1780c80218a4197a59ea8d3bdae32fbff7e88527d7d8a4787eff4f8e7"},
"recon": {:hex, :recon, "2.5.6", "9052588e83bfedfd9b72e1034532aee2a5369d9d9343b61aeb7fbce761010741", [:mix, :rebar3], [], "hexpm", "96c6799792d735cc0f0fd0f86267e9d351e63339cbe03df9d162010cefc26bb0"},
- "req": {:hex, :req, "0.5.10", "a3a063eab8b7510785a467f03d30a8d95f66f5c3d9495be3474b61459c54376c", [:mix], [{:brotli, "~> 0.3.1", [hex: :brotli, repo: "hexpm", optional: true]}, {:ezstd, "~> 1.0", [hex: :ezstd, repo: "hexpm", optional: true]}, {:finch, "~> 0.17", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:mime, "~> 2.0.6 or ~> 2.1", [hex: :mime, repo: "hexpm", optional: false]}, {:nimble_csv, "~> 1.0", [hex: :nimble_csv, repo: "hexpm", optional: true]}, {:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "8a604815743f8a2d3b5de0659fa3137fa4b1cffd636ecb69b30b2b9b2c2559be"},
+ "req": {:hex, :req, "0.5.15", "662020efb6ea60b9f0e0fac9be88cd7558b53fe51155a2d9899de594f9906ba9", [:mix], [{:brotli, "~> 0.3.1", [hex: :brotli, repo: "hexpm", optional: true]}, {:ezstd, "~> 1.0", [hex: :ezstd, repo: "hexpm", optional: true]}, {:finch, "~> 0.17", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:mime, "~> 2.0.6 or ~> 2.1", [hex: :mime, repo: "hexpm", optional: false]}, {:nimble_csv, "~> 1.0", [hex: :nimble_csv, repo: "hexpm", optional: true]}, {:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "a6513a35fad65467893ced9785457e91693352c70b58bbc045b47e5eb2ef0c53"},
"sleeplocks": {:hex, :sleeplocks, "1.1.3", "96a86460cc33b435c7310dbd27ec82ca2c1f24ae38e34f8edde97f756503441a", [:rebar3], [], "hexpm", "d3b3958552e6eb16f463921e70ae7c767519ef8f5be46d7696cc1ed649421321"},
"snabbkaffe": {:git, "https://github.com/kafka4beam/snabbkaffe", "b59298334ed349556f63405d1353184c63c66534", [tag: "1.0.10"]},
- "sobelow": {:hex, :sobelow, "0.13.0", "218afe9075904793f5c64b8837cc356e493d88fddde126a463839351870b8d1e", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "cd6e9026b85fc35d7529da14f95e85a078d9dd1907a9097b3ba6ac7ebbe34a0d"},
+ "sobelow": {:hex, :sobelow, "0.14.1", "2f81e8632f15574cba2402bcddff5497b413c01e6f094bc0ab94e83c2f74db81", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "8fac9a2bd90fdc4b15d6fca6e1608efb7f7c600fa75800813b794ee9364c87f2"},
"ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.7", "354c321cf377240c7b8716899e182ce4890c5938111a1296add3ec74cf1715df", [:make, :mix, :rebar3], [], "hexpm", "fe4c190e8f37401d30167c8c405eda19469f34577987c76dde613e838bbc67f8"},
- "statistex": {:hex, :statistex, "1.0.0", "f3dc93f3c0c6c92e5f291704cf62b99b553253d7969e9a5fa713e5481cd858a5", [:mix], [], "hexpm", "ff9d8bee7035028ab4742ff52fc80a2aa35cece833cf5319009b52f1b5a86c27"},
+ "statistex": {:hex, :statistex, "1.1.0", "7fec1eb2f580a0d2c1a05ed27396a084ab064a40cfc84246dbfb0c72a5c761e5", [:mix], [], "hexpm", "f5950ea26ad43246ba2cce54324ac394a4e7408fdcf98b8e230f503a0cba9cf5"},
"syn": {:hex, :syn, "3.3.0", "4684a909efdfea35ce75a9662fc523e4a8a4e8169a3df275e4de4fa63f99c486", [:rebar3], [], "hexpm", "e58ee447bc1094bdd21bf0acc102b1fbf99541a508cd48060bf783c245eaf7d6"},
"table_rex": {:hex, :table_rex, "4.1.0", "fbaa8b1ce154c9772012bf445bfb86b587430fb96f3b12022d3f35ee4a68c918", [:mix], [], "hexpm", "95932701df195d43bc2d1c6531178fc8338aa8f38c80f098504d529c43bc2601"},
- "tailwind": {:hex, :tailwind, "0.2.4", "5706ec47182d4e7045901302bf3a333e80f3d1af65c442ba9a9eed152fb26c2e", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}], "hexpm", "c6e4a82b8727bab593700c998a4d98cf3d8025678bfde059aed71d0000c3e463"},
+ "tailwind": {:hex, :tailwind, "0.4.1", "e7bcc222fe96a1e55f948e76d13dd84a1a7653fb051d2a167135db3b4b08d3e9", [:mix], [], "hexpm", "6249d4f9819052911120dbdbe9e532e6bd64ea23476056adb7f730aa25c220d1"},
"telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"},
- "telemetry_metrics": {:hex, :telemetry_metrics, "0.6.2", "2caabe9344ec17eafe5403304771c3539f3b6e2f7fb6a6f602558c825d0d0bfb", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "9b43db0dc33863930b9ef9d27137e78974756f5f198cae18409970ed6fa5b561"},
+ "telemetry_metrics": {:hex, :telemetry_metrics, "1.1.0", "5bd5f3b5637e0abea0426b947e3ce5dd304f8b3bc6617039e2b5a008adc02f8f", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "e7b79e8ddfde70adb6db8a6623d1778ec66401f366e9a8f5dd0955c56bc8ce67"},
"telemetry_metrics_prometheus_core": {:hex, :telemetry_metrics_prometheus_core, "1.2.1", "c9755987d7b959b557084e6990990cb96a50d6482c683fb9622a63837f3cd3d8", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6 or ~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "5e2c599da4983c4f88a33e9571f1458bf98b0cf6ba930f1dc3a6e8cf45d5afb6"},
- "telemetry_poller": {:hex, :telemetry_poller, "1.1.0", "58fa7c216257291caaf8d05678c8d01bd45f4bdbc1286838a28c4bb62ef32999", [:rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "9eb9d9cbfd81cbd7cdd24682f8711b6e2b691289a0de6826e58452f28c103c8f"},
- "tesla": {:hex, :tesla, "1.13.2", "85afa342eb2ac0fee830cf649dbd19179b6b359bec4710d02a3d5d587f016910", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:exjsx, ">= 3.0.0", [hex: :exjsx, repo: "hexpm", optional: true]}, {:finch, "~> 0.13", [hex: :finch, repo: "hexpm", optional: true]}, {:fuse, "~> 2.4", [hex: :fuse, repo: "hexpm", optional: true]}, {:gun, ">= 1.0.0", [hex: :gun, repo: "hexpm", optional: true]}, {:hackney, "~> 1.6", [hex: :hackney, repo: "hexpm", optional: true]}, {:ibrowse, "4.4.2", [hex: :ibrowse, repo: "hexpm", optional: true]}, {:jason, ">= 1.0.0", [hex: :jason, repo: "hexpm", optional: true]}, {:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.0", [hex: :mint, repo: "hexpm", optional: true]}, {:mox, "~> 1.0", [hex: :mox, repo: "hexpm", optional: true]}, {:msgpax, "~> 2.3", [hex: :msgpax, repo: "hexpm", optional: true]}, {:poison, ">= 1.0.0", [hex: :poison, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: true]}], "hexpm", "960609848f1ef654c3cdfad68453cd84a5febecb6ed9fed9416e36cd9cd724f9"},
- "tls_certificate_check": {:hex, :tls_certificate_check, "1.28.0", "c39bf21f67c2d124ae905454fad00f27e625917e8ab1009146e916e1df6ab275", [:rebar3], [{:ssl_verify_fun, "~> 1.1", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}], "hexpm", "3ab058c3f9457fffca916729587415f0ddc822048a0e5b5e2694918556d92df1"},
+ "telemetry_poller": {:hex, :telemetry_poller, "1.3.0", "d5c46420126b5ac2d72bc6580fb4f537d35e851cc0f8dbd571acf6d6e10f5ec7", [:rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "51f18bed7128544a50f75897db9974436ea9bfba560420b646af27a9a9b35211"},
+ "tesla": {:hex, :tesla, "1.15.3", "3a2b5c37f09629b8dcf5d028fbafc9143c0099753559d7fe567eaabfbd9b8663", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:exjsx, ">= 3.0.0", [hex: :exjsx, repo: "hexpm", optional: true]}, {:finch, "~> 0.13", [hex: :finch, repo: "hexpm", optional: true]}, {:fuse, "~> 2.4", [hex: :fuse, repo: "hexpm", optional: true]}, {:gun, ">= 1.0.0", [hex: :gun, repo: "hexpm", optional: true]}, {:hackney, "~> 1.21", [hex: :hackney, repo: "hexpm", optional: true]}, {:ibrowse, "4.4.2", [hex: :ibrowse, repo: "hexpm", optional: true]}, {:jason, ">= 1.0.0", [hex: :jason, repo: "hexpm", optional: true]}, {:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.0", [hex: :mint, repo: "hexpm", optional: true]}, {:mox, "~> 1.0", [hex: :mox, repo: "hexpm", optional: true]}, {:msgpax, "~> 2.3", [hex: :msgpax, repo: "hexpm", optional: true]}, {:poison, ">= 1.0.0", [hex: :poison, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: true]}], "hexpm", "98bb3d4558abc67b92fb7be4cd31bb57ca8d80792de26870d362974b58caeda7"},
+ "tls_certificate_check": {:hex, :tls_certificate_check, "1.29.0", "4473005eb0bbdad215d7083a230e2e076f538d9ea472c8009fd22006a4cfc5f6", [:rebar3], [{:ssl_verify_fun, "~> 1.1", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}], "hexpm", "5b0d0e5cb0f928bc4f210df667304ed91c5bff2a391ce6bdedfbfe70a8f096c5"},
"typed_struct": {:hex, :typed_struct, "0.3.0", "939789e3c1dca39d7170c87f729127469d1315dcf99fee8e152bb774b17e7ff7", [:mix], [], "hexpm", "c50bd5c3a61fe4e198a8504f939be3d3c85903b382bde4865579bc23111d1b6d"},
"unsafe": {:hex, :unsafe, "1.0.2", "23c6be12f6c1605364801f4b47007c0c159497d0446ad378b5cf05f1855c0581", [:mix], [], "hexpm", "b485231683c3ab01a9cd44cb4a79f152c6f3bb87358439c6f68791b85c2df675"},
"uuid": {:hex, :uuid, "1.1.8", "e22fc04499de0de3ed1116b770c7737779f226ceefa0badb3592e64d5cfb4eb9", [:mix], [], "hexpm", "c790593b4c3b601f5dc2378baae7efaf5b3d73c4c6456ba85759905be792f2ac"},
"websock": {:hex, :websock, "0.5.3", "2f69a6ebe810328555b6fe5c831a851f485e303a7c8ce6c5f675abeb20ebdadc", [:mix], [], "hexpm", "6105453d7fac22c712ad66fab1d45abdf049868f253cf719b625151460b8b453"},
"websock_adapter": {:hex, :websock_adapter, "0.5.8", "3b97dc94e407e2d1fc666b2fb9acf6be81a1798a2602294aac000260a7c4a47d", [:mix], [{:bandit, ">= 0.6.0", [hex: :bandit, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.6", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "315b9a1865552212b5f35140ad194e67ce31af45bcee443d4ecb96b5fd3f3782"},
"yamerl": {:hex, :yamerl, "0.10.0", "4ff81fee2f1f6a46f1700c0d880b24d193ddb74bd14ef42cb0bcf46e81ef2f8e", [:rebar3], [], "hexpm", "346adb2963f1051dc837a2364e4acf6eb7d80097c0f53cbdc3046ec8ec4b4e6e"},
- "yaml_elixir": {:hex, :yaml_elixir, "2.11.0", "9e9ccd134e861c66b84825a3542a1c22ba33f338d82c07282f4f1f52d847bd50", [:mix], [{:yamerl, "~> 0.10", [hex: :yamerl, repo: "hexpm", optional: false]}], "hexpm", "53cc28357ee7eb952344995787f4bb8cc3cecbf189652236e9b163e8ce1bc242"},
+ "yaml_elixir": {:hex, :yaml_elixir, "2.12.0", "30343ff5018637a64b1b7de1ed2a3ca03bc641410c1f311a4dbdc1ffbbf449c7", [:mix], [{:yamerl, "~> 0.10", [hex: :yamerl, repo: "hexpm", optional: false]}], "hexpm", "ca6bacae7bac917a7155dca0ab6149088aa7bc800c94d0fe18c5238f53b313c6"},
}
diff --git a/priv/repo/dev_seeds.exs b/priv/repo/dev_seeds.exs
index 7dec7895a..46cba0dec 100644
--- a/priv/repo/dev_seeds.exs
+++ b/priv/repo/dev_seeds.exs
@@ -1,5 +1,3 @@
-import Ecto.Adapters.SQL, only: [query!: 3]
-
alias Realtime.Api.Tenant
alias Realtime.Database
alias Realtime.Repo
@@ -41,19 +39,6 @@ default_db_host = "127.0.0.1"
})
|> Repo.insert!()
- publication = "supabase_realtime"
-
- [
- "drop publication if exists #{publication}",
- "drop table if exists public.test_tenant;",
- "create table public.test_tenant ( id SERIAL PRIMARY KEY, details text );",
- "grant all on table public.test_tenant to anon;",
- "grant all on table public.test_tenant to postgres;",
- "grant all on table public.test_tenant to authenticated;",
- "create publication #{publication} for table public.test_tenant"
- ]
- |> Enum.each(&query!(Repo, &1, []))
-
tenant
end)
@@ -61,10 +46,22 @@ default_db_host = "127.0.0.1"
settings = Database.from_tenant(tenant, "realtime_migrations", :stop)
settings = %{settings | max_restarts: 0, ssl: false}
{:ok, tenant_conn} = Database.connect_db(settings)
+publication = "supabase_realtime"
Postgrex.transaction(tenant_conn, fn db_conn ->
Postgrex.query!(db_conn, "DROP SCHEMA IF EXISTS realtime CASCADE", [])
Postgrex.query!(db_conn, "CREATE SCHEMA IF NOT EXISTS realtime", [])
+
+ [
+ "drop publication if exists #{publication}",
+ "drop table if exists public.test_tenant;",
+ "create table public.test_tenant ( id SERIAL PRIMARY KEY, details text );",
+ "grant all on table public.test_tenant to anon;",
+ "grant all on table public.test_tenant to supabase_admin;",
+ "grant all on table public.test_tenant to authenticated;",
+ "create publication #{publication} for table public.test_tenant"
+ ]
+ |> Enum.each(&Postgrex.query!(db_conn, &1))
end)
case Tenants.Migrations.run_migrations(tenant) do
diff --git a/priv/repo/migrations/20250926223044_set_default_presence_value.exs b/priv/repo/migrations/20250926223044_set_default_presence_value.exs
new file mode 100644
index 000000000..5f1833a34
--- /dev/null
+++ b/priv/repo/migrations/20250926223044_set_default_presence_value.exs
@@ -0,0 +1,10 @@
+defmodule Realtime.Repo.Migrations.SetDefaultPresenceValue do
+ use Ecto.Migration
+ @disable_ddl_transaction true
+ @disable_migration_lock true
+ def change do
+ alter table(:tenants) do
+ modify :max_presence_events_per_second, :integer, default: 1000
+ end
+ end
+end
diff --git a/priv/repo/migrations/20251204170944_nullable_jwt_secrets.exs b/priv/repo/migrations/20251204170944_nullable_jwt_secrets.exs
new file mode 100644
index 000000000..342a80ad9
--- /dev/null
+++ b/priv/repo/migrations/20251204170944_nullable_jwt_secrets.exs
@@ -0,0 +1,13 @@
+defmodule Realtime.Repo.Migrations.NullableJwtSecrets do
+ use Ecto.Migration
+
+ def change do
+ alter table(:tenants) do
+ modify :jwt_secret, :text, null: true
+ end
+
+ create constraint(:tenants, :jwt_secret_or_jwt_jwks_required,
+ check: "jwt_secret IS NOT NULL OR jwt_jwks IS NOT NULL"
+ )
+ end
+end
diff --git a/priv/repo/migrations/20251218000543_ensure_jwt_secret_is_text.exs b/priv/repo/migrations/20251218000543_ensure_jwt_secret_is_text.exs
new file mode 100644
index 000000000..008c9d7db
--- /dev/null
+++ b/priv/repo/migrations/20251218000543_ensure_jwt_secret_is_text.exs
@@ -0,0 +1,9 @@
+defmodule Realtime.Repo.Migrations.EnsureJwtSecretIsText do
+ use Ecto.Migration
+
+ def change do
+ alter table(:tenants) do
+ modify :jwt_secret, :text, null: true
+ end
+ end
+end
diff --git a/priv/repo/migrations/20260209232800_add_max_client_presence_events_per_second.exs b/priv/repo/migrations/20260209232800_add_max_client_presence_events_per_second.exs
new file mode 100644
index 000000000..403ad77c5
--- /dev/null
+++ b/priv/repo/migrations/20260209232800_add_max_client_presence_events_per_second.exs
@@ -0,0 +1,10 @@
+defmodule Realtime.Repo.Migrations.AddMaxClientPresenceEventsPerSecond do
+ use Ecto.Migration
+
+ def change do
+ alter table(:tenants) do
+ add :max_client_presence_events_per_window, :integer, null: true
+ add :client_presence_window_ms, :integer, null: true
+ end
+ end
+end
diff --git a/rel/vm.args.eex b/rel/vm.args.eex
index 278da5524..983e240c4 100644
--- a/rel/vm.args.eex
+++ b/rel/vm.args.eex
@@ -10,8 +10,8 @@
## Tweak GC to run more often
##-env ERL_FULLSWEEP_AFTER 10
-## Limit process heap for all procs to 1000 MB
-+hmax 1000000000
+## Limit process heap for all procs to 2500 MB; +hmax takes its value in words, not bytes
++hmax <%= div(2_500_000_000, :erlang.system_info(:wordsize)) %>
## Set distribution buffer busy limit (default is 1024)
+zdbbl 100000
@@ -19,4 +19,4 @@
## Disable Busy Wait
+sbwt none
+sbwtdio none
-+sbwtdcpu none
\ No newline at end of file
++sbwtdcpu none
diff --git a/run.sh b/run.sh
index 2dddbc1b8..ae4d48e33 100755
--- a/run.sh
+++ b/run.sh
@@ -3,7 +3,7 @@ set -euo pipefail
set -x
ulimit -n
-if [ ! -z "$RLIMIT_NOFILE" ]; then
+if [ ! -z "${RLIMIT_NOFILE:-}" ]; then
echo "Setting RLIMIT_NOFILE to ${RLIMIT_NOFILE}"
ulimit -Sn "$RLIMIT_NOFILE"
fi
@@ -90,7 +90,7 @@ if [ "${ENABLE_ERL_CRASH_DUMP:-false}" = true ]; then
trap upload_crash_dump_to_s3 INT TERM KILL EXIT
fi
-if [[ -n "${GENERATE_CLUSTER_CERTS}" ]] ; then
+if [[ -n "${GENERATE_CLUSTER_CERTS:-}" ]] ; then
generate_certs
fi
diff --git a/test/e2e/tests.ts b/test/e2e/tests.ts
index 2711a959e..4193b06c2 100644
--- a/test/e2e/tests.ts
+++ b/test/e2e/tests.ts
@@ -1,8 +1,5 @@
import { load } from "https://deno.land/std@0.224.0/dotenv/mod.ts";
-import {
- createClient,
- SupabaseClient,
-} from "npm:@supabase/supabase-js@2.49.5-next.5";
+import { createClient, SupabaseClient } from "npm:@supabase/supabase-js@latest";
import { assertEquals } from "https://deno.land/std@0.224.0/assert/mod.ts";
import {
describe,
@@ -69,11 +66,7 @@ describe("broadcast extension", () => {
while (activeChannel.state == "joining") await sleep(0.2);
// Send from unsubscribed channel
- supabase.channel(topic, config).send({
- type: "broadcast",
- event,
- payload: expectedPayload,
- });
+ supabase.channel(topic, config).httpSend(event, expectedPayload);
while (result == null) await sleep(0.2);
diff --git a/test/extensions/postgres_cdc_rls/message_dispatcher_test.exs b/test/extensions/postgres_cdc_rls/message_dispatcher_test.exs
new file mode 100644
index 000000000..3761f41d5
--- /dev/null
+++ b/test/extensions/postgres_cdc_rls/message_dispatcher_test.exs
@@ -0,0 +1,110 @@
+defmodule Extensions.PostgresCdcRls.MessageDispatcherTest do
+ use ExUnit.Case, async: true
+
+ alias Extensions.PostgresCdcRls.MessageDispatcher
+ alias Phoenix.Socket.Broadcast
+
+ defmodule FakeSerializer do
+ def fastlane!(msg), do: {:encoded, msg}
+ end
+
+ describe "dispatch/3" do
+ test "dispatches to fastlane subscribers with matching sub_ids using new api" do
+ parent = self()
+
+ fastlane_pid =
+ spawn(fn ->
+ receive do
+ msg -> send(parent, {:received, msg})
+ end
+ end)
+
+ sub_ids = MapSet.new(["sub_1"])
+ ids = [{"sub_1", 1}]
+
+ subscriptions = [
+ {self(), {:subscriber_fastlane, fastlane_pid, FakeSerializer, ids, "realtime:topic", true}}
+ ]
+
+ payload = Jason.encode!(%{data: "test"})
+
+ assert :ok = MessageDispatcher.dispatch(subscriptions, self(), {"INSERT", payload, sub_ids})
+
+ assert_receive {:received, {:encoded, %Broadcast{topic: "realtime:topic", event: "postgres_changes"}}}
+ end
+
+ test "dispatches to fastlane subscribers with matching sub_ids using old api" do
+ parent = self()
+
+ fastlane_pid =
+ spawn(fn ->
+ receive do
+ msg -> send(parent, {:received, msg})
+ end
+ end)
+
+ sub_ids = MapSet.new(["sub_1"])
+ ids = [{"sub_1", 1}]
+
+ subscriptions = [
+ {self(), {:subscriber_fastlane, fastlane_pid, FakeSerializer, ids, "realtime:topic", false}}
+ ]
+
+ payload = Jason.encode!(%{data: "test"})
+
+ assert :ok = MessageDispatcher.dispatch(subscriptions, self(), {"INSERT", payload, sub_ids})
+
+ assert_receive {:received, {:encoded, %Broadcast{topic: "realtime:topic", event: "INSERT"}}}
+ end
+
+ test "does not dispatch when sub_ids do not match" do
+ parent = self()
+
+ fastlane_pid =
+ spawn(fn ->
+ receive do
+ msg -> send(parent, {:received, msg})
+ after
+ 1000 -> :ok
+ end
+ end)
+
+ sub_ids = MapSet.new(["sub_2"])
+ ids = [{"sub_1", 1}]
+
+ subscriptions = [
+ {self(), {:subscriber_fastlane, fastlane_pid, FakeSerializer, ids, "realtime:topic", true}}
+ ]
+
+ assert :ok = MessageDispatcher.dispatch(subscriptions, self(), {"INSERT", "payload", sub_ids})
+
+ refute_receive {:received, _}
+ end
+
+ test "caches encoded messages across multiple subscribers" do
+ parent = self()
+
+ pids =
+ for _ <- 1..2 do
+ spawn(fn ->
+ receive do
+ msg -> send(parent, {:received, msg})
+ end
+ end)
+ end
+
+ sub_ids = MapSet.new(["sub_1"])
+ ids = [{"sub_1", 1}]
+
+ subscriptions =
+ Enum.map(pids, fn pid ->
+ {self(), {:subscriber_fastlane, pid, FakeSerializer, ids, "realtime:topic", true}}
+ end)
+
+ assert :ok = MessageDispatcher.dispatch(subscriptions, self(), {"INSERT", "payload", sub_ids})
+
+ assert_receive {:received, {:encoded, %Broadcast{}}}
+ assert_receive {:received, {:encoded, %Broadcast{}}}
+ end
+ end
+end
diff --git a/test/integration/distributed_realtime_channel_test.exs b/test/integration/distributed_realtime_channel_test.exs
new file mode 100644
index 000000000..faed00a27
--- /dev/null
+++ b/test/integration/distributed_realtime_channel_test.exs
@@ -0,0 +1,48 @@
+defmodule Realtime.Integration.DistributedRealtimeChannelTest do
  # async: false because this test uses Clustered, which starts a shared peer node
+ use RealtimeWeb.ConnCase,
+ async: false,
+ parameterize: [%{serializer: Phoenix.Socket.V1.JSONSerializer}, %{serializer: RealtimeWeb.Socket.V2Serializer}]
+
+ alias Phoenix.Socket.Message
+
+ alias Realtime.Tenants.Connect
+ alias Realtime.Integration.WebsocketClient
+
+ setup do
+ tenant = Containers.checkout_tenant_unboxed(run_migrations: true)
+
+ {:ok, node} = Clustered.start()
+ region = Realtime.Tenants.region(tenant)
+ {:ok, db_conn} = :erpc.call(node, Connect, :connect, [tenant.external_id, region])
+ assert Connect.ready?(tenant.external_id)
+
+ assert node(db_conn) == node
+ %{tenant: tenant, topic: random_string()}
+ end
+
+ describe "distributed broadcast" do
+ @tag mode: :distributed
+ test "it works", %{tenant: tenant, topic: topic, serializer: serializer} do
+ {:ok, token} =
+ generate_token(tenant, %{exp: System.system_time(:second) + 1000, role: "authenticated", sub: random_string()})
+
+ {:ok, remote_socket} =
+ WebsocketClient.connect(self(), uri(tenant, serializer, 4012), serializer, [{"x-api-key", token}])
+
+ {:ok, socket} = WebsocketClient.connect(self(), uri(tenant, serializer), serializer, [{"x-api-key", token}])
+
+ config = %{broadcast: %{self: false}, private: false}
+ topic = "realtime:#{topic}"
+
+ :ok = WebsocketClient.join(remote_socket, topic, %{config: config})
+ :ok = WebsocketClient.join(socket, topic, %{config: config})
+
+ # Send through one socket and receive through the other (self: false)
+ payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
+ :ok = WebsocketClient.send_event(remote_socket, topic, "broadcast", payload)
+
+ assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 2000
+ end
+ end
+end
diff --git a/test/integration/measure_traffic_test.exs b/test/integration/measure_traffic_test.exs
new file mode 100644
index 000000000..56ee190d5
--- /dev/null
+++ b/test/integration/measure_traffic_test.exs
@@ -0,0 +1,239 @@
+defmodule Realtime.Integration.MeasureTrafficTest do
+ use RealtimeWeb.ConnCase, async: false
+
+ alias Phoenix.Socket.Message
+ alias Realtime.Integration.WebsocketClient
+ alias Realtime.Tenants.ReplicationConnection
+
+ setup [:checkout_tenant_and_connect]
+
+ def handle_telemetry(event, measurements, metadata, name) do
+ tenant = metadata[:tenant]
+ [key] = Enum.take(event, -1)
+ value = Map.get(measurements, :sum) || Map.get(measurements, :value) || Map.get(measurements, :size) || 0
+
+ Agent.update(name, fn state ->
+ state =
+ Map.put_new(
+ state,
+ tenant,
+ %{
+ joins: 0,
+ events: 0,
+ db_events: 0,
+ presence_events: 0,
+ output_bytes: 0,
+ input_bytes: 0
+ }
+ )
+
+ update_in(state, [metadata[:tenant], key], fn v -> (v || 0) + value end)
+ end)
+ end
+
+ defp get_count(event, tenant) do
+ [key] = Enum.take(event, -1)
+
+ :"TestCounter_#{tenant}"
+ |> Agent.get(fn state -> get_in(state, [tenant, key]) || 0 end)
+ end
+
+ describe "measure traffic" do
+ setup %{tenant: tenant} do
+ events = [
+ [:realtime, :channel, :output_bytes],
+ [:realtime, :channel, :input_bytes]
+ ]
+
+ name = :"TestCounter_#{tenant.external_id}"
+
+ {:ok, _} =
+ start_supervised(%{
+ id: 1,
+ start: {Agent, :start_link, [fn -> %{} end, [name: name]]}
+ })
+
+ RateCounterHelper.stop(tenant.external_id)
+ on_exit(fn -> :telemetry.detach({__MODULE__, tenant.external_id}) end)
+ :telemetry.attach_many({__MODULE__, tenant.external_id}, events, &__MODULE__.handle_telemetry/4, name)
+
+ measure_traffic_interval_in_ms = Application.get_env(:realtime, :measure_traffic_interval_in_ms)
+ Application.put_env(:realtime, :measure_traffic_interval_in_ms, 10)
+ on_exit(fn -> Application.put_env(:realtime, :measure_traffic_interval_in_ms, measure_traffic_interval_in_ms) end)
+
+ :ok
+ end
+
+ test "measure traffic for broadcast events", %{tenant: tenant} do
+ {socket, _} = get_connection(tenant)
+ config = %{broadcast: %{self: true}}
+ topic = "realtime:any"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ # Wait for join to complete
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 1000
+ assert_receive %Message{topic: ^topic, event: "presence_state"}, 1000
+
+ for _ <- 1..5 do
+ WebsocketClient.send_event(socket, topic, "broadcast", %{
+ "event" => "TEST",
+ "payload" => %{"msg" => 1},
+ "type" => "broadcast"
+ })
+
+ assert_receive %Message{
+ event: "broadcast",
+ payload: %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"},
+ topic: ^topic
+ },
+ 500
+ end
+
+ # Wait for RateCounter to run
+ RateCounterHelper.tick_tenant_rate_counters!(tenant.external_id)
+ Process.sleep(100)
+
+ output_bytes = get_count([:realtime, :channel, :output_bytes], tenant.external_id)
+ input_bytes = get_count([:realtime, :channel, :input_bytes], tenant.external_id)
+
+ assert output_bytes > 0
+ assert input_bytes > 0
+ end
+
+ test "measure traffic for presence events", %{tenant: tenant} do
+ {socket, _} = get_connection(tenant)
+ config = %{broadcast: %{self: true}, presence: %{enabled: true}}
+ topic = "realtime:any"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ # Wait for join to complete
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 1000
+ assert_receive %Message{topic: ^topic, event: "presence_state"}, 1000
+
+ for _ <- 1..5 do
+ WebsocketClient.send_event(socket, topic, "presence", %{
+ "event" => "TRACK",
+ "payload" => %{name: "realtime_presence_#{:rand.uniform(1000)}", t: 1814.7000000029802},
+ "type" => "presence"
+ })
+ end
+
+ # Wait for RateCounter to run
+ RateCounterHelper.tick_tenant_rate_counters!(tenant.external_id)
+ Process.sleep(100)
+
+ output_bytes = get_count([:realtime, :channel, :output_bytes], tenant.external_id)
+ input_bytes = get_count([:realtime, :channel, :input_bytes], tenant.external_id)
+
+ assert output_bytes > 0, "Expected output_bytes to be greater than 0, got #{output_bytes}"
+ assert input_bytes > 0, "Expected input_bytes to be greater than 0, got #{input_bytes}"
+ end
+
+ test "measure traffic for postgres changes events", %{tenant: tenant, db_conn: db_conn} do
+ Integrations.setup_postgres_changes(db_conn)
+ {socket, _} = get_connection(tenant)
+ config = %{broadcast: %{self: true}, postgres_changes: [%{event: "*", schema: "public"}]}
+ topic = "realtime:any"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ # Wait for join to complete
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 1000
+ assert_receive %Message{topic: ^topic, event: "presence_state"}, 1000
+
+ # Wait for postgres_changes subscription to be ready
+ assert_receive %Message{
+ event: "system",
+ payload: %{
+ "channel" => "any",
+ "extension" => "postgres_changes",
+ "status" => "ok"
+ },
+ topic: ^topic
+ },
+ 8000
+
+ for _ <- 1..5 do
+ Postgrex.query!(db_conn, "INSERT INTO test (details) VALUES ($1)", [random_string()])
+ end
+
+ for _ <- 1..5 do
+ assert_receive %Message{
+ event: "postgres_changes",
+ payload: %{"data" => %{"schema" => "public", "table" => "test", "type" => "INSERT"}},
+ topic: ^topic
+ },
+ 500
+ end
+
+ # Wait for RateCounter to run
+ RateCounterHelper.tick_tenant_rate_counters!(tenant.external_id)
+ Process.sleep(100)
+
+ output_bytes = get_count([:realtime, :channel, :output_bytes], tenant.external_id)
+ input_bytes = get_count([:realtime, :channel, :input_bytes], tenant.external_id)
+
+ assert output_bytes > 0, "Expected output_bytes to be greater than 0, got #{output_bytes}"
+ assert input_bytes > 0, "Expected input_bytes to be greater than 0, got #{input_bytes}"
+ end
+
+ test "measure traffic for db events", %{tenant: tenant, db_conn: db_conn} do
+ {socket, _} = get_connection(tenant)
+ config = %{broadcast: %{self: true}, db: %{enabled: true}}
+ topic = "realtime:any"
+ channel_name = "any"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ # Wait for join to complete
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 1000
+ assert_receive %Message{topic: ^topic, event: "presence_state"}, 1000
+
+ Enum.reduce_while(1..30, nil, fn _, _ ->
+ if ReplicationConnection.whereis(tenant.external_id),
+ do: {:halt, :ok},
+ else:
+ (
+ Process.sleep(500)
+ {:cont, nil}
+ )
+ end)
+
+ for _ <- 1..5 do
+ event = random_string()
+ value = random_string()
+
+ Postgrex.query!(
+ db_conn,
+ "SELECT realtime.send (json_build_object ('value', $1 :: text)::jsonb, $2 :: text, $3 :: text, FALSE::bool);",
+ [value, event, channel_name]
+ )
+
+ assert_receive %Message{
+ event: "broadcast",
+ payload: %{
+ "event" => ^event,
+ "payload" => %{"value" => ^value},
+ "type" => "broadcast"
+ },
+ topic: ^topic,
+ join_ref: nil,
+ ref: nil
+ },
+ 2000
+ end
+
+ # Wait for RateCounter to run
+ RateCounterHelper.tick_tenant_rate_counters!(tenant.external_id)
+ Process.sleep(100)
+
+ output_bytes = get_count([:realtime, :channel, :output_bytes], tenant.external_id)
+ input_bytes = get_count([:realtime, :channel, :input_bytes], tenant.external_id)
+
+ assert output_bytes > 0, "Expected output_bytes to be greater than 0, got #{output_bytes}"
+ assert input_bytes > 0, "Expected input_bytes to be greater than 0, got #{input_bytes}"
+ end
+ end
+end
diff --git a/test/integration/region_aware_migrations_test.exs b/test/integration/region_aware_migrations_test.exs
new file mode 100644
index 000000000..cc3f38051
--- /dev/null
+++ b/test/integration/region_aware_migrations_test.exs
@@ -0,0 +1,75 @@
+defmodule Realtime.Integration.RegionAwareMigrationsTest do
+ use Realtime.DataCase, async: false
+ use Mimic
+
+ alias Containers
+ alias Realtime.Tenants
+ alias Realtime.Tenants.Migrations
+
+ setup do
+ {:ok, port} = Containers.checkout()
+
+ settings = [
+ %{
+ "type" => "postgres_cdc_rls",
+ "settings" => %{
+ "db_host" => "127.0.0.1",
+ "db_name" => "postgres",
+ "db_user" => "supabase_admin",
+ "db_password" => "postgres",
+ "db_port" => "#{port}",
+ "poll_interval" => 100,
+ "poll_max_changes" => 100,
+ "poll_max_record_bytes" => 1_048_576,
+ "region" => "ap-southeast-2",
+ "publication" => "supabase_realtime_test",
+ "ssl_enforced" => false
+ }
+ }
+ ]
+
+ tenant = tenant_fixture(%{extensions: settings})
+ region = Application.get_env(:realtime, :region)
+
+ {:ok, node} =
+ Clustered.start(nil,
+ extra_config: [
+ {:realtime, :region, Tenants.region(tenant)},
+ {:realtime, :master_region, region}
+ ]
+ )
+
+ Process.sleep(100)
+
+ %{tenant: tenant, node: node}
+ end
+
+ test "run_migrations routes to node in tenant's region with expected arguments", %{tenant: tenant, node: node} do
+ assert tenant.migrations_ran == 0
+
+ Realtime.GenRpc
+ |> Mimic.expect(:call, fn
+ called_node, Realtime.Nodes, func, args, opts ->
+ call_original(Realtime.GenRpc, :call, [called_node, Realtime.Nodes, func, args, opts])
+
+ called_node, Migrations, func, args, opts ->
+ assert called_node == node
+ assert func == :start_migration
+ assert opts[:tenant_id] == tenant.external_id
+
+ arg = hd(args)
+ assert arg.tenant_external_id == tenant.external_id
+ assert arg.migrations_ran == tenant.migrations_ran
+ assert arg.settings == hd(tenant.extensions).settings
+
+ assert opts[:timeout] == 50_000
+
+ call_original(Realtime.GenRpc, :call, [node, Migrations, func, args, opts])
+ end)
+
+ assert :ok = Migrations.run_migrations(tenant)
+ Process.sleep(1000)
+ tenant = Realtime.Repo.reload!(tenant)
+ refute tenant.migrations_ran == 0
+ end
+end
diff --git a/test/integration/region_aware_routing_test.exs b/test/integration/region_aware_routing_test.exs
new file mode 100644
index 000000000..6e696891b
--- /dev/null
+++ b/test/integration/region_aware_routing_test.exs
@@ -0,0 +1,228 @@
+defmodule Realtime.Integration.RegionAwareRoutingTest do
+ use Realtime.DataCase, async: false
+ use Mimic
+
+ alias Realtime.Api
+ alias Realtime.Api.Tenant
+ alias Realtime.GenRpc
+ alias Realtime.Nodes
+
+ setup do
+ original_master_region = Application.get_env(:realtime, :master_region)
+
+ on_exit(fn ->
+ Application.put_env(:realtime, :master_region, original_master_region)
+ end)
+
+ Application.put_env(:realtime, :master_region, "eu-west-2")
+
+ {:ok, master_node} =
+ Clustered.start(nil,
+ extra_config: [
+ {:realtime, :region, "eu-west-2"},
+ {:realtime, :master_region, "eu-west-2"}
+ ]
+ )
+
+ Process.sleep(100)
+
+ %{master_node: master_node}
+ end
+
+ test "create_tenant automatically routes to master region", %{master_node: master_node} do
+ external_id = "test_routing_#{System.unique_integer([:positive])}"
+
+ attrs = %{
+ "external_id" => external_id,
+ "name" => external_id,
+ "jwt_secret" => "secret",
+ "public_key" => "public",
+ "extensions" => [],
+ "postgres_cdc_default" => "postgres_cdc_rls",
+ "max_concurrent_users" => 200,
+ "max_events_per_second" => 100
+ }
+
+ Mimic.expect(Realtime.GenRpc, :call, fn node, mod, func, args, opts ->
+ assert node == master_node
+ assert mod == Realtime.Api
+ assert func == :create_tenant
+ assert opts[:tenant_id] == external_id
+
+ call_original(GenRpc, :call, [node, mod, func, args, opts])
+ end)
+
+ result = Api.create_tenant(attrs)
+
+ assert {:ok, %Tenant{} = tenant} = result
+ assert tenant.external_id == external_id
+
+ assert Realtime.Repo.get_by(Tenant, external_id: external_id)
+ end
+
+ test "update_tenant automatically routes to master region", %{master_node: master_node} do
+ # Create tenant on master node first
+ tenant_attrs = %{
+ "external_id" => "test_update_#{System.unique_integer([:positive])}",
+ "name" => "original",
+ "jwt_secret" => "secret",
+ "public_key" => "public",
+ "extensions" => [],
+ "postgres_cdc_default" => "postgres_cdc_rls",
+ "max_concurrent_users" => 200,
+ "max_events_per_second" => 100
+ }
+
+ Realtime.GenRpc
+ |> Mimic.expect(:call, fn node, mod, func, args, opts ->
+ assert node == master_node
+ assert mod == Realtime.Api
+ assert func == :create_tenant
+ assert opts[:tenant_id] == tenant_attrs["external_id"]
+
+ call_original(GenRpc, :call, [node, mod, func, args, opts])
+ end)
+ |> Mimic.expect(:call, fn node, mod, func, args, opts ->
+ assert node == master_node
+ assert mod == Realtime.Api
+ assert func == :update_tenant_by_external_id
+ assert opts[:tenant_id] == tenant_attrs["external_id"]
+
+ call_original(GenRpc, :call, [node, mod, func, args, opts])
+ end)
+
+ tenant = tenant_fixture(tenant_attrs)
+
+ new_name = "updated_via_routing"
+ result = Api.update_tenant_by_external_id(tenant.external_id, %{name: new_name})
+
+ assert {:ok, %Tenant{} = updated} = result
+ assert updated.name == new_name
+
+ reloaded = Realtime.Repo.get(Tenant, tenant.id)
+ assert reloaded.name == new_name
+ end
+
+ test "delete_tenant_by_external_id automatically routes to master region", %{master_node: master_node} do
+ # Create tenant on master node first
+ tenant_attrs = %{
+ "external_id" => "test_delete_#{System.unique_integer([:positive])}",
+ "name" => "to_delete",
+ "jwt_secret" => "secret",
+ "public_key" => "public",
+ "extensions" => [],
+ "postgres_cdc_default" => "postgres_cdc_rls",
+ "max_concurrent_users" => 200,
+ "max_events_per_second" => 100
+ }
+
+ Realtime.GenRpc
+ |> Mimic.expect(:call, fn node, mod, func, args, opts ->
+ assert node == master_node
+ assert mod == Realtime.Api
+ assert func == :create_tenant
+ assert opts[:tenant_id] == tenant_attrs["external_id"]
+
+ call_original(GenRpc, :call, [node, mod, func, args, opts])
+ end)
+ |> Mimic.expect(:call, fn node, mod, func, args, opts ->
+ assert node == master_node
+ assert mod == Realtime.Api
+ assert func == :delete_tenant_by_external_id
+ assert opts[:tenant_id] == tenant_attrs["external_id"]
+
+ call_original(GenRpc, :call, [node, mod, func, args, opts])
+ end)
+
+ tenant = tenant_fixture(tenant_attrs)
+
+ result = Api.delete_tenant_by_external_id(tenant.external_id)
+
+ assert result == true
+
+ refute Realtime.Repo.get(Tenant, tenant.id)
+ end
+
+ test "update_migrations_ran automatically routes to master region", %{master_node: master_node} do
+ # Create tenant on master node first
+ tenant_attrs = %{
+ "external_id" => "test_migrations_#{System.unique_integer([:positive])}",
+ "name" => "migrations_test",
+ "jwt_secret" => "secret",
+ "public_key" => "public",
+ "extensions" => [],
+ "postgres_cdc_default" => "postgres_cdc_rls",
+ "max_concurrent_users" => 200,
+ "max_events_per_second" => 100,
+ "migrations_ran" => 0
+ }
+
+ Realtime.GenRpc
+ |> Mimic.expect(:call, fn node, mod, func, args, opts ->
+ assert node == master_node
+ assert mod == Realtime.Api
+ assert func == :create_tenant
+ assert opts[:tenant_id] == tenant_attrs["external_id"]
+
+ call_original(GenRpc, :call, [node, mod, func, args, opts])
+ end)
+ |> Mimic.expect(:call, fn node, mod, func, args, opts ->
+ assert node == master_node
+ assert mod == Realtime.Api
+ assert func == :update_migrations_ran
+ assert opts[:tenant_id] == tenant_attrs["external_id"]
+
+ call_original(GenRpc, :call, [node, mod, func, args, opts])
+ end)
+
+ tenant = tenant_fixture(tenant_attrs)
+
+ new_migrations_ran = 5
+ result = Api.update_migrations_ran(tenant.external_id, new_migrations_ran)
+
+ assert {:ok, updated} = result
+ assert updated.migrations_ran == new_migrations_ran
+
+ reloaded = Realtime.Repo.get(Tenant, tenant.id)
+ assert reloaded.migrations_ran == new_migrations_ran
+ end
+
+ test "returns error when Nodes.node_from_region returns {:error, :not_available}" do
+ external_id = "test_error_node_unavailable_#{System.unique_integer([:positive])}"
+
+ attrs = %{
+ "external_id" => external_id,
+ "name" => external_id,
+ "jwt_secret" => "secret",
+ "public_key" => "public",
+ "extensions" => [],
+ "postgres_cdc_default" => "postgres_cdc_rls",
+ "max_concurrent_users" => 200,
+ "max_events_per_second" => 100
+ }
+
+ Mimic.expect(Nodes, :node_from_region, fn _region, _key -> {:error, :not_available} end)
+ result = Api.create_tenant(attrs)
+ assert {:error, :not_available} = result
+ end
+
+ test "returns error when GenRpc.call returns {:error, :rpc_error, reason}" do
+ external_id = "test_error_rpc_error_#{System.unique_integer([:positive])}"
+ rpc_error_reason = :timeout
+
+ attrs = %{
+ "external_id" => external_id,
+ "name" => external_id,
+ "jwt_secret" => "secret",
+ "public_key" => "public",
+ "extensions" => [],
+ "postgres_cdc_default" => "postgres_cdc_rls",
+ "max_concurrent_users" => 200,
+ "max_events_per_second" => 100
+ }
+
+ Mimic.expect(GenRpc, :call, fn _node, _mod, _func, _args, _opts -> {:error, :rpc_error, rpc_error_reason} end)
+ result = Api.create_tenant(attrs)
+ assert {:error, ^rpc_error_reason} = result
+ end
+end
diff --git a/test/integration/rt_channel/authorization_test.exs b/test/integration/rt_channel/authorization_test.exs
new file mode 100644
index 000000000..42e154be7
--- /dev/null
+++ b/test/integration/rt_channel/authorization_test.exs
@@ -0,0 +1,163 @@
+defmodule Realtime.Integration.RtChannel.AuthorizationTest do
+ use RealtimeWeb.ConnCase,
+ async: true,
+ parameterize: [
+ %{serializer: Phoenix.Socket.V1.JSONSerializer},
+ %{serializer: RealtimeWeb.Socket.V2Serializer}
+ ]
+
+ import ExUnit.CaptureLog
+ import Generators
+
+ alias Phoenix.Socket.Message
+ alias Realtime.Integration.WebsocketClient
+
+ @moduletag :capture_log
+
+ setup [:checkout_tenant_and_connect]
+
+ describe "private only channels" do
+ setup [:rls_context]
+
+ @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
+ test "user with only private channels enabled will not be able to join public channels", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ change_tenant_configuration(tenant, :private_only, true)
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: false}
+ topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ assert_receive %Message{
+ event: "phx_reply",
+ payload: %{
+ "response" => %{
+ "reason" => "PrivateOnly: This project only allows private channels"
+ },
+ "status" => "error"
+ }
+ },
+ 500
+ end
+
+ @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
+ test "user with only private channels enabled will be able to join private channels", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ change_tenant_configuration(tenant, :private_only, true)
+
+ Process.sleep(100)
+
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: true}
+ topic = "realtime:#{topic}"
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ end
+ end
+
+ describe "RLS policy enforcement" do
+ setup [:rls_context]
+
+ @tag policies: [:read_matching_user_role, :write_matching_user_role], role: "anon"
+ test "role policies are respected when accessing the channel", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "anon")
+ config = %{broadcast: %{self: true}, private: true, presence: %{enabled: false}}
+ topic = random_string()
+ realtime_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, realtime_topic, %{config: config})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^realtime_topic}, 500
+
+ {socket, _} = get_connection(tenant, serializer, role: "potato")
+ topic = random_string()
+ realtime_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, realtime_topic, %{config: config})
+ refute_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^realtime_topic}, 500
+ end
+
+ @tag policies: [:authenticated_read_matching_user_sub, :authenticated_write_matching_user_sub],
+ sub: Ecto.UUID.generate()
+ test "sub policies are respected when accessing the channel", %{tenant: tenant, sub: sub, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated", claims: %{sub: sub})
+ config = %{broadcast: %{self: true}, private: true, presence: %{enabled: false}}
+ topic = random_string()
+ realtime_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, realtime_topic, %{config: config})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^realtime_topic}, 500
+
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated", claims: %{sub: Ecto.UUID.generate()})
+ topic = random_string()
+ realtime_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, realtime_topic, %{config: config})
+ refute_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^realtime_topic}, 500
+ end
+
+ @tag role: "authenticated", policies: [:broken_read_presence, :broken_write_presence]
+ test "handle failing rls policy", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: true}
+ topic = random_string()
+ realtime_topic = "realtime:#{topic}"
+
+ log =
+ capture_log(fn ->
+ WebsocketClient.join(socket, realtime_topic, %{config: config})
+
+ msg = "Unauthorized: You do not have permissions to read from this Channel topic: #{topic}"
+
+ assert_receive %Message{
+ event: "phx_reply",
+ payload: %{
+ "response" => %{
+ "reason" => ^msg
+ },
+ "status" => "error"
+ }
+ },
+ 500
+
+ refute_receive %Message{event: "phx_reply"}
+ refute_receive %Message{event: "presence_state"}
+ end)
+
+ assert log =~ "RlsPolicyError"
+ end
+ end
+
+ describe "topic validation" do
+ test "handle empty topic by closing the socket", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: false}
+ realtime_topic = "realtime:"
+
+ WebsocketClient.join(socket, realtime_topic, %{config: config})
+
+ assert_receive %Message{
+ event: "phx_reply",
+ payload: %{
+ "response" => %{
+ "reason" => "TopicNameRequired: You must provide a topic name"
+ },
+ "status" => "error"
+ }
+ },
+ 500
+
+ refute_receive %Message{event: "phx_reply"}
+ refute_receive %Message{event: "presence_state"}
+ end
+ end
+end
diff --git a/test/integration/rt_channel/billable_events_test.exs b/test/integration/rt_channel/billable_events_test.exs
new file mode 100644
index 000000000..984696097
--- /dev/null
+++ b/test/integration/rt_channel/billable_events_test.exs
@@ -0,0 +1,282 @@
+defmodule Realtime.Integration.RtChannel.BillableEventsTest do
+ use RealtimeWeb.ConnCase,
+ async: true,
+ parameterize: [
+ %{serializer: Phoenix.Socket.V1.JSONSerializer},
+ %{serializer: RealtimeWeb.Socket.V2Serializer}
+ ]
+
+ import Generators
+
+ alias Phoenix.Socket.Message
+ alias Postgrex
+ alias Realtime.Database
+ alias Realtime.Integration.WebsocketClient
+ alias Realtime.Tenants
+
+ @moduletag :capture_log
+
+ setup [:checkout_tenant_connect_and_setup_postgres_changes]
+
+ setup %{tenant: tenant} do
+ events = [
+ [:realtime, :rate_counter, :channel, :joins],
+ [:realtime, :rate_counter, :channel, :events],
+ [:realtime, :rate_counter, :channel, :db_events],
+ [:realtime, :rate_counter, :channel, :presence_events]
+ ]
+
+ name = :"TestCounter_#{tenant.external_id}"
+
+ {:ok, _} =
+ start_supervised(%{
+ id: 1,
+ start: {Agent, :start_link, [fn -> %{} end, [name: name]]}
+ })
+
+ RateCounterHelper.stop(tenant.external_id)
+ on_exit(fn -> :telemetry.detach({__MODULE__, tenant.external_id}) end)
+ :telemetry.attach_many({__MODULE__, tenant.external_id}, events, &__MODULE__.handle_telemetry/4, name)
+
+ :ok
+ end
+
+ def handle_telemetry(event, measurements, metadata, name) do
+ tenant = metadata[:tenant]
+ [key] = Enum.take(event, -1)
+ value = Map.get(measurements, :sum) || Map.get(measurements, :value) || Map.get(measurements, :size) || 0
+
+ Agent.update(name, fn state ->
+ state =
+ Map.put_new(
+ state,
+ tenant,
+ %{
+ joins: 0,
+ events: 0,
+ db_events: 0,
+ presence_events: 0,
+ output_bytes: 0,
+ input_bytes: 0
+ }
+ )
+
+ update_in(state, [metadata[:tenant], key], fn v -> (v || 0) + value end)
+ end)
+ end
+
+ describe "join events" do
+ test "join events", %{tenant: tenant, serializer: serializer} do
+ external_id = tenant.external_id
+ {socket, _} = get_connection(tenant, serializer)
+ config = %{broadcast: %{self: true}, postgres_changes: [%{event: "*", schema: "public"}]}
+ topic = "realtime:any"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ # Join events
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ assert_receive %Message{topic: ^topic, event: "presence_state"}
+ assert_receive %Message{topic: ^topic, event: "system"}, 5000
+
+ # Wait for RateCounter to run
+ RateCounterHelper.tick_tenant_rate_counters!(tenant.external_id)
+
+ # Expected billed
+      # 1 join due to one socket
+      # 1 presence event due to one socket
+      # 0 db events as no database changes were triggered (the subscription alone does not count)
+      # 0 events as broadcast is not used
+ assert 1 = get_count([:realtime, :rate_counter, :channel, :joins], external_id)
+ assert 1 = get_count([:realtime, :rate_counter, :channel, :presence_events], external_id)
+ assert 0 = get_count([:realtime, :rate_counter, :channel, :db_events], external_id)
+ assert 0 = get_count([:realtime, :rate_counter, :channel, :events], external_id)
+ end
+ end
+
+ describe "broadcast events" do
+ test "broadcast events", %{tenant: tenant, serializer: serializer} do
+ external_id = tenant.external_id
+ {socket1, _} = get_connection(tenant, serializer)
+ config = %{broadcast: %{self: true}}
+ topic = "realtime:any"
+
+ WebsocketClient.join(socket1, topic, %{config: config})
+
+ # Join events
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ assert_receive %Message{topic: ^topic, event: "presence_state"}
+
+ # Add second client so we can test the "multiplication" of billable events
+ {socket2, _} = get_connection(tenant, serializer)
+ WebsocketClient.join(socket2, topic, %{config: config})
+
+ # Join events
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ assert_receive %Message{topic: ^topic, event: "presence_state"}
+
+ # Broadcast event
+ payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
+
+ for _ <- 1..5 do
+ WebsocketClient.send_event(socket1, topic, "broadcast", payload)
+ # both sockets
+ assert_receive %Message{topic: ^topic, event: "broadcast", payload: ^payload}
+ assert_receive %Message{topic: ^topic, event: "broadcast", payload: ^payload}
+ end
+
+ refute_receive _any
+
+ # Wait for RateCounter to run
+ RateCounterHelper.tick_tenant_rate_counters!(tenant.external_id)
+
+ # Expected billed
+ # 2 joins due to two sockets
+ # 2 presence events due to two sockets
+ # 0 db events as no postgres changes used
+ # 15 events as 5 events sent, 5 events received on client 1 and 5 events received on client 2
+ assert 2 = get_count([:realtime, :rate_counter, :channel, :joins], external_id)
+ assert 2 = get_count([:realtime, :rate_counter, :channel, :presence_events], external_id)
+ assert 0 = get_count([:realtime, :rate_counter, :channel, :db_events], external_id)
+ assert 15 = get_count([:realtime, :rate_counter, :channel, :events], external_id)
+ end
+ end
+
+ describe "presence events" do
+ test "presence events", %{tenant: tenant, serializer: serializer} do
+ external_id = tenant.external_id
+ {socket, _} = get_connection(tenant, serializer)
+ config = %{broadcast: %{self: true}, presence: %{enabled: true}}
+ topic = "realtime:any"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ # Join events
+ assert_receive %Message{event: "phx_reply", topic: ^topic}, 1000
+ assert_receive %Message{topic: ^topic, event: "presence_state"}, 1000
+
+ payload = %{
+ type: "presence",
+ event: "TRACK",
+ payload: %{name: "realtime_presence_1", t: 1814.7000000029802}
+ }
+
+ WebsocketClient.send_event(socket, topic, "presence", payload)
+ assert_receive %Message{event: "presence_diff", payload: %{"joins" => _, "leaves" => %{}}, topic: ^topic}
+
+ # Presence events
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ assert_receive %Message{topic: ^topic, event: "presence_state"}
+
+ payload = %{
+ type: "presence",
+ event: "TRACK",
+ payload: %{name: "realtime_presence_2", t: 1814.7000000029802}
+ }
+
+ WebsocketClient.send_event(socket, topic, "presence", payload)
+ assert_receive %Message{event: "presence_diff", payload: %{"joins" => _, "leaves" => %{}}, topic: ^topic}
+ assert_receive %Message{event: "presence_diff", payload: %{"joins" => _, "leaves" => %{}}, topic: ^topic}
+
+ # Wait for RateCounter to run
+ RateCounterHelper.tick_tenant_rate_counters!(tenant.external_id)
+
+ # Expected billed
+ # 2 joins due to two sockets
+ # 7 presence events
+ # 0 db events as no postgres changes used
+ # 0 events as no broadcast used
+ assert 2 = get_count([:realtime, :rate_counter, :channel, :joins], external_id)
+ assert 7 = get_count([:realtime, :rate_counter, :channel, :presence_events], external_id)
+ assert 0 = get_count([:realtime, :rate_counter, :channel, :db_events], external_id)
+ assert 0 = get_count([:realtime, :rate_counter, :channel, :events], external_id)
+ end
+ end
+
+ describe "postgres changes events" do
+ test "postgres changes events", %{tenant: tenant, serializer: serializer} do
+ external_id = tenant.external_id
+ {socket, _} = get_connection(tenant, serializer)
+ config = %{broadcast: %{self: true}, postgres_changes: [%{event: "*", schema: "public"}]}
+ topic = "realtime:any"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ # Join events
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ assert_receive %Message{topic: ^topic, event: "presence_state"}, 500
+ assert_receive %Message{topic: ^topic, event: "system"}, 5000
+
+ # Add second user to test the "multiplication" of billable events
+ {socket, _} = get_connection(tenant, serializer)
+ WebsocketClient.join(socket, topic, %{config: config})
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ assert_receive %Message{topic: ^topic, event: "presence_state"}, 500
+ assert_receive %Message{topic: ^topic, event: "system"}, 5000
+
+ tenant = Tenants.get_tenant_by_external_id(tenant.external_id)
+ {:ok, conn} = Database.connect(tenant, "realtime_test", :stop)
+
+ # Postgres Change events
+ for _ <- 1..5, do: Postgrex.query!(conn, "insert into test (details) values ('test')", [])
+
+ for _ <- 1..10 do
+ assert_receive %Message{
+ topic: ^topic,
+ event: "postgres_changes",
+ payload: %{"data" => %{"schema" => "public", "table" => "test", "type" => "INSERT"}}
+ },
+ 5000
+ end
+
+ # Wait for RateCounter to run
+ RateCounterHelper.tick_tenant_rate_counters!(tenant.external_id)
+
+ # Expected billed
+ # 2 joins due to two sockets
+ # 2 presence events due to two sockets
+ # 10 db events due to 5 inserts events sent to client 1 and 5 inserts events sent to client 2
+ # 0 events as no broadcast used
+ assert 2 = get_count([:realtime, :rate_counter, :channel, :joins], external_id)
+ assert 2 = get_count([:realtime, :rate_counter, :channel, :presence_events], external_id)
+ # (5 for each websocket)
+ assert 10 = get_count([:realtime, :rate_counter, :channel, :db_events], external_id)
+ assert 0 = get_count([:realtime, :rate_counter, :channel, :events], external_id)
+ end
+
+ test "postgres changes error events", %{tenant: tenant, serializer: serializer} do
+ external_id = tenant.external_id
+ {socket, _} = get_connection(tenant, serializer)
+ config = %{broadcast: %{self: true}, postgres_changes: [%{event: "*", schema: "none"}]}
+ topic = "realtime:any"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ # Join events
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ assert_receive %Message{topic: ^topic, event: "presence_state"}, 500
+ assert_receive %Message{topic: ^topic, event: "system"}, 5000
+
+ # Wait for RateCounter to run
+ RateCounterHelper.tick_tenant_rate_counters!(tenant.external_id)
+
+ # Expected billed
+      # 1 join due to one socket
+      # 1 presence event due to one socket
+ # 0 db events
+ # 0 events as no broadcast used
+ assert 1 = get_count([:realtime, :rate_counter, :channel, :joins], external_id)
+ assert 1 = get_count([:realtime, :rate_counter, :channel, :presence_events], external_id)
+ assert 0 = get_count([:realtime, :rate_counter, :channel, :db_events], external_id)
+ assert 0 = get_count([:realtime, :rate_counter, :channel, :events], external_id)
+ end
+ end
+
+ defp get_count(event, tenant) do
+ [key] = Enum.take(event, -1)
+ Agent.get(:"TestCounter_#{tenant}", fn state -> get_in(state, [tenant, key]) || 0 end)
+ end
+end
diff --git a/test/integration/rt_channel/broadcast_test.exs b/test/integration/rt_channel/broadcast_test.exs
new file mode 100644
index 000000000..d94c2b293
--- /dev/null
+++ b/test/integration/rt_channel/broadcast_test.exs
@@ -0,0 +1,483 @@
+defmodule Realtime.Integration.RtChannel.BroadcastTest do
+ use RealtimeWeb.ConnCase,
+ async: true,
+ parameterize: [
+ %{serializer: Phoenix.Socket.V1.JSONSerializer},
+ %{serializer: RealtimeWeb.Socket.V2Serializer}
+ ]
+
+ import ExUnit.CaptureLog
+ import Generators
+
+ alias Phoenix.Socket.Message
+ alias Postgrex
+ alias Realtime.Database
+ alias Realtime.Integration.WebsocketClient
+ alias Realtime.Tenants.Connect
+
+ @moduletag :capture_log
+
+ setup [:checkout_tenant_and_connect]
+
+ describe "public broadcast" do
+ setup [:rls_context]
+
+ test "public broadcast", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer)
+ config = %{broadcast: %{self: true}, private: false}
+ topic = "realtime:any"
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ assert_receive %Message{event: "presence_state"}
+
+ payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
+ WebsocketClient.send_event(socket, topic, "broadcast", payload)
+
+ assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 500
+ end
+
+ test "broadcast to another tenant does not get mixed up", %{tenant: tenant, serializer: serializer} do
+ other_tenant = Containers.checkout_tenant(run_migrations: true)
+
+ Realtime.Tenants.Cache.update_cache(other_tenant)
+
+ {socket, _} = get_connection(tenant, serializer)
+ config = %{broadcast: %{self: false}, private: false}
+ topic = "realtime:any"
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ {other_socket, _} = get_connection(other_tenant, serializer)
+ WebsocketClient.join(other_socket, topic, %{config: config})
+
+ # Both sockets joined
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ assert_receive %Message{event: "presence_state"}
+ assert_receive %Message{event: "presence_state"}
+
+ payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
+ WebsocketClient.send_event(socket, topic, "broadcast", payload)
+
+ # No message received
+ refute_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 500
+ end
+
+ @tag policies: []
+ test "lack of connection to database error does not impact public channels", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ topic = "realtime:#{topic}"
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ WebsocketClient.join(socket, topic, %{config: %{broadcast: %{self: true}, private: false}})
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ assert_receive %Message{event: "presence_state"}
+
+ {service_role_socket, _} = get_connection(tenant, serializer, role: "service_role")
+ WebsocketClient.join(service_role_socket, topic, %{config: %{broadcast: %{self: false}, private: false}})
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ assert_receive %Message{event: "presence_state"}
+
+ log =
+ capture_log(fn ->
+ :syn.update_registry(Connect, tenant.external_id, fn _pid, meta -> %{meta | conn: nil} end)
+ payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
+ WebsocketClient.send_event(service_role_socket, topic, "broadcast", payload)
+ assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 500
+ end)
+
+ refute log =~ "UnableToHandleBroadcast"
+ end
+ end
+
+ describe "private broadcast" do
+ setup [:rls_context]
+
+ @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
+ test "private broadcast with valid channel with permissions sends message", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: true}
+ topic = "realtime:#{topic}"
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ assert_receive %Message{event: "presence_state"}
+
+ payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
+ WebsocketClient.send_event(socket, topic, "broadcast", payload)
+
+ assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}
+ end
+
+ @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence],
+ topic: "topic"
+ test "private broadcast with valid channel a colon character sends message and won't intercept in public channels",
+ %{topic: topic, tenant: tenant, serializer: serializer} do
+ {anon_socket, _} = get_connection(tenant, serializer, role: "anon")
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ valid_topic = "realtime:#{topic}"
+ malicious_topic = "realtime:private:#{topic}"
+
+ WebsocketClient.join(socket, valid_topic, %{config: %{broadcast: %{self: true}, private: true}})
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^valid_topic}, 300
+ assert_receive %Message{event: "presence_state"}
+
+ WebsocketClient.join(anon_socket, malicious_topic, %{config: %{broadcast: %{self: true}, private: false}})
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^malicious_topic}, 300
+ assert_receive %Message{event: "presence_state"}
+
+ payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
+ WebsocketClient.send_event(socket, valid_topic, "broadcast", payload)
+
+ assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^valid_topic}, 500
+ refute_receive %Message{event: "broadcast"}
+ end
+
+ @tag policies: [:authenticated_read_broadcast_and_presence]
+ test "private broadcast with valid channel no write permissions won't send message but will receive message", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ config = %{broadcast: %{self: true}, private: true}
+ topic = "realtime:#{topic}"
+
+ {service_role_socket, _} = get_connection(tenant, serializer, role: "service_role")
+ WebsocketClient.join(service_role_socket, topic, %{config: config})
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ assert_receive %Message{event: "presence_state"}
+
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ WebsocketClient.join(socket, topic, %{config: config})
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ assert_receive %Message{event: "presence_state"}
+
+ payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
+
+ WebsocketClient.send_event(socket, topic, "broadcast", payload)
+ refute_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 500
+
+ WebsocketClient.send_event(service_role_socket, topic, "broadcast", payload)
+ assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 500
+ assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 500
+ end
+
+ @tag policies: []
+ test "private broadcast with valid channel and no read permissions won't join", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ config = %{private: true}
+ expected = "Unauthorized: You do not have permissions to read from this Channel topic: #{topic}"
+
+ topic = "realtime:#{topic}"
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+
+ log =
+ capture_log(fn ->
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ assert_receive %Message{
+ topic: ^topic,
+ event: "phx_reply",
+ payload: %{
+ "response" => %{
+ "reason" => ^expected
+ },
+ "status" => "error"
+ }
+ },
+ 300
+
+ refute_receive %Message{event: "phx_reply", topic: ^topic}, 300
+ refute_receive %Message{event: "presence_state"}, 300
+ end)
+
+ assert log =~ expected
+ end
+
+ @tag policies: [:authenticated_read_broadcast_and_presence]
+ test "handles lack of connection to database error on private channels", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ topic = "realtime:#{topic}"
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ WebsocketClient.join(socket, topic, %{config: %{broadcast: %{self: true}, private: true}})
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ assert_receive %Message{event: "presence_state"}
+
+ {service_role_socket, _} = get_connection(tenant, serializer, role: "service_role")
+ WebsocketClient.join(service_role_socket, topic, %{config: %{broadcast: %{self: false}, private: true}})
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ assert_receive %Message{event: "presence_state"}
+
+ log =
+ capture_log(fn ->
+ :syn.update_registry(Connect, tenant.external_id, fn _pid, meta -> %{meta | conn: nil} end)
+ payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
+ WebsocketClient.send_event(service_role_socket, topic, "broadcast", payload)
+ # Waiting more than 15 seconds as this is the amount of time we will wait for the Connection to be ready
+ refute_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 16000
+ end)
+
+ assert log =~ "UnableToHandleBroadcast"
+ end
+ end
+
+ describe "trigger-based broadcast changes" do
+ setup [:rls_context, :setup_trigger]
+
+ @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
+ test "broadcast insert event changes on insert in table with trigger", %{
+ tenant: tenant,
+ topic: topic,
+ db_conn: db_conn,
+ table_name: table_name,
+ serializer: serializer
+ } do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: true}
+ topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+
+ value = random_string()
+ Postgrex.query!(db_conn, "INSERT INTO #{table_name} (details) VALUES ($1)", [value])
+
+ record = %{"details" => value, "id" => 1}
+
+ assert_receive %Message{
+ event: "broadcast",
+ payload: %{
+ "event" => "INSERT",
+ "payload" => %{
+ "old_record" => nil,
+ "operation" => "INSERT",
+ "record" => ^record,
+ "schema" => "public",
+ "table" => ^table_name
+ },
+ "type" => "broadcast"
+ },
+ topic: ^topic
+ },
+ 1000
+ end
+
+ @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence],
+ requires_data: true
+ test "broadcast update event changes on update in table with trigger", %{
+ tenant: tenant,
+ topic: topic,
+ db_conn: db_conn,
+ table_name: table_name,
+ serializer: serializer
+ } do
+ value = random_string()
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: true}
+ topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+
+ new_value = random_string()
+
+ Postgrex.query!(db_conn, "INSERT INTO #{table_name} (details) VALUES ($1)", [value])
+ Postgrex.query!(db_conn, "UPDATE #{table_name} SET details = $1 WHERE details = $2", [new_value, value])
+
+ old_record = %{"details" => value, "id" => 1}
+ record = %{"details" => new_value, "id" => 1}
+
+ assert_receive %Message{
+ event: "broadcast",
+ payload: %{
+ "event" => "UPDATE",
+ "payload" => %{
+ "old_record" => ^old_record,
+ "operation" => "UPDATE",
+ "record" => ^record,
+ "schema" => "public",
+ "table" => ^table_name
+ },
+ "type" => "broadcast"
+ },
+ topic: ^topic
+ },
+ 1000
+ end
+
+ @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
+ test "broadcast delete event changes on delete in table with trigger", %{
+ tenant: tenant,
+ topic: topic,
+ db_conn: db_conn,
+ table_name: table_name,
+ serializer: serializer
+ } do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: true}
+ topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+
+ value = random_string()
+
+ Postgrex.query!(db_conn, "INSERT INTO #{table_name} (details) VALUES ($1)", [value])
+ Postgrex.query!(db_conn, "DELETE FROM #{table_name} WHERE details = $1", [value])
+
+ record = %{"details" => value, "id" => 1}
+
+ assert_receive %Message{
+ event: "broadcast",
+ payload: %{
+ "event" => "DELETE",
+ "payload" => %{
+ "old_record" => ^record,
+ "operation" => "DELETE",
+ "record" => nil,
+ "schema" => "public",
+ "table" => ^table_name
+ },
+ "type" => "broadcast"
+ },
+ topic: ^topic
+ },
+ 1000
+ end
+
+ @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
+ test "broadcast event when function 'send' is called with private topic", %{
+ tenant: tenant,
+ topic: topic,
+ db_conn: db_conn,
+ serializer: serializer
+ } do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: true}
+ full_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, full_topic, %{config: config})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+
+ value = random_string()
+ event = random_string()
+
+ Postgrex.query!(
+ db_conn,
+ "SELECT realtime.send (json_build_object ('value', $1 :: text)::jsonb, $2 :: text, $3 :: text, TRUE::bool);",
+ [value, event, topic]
+ )
+
+ assert_receive %Message{
+ event: "broadcast",
+ payload: %{
+ "event" => ^event,
+ "payload" => %{"value" => ^value},
+ "type" => "broadcast"
+ },
+ topic: ^full_topic,
+ join_ref: nil,
+ ref: nil
+ },
+ 1000
+ end
+
+ test "broadcast event when function 'send' is called with public topic", %{
+ tenant: tenant,
+ topic: topic,
+ db_conn: db_conn,
+ serializer: serializer
+ } do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: false}
+ full_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, full_topic, %{config: config})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+
+ value = random_string()
+ event = random_string()
+
+ Postgrex.query!(
+ db_conn,
+ "SELECT realtime.send (json_build_object ('value', $1 :: text)::jsonb, $2 :: text, $3 :: text, FALSE::bool);",
+ [value, event, topic]
+ )
+
+ assert_receive %Message{
+ event: "broadcast",
+ payload: %{
+ "event" => ^event,
+ "payload" => %{"value" => ^value},
+ "type" => "broadcast"
+ },
+ topic: ^full_topic
+ },
+ 1000
+ end
+ end
+
+ defp setup_trigger(%{tenant: tenant, topic: topic}) do
+ {:ok, db_conn} = Database.connect(tenant, "realtime_test", :stop)
+ random_name = String.downcase("test_#{random_string()}")
+ query = "CREATE TABLE #{random_name} (id serial primary key, details text)"
+ Postgrex.query!(db_conn, query, [])
+
+ query = """
+ CREATE OR REPLACE FUNCTION broadcast_changes_for_table_#{random_name}_trigger ()
+ RETURNS TRIGGER
+ AS $$
+ DECLARE
+ topic text;
+ BEGIN
+ topic = '#{topic}';
+ PERFORM
+ realtime.broadcast_changes (topic, TG_OP, TG_OP, TG_TABLE_NAME, TG_TABLE_SCHEMA, NEW, OLD, TG_LEVEL);
+ RETURN NULL;
+ END;
+ $$
+ LANGUAGE plpgsql;
+ """
+
+ Postgrex.query!(db_conn, query, [])
+
+ query = """
+ CREATE TRIGGER broadcast_changes_for_#{random_name}_table
+ AFTER INSERT OR UPDATE OR DELETE ON #{random_name}
+ FOR EACH ROW
+ EXECUTE FUNCTION broadcast_changes_for_table_#{random_name}_trigger ();
+ """
+
+ Postgrex.query!(db_conn, query, [])
+
+ on_exit(fn ->
+ {:ok, cleanup_conn} = Database.connect(tenant, "realtime_test", :stop)
+ Postgrex.query!(cleanup_conn, "DROP TABLE #{random_name} CASCADE", [])
+ GenServer.stop(cleanup_conn)
+ end)
+
+ %{table_name: random_name, db_conn: db_conn}
+ end
+end
diff --git a/test/integration/rt_channel/connection_lifecycle_test.exs b/test/integration/rt_channel/connection_lifecycle_test.exs
new file mode 100644
index 000000000..40f6636d7
--- /dev/null
+++ b/test/integration/rt_channel/connection_lifecycle_test.exs
@@ -0,0 +1,330 @@
+defmodule Realtime.Integration.RtChannel.ConnectionLifecycleTest do
+ use RealtimeWeb.ConnCase,
+ async: true,
+ parameterize: [
+ %{serializer: Phoenix.Socket.V1.JSONSerializer},
+ %{serializer: RealtimeWeb.Socket.V2Serializer}
+ ]
+
+ import ExUnit.CaptureLog
+ import Generators
+
+ alias Phoenix.Socket.Message
+ alias Realtime.Integration.WebsocketClient
+ alias Realtime.Tenants
+ alias RealtimeWeb.SocketDisconnect
+
+ @moduletag :capture_log
+
+ setup [:checkout_tenant_and_connect]
+
+ describe "socket disconnect - tenant suspension" do
+ setup [:rls_context]
+
+ test "tenant already suspended", %{tenant: tenant, serializer: serializer} do
+ log =
+ capture_log(fn ->
+ change_tenant_configuration(tenant, :suspend, true)
+ {:error, %Mint.WebSocket.UpgradeFailureError{}} = get_connection(tenant, serializer, role: "anon")
+ refute_receive _any
+ end)
+
+ assert log =~ "RealtimeDisabledForTenant"
+ end
+ end
+
+ describe "socket disconnect - configuration changes" do
+ setup [:rls_context]
+
+ test "on jwks the socket closes and sends a system message", %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: false}
+ realtime_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, realtime_topic, %{config: config})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+
+ Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{jwt_jwks: %{keys: ["potato"]}})
+ assert_process_down(socket)
+ end
+
+ test "on jwt_secret the socket closes and sends a system message", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: false}
+ realtime_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, realtime_topic, %{config: config})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+
+ Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{jwt_secret: "potato"})
+ assert_process_down(socket)
+ end
+
+ test "on private_only the socket closes and sends a system message", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: false}
+ realtime_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, realtime_topic, %{config: config})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+
+ Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{private_only: true})
+ assert_process_down(socket)
+ end
+
+ test "on other param changes the socket won't close and no message is sent", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: false}
+ realtime_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, realtime_topic, %{config: config})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+
+ Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{max_concurrent_users: 100})
+
+ refute_receive %Message{
+ topic: ^realtime_topic,
+ event: "system",
+ payload: %{
+ "extension" => "system",
+ "message" => "Server requested disconnect",
+ "status" => "ok"
+ }
+ },
+ 500
+
+ Process.sleep(500)
+ assert :ok = WebsocketClient.send_heartbeat(socket)
+ end
+ end
+
+ describe "socket disconnect - token expiry" do
+ setup [:rls_context]
+
+ test "invalid JWT with expired token", %{tenant: tenant, serializer: serializer} do
+ log =
+ capture_log(fn ->
+ get_connection(tenant, serializer,
+ role: "authenticated",
+ claims: %{:exp => System.system_time(:second) - 1000},
+ params: %{log_level: :info}
+ )
+ end)
+
+ assert log =~ "InvalidJWTToken: Token has expired"
+ end
+ end
+
+ describe "socket disconnect - distributed disconnect" do
+ setup [:rls_context]
+
+ test "check registry of SocketDisconnect and on distribution called, kill socket", %{
+ tenant: tenant,
+ serializer: serializer
+ } do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: false}
+
+ for _ <- 1..10 do
+ topic = "realtime:#{random_string()}"
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 500
+ assert_receive %Message{event: "presence_state", topic: ^topic}, 500
+ end
+
+ assert :ok = WebsocketClient.send_heartbeat(socket)
+
+ SocketDisconnect.distributed_disconnect(tenant.external_id)
+
+ assert_process_down(socket)
+ end
+ end
+
+ describe "rate limits - concurrent users" do
+ setup [:rls_context]
+
+ test "max_concurrent_users limit respected", %{tenant: tenant, serializer: serializer} do
+ Tenants.get_tenant_by_external_id(tenant.external_id)
+ change_tenant_configuration(tenant, :max_concurrent_users, 1)
+
+ {socket1, _} = get_connection(tenant, serializer, role: "authenticated")
+ {socket2, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: false}
+ topic1 = "realtime:#{random_string()}"
+ topic2 = "realtime:#{random_string()}"
+ WebsocketClient.join(socket1, topic1, %{config: config})
+ WebsocketClient.join(socket1, topic2, %{config: config})
+
+ assert_receive %Message{
+ event: "phx_reply",
+ topic: ^topic1,
+ payload: %{"response" => %{"postgres_changes" => []}, "status" => "ok"}
+ },
+ 500
+
+ assert_receive %Message{
+ event: "phx_reply",
+ topic: ^topic2,
+ payload: %{"response" => %{"postgres_changes" => []}, "status" => "ok"}
+ },
+ 500
+
+ topic3 = "realtime:#{random_string()}"
+ WebsocketClient.join(socket2, topic3, %{config: config})
+
+ assert_receive %Message{
+ event: "phx_reply",
+ topic: ^topic3,
+ payload: %{
+ "response" => %{
+ "reason" => "ConnectionRateLimitReached: Too many connected users"
+ },
+ "status" => "error"
+ }
+ },
+ 500
+
+ Realtime.Tenants.Cache.update_cache(%{tenant | max_concurrent_users: 2})
+
+ WebsocketClient.join(socket2, topic3, %{config: config})
+
+ assert_receive %Message{
+ event: "phx_reply",
+ topic: ^topic3,
+ payload: %{"response" => %{"postgres_changes" => []}, "status" => "ok"}
+ },
+ 500
+ end
+ end
+
+ describe "rate limits - events per second" do
+ setup [:rls_context]
+
+ test "max_events_per_second limit respected", %{tenant: tenant, serializer: serializer} do
+ RateCounterHelper.stop(tenant.external_id)
+
+ log =
+ capture_log(fn ->
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true, ack: false}, private: false, presence: %{enabled: false}}
+ realtime_topic = "realtime:#{random_string()}"
+
+ WebsocketClient.join(socket, realtime_topic, %{config: config})
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^realtime_topic}, 500
+
+ for _ <- 1..1000, Process.alive?(socket) do
+ WebsocketClient.send_event(socket, realtime_topic, "broadcast", %{})
+ assert_receive %Message{event: "broadcast", topic: ^realtime_topic}, 500
+ end
+
+ RateCounterHelper.tick_tenant_rate_counters!(tenant.external_id)
+
+ WebsocketClient.send_event(socket, realtime_topic, "broadcast", %{})
+
+ assert_receive %Message{event: "phx_close"}, 1000
+ end)
+
+ assert log =~ "MessagePerSecondRateLimitReached"
+ end
+ end
+
+ describe "rate limits - channels per client" do
+ setup [:rls_context]
+
+ test "max_channels_per_client limit respected", %{tenant: tenant, serializer: serializer} do
+ change_tenant_configuration(tenant, :max_channels_per_client, 1)
+
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: false}
+ realtime_topic_1 = "realtime:#{random_string()}"
+ realtime_topic_2 = "realtime:#{random_string()}"
+
+ WebsocketClient.join(socket, realtime_topic_1, %{config: config})
+ WebsocketClient.join(socket, realtime_topic_2, %{config: config})
+
+ assert_receive %Message{
+ event: "phx_reply",
+ payload: %{"response" => %{"postgres_changes" => []}, "status" => "ok"},
+ topic: ^realtime_topic_1
+ },
+ 500
+
+ assert_receive %Message{event: "presence_state", topic: ^realtime_topic_1}, 500
+
+ assert_receive %Message{
+ event: "phx_reply",
+ payload: %{
+ "status" => "error",
+ "response" => %{
+ "reason" => "ChannelRateLimitReached: Too many channels"
+ }
+ },
+ topic: ^realtime_topic_2
+ },
+ 500
+
+ refute_receive %Message{event: "phx_reply", topic: ^realtime_topic_2}, 500
+ refute_receive %Message{event: "presence_state", topic: ^realtime_topic_2}, 500
+
+ Realtime.Tenants.Cache.update_cache(%{tenant | max_channels_per_client: 2})
+
+ WebsocketClient.join(socket, realtime_topic_2, %{config: config})
+
+ assert_receive %Message{
+ event: "phx_reply",
+ payload: %{"response" => %{"postgres_changes" => []}, "status" => "ok"},
+ topic: ^realtime_topic_2
+ },
+ 500
+ end
+ end
+
+ describe "rate limits - joins per second" do
+ setup [:rls_context]
+
+ test "max_joins_per_second limit respected", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: false}
+ realtime_topic = "realtime:#{random_string()}"
+
+ log =
+ capture_log(fn ->
+ for _ <- 1..300 do
+ WebsocketClient.join(socket, realtime_topic, %{config: config})
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^realtime_topic}, 500
+ end
+
+ RateCounterHelper.tick_tenant_rate_counters!(tenant.external_id)
+
+ WebsocketClient.join(socket, realtime_topic, %{config: config})
+ assert_process_down(socket)
+ end)
+
+ assert log =~
+ "project=#{tenant.external_id} external_id=#{tenant.external_id} [critical] ClientJoinRateLimitReached: Too many joins per second"
+
+ assert length(String.split(log, "ClientJoinRateLimitReached")) <= 3
+ end
+ end
+end
diff --git a/test/integration/rt_channel/postgres_changes_test.exs b/test/integration/rt_channel/postgres_changes_test.exs
new file mode 100644
index 000000000..85673260c
--- /dev/null
+++ b/test/integration/rt_channel/postgres_changes_test.exs
@@ -0,0 +1,461 @@
+defmodule Realtime.Integration.RtChannel.PostgresChangesTest do
+ use RealtimeWeb.ConnCase,
+ async: true,
+ parameterize: [
+ %{serializer: Phoenix.Socket.V1.JSONSerializer},
+ %{serializer: RealtimeWeb.Socket.V2Serializer}
+ ]
+
+ import ExUnit.CaptureLog
+ import Generators
+
+ alias Extensions.PostgresCdcRls
+ alias Phoenix.Socket.Message
+ alias Postgrex
+ alias Realtime.Database
+ alias Realtime.Integration.WebsocketClient
+
+ @moduletag :capture_log
+
+ setup [:checkout_tenant_connect_and_setup_postgres_changes]
+
+ describe "insert" do
+ test "handle insert", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer)
+ topic = "realtime:any"
+ config = %{postgres_changes: [%{event: "INSERT", schema: "public"}]}
+
+ WebsocketClient.join(socket, topic, %{config: config})
+ sub_id = :erlang.phash2(%{"event" => "INSERT", "schema" => "public"})
+
+ assert_receive %Message{
+ event: "phx_reply",
+ payload: %{
+ "response" => %{
+ "postgres_changes" => [
+ %{"event" => "INSERT", "id" => ^sub_id, "schema" => "public"}
+ ]
+ },
+ "status" => "ok"
+ },
+ topic: ^topic
+ },
+ 200
+
+ assert_receive %Phoenix.Socket.Message{event: "presence_state", payload: %{}, topic: ^topic}, 500
+
+ assert_receive %Message{
+ event: "system",
+ payload: %{
+ "channel" => "any",
+ "extension" => "postgres_changes",
+ "message" => "Subscribed to PostgreSQL",
+ "status" => "ok"
+ },
+ ref: nil,
+ topic: ^topic
+ },
+ 8000
+
+ {:ok, _, conn} = PostgresCdcRls.get_manager_conn(tenant.external_id)
+ %{rows: [[id]]} = Postgrex.query!(conn, "insert into test (details) values ('test') returning id", [])
+
+ assert_receive %Message{
+ event: "postgres_changes",
+ payload: %{
+ "data" => %{
+ "columns" => [
+ %{"name" => "id", "type" => "int4"},
+ %{"name" => "details", "type" => "text"},
+ %{"name" => "binary_data", "type" => "bytea"}
+ ],
+ "commit_timestamp" => _ts,
+ "errors" => nil,
+ "record" => %{"details" => "test", "id" => ^id},
+ "schema" => "public",
+ "table" => "test",
+ "type" => "INSERT"
+ },
+ "ids" => [^sub_id]
+ },
+ ref: nil,
+ topic: "realtime:any"
+ },
+ 500
+ end
+ end
+
+ describe "bytea column" do
+ test "handle insert with bytea data without double-encoding", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer)
+ topic = "realtime:any"
+ config = %{postgres_changes: [%{event: "INSERT", schema: "public"}]}
+
+ WebsocketClient.join(socket, topic, %{config: config})
+ sub_id = :erlang.phash2(%{"event" => "INSERT", "schema" => "public"})
+
+ assert_receive %Message{
+ event: "phx_reply",
+ payload: %{"status" => "ok"},
+ topic: ^topic
+ },
+ 200
+
+ assert_receive %Phoenix.Socket.Message{event: "presence_state", payload: %{}, topic: ^topic}, 500
+
+ assert_receive %Message{
+ event: "system",
+ payload: %{
+ "channel" => "any",
+ "extension" => "postgres_changes",
+ "message" => "Subscribed to PostgreSQL",
+ "status" => "ok"
+ },
+ ref: nil,
+ topic: ^topic
+ },
+ 8000
+
+ {:ok, _, conn} = PostgresCdcRls.get_manager_conn(tenant.external_id)
+
+ binary_value = <<1, 2, 3, 4, 5>>
+
+ %{rows: [[_id]]} =
+ Postgrex.query!(conn, "insert into test (details, binary_data) values ('test', $1) returning id", [binary_value])
+
+ assert_receive %Message{
+ event: "postgres_changes",
+ payload: %{
+ "data" => %{
+ "record" => record,
+ "type" => "INSERT"
+ },
+ "ids" => [^sub_id]
+ },
+ ref: nil,
+ topic: "realtime:any"
+ },
+ 500
+
+ # The bytea value should be the hex string as provided by wal2json
+ assert record["binary_data"] == "0102030405"
+ end
+ end
+
+ describe "update" do
+ test "handle update", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer)
+ topic = "realtime:any"
+ config = %{postgres_changes: [%{event: "UPDATE", schema: "public"}]}
+
+ WebsocketClient.join(socket, topic, %{config: config})
+ sub_id = :erlang.phash2(%{"event" => "UPDATE", "schema" => "public"})
+
+ assert_receive %Message{
+ event: "phx_reply",
+ payload: %{
+ "response" => %{
+ "postgres_changes" => [
+ %{"event" => "UPDATE", "id" => ^sub_id, "schema" => "public"}
+ ]
+ },
+ "status" => "ok"
+ },
+ ref: "1",
+ topic: ^topic
+ },
+ 200
+
+ assert_receive %Phoenix.Socket.Message{event: "presence_state", payload: %{}, topic: ^topic}, 500
+
+ assert_receive %Message{
+ event: "system",
+ payload: %{
+ "channel" => "any",
+ "extension" => "postgres_changes",
+ "message" => "Subscribed to PostgreSQL",
+ "status" => "ok"
+ },
+ ref: nil,
+ topic: ^topic
+ },
+ 8000
+
+ {:ok, _, conn} = PostgresCdcRls.get_manager_conn(tenant.external_id)
+ %{rows: [[id]]} = Postgrex.query!(conn, "insert into test (details) values ('test') returning id", [])
+
+ Postgrex.query!(conn, "update test set details = 'test' where id = #{id}", [])
+
+ assert_receive %Message{
+ event: "postgres_changes",
+ payload: %{
+ "data" => %{
+ "columns" => [
+ %{"name" => "id", "type" => "int4"},
+ %{"name" => "details", "type" => "text"},
+ %{"name" => "binary_data", "type" => "bytea"}
+ ],
+ "commit_timestamp" => _ts,
+ "errors" => nil,
+ "old_record" => %{"id" => ^id},
+ "record" => %{"details" => "test", "id" => ^id},
+ "schema" => "public",
+ "table" => "test",
+ "type" => "UPDATE"
+ },
+ "ids" => [^sub_id]
+ },
+ ref: nil,
+ topic: "realtime:any"
+ },
+ 500
+ end
+ end
+
+ describe "delete" do
+ test "handle delete", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer)
+ topic = "realtime:any"
+ config = %{postgres_changes: [%{event: "DELETE", schema: "public"}]}
+
+ WebsocketClient.join(socket, topic, %{config: config})
+ sub_id = :erlang.phash2(%{"event" => "DELETE", "schema" => "public"})
+
+ assert_receive %Message{
+ event: "phx_reply",
+ payload: %{
+ "response" => %{
+ "postgres_changes" => [
+ %{"event" => "DELETE", "id" => ^sub_id, "schema" => "public"}
+ ]
+ },
+ "status" => "ok"
+ },
+ ref: "1",
+ topic: ^topic
+ },
+ 200
+
+ assert_receive %Phoenix.Socket.Message{event: "presence_state", payload: %{}, topic: ^topic}, 500
+
+ assert_receive %Message{
+ event: "system",
+ payload: %{
+ "channel" => "any",
+ "extension" => "postgres_changes",
+ "message" => "Subscribed to PostgreSQL",
+ "status" => "ok"
+ },
+ ref: nil,
+ topic: ^topic
+ },
+ 8000
+
+ {:ok, _, conn} = PostgresCdcRls.get_manager_conn(tenant.external_id)
+ %{rows: [[id]]} = Postgrex.query!(conn, "insert into test (details) values ('test') returning id", [])
+ Postgrex.query!(conn, "delete from test where id = #{id}", [])
+
+ assert_receive %Message{
+ event: "postgres_changes",
+ payload: %{
+ "data" => %{
+ "columns" => [
+ %{"name" => "id", "type" => "int4"},
+ %{"name" => "details", "type" => "text"},
+ %{"name" => "binary_data", "type" => "bytea"}
+ ],
+ "commit_timestamp" => _ts,
+ "errors" => nil,
+ "old_record" => %{"id" => ^id},
+ "schema" => "public",
+ "table" => "test",
+ "type" => "DELETE"
+ },
+ "ids" => [^sub_id]
+ },
+ ref: nil,
+ topic: "realtime:any"
+ },
+ 500
+ end
+ end
+
+ describe "wildcard" do
+ test "handle wildcard", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer)
+ topic = "realtime:any"
+ config = %{postgres_changes: [%{event: "*", schema: "public"}]}
+
+ WebsocketClient.join(socket, topic, %{config: config})
+ sub_id = :erlang.phash2(%{"event" => "*", "schema" => "public"})
+
+ assert_receive %Message{
+ event: "phx_reply",
+ payload: %{
+ "response" => %{
+ "postgres_changes" => [
+ %{"event" => "*", "id" => ^sub_id, "schema" => "public"}
+ ]
+ },
+ "status" => "ok"
+ },
+ ref: "1",
+ topic: ^topic
+ },
+ 200
+
+ assert_receive %Phoenix.Socket.Message{event: "presence_state", payload: %{}, topic: ^topic}, 500
+
+ assert_receive %Message{
+ event: "system",
+ payload: %{
+ "channel" => "any",
+ "extension" => "postgres_changes",
+ "message" => "Subscribed to PostgreSQL",
+ "status" => "ok"
+ },
+ ref: nil,
+ topic: ^topic
+ },
+ 8000
+
+ {:ok, _, conn} = PostgresCdcRls.get_manager_conn(tenant.external_id)
+ %{rows: [[id]]} = Postgrex.query!(conn, "insert into test (details) values ('test') returning id", [])
+
+ assert_receive %Message{
+ event: "postgres_changes",
+ payload: %{
+ "data" => %{
+ "columns" => [
+ %{"name" => "id", "type" => "int4"},
+ %{"name" => "details", "type" => "text"},
+ %{"name" => "binary_data", "type" => "bytea"}
+ ],
+ "commit_timestamp" => _ts,
+ "errors" => nil,
+ "record" => %{"id" => ^id},
+ "schema" => "public",
+ "table" => "test",
+ "type" => "INSERT"
+ },
+ "ids" => [^sub_id]
+ },
+ ref: nil,
+ topic: "realtime:any"
+ },
+ 500
+
+ Postgrex.query!(conn, "update test set details = 'test' where id = #{id}", [])
+
+ assert_receive %Message{
+ event: "postgres_changes",
+ payload: %{
+ "data" => %{
+ "columns" => [
+ %{"name" => "id", "type" => "int4"},
+ %{"name" => "details", "type" => "text"},
+ %{"name" => "binary_data", "type" => "bytea"}
+ ],
+ "commit_timestamp" => _ts,
+ "errors" => nil,
+ "old_record" => %{"id" => ^id},
+ "record" => %{"details" => "test", "id" => ^id},
+ "schema" => "public",
+ "table" => "test",
+ "type" => "UPDATE"
+ },
+ "ids" => [^sub_id]
+ },
+ ref: nil,
+ topic: "realtime:any"
+ },
+ 500
+
+ Postgrex.query!(conn, "delete from test where id = #{id}", [])
+
+ assert_receive %Message{
+ event: "postgres_changes",
+ payload: %{
+ "data" => %{
+ "columns" => [
+ %{"name" => "id", "type" => "int4"},
+ %{"name" => "details", "type" => "text"},
+ %{"name" => "binary_data", "type" => "bytea"}
+ ],
+ "commit_timestamp" => _ts,
+ "errors" => nil,
+ "old_record" => %{"id" => ^id},
+ "schema" => "public",
+ "table" => "test",
+ "type" => "DELETE"
+ },
+ "ids" => [^sub_id]
+ },
+ ref: nil,
+ topic: "realtime:any"
+ },
+ 500
+ end
+ end
+
+ describe "error handling" do
+ test "error subscribing", %{tenant: tenant, serializer: serializer} do
+ {:ok, conn} = Database.connect(tenant, "realtime_test")
+
+ {:ok, _} =
+ Database.transaction(conn, fn db_conn ->
+ Postgrex.query!(db_conn, "drop publication if exists supabase_realtime_test")
+ end)
+
+ {socket, _} = get_connection(tenant, serializer)
+ topic = "realtime:any"
+ config = %{postgres_changes: [%{event: "INSERT", schema: "public"}]}
+
+ log =
+ capture_log(fn ->
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ assert_receive %Message{
+ event: "system",
+ payload: %{
+ "channel" => "any",
+ "extension" => "postgres_changes",
+ "message" =>
+ "Unable to subscribe to changes with given parameters. Please check Realtime is enabled for the given connect parameters: [event: INSERT, schema: public, table: *, filters: []]",
+ "status" => "error"
+ },
+ ref: nil,
+ topic: ^topic
+ },
+ 8000
+ end)
+
+ assert log =~ "RealtimeDisabledForConfiguration"
+ assert log =~ "Unable to subscribe to changes with given parameters"
+ end
+
+ test "handle nil postgres changes params as empty param changes", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer)
+ topic = "realtime:any"
+ config = %{postgres_changes: [nil]}
+
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 200
+ assert_receive %Phoenix.Socket.Message{event: "presence_state", payload: %{}, topic: ^topic}, 500
+
+ refute_receive %Message{
+ event: "system",
+ payload: %{
+ "channel" => "any",
+ "extension" => "postgres_changes",
+ "message" => "Subscribed to PostgreSQL",
+ "status" => "ok"
+ },
+ ref: nil,
+ topic: ^topic
+ },
+ 1000
+ end
+ end
+end
diff --git a/test/integration/rt_channel/presence_test.exs b/test/integration/rt_channel/presence_test.exs
new file mode 100644
index 000000000..d4c125a10
--- /dev/null
+++ b/test/integration/rt_channel/presence_test.exs
@@ -0,0 +1,316 @@
+defmodule Realtime.Integration.RtChannel.PresenceTest do
+ use RealtimeWeb.ConnCase,
+ async: true,
+ parameterize: [
+ %{serializer: Phoenix.Socket.V1.JSONSerializer},
+ %{serializer: RealtimeWeb.Socket.V2Serializer}
+ ]
+
+ import ExUnit.CaptureLog
+ import Generators
+
+ alias Phoenix.Socket.Message
+ alias Realtime.Integration.WebsocketClient
+ alias Realtime.Tenants.Connect
+
+ @moduletag :capture_log
+
+ setup [:checkout_tenant_and_connect]
+
+ describe "public presence" do
+ setup [:rls_context]
+
+ test "public presence", %{tenant: tenant, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer)
+ config = %{presence: %{key: "", enabled: true}, private: false}
+ topic = "realtime:any"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ assert_receive %Message{event: "presence_state", payload: %{}, topic: ^topic}, 500
+
+ payload = %{
+ type: "presence",
+ event: "TRACK",
+ payload: %{name: "realtime_presence_96", t: 1814.7000000029802}
+ }
+
+ WebsocketClient.send_event(socket, topic, "presence", payload)
+
+ assert_receive %Message{event: "presence_diff", payload: %{"joins" => joins, "leaves" => %{}}, topic: ^topic}
+
+ join_payload = joins |> Map.values() |> hd() |> get_in(["metas"]) |> hd()
+ assert get_in(join_payload, ["name"]) == payload.payload.name
+ assert get_in(join_payload, ["t"]) == payload.payload.t
+ end
+
+ test "presence enabled if param enabled is set in configuration for public channels", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, topic, %{config: %{private: false, presence: %{enabled: true}}})
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+ end
+
+ test "presence disabled if param 'enabled' is set to false in configuration for public channels", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, topic, %{config: %{private: false, presence: %{enabled: false}}})
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ refute_receive %Message{event: "presence_state"}, 500
+ end
+
+ test "presence automatically enabled when user sends track message for public channel", %{
+ tenant: tenant,
+ serializer: serializer
+ } do
+ {socket, _} = get_connection(tenant, serializer)
+ config = %{presence: %{key: "", enabled: false}, private: false}
+ topic = "realtime:any"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ refute_receive %Message{event: "presence_state"}, 500
+
+ payload = %{
+ type: "presence",
+ event: "TRACK",
+ payload: %{name: "realtime_presence_96", t: 1814.7000000029802}
+ }
+
+ WebsocketClient.send_event(socket, topic, "presence", payload)
+
+ assert_receive %Message{event: "presence_diff", payload: %{"joins" => joins, "leaves" => %{}}, topic: ^topic}
+
+ join_payload = joins |> Map.values() |> hd() |> get_in(["metas"]) |> hd()
+ assert get_in(join_payload, ["name"]) == payload.payload.name
+ assert get_in(join_payload, ["t"]) == payload.payload.t
+ end
+ end
+
+ describe "private presence" do
+ setup [:rls_context]
+
+ @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
+ test "private presence with read and write permissions will be able to track and receive presence changes",
+ %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{presence: %{key: "", enabled: true}, private: true}
+ topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+ assert_receive %Message{event: "presence_state", payload: %{}, topic: ^topic}, 500
+
+ payload = %{
+ type: "presence",
+ event: "TRACK",
+ payload: %{name: "realtime_presence_96", t: 1814.7000000029802}
+ }
+
+ WebsocketClient.send_event(socket, topic, "presence", payload)
+ refute_receive %Message{event: "phx_leave", topic: ^topic}
+ assert_receive %Message{event: "presence_diff", payload: %{"joins" => joins, "leaves" => %{}}, topic: ^topic}, 500
+ join_payload = joins |> Map.values() |> hd() |> get_in(["metas"]) |> hd()
+ assert get_in(join_payload, ["name"]) == payload.payload.name
+ assert get_in(join_payload, ["t"]) == payload.payload.t
+ end
+
+ @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence],
+ mode: :distributed
+ test "private presence with read and write permissions will be able to track and receive presence changes using a remote node",
+ %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{presence: %{key: "", enabled: true}, private: true}
+ topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+ assert_receive %Message{event: "presence_state", payload: %{}, topic: ^topic}, 500
+
+ payload = %{
+ type: "presence",
+ event: "TRACK",
+ payload: %{name: "realtime_presence_96", t: 1814.7000000029802}
+ }
+
+ WebsocketClient.send_event(socket, topic, "presence", payload)
+ refute_receive %Message{event: "phx_leave", topic: ^topic}
+ assert_receive %Message{event: "presence_diff", payload: %{"joins" => joins, "leaves" => %{}}, topic: ^topic}, 500
+ join_payload = joins |> Map.values() |> hd() |> get_in(["metas"]) |> hd()
+ assert get_in(join_payload, ["name"]) == payload.payload.name
+ assert get_in(join_payload, ["t"]) == payload.payload.t
+ end
+
+ @tag policies: [:authenticated_read_broadcast_and_presence]
+ test "private presence with read permissions will be able to receive presence changes but won't be able to track",
+ %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ {secondary_socket, _} = get_connection(tenant, serializer, role: "service_role")
+ config = fn key -> %{presence: %{key: key, enabled: true}, private: true} end
+ topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, topic, %{config: config.("authenticated")})
+
+ payload = %{
+ type: "presence",
+ event: "TRACK",
+ payload: %{name: "realtime_presence_96", t: 1814.7000000029802}
+ }
+
+      # This track is ignored: per @tag policies, the authenticated role has read-only presence access
+ WebsocketClient.send_event(socket, topic, "presence", payload)
+
+ assert_receive %Message{topic: ^topic, event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state", payload: %{}, ref: nil, topic: ^topic}
+ refute_receive %Message{event: "presence_diff", payload: _, ref: _, topic: ^topic}
+
+ payload = %{
+ type: "presence",
+ event: "TRACK",
+ payload: %{name: "realtime_presence_97", t: 1814.7000000029802}
+ }
+
+      # This track succeeds: it is sent through the service_role socket
+ WebsocketClient.join(secondary_socket, topic, %{config: config.("service_role")})
+ WebsocketClient.send_event(secondary_socket, topic, "presence", payload)
+
+ assert_receive %Message{topic: ^topic, event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{topic: ^topic, event: "presence_diff", payload: %{"joins" => joins, "leaves" => %{}}}
+ assert_receive %Message{event: "presence_state", payload: %{}, ref: nil, topic: ^topic}
+
+ join_payload = joins |> Map.values() |> hd() |> get_in(["metas"]) |> hd()
+ assert get_in(join_payload, ["name"]) == payload.payload.name
+ assert get_in(join_payload, ["t"]) == payload.payload.t
+
+ assert_receive %Message{topic: ^topic, event: "presence_diff"} = res
+
+ assert join_payload =
+ res
+ |> Map.from_struct()
+ |> get_in([:payload, "joins", "service_role", "metas"])
+ |> hd()
+
+ assert get_in(join_payload, ["name"]) == payload.payload.name
+ assert get_in(join_payload, ["t"]) == payload.payload.t
+ end
+
+ @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
+ test "presence enabled if param enabled is set in configuration for private channels", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, topic, %{config: %{private: true, presence: %{enabled: true}}})
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+ end
+
+ @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
+ test "presence disabled if param 'enabled' is set to false in configuration for private channels", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, topic, %{config: %{private: true, presence: %{enabled: false}}})
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ refute_receive %Message{event: "presence_state"}, 500
+ end
+
+ @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
+ test "presence automatically enabled when user sends track message for private channel",
+ %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{presence: %{key: "", enabled: false}, private: true}
+ topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, topic, %{config: config})
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ refute_receive %Message{event: "presence_state"}, 500
+
+ payload = %{
+ type: "presence",
+ event: "TRACK",
+ payload: %{name: "realtime_presence_96", t: 1814.7000000029802}
+ }
+
+ WebsocketClient.send_event(socket, topic, "presence", payload)
+
+ assert_receive %Message{event: "presence_diff", payload: %{"joins" => joins, "leaves" => %{}}, topic: ^topic}, 500
+ join_payload = joins |> Map.values() |> hd() |> get_in(["metas"]) |> hd()
+ assert get_in(join_payload, ["name"]) == payload.payload.name
+ assert get_in(join_payload, ["t"]) == payload.payload.t
+ end
+ end
+
+ describe "database connection errors" do
+ setup [:rls_context]
+
+ @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
+ test "handles lack of connection to database error on private channels", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ topic = "realtime:#{topic}"
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ WebsocketClient.join(socket, topic, %{config: %{private: true, presence: %{enabled: true}}})
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ assert_receive %Message{event: "presence_state"}
+
+ log =
+ capture_log(fn ->
+ :syn.update_registry(Connect, tenant.external_id, fn _pid, meta -> %{meta | conn: nil} end)
+ payload = %{type: "presence", event: "TRACK", payload: %{name: "realtime_presence_96", t: 1814.7000000029802}}
+ WebsocketClient.send_event(socket, topic, "presence", payload)
+
+ refute_receive %Message{event: "presence_diff"}, 500
+        # Wait well past the 5-second window allowed for the Connection to become ready
+ refute_receive %Message{event: "phx_leave", topic: ^topic}, 16000
+ end)
+
+ assert log =~ ~r/external_id=#{tenant.external_id}.*UnableToHandlePresence/
+ end
+
+ @tag policies: []
+ test "lack of connection to database error does not impact public channels", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ topic = "realtime:#{topic}"
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ WebsocketClient.join(socket, topic, %{config: %{private: false, presence: %{enabled: true}}})
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
+ assert_receive %Message{event: "presence_state"}
+
+ log =
+ capture_log(fn ->
+ :syn.update_registry(Connect, tenant.external_id, fn _pid, meta -> %{meta | conn: nil} end)
+ payload = %{type: "presence", event: "TRACK", payload: %{name: "realtime_presence_96", t: 1814.7000000029802}}
+ WebsocketClient.send_event(socket, topic, "presence", payload)
+
+ assert_receive %Message{event: "presence_diff"}, 500
+ refute_receive %Message{event: "phx_leave", topic: ^topic}
+ end)
+
+ refute log =~ ~r/external_id=#{tenant.external_id}.*UnableToHandlePresence/
+ end
+ end
+end
diff --git a/test/integration/rt_channel/token_handling_test.exs b/test/integration/rt_channel/token_handling_test.exs
new file mode 100644
index 000000000..96c05999e
--- /dev/null
+++ b/test/integration/rt_channel/token_handling_test.exs
@@ -0,0 +1,474 @@
+defmodule Realtime.Integration.RtChannel.TokenHandlingTest do
+ use RealtimeWeb.ConnCase,
+ async: true,
+ parameterize: [%{serializer: Phoenix.Socket.V1.JSONSerializer}, %{serializer: RealtimeWeb.Socket.V2Serializer}]
+
+ import ExUnit.CaptureLog
+ import Generators
+
+ alias Phoenix.Socket.Message
+ alias Realtime.Database
+ alias Realtime.Integration.WebsocketClient
+
+ @moduletag :capture_log
+
+ setup [:checkout_tenant_and_connect]
+
+ describe "token validation" do
+ setup [:rls_context]
+
+ @tag policies: [
+ :authenticated_read_broadcast_and_presence,
+ :authenticated_write_broadcast_and_presence
+ ]
+ test "badly formatted jwt token", %{tenant: tenant, serializer: serializer} do
+ log =
+ capture_log(fn ->
+ WebsocketClient.connect(self(), uri(tenant, serializer), serializer, [{"x-api-key", "bad_token"}])
+ end)
+
+ assert log =~ "MalformedJWT: The token provided is not a valid JWT"
+ end
+
+ test "invalid JWT with expired token", %{tenant: tenant, serializer: serializer} do
+ log =
+ capture_log(fn ->
+ get_connection(tenant, serializer,
+ role: "authenticated",
+ claims: %{:exp => System.system_time(:second) - 1000},
+ params: %{log_level: :info}
+ )
+ end)
+
+ assert log =~ "InvalidJWTToken: Token has expired"
+ end
+
+ test "token required the role key", %{tenant: tenant, serializer: serializer} do
+ {:ok, token} = token_no_role(tenant)
+
+ assert {:error, %{status_code: 403}} =
+ WebsocketClient.connect(self(), uri(tenant, serializer), serializer, [{"x-api-key", token}])
+ end
+
+ test "handles connection with valid api-header but ignorable access_token payload", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ realtime_topic = "realtime:#{topic}"
+
+ log =
+ capture_log(fn ->
+ {:ok, token} =
+ generate_token(tenant, %{
+ exp: System.system_time(:second) + 1000,
+ role: "authenticated",
+ sub: random_string()
+ })
+
+ {:ok, socket} = WebsocketClient.connect(self(), uri(tenant, serializer), serializer, [{"x-api-key", token}])
+
+ WebsocketClient.join(socket, realtime_topic, %{
+ config: %{broadcast: %{self: true}, private: false},
+ access_token: "sb_#{random_string()}"
+ })
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+ end)
+
+ refute log =~ "MalformedJWT: The token provided is not a valid JWT"
+ end
+
+ test "missing claims close connection", %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, access_token} = get_connection(tenant, serializer, role: "authenticated")
+
+ config = %{broadcast: %{self: true}, private: false}
+ realtime_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+ {:ok, token} = generate_token(tenant, %{:exp => System.system_time(:second) + 2000})
+
+ # Update token to one missing the required `role` claim (only `exp` is set)
+ WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => token})
+
+ assert_receive %Message{
+ event: "system",
+ payload: %{
+ "extension" => "system",
+ "message" => "Fields `role` and `exp` are required in JWT",
+ "status" => "error"
+ }
+ },
+ 500
+
+ assert_receive %Message{event: "phx_close"}
+ end
+ end
+
+ describe "access token refresh" do
+ setup [:rls_context]
+
+ @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
+ test "on new access_token and channel is private policies are reevaluated for read policy",
+ %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, access_token} = get_connection(tenant, serializer, role: "authenticated")
+
+ realtime_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, realtime_topic, %{
+ config: %{broadcast: %{self: true}, private: true},
+ access_token: access_token
+ })
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+
+ {:ok, new_token} = token_valid(tenant, "anon")
+
+ WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => new_token})
+
+ error_message = "You do not have permissions to read from this Channel topic: #{topic}"
+
+ assert_receive %Message{
+ event: "system",
+ payload: %{"channel" => ^topic, "extension" => "system", "message" => ^error_message, "status" => "error"},
+ topic: ^realtime_topic
+ }
+
+ assert_receive %Message{event: "phx_close", topic: ^realtime_topic}
+ end
+
+ @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
+ test "on new access_token and channel is private policies are reevaluated for write policy", %{
+ topic: topic,
+ tenant: tenant,
+ serializer: serializer
+ } do
+ {socket, access_token} = get_connection(tenant, serializer, role: "authenticated")
+ realtime_topic = "realtime:#{topic}"
+ config = %{broadcast: %{self: true}, private: true}
+ WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+
+ # Checks first send which will set write policy to true
+ payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
+ WebsocketClient.send_event(socket, realtime_topic, "broadcast", payload)
+
+ assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^realtime_topic}, 500
+
+ # RLS policies changed to only allow read
+ {:ok, db_conn} = Database.connect(tenant, "realtime_test")
+ clean_table(db_conn, "realtime", "messages")
+ create_rls_policies(db_conn, [:authenticated_read_broadcast_and_presence], %{topic: topic})
+
+ # Set new token to recheck policies
+ {:ok, new_token} =
+ generate_token(tenant, %{exp: System.system_time(:second) + 1000, role: "authenticated", sub: random_string()})
+
+ WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => new_token})
+
+ # Send message to be ignored
+ payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
+ WebsocketClient.send_event(socket, realtime_topic, "broadcast", payload)
+
+ refute_receive %Message{
+ event: "broadcast",
+ payload: ^payload,
+ topic: ^realtime_topic
+ },
+ 1500
+ end
+
+ test "on new access_token and channel is public policies are not reevaluated", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ {socket, access_token} = get_connection(tenant, serializer, role: "authenticated")
+ {:ok, new_token} = token_valid(tenant, "anon")
+ config = %{broadcast: %{self: true}, private: false}
+ realtime_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+
+ WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => new_token})
+
+ refute_receive %Message{}
+ end
+
+ test "on empty string access_token the socket sends an error message", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ {socket, access_token} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: false}
+ realtime_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+
+ WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => ""})
+
+ assert_receive %Message{
+ topic: ^realtime_topic,
+ event: "system",
+ payload: %{
+ "extension" => "system",
+ "message" => msg,
+ "status" => "error"
+ }
+ }
+
+ assert_receive %Message{event: "phx_close"}
+ assert msg =~ "The token provided is not a valid JWT"
+ end
+
+ test "on expired access_token the socket sends an error message", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ sub = random_string()
+
+ {socket, access_token} = get_connection(tenant, serializer, role: "authenticated", claims: %{sub: sub})
+
+ config = %{broadcast: %{self: true}, private: false}
+ realtime_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+ {:ok, token} = generate_token(tenant, %{:exp => System.system_time(:second) - 1000, sub: sub})
+
+ log =
+ capture_log(fn ->
+ WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => token})
+
+ assert_receive %Message{
+ topic: ^realtime_topic,
+ event: "system",
+ payload: %{"extension" => "system", "message" => "Token has expired " <> _, "status" => "error"}
+ }
+
+ assert_receive %Message{event: "phx_close", topic: ^realtime_topic}
+ end)
+
+ assert log =~ "ChannelShutdown: Token has expired"
+ end
+
+ test "ChannelShutdown include sub if available in jwt claims", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ exp = System.system_time(:second) + 10_000
+
+ {socket, access_token} =
+ get_connection(tenant, serializer, role: "authenticated", claims: %{exp: exp}, params: %{log_level: :warning})
+
+ config = %{broadcast: %{self: true}, private: false}
+ realtime_topic = "realtime:#{topic}"
+ sub = random_string()
+ WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^realtime_topic}, 500
+ assert_receive %Message{event: "presence_state", topic: ^realtime_topic}, 500
+
+ {:ok, token} = generate_token(tenant, %{:exp => System.system_time(:second) - 1000, sub: sub})
+
+ log =
+ capture_log([level: :warning], fn ->
+ WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => token})
+
+ assert_receive %Message{event: "system"}, 1000
+ assert_receive %Message{event: "phx_close", topic: ^realtime_topic}
+ end)
+
+ assert log =~ "ChannelShutdown"
+ assert log =~ "sub=#{sub}"
+ end
+
+ test "on sb prefixed access_token the socket ignores the message and respects JWT expiry time", %{
+ tenant: tenant,
+ topic: topic,
+ serializer: serializer
+ } do
+ sub = random_string()
+
+ {socket, access_token} =
+ get_connection(tenant, serializer,
+ role: "authenticated",
+ claims: %{sub: sub, exp: System.system_time(:second) + 5}
+ )
+
+ config = %{broadcast: %{self: true}, private: false}
+ realtime_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+
+ WebsocketClient.send_event(socket, realtime_topic, "access_token", %{
+ "access_token" => "sb_publishable_-fake_key"
+ })
+
+ # Check if the new token does not trigger a shutdown
+ refute_receive %Message{event: "system", topic: ^realtime_topic}, 100
+
+ # Await to check if channel respects token expiry time
+ assert_receive %Message{
+ event: "system",
+ payload: %{"extension" => "system", "message" => msg, "status" => "error"},
+ topic: ^realtime_topic
+ },
+ 5000
+
+ assert_receive %Message{event: "phx_close", topic: ^realtime_topic}
+ assert msg =~ "Token has expired"
+ end
+ end
+
+ describe "token expiry" do
+ setup [:rls_context]
+
+ test "checks token periodically", %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, access_token} = get_connection(tenant, serializer, role: "authenticated")
+
+ config = %{broadcast: %{self: true}, private: false}
+ realtime_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+
+ {:ok, token} =
+ generate_token(tenant, %{:exp => System.system_time(:second) + 2, role: "authenticated"})
+
+ # Update token to be a near expiring token
+ WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => token})
+
+ # Awaits to see if connection closes automatically
+ assert_receive %Message{
+ event: "system",
+ payload: %{"extension" => "system", "message" => msg, "status" => "error"}
+ },
+ 3000
+
+ assert_receive %Message{event: "phx_close"}
+
+ assert msg =~ "Token has expired"
+ end
+
+ test "token expires in between joins", %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, access_token} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: false}
+ realtime_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+
+ {:ok, access_token} =
+ generate_token(tenant, %{:exp => System.system_time(:second) + 1, role: "authenticated"})
+
+ # token expires in between joins so it needs to be handled by the channel and not the socket
+ Process.sleep(1000)
+ realtime_topic = "realtime:#{topic}"
+
+ log =
+ capture_log(fn ->
+ WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
+
+ assert_receive %Message{
+ event: "phx_reply",
+ payload: %{
+ "status" => "error",
+ "response" => %{"reason" => reason}
+ },
+ topic: ^realtime_topic
+ },
+ 500
+
+ assert reason =~ "InvalidJWTToken: Token has expired"
+ end)
+
+ assert_receive %Message{event: "phx_close"}
+ assert log =~ "#{tenant.external_id}"
+ end
+
+ test "token loses claims in between joins", %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, access_token} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: false}
+ realtime_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+
+ {:ok, access_token} = generate_token(tenant, %{:exp => System.system_time(:second) + 10})
+
+ # token loses the required `role` claim in between joins, so it must be handled by the channel and not the socket
+ realtime_topic = "realtime:#{topic}"
+ WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
+
+ assert_receive %Message{
+ event: "phx_reply",
+ payload: %{
+ "status" => "error",
+ "response" => %{
+ "reason" => "InvalidJWTToken: Fields `role` and `exp` are required in JWT"
+ }
+ },
+ topic: ^realtime_topic
+ },
+ 500
+
+ assert_receive %Message{event: "phx_close"}
+ end
+
+ test "token is badly formatted in between joins", %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, access_token} = get_connection(tenant, serializer, role: "authenticated")
+ config = %{broadcast: %{self: true}, private: false}
+ realtime_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
+
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+
+ # token becomes a malformed (non-JWT) string in between joins, so it must be handled by the channel and not the socket
+ WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: "potato"})
+
+ assert_receive %Message{
+ event: "phx_reply",
+ payload: %{
+ "status" => "error",
+ "response" => %{
+ "reason" => "MalformedJWT: The token provided is not a valid JWT"
+ }
+ },
+ topic: ^realtime_topic
+ },
+ 500
+
+ assert_receive %Message{event: "phx_close"}
+ end
+ end
+end
diff --git a/test/integration/rt_channel/wal_bloat_test.exs b/test/integration/rt_channel/wal_bloat_test.exs
new file mode 100644
index 000000000..3e063d90d
--- /dev/null
+++ b/test/integration/rt_channel/wal_bloat_test.exs
@@ -0,0 +1,183 @@
+defmodule Realtime.Integration.RtChannel.WalBloatTest do
+ use RealtimeWeb.ConnCase,
+ async: false,
+ parameterize: [
+ %{serializer: Phoenix.Socket.V1.JSONSerializer},
+ %{serializer: RealtimeWeb.Socket.V2Serializer}
+ ]
+
+ import Generators
+
+ alias Phoenix.Socket.Message
+ alias Postgrex
+ alias Realtime.Database
+ alias Realtime.Integration.WebsocketClient
+ alias Realtime.Tenants.Connect
+ alias Realtime.Tenants.ReplicationConnection
+
+ @moduletag :capture_log
+
+ setup [:checkout_tenant_and_connect]
+
+ describe "WAL bloat handling" do
+ setup %{tenant: tenant} do
+ topic = random_string()
+ {:ok, db_conn} = Database.connect(tenant, "realtime_test", :stop)
+
+ %{rows: [[max_wal_size]]} = Postgrex.query!(db_conn, "SHOW max_wal_size", [])
+ %{rows: [[wal_keep_size]]} = Postgrex.query!(db_conn, "SHOW wal_keep_size", [])
+ %{rows: [[max_slot_wal_keep_size]]} = Postgrex.query!(db_conn, "SHOW max_slot_wal_keep_size", [])
+
+ assert max_wal_size == "32MB"
+ assert wal_keep_size == "32MB"
+ assert max_slot_wal_keep_size == "32MB"
+
+ Postgrex.query!(db_conn, "CREATE TABLE IF NOT EXISTS wal_test (id INT, data TEXT)", [])
+
+ Postgrex.query!(
+ db_conn,
+ """
+ CREATE OR REPLACE FUNCTION wal_test_trigger_func() RETURNS TRIGGER AS $$
+ BEGIN
+ PERFORM realtime.send(json_build_object ('value', 'test' :: text)::jsonb, 'test', '#{topic}', false);
+ RETURN NULL;
+ END;
+ $$ LANGUAGE plpgsql;
+ """,
+ []
+ )
+
+ Postgrex.query!(db_conn, "DROP TRIGGER IF EXISTS wal_test_trigger ON wal_test", [])
+
+ Postgrex.query!(
+ db_conn,
+ """
+ CREATE TRIGGER wal_test_trigger
+ AFTER INSERT OR UPDATE OR DELETE ON wal_test
+ FOR EACH ROW
+ EXECUTE FUNCTION wal_test_trigger_func()
+ """,
+ []
+ )
+
+ GenServer.stop(db_conn)
+
+ on_exit(fn ->
+ {:ok, db_conn} = Database.connect(tenant, "realtime_test", :stop)
+
+ Postgrex.query!(db_conn, "DROP TABLE IF EXISTS wal_test CASCADE", [])
+ GenServer.stop(db_conn)
+ end)
+
+ %{topic: topic}
+ end
+
+ @tag timeout: :timer.minutes(3)
+ test "track PID changes during WAL bloat creation", %{tenant: tenant, topic: topic, serializer: serializer} do
+ {socket, _} = get_connection(tenant, serializer, role: "authenticated")
+ full_topic = "realtime:#{topic}"
+
+ WebsocketClient.join(socket, full_topic, %{config: %{broadcast: %{self: true}, private: false}})
+ assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert_receive %Message{event: "presence_state"}, 500
+ assert Connect.ready?(tenant.external_id)
+
+ {:ok, db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
+ original_connect_pid = Connect.whereis(tenant.external_id)
+ original_replication_pid = ReplicationConnection.whereis(tenant.external_id)
+ await_replication_slot_active(db_conn, 30, 500)
+ original_db_pid = active_replication_slot_pid!(db_conn)
+
+ replication_ref = Process.monitor(original_replication_pid)
+
+ generate_wal_bloat(tenant)
+ terminate_bloat_connections(db_conn)
+
+ assert_receive {:DOWN, ^replication_ref, :process, ^original_replication_pid, _}, 60_000
+
+ assert Connect.ready?(tenant.external_id)
+ {:ok, db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
+ new_db_pid = await_replication_slot_active(db_conn, 60, 1000)
+
+ assert new_db_pid != original_db_pid
+ assert ^original_connect_pid = Connect.whereis(tenant.external_id)
+ assert original_replication_pid != ReplicationConnection.whereis(tenant.external_id)
+
+ payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
+ WebsocketClient.send_event(socket, full_topic, "broadcast", payload)
+ assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^full_topic}, 500
+
+ Postgrex.query!(db_conn, "INSERT INTO wal_test VALUES (1, 'test')", [])
+
+ assert_receive %Message{
+ event: "broadcast",
+ payload: %{
+ "event" => "test",
+ "payload" => %{"value" => "test"},
+ "type" => "broadcast"
+ },
+ join_ref: nil,
+ ref: nil,
+ topic: ^full_topic
+ },
+ 5000
+ end
+ end
+
+ defp active_replication_slot_pid!(db_conn) do
+ %{rows: [[pid]]} =
+ Postgrex.query!(
+ db_conn,
+ "SELECT active_pid FROM pg_replication_slots WHERE active_pid IS NOT NULL AND slot_name = 'supabase_realtime_messages_replication_slot_'",
+ []
+ )
+
+ pid
+ end
+
+ defp await_replication_slot_active(db_conn, retries, interval_ms) do
+ Enum.reduce_while(1..retries, nil, fn _, _ ->
+ case Postgrex.query!(
+ db_conn,
+ "SELECT active_pid FROM pg_replication_slots WHERE active_pid IS NOT NULL AND slot_name = 'supabase_realtime_messages_replication_slot_'",
+ []
+ ) do
+ %{rows: [[pid]]} ->
+ {:halt, pid}
+
+ _ ->
+ Process.sleep(interval_ms)
+ {:cont, nil}
+ end
+ end)
+ |> then(fn
+ nil -> flunk("Replication slot did not become active within #{retries}s")
+ pid -> pid
+ end)
+ end
+
+ defp generate_wal_bloat(tenant) do
+ 1..5
+ |> Enum.map(fn _ ->
+ Task.async(fn ->
+ {:ok, conn} = Database.connect(tenant, "realtime_bloat", :stop)
+
+ Postgrex.transaction(conn, fn tx ->
+ Postgrex.query(tx, "INSERT INTO wal_test SELECT generate_series(1, 100000), repeat('x', 2000)", [])
+ {:error, "test"}
+ end)
+
+ Process.exit(conn, :normal)
+ end)
+ end)
+ |> Task.await_many(20_000)
+ end
+
+ defp terminate_bloat_connections(db_conn) do
+ Postgrex.query!(
+ db_conn,
+ "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE application_name = 'realtime_bloat'",
+ []
+ )
+ end
+end
diff --git a/test/integration/rt_channel_test.exs b/test/integration/rt_channel_test.exs
deleted file mode 100644
index 806a5ad7e..000000000
--- a/test/integration/rt_channel_test.exs
+++ /dev/null
@@ -1,2414 +0,0 @@
-defmodule Realtime.Integration.RtChannelTest do
- # async: false due to the fact that multiple operations against the same tenant and usage of mocks
- # Also using dev_tenant due to distributed test
- alias Realtime.Api
- use RealtimeWeb.ConnCase, async: false
- use Mimic
- import ExUnit.CaptureLog
- import Generators
-
- setup :set_mimic_global
-
- require Logger
-
- alias Extensions.PostgresCdcRls
-
- alias Phoenix.Socket.Message
- alias Phoenix.Socket.V1
-
- alias Postgrex
-
- alias Realtime.Api.Tenant
- alias Realtime.Database
- alias Realtime.Integration.WebsocketClient
- alias Realtime.RateCounter
- alias Realtime.Tenants
- alias Realtime.Tenants.Authorization
- alias Realtime.Tenants.Connect
-
- alias RealtimeWeb.RealtimeChannel.Tracker
- alias RealtimeWeb.SocketDisconnect
-
- @moduletag :capture_log
- @port 4003
- @serializer V1.JSONSerializer
-
- Application.put_env(:phoenix, TestEndpoint,
- https: false,
- http: [port: @port],
- debug_errors: false,
- server: true,
- pubsub_server: __MODULE__,
- secret_key_base: String.duplicate("a", 64)
- )
-
- setup_all do
- capture_log(fn -> start_supervised!(TestEndpoint) end)
- start_supervised!({Phoenix.PubSub, name: __MODULE__})
- :ok
- end
-
- setup [:mode]
-
- describe "postgres changes" do
- setup %{tenant: tenant} do
- {:ok, conn} = Database.connect(tenant, "realtime_test")
-
- Database.transaction(conn, fn db_conn ->
- queries = [
- "drop table if exists public.test",
- "drop publication if exists supabase_realtime_test",
- "create sequence if not exists test_id_seq;",
- """
- create table if not exists "public"."test" (
- "id" int4 not null default nextval('test_id_seq'::regclass),
- "details" text,
- primary key ("id"));
- """,
- "grant all on table public.test to anon;",
- "grant all on table public.test to postgres;",
- "grant all on table public.test to authenticated;",
- "create publication supabase_realtime_test for all tables"
- ]
-
- Enum.each(queries, &Postgrex.query!(db_conn, &1, []))
- end)
-
- :ok
- end
-
- test "error subscribing", %{tenant: tenant} do
- {:ok, conn} = Database.connect(tenant, "realtime_test")
-
- # Let's drop the publication to cause an error
- Database.transaction(conn, fn db_conn ->
- Postgrex.query!(db_conn, "drop publication if exists supabase_realtime_test")
- end)
-
- {socket, _} = get_connection(tenant)
- topic = "realtime:any"
- config = %{postgres_changes: [%{event: "INSERT", schema: "public"}]}
-
- log =
- capture_log(fn ->
- WebsocketClient.join(socket, topic, %{config: config})
-
- assert_receive %Message{
- event: "system",
- payload: %{
- "channel" => "any",
- "extension" => "postgres_changes",
- "message" =>
- "{:error, \"Unable to subscribe to changes with given parameters. Please check Realtime is enabled for the given connect parameters: [event: INSERT, schema: public]\"}",
- "status" => "error"
- },
- ref: nil,
- topic: ^topic
- },
- 8000
- end)
-
- assert log =~ "RealtimeDisabledForConfiguration"
- assert log =~ "Unable to subscribe to changes with given parameters"
- end
-
- test "handle insert", %{tenant: tenant} do
- {socket, _} = get_connection(tenant)
- topic = "realtime:any"
- config = %{postgres_changes: [%{event: "INSERT", schema: "public"}]}
-
- WebsocketClient.join(socket, topic, %{config: config})
- sub_id = :erlang.phash2(%{"event" => "INSERT", "schema" => "public"})
-
- assert_receive %Message{
- event: "phx_reply",
- payload: %{
- "response" => %{
- "postgres_changes" => [
- %{"event" => "INSERT", "id" => ^sub_id, "schema" => "public"}
- ]
- },
- "status" => "ok"
- },
- topic: ^topic
- },
- 200
-
- assert_receive %Phoenix.Socket.Message{event: "presence_state", payload: %{}, topic: ^topic}, 500
-
- assert_receive %Message{
- event: "system",
- payload: %{
- "channel" => "any",
- "extension" => "postgres_changes",
- "message" => "Subscribed to PostgreSQL",
- "status" => "ok"
- },
- ref: nil,
- topic: ^topic
- },
- 8000
-
- {:ok, _, conn} = PostgresCdcRls.get_manager_conn(tenant.external_id)
- %{rows: [[id]]} = Postgrex.query!(conn, "insert into test (details) values ('test') returning id", [])
-
- assert_receive %Message{
- event: "postgres_changes",
- payload: %{
- "data" => %{
- "columns" => [
- %{"name" => "id", "type" => "int4"},
- %{"name" => "details", "type" => "text"}
- ],
- "commit_timestamp" => _ts,
- "errors" => nil,
- "record" => %{"details" => "test", "id" => ^id},
- "schema" => "public",
- "table" => "test",
- "type" => "INSERT"
- },
- "ids" => [^sub_id]
- },
- ref: nil,
- topic: "realtime:any"
- },
- 500
- end
-
- test "handle update", %{tenant: tenant} do
- {socket, _} = get_connection(tenant)
- topic = "realtime:any"
- config = %{postgres_changes: [%{event: "UPDATE", schema: "public"}]}
-
- WebsocketClient.join(socket, topic, %{config: config})
- sub_id = :erlang.phash2(%{"event" => "UPDATE", "schema" => "public"})
-
- assert_receive %Message{
- event: "phx_reply",
- payload: %{
- "response" => %{
- "postgres_changes" => [
- %{"event" => "UPDATE", "id" => ^sub_id, "schema" => "public"}
- ]
- },
- "status" => "ok"
- },
- ref: "1",
- topic: ^topic
- },
- 200
-
- assert_receive %Phoenix.Socket.Message{event: "presence_state", payload: %{}, topic: ^topic}, 500
-
- assert_receive %Message{
- event: "system",
- payload: %{
- "channel" => "any",
- "extension" => "postgres_changes",
- "message" => "Subscribed to PostgreSQL",
- "status" => "ok"
- },
- ref: nil,
- topic: ^topic
- },
- 8000
-
- {:ok, _, conn} = PostgresCdcRls.get_manager_conn(tenant.external_id)
- %{rows: [[id]]} = Postgrex.query!(conn, "insert into test (details) values ('test') returning id", [])
-
- Postgrex.query!(conn, "update test set details = 'test' where id = #{id}", [])
-
- assert_receive %Message{
- event: "postgres_changes",
- payload: %{
- "data" => %{
- "columns" => [
- %{"name" => "id", "type" => "int4"},
- %{"name" => "details", "type" => "text"}
- ],
- "commit_timestamp" => _ts,
- "errors" => nil,
- "old_record" => %{"id" => ^id},
- "record" => %{"details" => "test", "id" => ^id},
- "schema" => "public",
- "table" => "test",
- "type" => "UPDATE"
- },
- "ids" => [^sub_id]
- },
- ref: nil,
- topic: "realtime:any"
- },
- 500
- end
-
- test "handle delete", %{tenant: tenant} do
- {socket, _} = get_connection(tenant)
- topic = "realtime:any"
- config = %{postgres_changes: [%{event: "DELETE", schema: "public"}]}
-
- WebsocketClient.join(socket, topic, %{config: config})
- sub_id = :erlang.phash2(%{"event" => "DELETE", "schema" => "public"})
-
- assert_receive %Message{
- event: "phx_reply",
- payload: %{
- "response" => %{
- "postgres_changes" => [
- %{"event" => "DELETE", "id" => ^sub_id, "schema" => "public"}
- ]
- },
- "status" => "ok"
- },
- ref: "1",
- topic: ^topic
- },
- 200
-
- assert_receive %Phoenix.Socket.Message{event: "presence_state", payload: %{}, topic: ^topic}, 500
-
- assert_receive %Message{
- event: "system",
- payload: %{
- "channel" => "any",
- "extension" => "postgres_changes",
- "message" => "Subscribed to PostgreSQL",
- "status" => "ok"
- },
- ref: nil,
- topic: ^topic
- },
- 8000
-
- {:ok, _, conn} = PostgresCdcRls.get_manager_conn(tenant.external_id)
- %{rows: [[id]]} = Postgrex.query!(conn, "insert into test (details) values ('test') returning id", [])
- Postgrex.query!(conn, "delete from test where id = #{id}", [])
-
- assert_receive %Message{
- event: "postgres_changes",
- payload: %{
- "data" => %{
- "columns" => [
- %{"name" => "id", "type" => "int4"},
- %{"name" => "details", "type" => "text"}
- ],
- "commit_timestamp" => _ts,
- "errors" => nil,
- "old_record" => %{"id" => ^id},
- "schema" => "public",
- "table" => "test",
- "type" => "DELETE"
- },
- "ids" => [^sub_id]
- },
- ref: nil,
- topic: "realtime:any"
- },
- 500
- end
-
- test "handle wildcard", %{tenant: tenant} do
- {socket, _} = get_connection(tenant)
- topic = "realtime:any"
- config = %{postgres_changes: [%{event: "*", schema: "public"}]}
-
- WebsocketClient.join(socket, topic, %{config: config})
- sub_id = :erlang.phash2(%{"event" => "*", "schema" => "public"})
-
- assert_receive %Message{
- event: "phx_reply",
- payload: %{
- "response" => %{
- "postgres_changes" => [
- %{"event" => "*", "id" => ^sub_id, "schema" => "public"}
- ]
- },
- "status" => "ok"
- },
- ref: "1",
- topic: ^topic
- },
- 200
-
- assert_receive %Phoenix.Socket.Message{event: "presence_state", payload: %{}, topic: ^topic}, 500
-
- assert_receive %Message{
- event: "system",
- payload: %{
- "channel" => "any",
- "extension" => "postgres_changes",
- "message" => "Subscribed to PostgreSQL",
- "status" => "ok"
- },
- ref: nil,
- topic: ^topic
- },
- 8000
-
- {:ok, _, conn} = PostgresCdcRls.get_manager_conn(tenant.external_id)
- %{rows: [[id]]} = Postgrex.query!(conn, "insert into test (details) values ('test') returning id", [])
-
- assert_receive %Message{
- event: "postgres_changes",
- payload: %{
- "data" => %{
- "columns" => [
- %{"name" => "id", "type" => "int4"},
- %{"name" => "details", "type" => "text"}
- ],
- "commit_timestamp" => _ts,
- "errors" => nil,
- "record" => %{"id" => ^id},
- "schema" => "public",
- "table" => "test",
- "type" => "INSERT"
- },
- "ids" => [^sub_id]
- },
- ref: nil,
- topic: "realtime:any"
- },
- 500
-
- Postgrex.query!(conn, "update test set details = 'test' where id = #{id}", [])
-
- assert_receive %Message{
- event: "postgres_changes",
- payload: %{
- "data" => %{
- "columns" => [
- %{"name" => "id", "type" => "int4"},
- %{"name" => "details", "type" => "text"}
- ],
- "commit_timestamp" => _ts,
- "errors" => nil,
- "old_record" => %{"id" => ^id},
- "record" => %{"details" => "test", "id" => ^id},
- "schema" => "public",
- "table" => "test",
- "type" => "UPDATE"
- },
- "ids" => [^sub_id]
- },
- ref: nil,
- topic: "realtime:any"
- },
- 500
-
- Postgrex.query!(conn, "delete from test where id = #{id}", [])
-
- assert_receive %Message{
- event: "postgres_changes",
- payload: %{
- "data" => %{
- "columns" => [
- %{"name" => "id", "type" => "int4"},
- %{"name" => "details", "type" => "text"}
- ],
- "commit_timestamp" => _ts,
- "errors" => nil,
- "old_record" => %{"id" => ^id},
- "schema" => "public",
- "table" => "test",
- "type" => "DELETE"
- },
- "ids" => [^sub_id]
- },
- ref: nil,
- topic: "realtime:any"
- },
- 500
- end
-
- test "handle nil postgres changes params as empty param changes", %{tenant: tenant} do
- {socket, _} = get_connection(tenant)
- topic = "realtime:any"
- config = %{postgres_changes: [nil]}
-
- WebsocketClient.join(socket, topic, %{config: config})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 200
- assert_receive %Phoenix.Socket.Message{event: "presence_state", payload: %{}, topic: ^topic}, 500
-
- refute_receive %Message{
- event: "system",
- payload: %{
- "channel" => "any",
- "extension" => "postgres_changes",
- "message" => "Subscribed to PostgreSQL",
- "status" => "ok"
- },
- ref: nil,
- topic: ^topic
- },
- 1000
- end
- end
-
- describe "handle broadcast extension" do
- setup [:rls_context]
-
- test "public broadcast", %{tenant: tenant} do
- {socket, _} = get_connection(tenant)
- config = %{broadcast: %{self: true}, private: false}
- topic = "realtime:any"
- WebsocketClient.join(socket, topic, %{config: config})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
- assert_receive %Message{event: "presence_state"}
-
- payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
- WebsocketClient.send_event(socket, topic, "broadcast", payload)
-
- assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 500
- end
-
- test "broadcast to another tenant does not get mixed up", %{tenant: tenant} do
- {socket, _} = get_connection(tenant)
- config = %{broadcast: %{self: false}, private: false}
- topic = "realtime:any"
- WebsocketClient.join(socket, topic, %{config: config})
-
- other_tenant = Containers.checkout_tenant(run_migrations: true)
-
- {other_socket, _} = get_connection(other_tenant)
- WebsocketClient.join(other_socket, topic, %{config: config})
-
- # Both sockets joined
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
- assert_receive %Message{event: "presence_state"}
- assert_receive %Message{event: "presence_state"}
-
- payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
- WebsocketClient.send_event(socket, topic, "broadcast", payload)
-
- # No message received
- refute_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 500
- end
-
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
- test "private broadcast with valid channel with permissions sends message", %{tenant: tenant, topic: topic} do
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: true}
- topic = "realtime:#{topic}"
- WebsocketClient.join(socket, topic, %{config: config})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
- assert_receive %Message{event: "presence_state"}
-
- payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
- WebsocketClient.send_event(socket, topic, "broadcast", payload)
-
- assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}
- end
-
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence],
- mode: :distributed
- test "private broadcast with valid channel with permissions sends message using a remote node (phoenix adapter)", %{
- tenant: tenant,
- topic: topic
- } do
- {:ok, token} =
- generate_token(tenant, %{exp: System.system_time(:second) + 1000, role: "authenticated", sub: random_string()})
-
- {:ok, remote_socket} = WebsocketClient.connect(self(), uri(tenant, 4012), @serializer, [{"x-api-key", token}])
- {:ok, socket} = WebsocketClient.connect(self(), uri(tenant), @serializer, [{"x-api-key", token}])
-
- config = %{broadcast: %{self: false}, private: true}
- topic = "realtime:#{topic}"
-
- WebsocketClient.join(remote_socket, topic, %{config: config})
- WebsocketClient.join(socket, topic, %{config: config})
-
- # Send through one socket and receive through the other (self: false)
- payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
- WebsocketClient.send_event(socket, topic, "broadcast", payload)
-
- assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 500
- end
-
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence],
- mode: :distributed
- test "private broadcast with valid channel with permissions sends message using a remote node", %{
- tenant: tenant,
- topic: topic
- } do
- {:ok, token} =
- generate_token(tenant, %{exp: System.system_time(:second) + 1000, role: "authenticated", sub: random_string()})
-
- {:ok, remote_socket} = WebsocketClient.connect(self(), uri(tenant, 4012), @serializer, [{"x-api-key", token}])
- {:ok, socket} = WebsocketClient.connect(self(), uri(tenant), @serializer, [{"x-api-key", token}])
-
- config = %{broadcast: %{self: false}, private: true}
- topic = "realtime:#{topic}"
-
- WebsocketClient.join(remote_socket, topic, %{config: config})
- WebsocketClient.join(socket, topic, %{config: config})
-
- # Send through one socket and receive through the other (self: false)
- payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
- WebsocketClient.send_event(socket, topic, "broadcast", payload)
- assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 500
- end
-
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence],
- topic: "topic"
- test "private broadcast with valid channel a colon character sends message and won't intercept in public channels",
- %{topic: topic, tenant: tenant} do
- {anon_socket, _} = get_connection(tenant, "anon")
- {socket, _} = get_connection(tenant, "authenticated")
- valid_topic = "realtime:#{topic}"
- malicious_topic = "realtime:private:#{topic}"
-
- WebsocketClient.join(socket, valid_topic, %{config: %{broadcast: %{self: true}, private: true}})
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^valid_topic}, 300
- assert_receive %Message{event: "presence_state"}
-
- WebsocketClient.join(anon_socket, malicious_topic, %{config: %{broadcast: %{self: true}, private: false}})
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^malicious_topic}, 300
- assert_receive %Message{event: "presence_state"}
-
- payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
- WebsocketClient.send_event(socket, valid_topic, "broadcast", payload)
-
- assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^valid_topic}, 500
- refute_receive %Message{event: "broadcast"}
- end
-
- @tag policies: [:authenticated_read_broadcast_and_presence]
- test "private broadcast with valid channel no write permissions won't send message but will receive message", %{
- tenant: tenant,
- topic: topic
- } do
- config = %{broadcast: %{self: true}, private: true}
- topic = "realtime:#{topic}"
-
- {service_role_socket, _} = get_connection(tenant, "service_role")
- WebsocketClient.join(service_role_socket, topic, %{config: config})
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
- assert_receive %Message{event: "presence_state"}
-
- {socket, _} = get_connection(tenant, "authenticated")
- WebsocketClient.join(socket, topic, %{config: config})
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
- assert_receive %Message{event: "presence_state"}
-
- payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
-
- WebsocketClient.send_event(socket, topic, "broadcast", payload)
- refute_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 500
-
- WebsocketClient.send_event(service_role_socket, topic, "broadcast", payload)
- assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 500
- assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 500
- end
-
- @tag policies: []
- test "private broadcast with valid channel and no read permissions won't join", %{tenant: tenant, topic: topic} do
- config = %{private: true}
- expected = "Unauthorized: You do not have permissions to read from this Channel topic: #{topic}"
-
- topic = "realtime:#{topic}"
- {socket, _} = get_connection(tenant, "authenticated")
-
- log =
- capture_log(fn ->
- WebsocketClient.join(socket, topic, %{config: config})
-
- assert_receive %Message{
- topic: ^topic,
- event: "phx_reply",
- payload: %{
- "response" => %{
- "reason" => ^expected
- },
- "status" => "error"
- }
- },
- 300
-
- refute_receive %Message{event: "phx_reply", topic: ^topic}, 300
- refute_receive %Message{event: "presence_state"}, 300
- end)
-
- assert log =~ expected
- end
-
- @tag policies: [:authenticated_read_broadcast_and_presence]
- test "handles lack of connection to database error on private channels", %{tenant: tenant, topic: topic} do
- topic = "realtime:#{topic}"
- {socket, _} = get_connection(tenant, "authenticated")
- WebsocketClient.join(socket, topic, %{config: %{broadcast: %{self: true}, private: true}})
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
- assert_receive %Message{event: "presence_state"}
-
- {service_role_socket, _} = get_connection(tenant, "service_role")
- WebsocketClient.join(service_role_socket, topic, %{config: %{broadcast: %{self: false}, private: true}})
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
- assert_receive %Message{event: "presence_state"}
-
- log =
- capture_log(fn ->
- :syn.update_registry(Connect, tenant.external_id, fn _pid, meta -> %{meta | conn: nil} end)
- payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
- WebsocketClient.send_event(service_role_socket, topic, "broadcast", payload)
- # Waiting more than 5 seconds as this is the amount of time we will wait for the Connection to be ready
- refute_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 6000
- end)
-
- assert log =~ "UnableToHandleBroadcast"
- end
-
- @tag policies: []
- test "lack of connection to database error does not impact public channels", %{tenant: tenant, topic: topic} do
- topic = "realtime:#{topic}"
- {socket, _} = get_connection(tenant, "authenticated")
- WebsocketClient.join(socket, topic, %{config: %{broadcast: %{self: true}, private: false}})
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
- assert_receive %Message{event: "presence_state"}
-
- {service_role_socket, _} = get_connection(tenant, "service_role")
- WebsocketClient.join(service_role_socket, topic, %{config: %{broadcast: %{self: false}, private: false}})
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
- assert_receive %Message{event: "presence_state"}
-
- log =
- capture_log(fn ->
- :syn.update_registry(Connect, tenant.external_id, fn _pid, meta -> %{meta | conn: nil} end)
- payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
- WebsocketClient.send_event(service_role_socket, topic, "broadcast", payload)
- assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^topic}, 500
- end)
-
- refute log =~ "UnableToHandleBroadcast"
- end
- end
-
- describe "handle presence extension" do
- setup [:rls_context]
-
- test "public presence", %{tenant: tenant} do
- {socket, _} = get_connection(tenant)
- config = %{presence: %{key: "", enabled: true}, private: false}
- topic = "realtime:any"
-
- WebsocketClient.join(socket, topic, %{config: config})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
- assert_receive %Message{event: "presence_state", payload: %{}, topic: ^topic}, 500
-
- payload = %{
- type: "presence",
- event: "TRACK",
- payload: %{name: "realtime_presence_96", t: 1814.7000000029802}
- }
-
- WebsocketClient.send_event(socket, topic, "presence", payload)
-
- assert_receive %Message{event: "presence_diff", payload: %{"joins" => joins, "leaves" => %{}}, topic: ^topic}
-
- join_payload = joins |> Map.values() |> hd() |> get_in(["metas"]) |> hd()
- assert get_in(join_payload, ["name"]) == payload.payload.name
- assert get_in(join_payload, ["t"]) == payload.payload.t
- end
-
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
- test "private presence with read and write permissions will be able to track and receive presence changes",
- %{tenant: tenant, topic: topic} do
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{presence: %{key: "", enabled: true}, private: true}
- topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, topic, %{config: config})
- assert_receive %Message{event: "presence_state", payload: %{}, topic: ^topic}, 500
-
- payload = %{
- type: "presence",
- event: "TRACK",
- payload: %{name: "realtime_presence_96", t: 1814.7000000029802}
- }
-
- WebsocketClient.send_event(socket, topic, "presence", payload)
- refute_receive %Message{event: "phx_leave", topic: ^topic}
- assert_receive %Message{event: "presence_diff", payload: %{"joins" => joins, "leaves" => %{}}, topic: ^topic}, 500
- join_payload = joins |> Map.values() |> hd() |> get_in(["metas"]) |> hd()
- assert get_in(join_payload, ["name"]) == payload.payload.name
- assert get_in(join_payload, ["t"]) == payload.payload.t
- end
-
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence],
- mode: :distributed
- test "private presence with read and write permissions will be able to track and receive presence changes using a remote node",
- %{tenant: tenant, topic: topic} do
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{presence: %{key: "", enabled: true}, private: true}
- topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, topic, %{config: config})
- assert_receive %Message{event: "presence_state", payload: %{}, topic: ^topic}, 500
-
- payload = %{
- type: "presence",
- event: "TRACK",
- payload: %{name: "realtime_presence_96", t: 1814.7000000029802}
- }
-
- WebsocketClient.send_event(socket, topic, "presence", payload)
- refute_receive %Message{event: "phx_leave", topic: ^topic}
- assert_receive %Message{event: "presence_diff", payload: %{"joins" => joins, "leaves" => %{}}, topic: ^topic}, 500
- join_payload = joins |> Map.values() |> hd() |> get_in(["metas"]) |> hd()
- assert get_in(join_payload, ["name"]) == payload.payload.name
- assert get_in(join_payload, ["t"]) == payload.payload.t
- end
-
- @tag policies: [:authenticated_read_broadcast_and_presence]
- test "private presence with read permissions will be able to receive presence changes but won't be able to track",
- %{tenant: tenant, topic: topic} do
- {socket, _} = get_connection(tenant, "authenticated")
- {secondary_socket, _} = get_connection(tenant, "service_role")
- config = fn key -> %{presence: %{key: key, enabled: true}, private: true} end
- topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, topic, %{config: config.("authenticated")})
-
- payload = %{
- type: "presence",
- event: "TRACK",
- payload: %{name: "realtime_presence_96", t: 1814.7000000029802}
- }
-
- # This will be ignored
- WebsocketClient.send_event(socket, topic, "presence", payload)
-
- assert_receive %Message{topic: ^topic, event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state", payload: %{}, ref: nil, topic: ^topic}
- refute_receive %Message{event: "presence_diff", payload: _, ref: _, topic: ^topic}
-
- payload = %{
- type: "presence",
- event: "TRACK",
- payload: %{name: "realtime_presence_97", t: 1814.7000000029802}
- }
-
- # This will be tracked
- WebsocketClient.join(secondary_socket, topic, %{config: config.("service_role")})
- WebsocketClient.send_event(secondary_socket, topic, "presence", payload)
-
- assert_receive %Message{topic: ^topic, event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{topic: ^topic, event: "presence_diff", payload: %{"joins" => joins, "leaves" => %{}}}
- assert_receive %Message{event: "presence_state", payload: %{}, ref: nil, topic: ^topic}
-
- join_payload = joins |> Map.values() |> hd() |> get_in(["metas"]) |> hd()
- assert get_in(join_payload, ["name"]) == payload.payload.name
- assert get_in(join_payload, ["t"]) == payload.payload.t
-
- assert_receive %Message{topic: ^topic, event: "presence_diff"} = res
-
- assert join_payload =
- res
- |> Map.from_struct()
- |> get_in([:payload, "joins", "service_role", "metas"])
- |> hd()
-
- assert get_in(join_payload, ["name"]) == payload.payload.name
- assert get_in(join_payload, ["t"]) == payload.payload.t
- end
-
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
- test "handles lack of connection to database error on private channels", %{tenant: tenant, topic: topic} do
- topic = "realtime:#{topic}"
- {socket, _} = get_connection(tenant, "authenticated")
- WebsocketClient.join(socket, topic, %{config: %{private: true, presence: %{enabled: true}}})
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
- assert_receive %Message{event: "presence_state"}
-
- log =
- capture_log(fn ->
- :syn.update_registry(Connect, tenant.external_id, fn _pid, meta -> %{meta | conn: nil} end)
- payload = %{type: "presence", event: "TRACK", payload: %{name: "realtime_presence_96", t: 1814.7000000029802}}
- WebsocketClient.send_event(socket, topic, "presence", payload)
-
- refute_receive %Message{event: "presence_diff"}, 500
- # Waiting more than 5 seconds as this is the amount of time we will wait for the Connection to be ready
- refute_receive %Message{event: "phx_leave", topic: ^topic}, 6000
- end)
-
- assert log =~ "UnableToHandlePresence"
- end
-
- @tag policies: []
- test "lack of connection to database error does not impact public channels", %{tenant: tenant, topic: topic} do
- topic = "realtime:#{topic}"
- {socket, _} = get_connection(tenant, "authenticated")
- WebsocketClient.join(socket, topic, %{config: %{private: false, presence: %{enabled: true}}})
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
- assert_receive %Message{event: "presence_state"}
-
- log =
- capture_log(fn ->
- :syn.update_registry(Connect, tenant.external_id, fn _pid, meta -> %{meta | conn: nil} end)
- payload = %{type: "presence", event: "TRACK", payload: %{name: "realtime_presence_96", t: 1814.7000000029802}}
- WebsocketClient.send_event(socket, topic, "presence", payload)
-
- assert_receive %Message{event: "presence_diff"}, 500
- refute_receive %Message{event: "phx_leave", topic: ^topic}
- end)
-
- refute log =~ "UnableToHandlePresence"
- end
-
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
-
- test "presence enabled if param enabled is set in configuration for private channels", %{
- tenant: tenant,
- topic: topic
- } do
- {socket, _} = get_connection(tenant, "authenticated")
- topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, topic, %{config: %{private: true, presence: %{enabled: true}}})
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
- end
-
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
-
- test "presence disabled if param 'enabled' is set to false in configuration for private channels", %{
- tenant: tenant,
- topic: topic
- } do
- {socket, _} = get_connection(tenant, "authenticated")
- topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, topic, %{config: %{private: true, presence: %{enabled: false}}})
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- refute_receive %Message{event: "presence_state"}, 500
- end
-
- test "presence enabled if param enabled is set in configuration for public channels", %{
- tenant: tenant,
- topic: topic
- } do
- {socket, _} = get_connection(tenant, "authenticated")
- topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, topic, %{config: %{private: false, presence: %{enabled: true}}})
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
- end
-
- test "presence disabled if param 'enabled' is set to false in configuration for public channels", %{
- tenant: tenant,
- topic: topic
- } do
- {socket, _} = get_connection(tenant, "authenticated")
- topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, topic, %{config: %{private: false, presence: %{enabled: false}}})
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- refute_receive %Message{event: "presence_state"}, 500
- end
- end
-
- describe "token handling" do
- setup [:rls_context]
-
- @tag policies: [
- :authenticated_read_broadcast_and_presence,
- :authenticated_write_broadcast_and_presence
- ]
- test "badly formatted jwt token", %{tenant: tenant} do
- log =
- capture_log(fn ->
- WebsocketClient.connect(self(), uri(tenant), @serializer, [{"x-api-key", "bad_token"}])
- end)
-
- assert log =~ "MalformedJWT: The token provided is not a valid JWT"
- end
-
- test "invalid JWT with expired token", %{tenant: tenant} do
- log =
- capture_log(fn ->
- get_connection(tenant, "authenticated", %{:exp => System.system_time(:second) - 1000}, %{log_level: :info})
- end)
-
- assert log =~ "InvalidJWTToken: Token has expired"
- end
-
- test "token required the role key", %{tenant: tenant} do
- {:ok, token} = token_no_role(tenant)
-
- assert {:error, %{status_code: 403}} =
- WebsocketClient.connect(self(), uri(tenant), @serializer, [{"x-api-key", token}])
- end
-
- test "handles connection with valid api-header but ignorable access_token payload", %{tenant: tenant, topic: topic} do
- realtime_topic = "realtime:#{topic}"
-
- log =
- capture_log(fn ->
- {:ok, token} =
- generate_token(tenant, %{
- exp: System.system_time(:second) + 1000,
- role: "authenticated",
- sub: random_string()
- })
-
- {:ok, socket} = WebsocketClient.connect(self(), uri(tenant), @serializer, [{"x-api-key", token}])
-
- WebsocketClient.join(socket, realtime_topic, %{
- config: %{broadcast: %{self: true}, private: false},
- access_token: "sb_#{random_string()}"
- })
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
- end)
-
- refute log =~ "MalformedJWT: The token provided is not a valid JWT"
- end
-
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
- test "on new access_token and channel is private policies are reevaluated for read policy",
- %{tenant: tenant, topic: topic} do
- {socket, access_token} = get_connection(tenant, "authenticated")
-
- realtime_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, realtime_topic, %{
- config: %{broadcast: %{self: true}, private: true},
- access_token: access_token
- })
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
-
- {:ok, new_token} = token_valid(tenant, "anon")
-
- WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => new_token})
-
- error_message = "You do not have permissions to read from this Channel topic: #{topic}"
-
- assert_receive %Message{
- event: "system",
- payload: %{"channel" => ^topic, "extension" => "system", "message" => ^error_message, "status" => "error"},
- topic: ^realtime_topic
- }
-
- assert_receive %Message{event: "phx_close", topic: ^realtime_topic}
- end
-
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
- test "on new access_token and channel is private policies are reevaluated for write policy", %{
- topic: topic,
- tenant: tenant
- } do
- {socket, access_token} = get_connection(tenant, "authenticated")
- realtime_topic = "realtime:#{topic}"
- config = %{broadcast: %{self: true}, private: true}
- WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
-
- # Checks first send which will set write policy to true
- payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
- WebsocketClient.send_event(socket, realtime_topic, "broadcast", payload)
-
- assert_receive %Message{event: "broadcast", payload: ^payload, topic: ^realtime_topic}, 500
-
- # RLS policies changed to only allow read
- {:ok, db_conn} = Database.connect(tenant, "realtime_test")
- clean_table(db_conn, "realtime", "messages")
- create_rls_policies(db_conn, [:authenticated_read_broadcast_and_presence], %{topic: topic})
-
- # Set new token to recheck policies
- {:ok, new_token} =
- generate_token(tenant, %{exp: System.system_time(:second) + 1000, role: "authenticated", sub: random_string()})
-
- WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => new_token})
-
- # Send message to be ignored
- payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
- WebsocketClient.send_event(socket, realtime_topic, "broadcast", payload)
-
- refute_receive %Message{
- event: "broadcast",
- payload: ^payload,
- topic: ^realtime_topic
- },
- 1500
- end
-
- test "on new access_token and channel is public policies are not reevaluated", %{tenant: tenant, topic: topic} do
- {socket, access_token} = get_connection(tenant, "authenticated")
- {:ok, new_token} = token_valid(tenant, "anon")
- config = %{broadcast: %{self: true}, private: false}
- realtime_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
-
- WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => new_token})
-
- refute_receive %Message{}
- end
-
- test "on empty string access_token the socket sends an error message", %{tenant: tenant, topic: topic} do
- {socket, access_token} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: false}
- realtime_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
-
- WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => ""})
-
- assert_receive %Message{
- topic: ^realtime_topic,
- event: "system",
- payload: %{
- "extension" => "system",
- "message" => msg,
- "status" => "error"
- }
- }
-
- assert_receive %Message{event: "phx_close"}
- assert msg =~ "The token provided is not a valid JWT"
- end
-
- test "on expired access_token the socket sends an error message", %{tenant: tenant, topic: topic} do
- sub = random_string()
-
- {socket, access_token} = get_connection(tenant, "authenticated", %{sub: sub})
-
- config = %{broadcast: %{self: true}, private: false}
- realtime_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
- {:ok, token} = generate_token(tenant, %{:exp => System.system_time(:second) - 1000, sub: sub})
-
- log =
- capture_log([log_level: :warning], fn ->
- WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => token})
-
- assert_receive %Message{
- topic: ^realtime_topic,
- event: "system",
- payload: %{"extension" => "system", "message" => "Token has expired 1000 seconds ago", "status" => "error"}
- }
- end)
-
- assert log =~ "ChannelShutdown: Token has expired 1000 seconds ago"
- end
-
- test "ChannelShutdown include sub if available in jwt claims", %{tenant: tenant, topic: topic} do
- exp = System.system_time(:second) + 10_000
-
- {socket, access_token} = get_connection(tenant, "authenticated", %{exp: exp}, %{log_level: :warning})
- config = %{broadcast: %{self: true}, private: false}
- realtime_topic = "realtime:#{topic}"
- sub = random_string()
- WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
- {:ok, token} = generate_token(tenant, %{:exp => System.system_time(:second) - 1000, sub: sub})
-
- log =
- capture_log([level: :warning], fn ->
- WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => token})
-
- assert_receive %Message{event: "system"}, 1000
- end)
-
- assert log =~ "ChannelShutdown"
- assert log =~ "sub=#{sub}"
- end
-
- test "missing claims close connection", %{tenant: tenant, topic: topic} do
- {socket, access_token} = get_connection(tenant, "authenticated")
-
- config = %{broadcast: %{self: true}, private: false}
- realtime_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
- {:ok, token} = generate_token(tenant, %{:exp => System.system_time(:second) + 2000})
-
- # Update token to be a near expiring token
- WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => token})
-
- assert_receive %Message{
- event: "system",
- payload: %{
- "extension" => "system",
- "message" => "Fields `role` and `exp` are required in JWT",
- "status" => "error"
- }
- },
- 500
-
- assert_receive %Message{event: "phx_close"}
- end
-
- test "checks token periodically", %{tenant: tenant, topic: topic} do
- {socket, access_token} = get_connection(tenant, "authenticated")
-
- config = %{broadcast: %{self: true}, private: false}
- realtime_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
-
- {:ok, token} = generate_token(tenant, %{:exp => System.system_time(:second) + 2, role: "authenticated"})
-
- # Update token to be a near expiring token
- WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => token})
-
- # Awaits to see if connection closes automatically
- assert_receive %Message{
- event: "system",
- payload: %{"extension" => "system", "message" => msg, "status" => "error"}
- },
- 3000
-
- assert_receive %Message{event: "phx_close"}
-
- assert msg =~ "Token has expired"
- end
-
- test "token expires in between joins", %{tenant: tenant, topic: topic} do
- {socket, access_token} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: false}
- realtime_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
-
- {:ok, access_token} = generate_token(tenant, %{:exp => System.system_time(:second) + 1, role: "authenticated"})
-
- # token expires in between joins so it needs to be handled by the channel and not the socket
- Process.sleep(1000)
- realtime_topic = "realtime:#{topic}"
-
- log =
- capture_log(fn ->
- WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
-
- assert_receive %Message{
- event: "phx_reply",
- payload: %{
- "status" => "error",
- "response" => %{"reason" => "InvalidJWTToken: Token has expired 0 seconds ago"}
- },
- topic: ^realtime_topic
- },
- 500
- end)
-
- assert_receive %Message{event: "phx_close"}
- assert log =~ "#{tenant.external_id}"
- end
-
- test "token loses claims in between joins", %{tenant: tenant, topic: topic} do
- {socket, access_token} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: false}
- realtime_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
-
- {:ok, access_token} = generate_token(tenant, %{:exp => System.system_time(:second) + 10})
-
- # token breaks claims in between joins so it needs to be handled by the channel and not the socket
- realtime_topic = "realtime:#{topic}"
- WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
-
- assert_receive %Message{
- event: "phx_reply",
- payload: %{
- "status" => "error",
- "response" => %{
- "reason" => "InvalidJWTToken: Fields `role` and `exp` are required in JWT"
- }
- },
- topic: ^realtime_topic
- },
- 500
-
- assert_receive %Message{event: "phx_close"}
- end
-
- test "token is badly formatted in between joins", %{tenant: tenant, topic: topic} do
- {socket, access_token} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: false}
- realtime_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
-
- # token becomes a string in between joins so it needs to be handled by the channel and not the socket
- WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: "potato"})
-
- assert_receive %Message{
- event: "phx_reply",
- payload: %{
- "status" => "error",
- "response" => %{
- "reason" => "MalformedJWT: The token provided is not a valid JWT"
- }
- },
- topic: ^realtime_topic
- },
- 500
-
- assert_receive %Message{event: "phx_close"}
- end
-
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
- test "handles RPC error on token refreshed", %{tenant: tenant, topic: topic} do
- Authorization
- |> expect(:get_read_authorizations, fn conn, db_conn, context ->
- call_original(Authorization, :get_read_authorizations, [conn, db_conn, context])
- end)
- |> expect(:get_read_authorizations, fn _, _, _ -> {:error, "RPC Error"} end)
-
- {socket, access_token} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: true}
- realtime_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
-
- assert_receive %Phoenix.Socket.Message{event: "phx_reply"}, 500
- assert_receive %Phoenix.Socket.Message{event: "presence_state"}, 500
-
- # Update token to force update
- {:ok, access_token} =
- generate_token(tenant, %{:exp => System.system_time(:second) + 1000, role: "authenticated"})
-
- log =
- capture_log([log_level: :warning], fn ->
- WebsocketClient.send_event(socket, realtime_topic, "access_token", %{"access_token" => access_token})
-
- assert_receive %Phoenix.Socket.Message{
- event: "system",
- payload: %{
- "status" => "error",
- "extension" => "system",
- "message" => "Realtime was unable to connect to the project database"
- },
- topic: ^realtime_topic
- },
- 500
-
- assert_receive %Phoenix.Socket.Message{event: "phx_close", topic: ^realtime_topic}
- end)
-
- assert log =~ "Realtime was unable to connect to the project database"
- end
-
- test "on sb prefixed access_token the socket ignores the message and respects JWT expiry time", %{
- tenant: tenant,
- topic: topic
- } do
- sub = random_string()
-
- {socket, access_token} =
- get_connection(tenant, "authenticated", %{sub: sub, exp: System.system_time(:second) + 5})
-
- config = %{broadcast: %{self: true}, private: false}
- realtime_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config, access_token: access_token})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
-
- WebsocketClient.send_event(socket, realtime_topic, "access_token", %{
- "access_token" => "sb_publishable_-fake_key"
- })
-
- # Check if the new token does not trigger a shutdown
- refute_receive %Message{event: "system", topic: ^realtime_topic}, 100
-
- # Await to check if channel respects token expiry time
- assert_receive %Message{
- event: "system",
- payload: %{"extension" => "system", "message" => msg, "status" => "error"},
- topic: ^realtime_topic
- },
- 5000
-
- assert_receive %Message{event: "phx_close", topic: ^realtime_topic}
- msg =~ "Token has expired"
- end
- end
-
- describe "handle broadcast changes" do
- setup [:rls_context, :setup_trigger]
-
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
- test "broadcast insert event changes on insert in table with trigger", %{
- tenant: tenant,
- topic: topic,
- db_conn: db_conn,
- table_name: table_name
- } do
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: true}
- topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, topic, %{config: config})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
-
- value = random_string()
- Postgrex.query!(db_conn, "INSERT INTO #{table_name} (details) VALUES ($1)", [value])
-
- record = %{"details" => value, "id" => 1}
-
- assert_receive %Message{
- event: "broadcast",
- payload: %{
- "event" => "INSERT",
- "payload" => %{
- "old_record" => nil,
- "operation" => "INSERT",
- "record" => ^record,
- "schema" => "public",
- "table" => ^table_name
- },
- "type" => "broadcast"
- },
- topic: ^topic
- },
- 1000
- end
-
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence],
- requires_data: true
- test "broadcast update event changes on update in table with trigger", %{
- tenant: tenant,
- topic: topic,
- db_conn: db_conn,
- table_name: table_name
- } do
- value = random_string()
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: true}
- topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, topic, %{config: config})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
-
- new_value = random_string()
-
- Postgrex.query!(db_conn, "INSERT INTO #{table_name} (details) VALUES ($1)", [value])
- Postgrex.query!(db_conn, "UPDATE #{table_name} SET details = $1 WHERE details = $2", [new_value, value])
-
- old_record = %{"details" => value, "id" => 1}
- record = %{"details" => new_value, "id" => 1}
-
- assert_receive %Message{
- event: "broadcast",
- payload: %{
- "event" => "UPDATE",
- "payload" => %{
- "old_record" => ^old_record,
- "operation" => "UPDATE",
- "record" => ^record,
- "schema" => "public",
- "table" => ^table_name
- },
- "type" => "broadcast"
- },
- topic: ^topic
- },
- 1000
- end
-
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
- test "broadcast delete event changes on delete in table with trigger", %{
- tenant: tenant,
- topic: topic,
- db_conn: db_conn,
- table_name: table_name
- } do
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: true}
- topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, topic, %{config: config})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
-
- value = random_string()
-
- Postgrex.query!(db_conn, "INSERT INTO #{table_name} (details) VALUES ($1)", [value])
- Postgrex.query!(db_conn, "DELETE FROM #{table_name} WHERE details = $1", [value])
-
- record = %{"details" => value, "id" => 1}
-
- assert_receive %Message{
- event: "broadcast",
- payload: %{
- "event" => "DELETE",
- "payload" => %{
- "old_record" => ^record,
- "operation" => "DELETE",
- "record" => nil,
- "schema" => "public",
- "table" => ^table_name
- },
- "type" => "broadcast"
- },
- topic: ^topic
- },
- 1000
- end
-
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
- test "broadcast event when function 'send' is called with private topic", %{
- tenant: tenant,
- topic: topic,
- db_conn: db_conn
- } do
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: true}
- full_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, full_topic, %{config: config})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
-
- value = random_string()
- event = random_string()
-
- Postgrex.query!(
- db_conn,
- "SELECT realtime.send (json_build_object ('value', $1 :: text)::jsonb, $2 :: text, $3 :: text, TRUE::bool);",
- [value, event, topic]
- )
-
- assert_receive %Message{
- event: "broadcast",
- payload: %{
- "event" => ^event,
- "payload" => %{"value" => ^value},
- "type" => "broadcast"
- },
- topic: ^full_topic,
- join_ref: nil,
- ref: nil
- },
- 1000
- end
-
- test "broadcast event when function 'send' is called with public topic", %{
- tenant: tenant,
- topic: topic,
- db_conn: db_conn
- } do
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: false}
- full_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, full_topic, %{config: config})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
-
- value = random_string()
- event = random_string()
-
- Postgrex.query!(
- db_conn,
- "SELECT realtime.send (json_build_object ('value', $1 :: text)::jsonb, $2 :: text, $3 :: text, FALSE::bool);",
- [value, event, topic]
- )
-
- assert_receive %Message{
- event: "broadcast",
- payload: %{
- "event" => ^event,
- "payload" => %{"value" => ^value},
- "type" => "broadcast"
- },
- topic: ^full_topic
- },
- 1000
- end
- end
-
- describe "only private channels" do
- setup [:rls_context]
-
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
- test "user with only private channels enabled will not be able to join public channels", %{
- tenant: tenant,
- topic: topic
- } do
- change_tenant_configuration(tenant, :private_only, true)
- on_exit(fn -> change_tenant_configuration(tenant, :private_only, false) end)
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: false}
- topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, topic, %{config: config})
-
- assert_receive %Message{
- event: "phx_reply",
- payload: %{
- "response" => %{
- "reason" => "PrivateOnly: This project only allows private channels"
- },
- "status" => "error"
- }
- },
- 500
- end
-
- @tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
- test "user with only private channels enabled will be able to join private channels", %{
- tenant: tenant,
- topic: topic
- } do
- change_tenant_configuration(tenant, :private_only, true)
- on_exit(fn -> change_tenant_configuration(tenant, :private_only, false) end)
-
- Process.sleep(100)
-
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: true}
- topic = "realtime:#{topic}"
- WebsocketClient.join(socket, topic, %{config: config})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- end
- end
-
- describe "socket disconnect" do
- setup [:rls_context]
-
- test "tenant already suspended", %{topic: _topic} do
- tenant = Containers.checkout_tenant(run_migrations: true)
-
- log =
- capture_log(fn ->
- {:ok, _} = Realtime.Api.update_tenant(tenant, %{suspend: true})
- {:error, %Mint.WebSocket.UpgradeFailureError{}} = get_connection(tenant, "anon")
- refute_receive _any
- end)
-
- assert log =~ "RealtimeDisabledForTenant"
- end
-
- test "on jwks the socket closes and sends a system message", %{tenant: tenant, topic: topic} do
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: false}
- realtime_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
- tenant = Tenants.get_tenant_by_external_id(tenant.external_id)
- Realtime.Api.update_tenant(tenant, %{jwt_jwks: %{keys: ["potato"]}})
-
- assert_process_down(socket)
- end
-
- test "on jwt_secret the socket closes and sends a system message", %{tenant: tenant, topic: topic} do
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: false}
- realtime_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
-
- tenant = Tenants.get_tenant_by_external_id(tenant.external_id)
- Realtime.Api.update_tenant(tenant, %{jwt_secret: "potato"})
-
- assert_process_down(socket)
- end
-
- test "on private_only the socket closes and sends a system message", %{tenant: tenant, topic: topic} do
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: false}
- realtime_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
-
- tenant = Tenants.get_tenant_by_external_id(tenant.external_id)
- Realtime.Api.update_tenant(tenant, %{private_only: true})
-
- assert_process_down(socket)
- end
-
- test "on other param changes the socket won't close and no message is sent", %{tenant: tenant, topic: topic} do
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: false}
- realtime_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert_receive %Message{event: "presence_state"}, 500
-
- tenant = Tenants.get_tenant_by_external_id(tenant.external_id)
- Realtime.Api.update_tenant(tenant, %{max_concurrent_users: 100})
-
- refute_receive %Message{
- topic: ^realtime_topic,
- event: "system",
- payload: %{
- "extension" => "system",
- "message" => "Server requested disconnect",
- "status" => "ok"
- }
- },
- 500
-
- Process.sleep(500)
- assert :ok = WebsocketClient.send_heartbeat(socket)
- end
-
- test "invalid JWT with expired token", %{tenant: tenant} do
- log =
- capture_log(fn ->
- get_connection(tenant, "authenticated", %{:exp => System.system_time(:second) - 1000}, %{log_level: :info})
- end)
-
- assert log =~ "InvalidJWTToken: Token has expired"
- end
-
- test "check registry of SocketDisconnect and on distribution called, kill socket", %{tenant: tenant} do
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: false}
-
- for _ <- 1..10 do
- topic = "realtime:#{random_string()}"
- WebsocketClient.join(socket, topic, %{config: config})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 500
- assert_receive %Message{event: "presence_state", topic: ^topic}, 500
- end
-
- assert :ok = WebsocketClient.send_heartbeat(socket)
-
- SocketDisconnect.distributed_disconnect(tenant)
-
- assert_process_down(socket)
- end
- end
-
- describe "rate limits" do
- setup [:rls_context]
-
- test "max_concurrent_users limit respected", %{tenant: tenant} do
- %{max_concurrent_users: max_concurrent_users} = Tenants.get_tenant_by_external_id(tenant.external_id)
- change_tenant_configuration(tenant, :max_concurrent_users, 1)
-
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: false}
- realtime_topic = "realtime:#{random_string()}"
- WebsocketClient.join(socket, realtime_topic, %{config: config})
- WebsocketClient.join(socket, realtime_topic, %{config: config})
-
- assert_receive %Message{
- event: "phx_reply",
- payload: %{
- "response" => %{
- "reason" => "ConnectionRateLimitReached: Too many connected users"
- },
- "status" => "error"
- }
- },
- 500
-
- assert_receive %Message{event: "phx_close"}
-
- change_tenant_configuration(tenant, :max_concurrent_users, max_concurrent_users)
- end
-
- test "max_events_per_second limit respected", %{tenant: tenant} do
- %{max_events_per_second: max_events_per_second} = Tenants.get_tenant_by_external_id(tenant.external_id)
- on_exit(fn -> change_tenant_configuration(tenant, :max_events_per_second, max_events_per_second) end)
- RateCounter.stop(tenant.external_id)
-
- log =
- capture_log(fn ->
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: false, presence: %{enabled: false}}
- realtime_topic = "realtime:#{random_string()}"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config})
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^realtime_topic}, 500
-
- for _ <- 1..1000, Process.alive?(socket) do
- WebsocketClient.send_event(socket, realtime_topic, "broadcast", %{})
- Process.sleep(10)
- end
-
- # Wait for the rate counter to run logger function
- Process.sleep(1500)
- assert_receive %Message{event: "phx_close"}
- end)
-
- assert log =~ "MessagePerSecondRateLimitReached"
- end
-
- test "max_channels_per_client limit respected", %{tenant: tenant} do
- %{max_events_per_second: max_concurrent_users} = Tenants.get_tenant_by_external_id(tenant.external_id)
- change_tenant_configuration(tenant, :max_channels_per_client, 1)
-
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: false}
- realtime_topic_1 = "realtime:#{random_string()}"
- realtime_topic_2 = "realtime:#{random_string()}"
-
- WebsocketClient.join(socket, realtime_topic_1, %{config: config})
- WebsocketClient.join(socket, realtime_topic_2, %{config: config})
-
- assert_receive %Message{
- event: "phx_reply",
- payload: %{"response" => %{"postgres_changes" => []}, "status" => "ok"},
- topic: ^realtime_topic_1
- },
- 500
-
- assert_receive %Message{event: "presence_state", topic: ^realtime_topic_1}, 500
-
- assert_receive %Message{
- event: "phx_reply",
- payload: %{
- "status" => "error",
- "response" => %{
- "reason" => "ChannelRateLimitReached: Too many channels"
- }
- },
- topic: ^realtime_topic_2
- },
- 500
-
- refute_receive %Message{event: "phx_reply", topic: ^realtime_topic_2}, 500
- refute_receive %Message{event: "presence_state", topic: ^realtime_topic_2}, 500
-
- change_tenant_configuration(tenant, :max_channels_per_client, max_concurrent_users)
- end
-
- test "max_joins_per_second limit respected", %{tenant: tenant} do
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: false}
- realtime_topic = "realtime:#{random_string()}"
-
- log =
- capture_log(fn ->
- # Burst of joins that won't be blocked as RateCounter tick won't run
- for _ <- 1..300 do
- WebsocketClient.join(socket, realtime_topic, %{config: config})
- end
-
- # Wait for RateCounter tick
- Process.sleep(1000)
- # These ones will be blocked
- for _ <- 1..300 do
- WebsocketClient.join(socket, realtime_topic, %{config: config})
- end
-
- assert_receive %Message{
- event: "phx_reply",
- payload: %{
- "response" => %{
- "reason" => "ClientJoinRateLimitReached: Too many joins per second"
- },
- "status" => "error"
- }
- },
- 2000
- end)
-
- assert log =~
- "project=#{tenant.external_id} external_id=#{tenant.external_id} [critical] ClientJoinRateLimitReached: Too many joins per second"
-
- # Only one log message should be emitted
- # Splitting by the error message returns the error message and the rest of the log only
- assert length(String.split(log, "ClientJoinRateLimitReached")) == 2
- end
- end
-
- describe "authorization handling" do
- setup [:rls_context]
-
- @tag policies: [:read_matching_user_role, :write_matching_user_role], role: "anon"
- test "role policies are respected when accessing the channel", %{tenant: tenant} do
- {socket, _} = get_connection(tenant, "anon")
- config = %{broadcast: %{self: true}, private: true, presence: %{enabled: false}}
- topic = random_string()
- realtime_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^realtime_topic}, 500
-
- {socket, _} = get_connection(tenant, "potato")
- topic = random_string()
- realtime_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config})
- refute_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^realtime_topic}, 500
- end
-
- @tag policies: [:authenticated_read_matching_user_sub, :authenticated_write_matching_user_sub],
- sub: Ecto.UUID.generate()
- test "sub policies are respected when accessing the channel", %{tenant: tenant, sub: sub} do
- {socket, _} = get_connection(tenant, "authenticated", %{sub: sub})
- config = %{broadcast: %{self: true}, private: true, presence: %{enabled: false}}
- topic = random_string()
- realtime_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^realtime_topic}, 500
-
- {socket, _} = get_connection(tenant, "authenticated", %{sub: Ecto.UUID.generate()})
- topic = random_string()
- realtime_topic = "realtime:#{topic}"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config})
- refute_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^realtime_topic}, 500
- end
-
- @tag role: "authenticated",
- policies: [:broken_read_presence, :broken_write_presence]
-
- test "handle failing rls policy", %{tenant: tenant} do
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: true}
- topic = random_string()
- realtime_topic = "realtime:#{topic}"
-
- log =
- capture_log(fn ->
- WebsocketClient.join(socket, realtime_topic, %{config: config})
-
- msg = "Unauthorized: You do not have permissions to read from this Channel topic: #{topic}"
-
- assert_receive %Message{
- event: "phx_reply",
- payload: %{
- "response" => %{
- "reason" => ^msg
- },
- "status" => "error"
- }
- },
- 500
-
- refute_receive %Message{event: "phx_reply"}
- refute_receive %Message{event: "presence_state"}
- end)
-
- assert log =~ "RlsPolicyError"
- end
- end
-
- test "handle empty topic by closing the socket", %{tenant: tenant} do
- {socket, _} = get_connection(tenant, "authenticated")
- config = %{broadcast: %{self: true}, private: false}
- realtime_topic = "realtime:"
-
- WebsocketClient.join(socket, realtime_topic, %{config: config})
-
- assert_receive %Message{
- event: "phx_reply",
- payload: %{
- "response" => %{
- "reason" => "TopicNameRequired: You must provide a topic name"
- },
- "status" => "error"
- }
- },
- 500
-
- refute_receive %Message{event: "phx_reply"}
- refute_receive %Message{event: "presence_state"}
- end
-
- def handle_telemetry(event, %{sum: sum}, metadata, _) do
- tenant = metadata[:tenant]
- [key] = Enum.take(event, -1)
-
- Agent.update(TestCounter, fn state ->
- state = Map.put_new(state, tenant, %{joins: 0, events: 0, db_events: 0, presence_events: 0})
- update_in(state, [metadata[:tenant], key], fn v -> (v || 0) + sum end)
- end)
- end
-
- defp get_count(event, tenant) do
- [key] = Enum.take(event, -1)
-
- Agent.get(TestCounter, fn state -> get_in(state, [tenant, key]) || 0 end)
- end
-
- describe "billable events" do
- setup %{tenant: tenant} do
- events = [
- [:realtime, :rate_counter, :channel, :joins],
- [:realtime, :rate_counter, :channel, :events],
- [:realtime, :rate_counter, :channel, :db_events],
- [:realtime, :rate_counter, :channel, :presence_events]
- ]
-
- {:ok, _} =
- start_supervised(%{
- id: 1,
- start: {Agent, :start_link, [fn -> %{} end, [name: TestCounter]]}
- })
-
- RateCounter.stop(tenant.external_id)
- on_exit(fn -> :telemetry.detach(__MODULE__) end)
- :telemetry.attach_many(__MODULE__, events, &__MODULE__.handle_telemetry/4, [])
-
- {:ok, conn} = Database.connect(tenant, "realtime_test")
-
- # Setup for postgres changes
- Database.transaction(conn, fn db_conn ->
- queries = [
- "drop table if exists public.test",
- "drop publication if exists supabase_realtime_test",
- "create sequence if not exists test_id_seq;",
- """
- create table if not exists "public"."test" (
- "id" int4 not null default nextval('test_id_seq'::regclass),
- "details" text,
- primary key ("id"));
- """,
- "grant all on table public.test to anon;",
- "grant all on table public.test to postgres;",
- "grant all on table public.test to authenticated;",
- "create publication supabase_realtime_test for all tables"
- ]
-
- Enum.each(queries, &Postgrex.query!(db_conn, &1, []))
- end)
-
- :ok
- end
-
- test "join events", %{tenant: tenant} do
- external_id = tenant.external_id
- {socket, _} = get_connection(tenant)
- config = %{broadcast: %{self: true}, postgres_changes: [%{event: "*", schema: "public"}]}
- topic = "realtime:any"
-
- WebsocketClient.join(socket, topic, %{config: config})
-
- # Join events
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
- assert_receive %Message{topic: ^topic, event: "presence_state"}
- assert_receive %Message{topic: ^topic, event: "system"}, 5000
-
- # Wait for RateCounter to run
- Process.sleep(2000)
-
- # Expected billed
- # 1 joins due to two sockets
- # 1 presence events due to two sockets
- # 0 db events as no postgres changes used
- # 0 events broadcast is not used
- assert 1 = get_count([:realtime, :rate_counter, :channel, :joins], external_id)
- assert 1 = get_count([:realtime, :rate_counter, :channel, :presence_events], external_id)
- assert 0 = get_count([:realtime, :rate_counter, :channel, :db_events], external_id)
- assert 0 = get_count([:realtime, :rate_counter, :channel, :events], external_id)
- end
-
- test "broadcast events", %{tenant: tenant} do
- external_id = tenant.external_id
- {socket, _} = get_connection(tenant)
- config = %{broadcast: %{self: true}}
- topic = "realtime:any"
-
- WebsocketClient.join(socket, topic, %{config: config})
-
- # Join events
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
- assert_receive %Message{topic: ^topic, event: "presence_state"}
-
- # Add second client so we can test the "multiplication" of billable events
- {socket, _} = get_connection(tenant)
- WebsocketClient.join(socket, topic, %{config: config})
-
- # Join events
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
- assert_receive %Message{topic: ^topic, event: "presence_state"}
-
- # Broadcast event
- payload = %{"event" => "TEST", "payload" => %{"msg" => 1}, "type" => "broadcast"}
-
- for _ <- 1..5 do
- WebsocketClient.send_event(socket, topic, "broadcast", payload)
- assert_receive %Message{topic: ^topic, event: "broadcast", payload: ^payload}
- end
-
- # Wait for RateCounter to run
- Process.sleep(2000)
-
- # Expected billed
- # 2 joins due to two sockets
- # 2 presence events due to two sockets
- # 0 db events as no postgres changes used
- # 15 events as 5 events sent, 5 events received on client 1 and 5 events received on client 2
- assert 2 = get_count([:realtime, :rate_counter, :channel, :joins], external_id)
- assert 2 = get_count([:realtime, :rate_counter, :channel, :presence_events], external_id)
- assert 0 = get_count([:realtime, :rate_counter, :channel, :db_events], external_id)
- assert 15 = get_count([:realtime, :rate_counter, :channel, :events], external_id)
- end
-
- test "presence events", %{tenant: tenant} do
- external_id = tenant.external_id
- {socket, _} = get_connection(tenant)
- config = %{broadcast: %{self: true}, presence: %{enabled: true}}
- topic = "realtime:any"
-
- WebsocketClient.join(socket, topic, %{config: config})
-
- # Join events
- assert_receive %Message{event: "phx_reply", topic: ^topic}, 1000
- assert_receive %Message{topic: ^topic, event: "presence_state"}, 1000
-
- payload = %{
- type: "presence",
- event: "TRACK",
- payload: %{name: "realtime_presence_1", t: 1814.7000000029802}
- }
-
- WebsocketClient.send_event(socket, topic, "presence", payload)
- assert_receive %Message{event: "presence_diff", payload: %{"joins" => _, "leaves" => %{}}, topic: ^topic}
-
- # Presence events
- {socket, _} = get_connection(tenant, "authenticated")
- WebsocketClient.join(socket, topic, %{config: config})
-
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
- assert_receive %Message{topic: ^topic, event: "presence_state"}
-
- payload = %{
- type: "presence",
- event: "TRACK",
- payload: %{name: "realtime_presence_2", t: 1814.7000000029802}
- }
-
- WebsocketClient.send_event(socket, topic, "presence", payload)
- assert_receive %Message{event: "presence_diff", payload: %{"joins" => _, "leaves" => %{}}, topic: ^topic}
- assert_receive %Message{event: "presence_diff", payload: %{"joins" => _, "leaves" => %{}}, topic: ^topic}
-
- # Wait for RateCounter to run
- Process.sleep(2000)
-
- # Expected billed
- # 2 joins due to two sockets
- # 7 presence events
- # 0 db events as no postgres changes used
- # 0 events as no broadcast used
- assert 2 = get_count([:realtime, :rate_counter, :channel, :joins], external_id)
- assert 7 = get_count([:realtime, :rate_counter, :channel, :presence_events], external_id)
- assert 0 = get_count([:realtime, :rate_counter, :channel, :db_events], external_id)
- assert 0 = get_count([:realtime, :rate_counter, :channel, :events], external_id)
- end
-
- test "postgres changes events", %{tenant: tenant} do
- external_id = tenant.external_id
- {socket, _} = get_connection(tenant)
- config = %{broadcast: %{self: true}, postgres_changes: [%{event: "*", schema: "public"}]}
- topic = "realtime:any"
-
- WebsocketClient.join(socket, topic, %{config: config})
-
- # Join events
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
- assert_receive %Message{topic: ^topic, event: "presence_state"}, 500
- assert_receive %Message{topic: ^topic, event: "system"}, 5000
-
- # Add second user to test the "multiplication" of billable events
- {socket, _} = get_connection(tenant)
- WebsocketClient.join(socket, topic, %{config: config})
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
- assert_receive %Message{topic: ^topic, event: "presence_state"}, 500
- assert_receive %Message{topic: ^topic, event: "system"}, 5000
-
- tenant = Tenants.get_tenant_by_external_id(tenant.external_id)
- {:ok, conn} = Database.connect(tenant, "realtime_test", :stop)
-
- # Postgres Change events
- for _ <- 1..5, do: Postgrex.query!(conn, "insert into test (details) values ('test')", [])
-
- for _ <- 1..5 do
- assert_receive %Message{
- topic: ^topic,
- event: "postgres_changes",
- payload: %{"data" => %{"schema" => "public", "table" => "test", "type" => "INSERT"}}
- },
- 5000
- end
-
- # Wait for RateCounter to run
- Process.sleep(2000)
-
- # Expected billed
- # 2 joins due to two sockets
- # 2 presence events due to two sockets
- # 10 db events due to 5 inserts events sent to client 1 and 5 inserts events sent to client 2
- # 0 events as no broadcast used
- assert 2 = get_count([:realtime, :rate_counter, :channel, :joins], external_id)
- assert 2 = get_count([:realtime, :rate_counter, :channel, :presence_events], external_id)
- assert 10 = get_count([:realtime, :rate_counter, :channel, :db_events], external_id)
- assert 0 = get_count([:realtime, :rate_counter, :channel, :events], external_id)
- end
-
- test "postgres changes error events", %{tenant: tenant} do
- external_id = tenant.external_id
- {socket, _} = get_connection(tenant)
- config = %{broadcast: %{self: true}, postgres_changes: [%{event: "*", schema: "none"}]}
- topic = "realtime:any"
-
- WebsocketClient.join(socket, topic, %{config: config})
-
- # Join events
- assert_receive %Message{event: "phx_reply", payload: %{"status" => "ok"}, topic: ^topic}, 300
- assert_receive %Message{topic: ^topic, event: "presence_state"}, 500
- assert_receive %Message{topic: ^topic, event: "system"}, 5000
-
- # Wait for RateCounter to run
- Process.sleep(2000)
-
- # Expected billed
- # 1 joins due to one socket
- # 1 presence events due to one socket
- # 0 db events
- # 0 events as no broadcast used
- assert 1 = get_count([:realtime, :rate_counter, :channel, :joins], external_id)
- assert 1 = get_count([:realtime, :rate_counter, :channel, :presence_events], external_id)
- assert 0 = get_count([:realtime, :rate_counter, :channel, :db_events], external_id)
- assert 0 = get_count([:realtime, :rate_counter, :channel, :events], external_id)
- end
- end
-
- test "tracks and untracks properly channels", %{tenant: tenant} do
- assert [] = Tracker.list_pids()
-
- {socket, _} = get_connection(tenant)
- config = %{broadcast: %{self: true}, private: false, presence: %{enabled: false}}
-
- topics =
- for _ <- 1..10 do
- topic = "realtime:#{random_string()}"
- :ok = WebsocketClient.join(socket, topic, %{config: config})
- assert_receive %Message{topic: ^topic, event: "phx_reply"}, 500
- topic
- end
-
- assert [{_pid, count}] = Tracker.list_pids()
- assert count == length(topics)
-
- for topic <- topics do
- :ok = WebsocketClient.leave(socket, topic, %{})
- assert_receive %Message{topic: ^topic, event: "phx_close"}, 500
- end
-
- # wait to trigger tracker
- assert_process_down(socket, 5000)
- assert [] = Tracker.list_pids()
- end
-
- test "failed connections are present in tracker with counter counter lower than 0 so they are actioned on by tracker",
- %{tenant: tenant} do
- assert [] = Tracker.list_pids()
-
- {socket, _} = get_connection(tenant)
- config = %{broadcast: %{self: true}, private: true, presence: %{enabled: false}}
-
- for _ <- 1..10 do
- topic = "realtime:#{random_string()}"
- :ok = WebsocketClient.join(socket, topic, %{config: config})
- assert_receive %Message{topic: ^topic, event: "phx_reply", payload: %{"status" => "error"}}, 500
- end
-
- assert [{_pid, count}] = Tracker.list_pids()
- assert count == 0
- end
-
- test "failed connections but one succeeds properly tracks",
- %{tenant: tenant} do
- assert [] = Tracker.list_pids()
-
- {socket, _} = get_connection(tenant)
- topic = "realtime:#{random_string()}"
-
- :ok =
- WebsocketClient.join(socket, topic, %{
- config: %{broadcast: %{self: true}, private: false, presence: %{enabled: false}}
- })
-
- assert_receive %Message{topic: ^topic, event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert [{_pid, count}] = Tracker.list_pids()
- assert count == 1
-
- for _ <- 1..10 do
- topic = "realtime:#{random_string()}"
-
- :ok =
- WebsocketClient.join(socket, topic, %{
- config: %{broadcast: %{self: true}, private: true, presence: %{enabled: false}}
- })
-
- assert_receive %Message{topic: ^topic, event: "phx_reply", payload: %{"status" => "error"}}, 500
- end
-
- topic = "realtime:#{random_string()}"
-
- :ok =
- WebsocketClient.join(socket, topic, %{
- config: %{broadcast: %{self: true}, private: false, presence: %{enabled: false}}
- })
-
- assert_receive %Message{topic: ^topic, event: "phx_reply", payload: %{"status" => "ok"}}, 500
- assert [{_pid, count}] = Tracker.list_pids()
- assert count == 2
- end
-
- defp mode(%{mode: :distributed}) do
- tenant = Api.get_tenant_by_external_id("dev_tenant")
-
- RateCounter.stop(tenant.external_id)
- :ets.delete_all_objects(Tracker.table_name())
-
- Connect.shutdown(tenant.external_id)
- # Sleeping so that syn can forget about this Connect process
- Process.sleep(100)
-
- on_exit(fn ->
- Connect.shutdown(tenant.external_id)
- # Sleeping so that syn can forget about this Connect process
- Process.sleep(100)
- end)
-
- on_exit(fn -> Connect.shutdown(tenant.external_id) end)
- {:ok, node} = Clustered.start()
- region = Tenants.region(tenant)
- {:ok, db_conn} = :erpc.call(node, Connect, :connect, ["dev_tenant", region])
- assert Connect.ready?(tenant.external_id)
-
- assert node(db_conn) == node
- %{db_conn: db_conn, node: node, tenant: tenant}
- end
-
- defp mode(_) do
- tenant = Containers.checkout_tenant(run_migrations: true)
- RateCounter.stop(tenant.external_id)
-
- :ets.delete_all_objects(Tracker.table_name())
- Realtime.Tenants.Connect.shutdown(tenant.external_id)
- # Sleeping so that syn can forget about this Connect process
- Process.sleep(100)
- {:ok, db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
- assert Connect.ready?(tenant.external_id)
- %{db_conn: db_conn, tenant: tenant}
- end
-
- defp rls_context(%{tenant: tenant} = context) do
- {:ok, db_conn} = Database.connect(tenant, "realtime_test", :stop)
- clean_table(db_conn, "realtime", "messages")
- topic = Map.get(context, :topic, random_string())
- policies = Map.get(context, :policies, nil)
- role = Map.get(context, :role, nil)
- sub = Map.get(context, :sub, nil)
-
- if policies, do: create_rls_policies(db_conn, policies, %{topic: topic, role: role, sub: sub})
-
- %{topic: topic, role: role, sub: sub}
- end
-
- defp setup_trigger(%{tenant: tenant, topic: topic}) do
- {:ok, db_conn} = Database.connect(tenant, "realtime_test", :stop)
- random_name = String.downcase("test_#{random_string()}")
- query = "CREATE TABLE #{random_name} (id serial primary key, details text)"
- Postgrex.query!(db_conn, query, [])
-
- query = """
- CREATE OR REPLACE FUNCTION broadcast_changes_for_table_#{random_name}_trigger ()
- RETURNS TRIGGER
- AS $$
- DECLARE
- topic text;
- BEGIN
- topic = '#{topic}';
- PERFORM
- realtime.broadcast_changes (topic, TG_OP, TG_OP, TG_TABLE_NAME, TG_TABLE_SCHEMA, NEW, OLD, TG_LEVEL);
- RETURN NULL;
- END;
- $$
- LANGUAGE plpgsql;
- """
-
- Postgrex.query!(db_conn, query, [])
-
- query = """
- CREATE TRIGGER broadcast_changes_for_#{random_name}_table
- AFTER INSERT OR UPDATE OR DELETE ON #{random_name}
- FOR EACH ROW
- EXECUTE FUNCTION broadcast_changes_for_table_#{random_name}_trigger ();
- """
-
- Postgrex.query!(db_conn, query, [])
-
- on_exit(fn ->
- {:ok, db_conn} = Database.connect(tenant, "realtime_test", :stop)
- query = "DROP TABLE #{random_name} CASCADE"
- Postgrex.query!(db_conn, query, [])
- end)
-
- %{table_name: random_name}
- end
-
- defp change_tenant_configuration(%Tenant{external_id: external_id}, limit, value) do
- external_id
- |> Realtime.Tenants.get_tenant_by_external_id()
- |> Realtime.Api.Tenant.changeset(%{limit => value})
- |> Realtime.Repo.update!()
-
- Realtime.Tenants.Cache.invalidate_tenant_cache(external_id)
- end
-
- defp assert_process_down(pid, timeout \\ 100) do
- ref = Process.monitor(pid)
- assert_receive {:DOWN, ^ref, :process, ^pid, _reason}, timeout
- end
-end
diff --git a/test/integration/tests.ts b/test/integration/tests.ts
new file mode 100644
index 000000000..036255f17
--- /dev/null
+++ b/test/integration/tests.ts
@@ -0,0 +1,204 @@
+import { RealtimeClient } from "npm:@supabase/supabase-js@latest";
+import { sleep } from "https://deno.land/x/sleep/mod.ts";
+import { describe, it } from "jsr:@std/testing/bdd";
+import { assertEquals } from "jsr:@std/assert";
+import { deadline } from "jsr:@std/async/deadline";
+
+const withDeadline = <Fn extends (...args: any[]) => Promise<any>>(fn: Fn, ms: number): Fn =>
+  ((...args) => deadline(fn(...args), ms)) as Fn;
+
+const url = "http://realtime-dev.localhost:4100/socket";
+const serviceRoleKey = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjIwNzU3NzYzODIsInJlZiI6IjEyNy4wLjAuMSIsInJvbGUiOiJzZXJ2aWNlX3JvbGUiLCJpYXQiOjE3NjA3NzYzODJ9.nupH8pnrOTgK9Xaq8-D4Ry-yQ-PnlXEagTVywQUJVIE"
+const apiKey = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjIwNzU2NjE3MjEsInJlZiI6IjEyNy4wLjAuMSIsInJvbGUiOiJhdXRoZW50aWNhdGVkIiwiaWF0IjoxNzYwNjYxNzIxfQ.PxpBoelC9vWQ2OVhmwKBUDEIKgX7MpgSdsnmXw7UdYk";
+
+const realtimeV1 = { vsn: '1.0.0', params: { apikey: apiKey } , heartbeatIntervalMs: 5000, timeout: 5000 };
+const realtimeV2 = { vsn: '2.0.0', params: { apikey: apiKey } , heartbeatIntervalMs: 5000, timeout: 5000 };
+const realtimeServiceRole = { vsn: '2.0.0', logger: console.log, params: { apikey: serviceRoleKey } , heartbeatIntervalMs: 5000, timeout: 5000 };
+
+let clientV1: RealtimeClient | null;
+let clientV2: RealtimeClient | null;
+
+describe("broadcast extension", { sanitizeOps: false, sanitizeResources: false }, () => {
+ it("users with different versions can receive self broadcast", withDeadline(async () => {
+ clientV1 = new RealtimeClient(url, realtimeV1)
+ clientV2 = new RealtimeClient(url, realtimeV2)
+ let resultV1 = null;
+ let resultV2 = null;
+ let event = crypto.randomUUID();
+ let topic = "topic:" + crypto.randomUUID();
+ let expectedPayload = { message: crypto.randomUUID() };
+ const config = { config: { broadcast: { ack: true, self: true } } };
+
+ const channelV1 = clientV1
+ .channel(topic, config)
+ .on("broadcast", { event }, ({ payload }) => (resultV1 = payload))
+ .subscribe();
+
+ const channelV2 = clientV2
+ .channel(topic, config)
+ .on("broadcast", { event }, ({ payload }) => (resultV2 = payload))
+ .subscribe();
+
+ while (channelV1.state != "joined" || channelV2.state != "joined") await sleep(0.2);
+
+ // Send from V1 client - both should receive
+ await channelV1.send({
+ type: "broadcast",
+ event,
+ payload: expectedPayload,
+ });
+
+ while (resultV1 == null || resultV2 == null) await sleep(0.2);
+
+ assertEquals(resultV1, expectedPayload);
+ assertEquals(resultV2, expectedPayload);
+
+ // Reset results for second test
+ resultV1 = null;
+ resultV2 = null;
+ let expectedPayload2 = { message: crypto.randomUUID() };
+
+ // Send from V2 client - both should receive
+ await channelV2.send({
+ type: "broadcast",
+ event,
+ payload: expectedPayload2,
+ });
+
+ while (resultV1 == null || resultV2 == null) await sleep(0.2);
+
+ assertEquals(resultV1, expectedPayload2);
+ assertEquals(resultV2, expectedPayload2);
+
+ await channelV1.unsubscribe();
+ await channelV2.unsubscribe();
+
+ await stopClient(clientV1);
+ await stopClient(clientV2);
+ clientV1 = null;
+ clientV2 = null;
+ }, 5000));
+
+ it("v2 can send/receive binary payload", withDeadline(async () => {
+ clientV2 = new RealtimeClient(url, realtimeV2)
+ let result = null;
+ let event = crypto.randomUUID();
+ let topic = "topic:" + crypto.randomUUID();
+ const expectedPayload = new ArrayBuffer(2);
+ const uint8 = new Uint8Array(expectedPayload); // View the buffer as unsigned 8-bit integers
+ uint8[0] = 125;
+ uint8[1] = 255;
+
+ const config = { config: { broadcast: { ack: true, self: true } } };
+
+ const channelV2 = clientV2
+ .channel(topic, config)
+ .on("broadcast", { event }, ({ payload }) => (result = payload))
+ .subscribe();
+
+ while (channelV2.state != "joined") await sleep(0.2);
+
+ await channelV2.send({
+ type: "broadcast",
+ event,
+ payload: expectedPayload,
+ });
+
+ while (result == null) await sleep(0.2);
+
+ assertEquals(result, expectedPayload);
+
+ await channelV2.unsubscribe();
+
+ await stopClient(clientV2);
+ clientV2 = null;
+ }, 5000));
+
+ it("users with different versions can receive broadcasts from endpoint", withDeadline(async () => {
+ clientV1 = new RealtimeClient(url, realtimeV1)
+ clientV2 = new RealtimeClient(url, realtimeV2)
+ let resultV1 = null;
+ let resultV2 = null;
+ let event = crypto.randomUUID();
+ let topic = "topic:" + crypto.randomUUID();
+ let expectedPayload = { message: crypto.randomUUID() };
+ const config = { config: { broadcast: { ack: true, self: true } } };
+
+ const channelV1 = clientV1
+ .channel(topic, config)
+ .on("broadcast", { event }, ({ payload }) => (resultV1 = payload))
+ .subscribe();
+
+ const channelV2 = clientV2
+ .channel(topic, config)
+ .on("broadcast", { event }, ({ payload }) => (resultV2 = payload))
+ .subscribe();
+
+ while (channelV1.state != "joined" || channelV2.state != "joined") await sleep(0.2);
+
+ // Send from unsubscribed channel - both should receive
+ new RealtimeClient(url, realtimeServiceRole).channel(topic, config).httpSend(event, expectedPayload);
+
+ while (resultV1 == null || resultV2 == null) await sleep(0.2);
+
+ assertEquals(resultV1, expectedPayload);
+ assertEquals(resultV2, expectedPayload);
+
+ await channelV1.unsubscribe();
+ await channelV2.unsubscribe();
+
+ await stopClient(clientV1);
+ await stopClient(clientV2);
+ clientV1 = null;
+ clientV2 = null;
+ }, 5000));
+});
+
+// describe("presence extension", () => {
+// it("user is able to receive presence updates", async () => {
+// let result: any = [];
+// let error = null;
+// let topic = "topic:" + crypto.randomUUID();
+// let keyV1 = "key V1";
+// let keyV2 = "key V2";
+//
+// const configV1 = { config: { presence: { keyV1 } } };
+// const configV2 = { config: { presence: { keyV2 } } };
+//
+// const channelV1 = clientV1
+// .channel(topic, configV1)
+// .on("presence", { event: "join" }, ({ key, newPresences }) =>
+// result.push({ key, newPresences })
+// )
+// .subscribe();
+//
+// const channelV2 = clientV2
+// .channel(topic, configV2)
+// .on("presence", { event: "join" }, ({ key, newPresences }) =>
+// result.push({ key, newPresences })
+// )
+// .subscribe();
+//
+// while (channelV1.state != "joined" || channelV2.state != "joined") await sleep(0.2);
+//
+// const resV1 = await channelV1.track({ key: keyV1 });
+// const resV2 = await channelV2.track({ key: keyV2 });
+//
+// if (resV1 == "timed out" || resV2 == "timed out") error = resV1 || resV2;
+//
+// sleep(2.2);
+//
+// // FIXME write assertions
+// console.log(result)
+// let presences = result[0].newPresences[0];
+// assertEquals(result[0].key, keyV1);
+// assertEquals(presences.message, message);
+// assertEquals(error, null);
+// });
+// });
+
+async function stopClient(client: RealtimeClient | null) {
+ if (client) {
+ await client.removeAllChannels();
+ }
+}
diff --git a/test/integration/tracker_test.exs b/test/integration/tracker_test.exs
new file mode 100644
index 000000000..3f232d4bd
--- /dev/null
+++ b/test/integration/tracker_test.exs
@@ -0,0 +1,96 @@
+defmodule Integration.TrackerTest do
+ # Changing the Tracker ETS table
+ use RealtimeWeb.ConnCase, async: false
+
+ alias RealtimeWeb.RealtimeChannel.Tracker
+ alias Phoenix.Socket.Message
+ alias Realtime.Tenants.Connect
+ alias Realtime.Integration.WebsocketClient
+
+ setup do
+ tenant = Containers.checkout_tenant(run_migrations: true)
+ :ets.delete_all_objects(Tracker.table_name())
+
+ {:ok, db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
+ assert Connect.ready?(tenant.external_id)
+ %{db_conn: db_conn, tenant: tenant}
+ end
+
+ test "tracks and untracks properly channels", %{tenant: tenant} do
+ {socket, _} = get_connection(tenant)
+ config = %{broadcast: %{self: true}, private: false, presence: %{enabled: false}}
+
+ topics =
+ for _ <- 1..10 do
+ topic = "realtime:#{random_string()}"
+ :ok = WebsocketClient.join(socket, topic, %{config: config})
+ assert_receive %Message{topic: ^topic, event: "phx_reply"}, 500
+ topic
+ end
+
+ for topic <- topics do
+ :ok = WebsocketClient.leave(socket, topic, %{})
+ assert_receive %Message{topic: ^topic, event: "phx_close"}, 500
+ end
+
+ start_supervised!({Tracker, check_interval_in_ms: 100})
+ # wait to trigger tracker
+ assert_process_down(socket, 1000)
+ end
+
+ test "failed connections are present in tracker with counter lower than 0 so they are actioned on by tracker", %{
+ tenant: tenant
+ } do
+ assert [] = Tracker.list_pids()
+
+ {socket, _} = get_connection(tenant)
+ config = %{broadcast: %{self: true}, private: true, presence: %{enabled: false}}
+
+ for _ <- 1..10 do
+ topic = "realtime:#{random_string()}"
+ :ok = WebsocketClient.join(socket, topic, %{config: config})
+ assert_receive %Message{topic: ^topic, event: "phx_reply", payload: %{"status" => "error"}}, 500
+ end
+
+ assert [{_pid, count}] = Tracker.list_pids()
+ assert count == 0
+ end
+
+ test "failed connections but one succeeds properly tracks", %{tenant: tenant} do
+ assert [] = Tracker.list_pids()
+
+ {socket, _} = get_connection(tenant)
+ topic = "realtime:#{random_string()}"
+
+ :ok =
+ WebsocketClient.join(socket, topic, %{
+ config: %{broadcast: %{self: true}, private: false, presence: %{enabled: false}}
+ })
+
+ assert_receive %Message{topic: ^topic, event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert [{_pid, count}] = Tracker.list_pids()
+ assert count == 1
+
+ for _ <- 1..10 do
+ topic = "realtime:#{random_string()}"
+
+ :ok =
+ WebsocketClient.join(socket, topic, %{
+ config: %{broadcast: %{self: true}, private: true, presence: %{enabled: false}}
+ })
+
+ assert_receive %Message{topic: ^topic, event: "phx_reply", payload: %{"status" => "error"}}, 500
+ end
+
+ topic = "realtime:#{random_string()}"
+
+ :ok =
+ WebsocketClient.join(socket, topic, %{
+ config: %{broadcast: %{self: true}, private: false, presence: %{enabled: false}}
+ })
+
+ assert_receive %Message{topic: ^topic, event: "phx_reply", payload: %{"status" => "ok"}}, 500
+ assert [{_pid, count}] = Tracker.list_pids()
+ assert count == 2
+ end
+end
diff --git a/test/realtime/adapters/postgres/protocol_test.exs b/test/realtime/adapters/postgres/protocol_test.exs
index 778a96244..3f4a17abc 100644
--- a/test/realtime/adapters/postgres/protocol_test.exs
+++ b/test/realtime/adapters/postgres/protocol_test.exs
@@ -1,6 +1,9 @@
defmodule Realtime.Adapters.Postgres.ProtocolTest do
use ExUnit.Case, async: true
+
alias Realtime.Adapters.Postgres.Protocol
+ alias Realtime.Adapters.Postgres.Protocol.Write
+ alias Realtime.Adapters.Postgres.Protocol.KeepAlive
test "defguard is_write/1" do
require Protocol
@@ -13,4 +16,70 @@ defmodule Realtime.Adapters.Postgres.ProtocolTest do
assert Protocol.is_keep_alive("k")
refute Protocol.is_keep_alive("w")
end
+
+ describe "parse/1" do
+ test "parses a write message" do
+ wal_start = 100
+ wal_end = 200
+ clock = 300
+ message = "some wal data"
+
+ binary = <<?w, wal_start::64, wal_end::64, clock::64, message::binary>>
+
+ assert %Write{
+ server_wal_start: ^wal_start,
+ server_wal_end: ^wal_end,
+ server_system_clock: ^clock,
+ message: ^message
+ } = Protocol.parse(binary)
+ end
+
+ test "parses a keep alive message with reply now" do
+ wal_end = 500
+ clock = 600
+
+ binary = <<?k, wal_end::64, clock::64, 1>>
+
+ assert %KeepAlive{wal_end: ^wal_end, clock: ^clock, reply: :now} = Protocol.parse(binary)
+ end
+
+ test "parses a keep alive message with reply later" do
+ wal_end = 500
+ clock = 600
+
+ binary = <<?k, wal_end::64, clock::64, 0>>
+
+ assert %KeepAlive{wal_end: ^wal_end, clock: ^clock, reply: :later} = Protocol.parse(binary)
+ end
+ end
+
+ describe "standby_status/5" do
+ test "returns binary message with reply now" do
+ [message] = Protocol.standby_status(100, 200, 300, :now, 400)
+
+ assert <<?r, 100::64, 200::64, 300::64, 400::64, 1>> = message
+ end
+
+ test "returns binary message with reply later" do
+ [message] = Protocol.standby_status(100, 200, 300, :later, 400)
+
+ assert <<?r, 100::64, 200::64, 300::64, 400::64, 0>> = message
+ end
+
+ test "uses current_time when clock is nil" do
+ [message] = Protocol.standby_status(100, 200, 300, :now)
+
+ assert <<?r, 100::64, 200::64, 300::64, _clock::64, 1>> = message
+ end
+ end
+
+ test "hold/0 returns empty list" do
+ assert Protocol.hold() == []
+ end
+
+ test "current_time/0 returns a positive integer" do
+ time = Protocol.current_time()
+ assert is_integer(time)
+ assert time > 0
+ end
end
diff --git a/test/realtime/api/extensions_test.exs b/test/realtime/api/extensions_test.exs
new file mode 100644
index 000000000..3cec96703
--- /dev/null
+++ b/test/realtime/api/extensions_test.exs
@@ -0,0 +1,106 @@
+defmodule Realtime.Api.ExtensionsTest do
+ use ExUnit.Case, async: true
+
+ alias Realtime.Api.Extensions
+
+ describe "changeset/2 with nil type" do
+ test "skips default settings merge" do
+ changeset = Extensions.changeset(%Extensions{}, %{"settings" => %{"foo" => "bar"}})
+ assert changeset.changes[:settings] == %{"foo" => "bar"}
+ end
+
+ test "validates required fields" do
+ changeset = Extensions.changeset(%Extensions{}, %{})
+ refute changeset.valid?
+ assert {"can't be blank", _} = changeset.errors[:type]
+ assert {"can't be blank", _} = changeset.errors[:settings]
+ end
+ end
+
+ describe "changeset/2 with type" do
+ test "merges default settings for postgres_cdc_rls" do
+ attrs = %{
+ "type" => "postgres_cdc_rls",
+ "settings" => %{
+ "region" => "us-east-1",
+ "db_host" => "localhost",
+ "db_name" => "postgres",
+ "db_user" => "user",
+ "db_port" => "5432",
+ "db_password" => "pass"
+ }
+ }
+
+ changeset = Extensions.changeset(%Extensions{}, attrs)
+ settings = changeset.changes[:settings]
+
+ assert settings["publication"] == "supabase_realtime"
+ assert settings["slot_name"] == "supabase_realtime_replication_slot"
+ assert settings["region"] == "us-east-1"
+ end
+ end
+
+ describe "validate_required_settings/2" do
+ test "adds error when required field is nil" do
+ required = [{"db_host", &is_binary/1, false}]
+
+ changeset =
+ %Extensions{}
+ |> Ecto.Changeset.cast(%{type: "test", settings: %{}}, [:type, :settings])
+ |> Extensions.validate_required_settings(required)
+
+ refute changeset.valid?
+ assert {"db_host can't be blank", []} = changeset.errors[:settings]
+ end
+
+ test "adds error when checker function fails" do
+ required = [{"db_port", &is_binary/1, false}]
+
+ changeset =
+ %Extensions{}
+ |> Ecto.Changeset.cast(%{type: "test", settings: %{"db_port" => 5432}}, [:type, :settings])
+ |> Extensions.validate_required_settings(required)
+
+ refute changeset.valid?
+ assert {"db_port is invalid", []} = changeset.errors[:settings]
+ end
+
+ test "passes when all required fields are valid" do
+ required = [{"db_host", &is_binary/1, false}]
+
+ changeset =
+ %Extensions{}
+ |> Ecto.Changeset.cast(%{type: "test", settings: %{"db_host" => "localhost"}}, [:type, :settings])
+ |> Extensions.validate_required_settings(required)
+
+ assert changeset.valid?
+ end
+ end
+
+ describe "encrypt_settings/2" do
+ test "encrypts fields marked for encryption" do
+ required = [{"db_password", &is_binary/1, true}]
+
+ changeset =
+ %Extensions{}
+ |> Ecto.Changeset.cast(%{type: "test", settings: %{"db_password" => "secret"}}, [:type, :settings])
+ |> Extensions.encrypt_settings(required)
+
+ settings = Ecto.Changeset.get_change(changeset, :settings)
+ assert settings["db_password"] != "secret"
+ assert Realtime.Crypto.decrypt!(settings["db_password"]) == "secret"
+ end
+
+ test "does not modify fields not marked for encryption" do
+ required = [{"region", &is_binary/1, false}]
+
+ changeset =
+ %Extensions{}
+ |> Ecto.Changeset.cast(%{type: "test", settings: %{"region" => "us-east-1"}}, [:type, :settings])
+ |> Extensions.encrypt_settings(required)
+
+ settings = Ecto.Changeset.get_change(changeset, :settings)
+ assert settings["region"] == "us-east-1"
+ end
+ end
+end
diff --git a/test/realtime/api_test.exs b/test/realtime/api_test.exs
index 1c4a816b0..312954965 100644
--- a/test/realtime/api_test.exs
+++ b/test/realtime/api_test.exs
@@ -1,34 +1,37 @@
defmodule Realtime.ApiTest do
- use Realtime.DataCase, async: false
+ use Realtime.DataCase, async: true
use Mimic
alias Realtime.Api
- alias Realtime.Api.Extensions
+ alias Realtime.Api.Extensions, as: ApiExtensions
alias Realtime.Api.Tenant
alias Realtime.Crypto
alias Realtime.GenCounter
alias Realtime.RateCounter
alias Realtime.Tenants.Connect
+ alias Extensions.PostgresCdcRls
@db_conf Application.compile_env(:realtime, Realtime.Repo)
- setup do
- tenant1 = Containers.checkout_tenant(run_migrations: true)
- tenant2 = Containers.checkout_tenant(run_migrations: true)
- Api.update_tenant(tenant1, %{max_concurrent_users: 10_000_000})
- Api.update_tenant(tenant2, %{max_concurrent_users: 20_000_000})
-
- %{tenants: Api.list_tenants(), tenant: tenant1}
+ defp create_tenants(_) do
+ tenant1 = tenant_fixture(%{max_concurrent_users: 10_000_000})
+ tenant2 = tenant_fixture(%{max_concurrent_users: 20_000_000})
+ tenant3 = tenant_fixture(%{max_concurrent_users: 30_000_000})
+ %{tenants: [tenant1, tenant2, tenant3]}
end
describe "list_tenants/0" do
+ setup [:create_tenants]
+
test "returns all tenants", %{tenants: tenants} do
assert Enum.sort(Api.list_tenants()) == Enum.sort(tenants)
end
end
describe "list_tenants/1" do
+ setup [:create_tenants]
+
test "list_tenants/1 returns filtered tenants", %{tenants: tenants} do
assert hd(Api.list_tenants(search: hd(tenants).external_id)) == hd(tenants)
@@ -38,6 +41,8 @@ defmodule Realtime.ApiTest do
end
describe "get_tenant!/1" do
+ setup [:create_tenants]
+
test "returns the tenant with given id", %{tenants: [tenant | _]} do
result = tenant.id |> Api.get_tenant!() |> Map.delete(:extensions)
expected = tenant |> Map.delete(:extensions)
@@ -51,6 +56,10 @@ defmodule Realtime.ApiTest do
external_id = random_string()
+ expect(Realtime.Tenants.Cache, :global_cache_update, fn tenant ->
+ assert tenant.external_id == external_id
+ end)
+
valid_attrs = %{
external_id: external_id,
name: external_id,
@@ -85,100 +94,130 @@ defmodule Realtime.ApiTest do
end
test "invalid data returns error changeset" do
+ reject(&Realtime.Tenants.Cache.global_cache_update/1)
assert {:error, %Ecto.Changeset{}} = Api.create_tenant(%{external_id: nil, jwt_secret: nil, name: nil})
end
end
- describe "get_tenant_by_external_id/1" do
+ describe "get_tenant_by_external_id/2" do
+ setup [:create_tenants]
+
test "fetch by external id", %{tenants: [tenant | _]} do
- %Tenant{extensions: [%Extensions{} = extension]} =
+ %Tenant{extensions: [%ApiExtensions{} = extension]} =
Api.get_tenant_by_external_id(tenant.external_id)
assert Map.has_key?(extension.settings, "db_password")
password = extension.settings["db_password"]
assert ^password = "v1QVng3N+pZd/0AEObABwg=="
end
+
+ test "fetch by external id using replica", %{tenants: [tenant | _]} do
+ %Tenant{extensions: [%ApiExtensions{} = extension]} =
+ Api.get_tenant_by_external_id(tenant.external_id, use_replica?: true)
+
+ assert Map.has_key?(extension.settings, "db_password")
+ password = extension.settings["db_password"]
+ assert ^password = "v1QVng3N+pZd/0AEObABwg=="
+ end
+
+ test "fetch by external id using no replica", %{tenants: [tenant | _]} do
+ %Tenant{extensions: [%ApiExtensions{} = extension]} =
+ Api.get_tenant_by_external_id(tenant.external_id, use_replica?: false)
+
+ assert Map.has_key?(extension.settings, "db_password")
+ password = extension.settings["db_password"]
+ assert ^password = "v1QVng3N+pZd/0AEObABwg=="
+ end
end
- describe "update_tenant/2" do
- test "valid data updates the tenant", %{tenant: tenant} do
+ describe "update_tenant_by_external_id/2" do
+ setup [:create_tenants]
+
+ test "valid data updates the tenant using external_id", %{tenants: [tenant | _]} do
update_attrs = %{
external_id: tenant.external_id,
jwt_secret: "some updated jwt_secret",
name: "some updated name"
}
- assert {:ok, %Tenant{} = tenant} = Api.update_tenant(tenant, update_attrs)
+ assert {:ok, %Tenant{} = tenant} = Api.update_tenant_by_external_id(tenant.external_id, update_attrs)
assert tenant.external_id == tenant.external_id
assert tenant.jwt_secret == Crypto.encrypt!("some updated jwt_secret")
assert tenant.name == "some updated name"
end
- test "invalid data returns error changeset", %{tenant: tenant} do
- assert {:error, %Ecto.Changeset{}} = Api.update_tenant(tenant, %{external_id: nil, jwt_secret: nil, name: nil})
+ test "invalid data returns error changeset", %{tenants: [tenant | _]} do
+ assert {:error, %Ecto.Changeset{}} =
+ Api.update_tenant_by_external_id(tenant.external_id, %{external_id: nil, jwt_secret: nil, name: nil})
end
- test "valid data and jwks change will send disconnect event", %{tenant: tenant} do
+ test "valid data and jwks change will send disconnect event", %{tenants: [tenant | _]} do
:ok = Phoenix.PubSub.subscribe(Realtime.PubSub, "realtime:operations:" <> tenant.external_id)
- assert {:ok, %Tenant{}} = Api.update_tenant(tenant, %{jwt_jwks: %{keys: ["test"]}})
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{jwt_jwks: %{keys: ["test"]}})
assert_receive :disconnect, 500
end
- test "valid data and jwt_secret change will send disconnect event", %{tenant: tenant} do
+ test "valid data and jwt_secret change will send disconnect event", %{tenants: [tenant | _]} do
:ok = Phoenix.PubSub.subscribe(Realtime.PubSub, "realtime:operations:" <> tenant.external_id)
- assert {:ok, %Tenant{}} = Api.update_tenant(tenant, %{jwt_secret: "potato"})
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{jwt_secret: "potato"})
assert_receive :disconnect, 500
end
- test "valid data and suspend change will send disconnect event", %{tenant: tenant} do
+ test "valid data and suspend change will send disconnect event", %{tenants: [tenant | _]} do
:ok = Phoenix.PubSub.subscribe(Realtime.PubSub, "realtime:operations:" <> tenant.external_id)
- assert {:ok, %Tenant{}} = Api.update_tenant(tenant, %{suspend: true})
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{suspend: true})
assert_receive :disconnect, 500
end
- test "valid data but not updating jwt_secret or jwt_jwks won't send event", %{tenant: tenant} do
+ test "valid data but not updating jwt_secret or jwt_jwks won't send event", %{tenants: [tenant | _]} do
:ok = Phoenix.PubSub.subscribe(Realtime.PubSub, "realtime:operations:" <> tenant.external_id)
- assert {:ok, %Tenant{}} = Api.update_tenant(tenant, %{max_events_per_second: 100})
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{max_events_per_second: 100})
refute_receive :disconnect, 500
end
- test "valid data and jwt_secret change will restart the database connection", %{tenant: tenant} do
- {:ok, old_pid} = Connect.lookup_or_start_connection(tenant.external_id)
+ test "valid data and jwt_secret change will restart the database connection", %{tenants: [tenant | _]} do
+ expect(Connect, :shutdown, fn external_id ->
+ assert external_id == tenant.external_id
+ :ok
+ end)
- Process.monitor(old_pid)
- assert {:ok, %Tenant{}} = Api.update_tenant(tenant, %{jwt_secret: "potato"})
- assert_receive {:DOWN, _, :process, ^old_pid, :shutdown}, 500
- refute Process.alive?(old_pid)
- Process.sleep(100)
- assert {:ok, new_pid} = Connect.lookup_or_start_connection(tenant.external_id)
- assert %Postgrex.Result{} = Postgrex.query!(new_pid, "SELECT 1", [])
+ expect(PostgresCdcRls, :handle_stop, fn external_id, timeout ->
+ assert external_id == tenant.external_id
+ assert timeout == 5_000
+ :ok
+ end)
+
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{jwt_secret: "potato"})
end
- test "valid data and suspend change will restart the database connection", %{tenant: tenant} do
- {:ok, old_pid} = Connect.lookup_or_start_connection(tenant.external_id)
+ test "valid data and suspend change will restart the database connection", %{tenants: [tenant | _]} do
+ expect(Connect, :shutdown, fn external_id ->
+ assert external_id == tenant.external_id
+ :ok
+ end)
+
+ expect(PostgresCdcRls, :handle_stop, fn external_id, timeout ->
+ assert external_id == tenant.external_id
+ assert timeout == 5_000
+ :ok
+ end)
- Process.monitor(old_pid)
- assert {:ok, %Tenant{}} = Api.update_tenant(tenant, %{suspend: true})
- assert_receive {:DOWN, _, :process, ^old_pid, :shutdown}, 500
- refute Process.alive?(old_pid)
- Process.sleep(100)
- assert {:error, :tenant_suspended} = Connect.lookup_or_start_connection(tenant.external_id)
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{suspend: true})
end
- test "valid data and tenant data change will not restart the database connection", %{tenant: tenant} do
- {:ok, old_pid} = Connect.lookup_or_start_connection(tenant.external_id)
+ test "valid data and tenant data change will not restart the database connection", %{tenants: [tenant | _]} do
+ reject(&Connect.shutdown/1)
+ reject(&PostgresCdcRls.handle_stop/2)
- assert {:ok, %Tenant{}} = Api.update_tenant(tenant, %{max_concurrent_users: 100})
- refute_receive {:DOWN, _, :process, ^old_pid, :shutdown}, 500
- assert Process.alive?(old_pid)
- assert {:ok, new_pid} = Connect.lookup_or_start_connection(tenant.external_id)
- assert old_pid == new_pid
- end
+ expect(Realtime.Tenants.Cache, :global_cache_update, fn tenant ->
+ assert tenant.max_concurrent_users == 101
+ end)
- test "valid data and extensions data change will restart the database connection", %{tenant: tenant} do
- config = Realtime.Database.from_tenant(tenant, "realtime_test", :stop)
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{max_concurrent_users: 101})
+ end
+ test "valid data and extensions data change will restart the database connection", %{tenants: [tenant | _]} do
extensions = [
%{
"type" => "postgres_cdc_rls",
@@ -187,7 +226,7 @@ defmodule Realtime.ApiTest do
"db_name" => "postgres",
"db_user" => "supabase_admin",
"db_password" => "postgres",
- "db_port" => "#{config.port}",
+ "db_port" => "5432",
"poll_interval" => 100,
"poll_max_changes" => 100,
"poll_max_record_bytes" => 1_048_576,
@@ -198,32 +237,135 @@ defmodule Realtime.ApiTest do
}
]
- {:ok, old_pid} = Connect.lookup_or_start_connection(tenant.external_id)
- Process.monitor(old_pid)
- assert {:ok, %Tenant{}} = Api.update_tenant(tenant, %{extensions: extensions})
- assert_receive {:DOWN, _, :process, ^old_pid, :shutdown}, 500
- refute Process.alive?(old_pid)
- Process.sleep(100)
- assert {:ok, new_pid} = Connect.lookup_or_start_connection(tenant.external_id)
- assert %Postgrex.Result{} = Postgrex.query!(new_pid, "SELECT 1", [])
+ expect(Connect, :shutdown, fn external_id ->
+ assert external_id == tenant.external_id
+ :ok
+ end)
+
+ expect(PostgresCdcRls, :handle_stop, fn external_id, timeout ->
+ assert external_id == tenant.external_id
+ assert timeout == 5_000
+ :ok
+ end)
+
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{extensions: extensions})
end
- test "valid data and change to tenant data will refresh cache", %{tenant: tenant} do
- assert {:ok, %Tenant{}} = Api.update_tenant(tenant, %{name: "new_name"})
- assert %Tenant{name: "new_name"} = Realtime.Tenants.Cache.get_tenant_by_external_id(tenant.external_id)
+ test "valid data and jwt_jwks change will restart the database connection", %{tenants: [tenant | _]} do
+ expect(Connect, :shutdown, fn external_id ->
+ assert external_id == tenant.external_id
+ :ok
+ end)
+
+ expect(PostgresCdcRls, :handle_stop, fn external_id, timeout ->
+ assert external_id == tenant.external_id
+ assert timeout == 5_000
+ :ok
+ end)
+
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{jwt_jwks: %{keys: ["test"]}})
end
- test "valid data and no changes to tenant will not refresh cache", %{tenant: tenant} do
- reject(&Realtime.Tenants.Cache.get_tenant_by_external_id/1)
- assert {:ok, %Tenant{}} = Api.update_tenant(tenant, %{name: tenant.name})
+ test "valid data and jwt_secret change will restart DB connection even if handle_stop times out", %{
+ tenants: [tenant | _]
+ } do
+ expect(Connect, :shutdown, fn external_id ->
+ assert external_id == tenant.external_id
+ :ok
+ end)
+
+ expect(PostgresCdcRls, :handle_stop, fn _external_id, _timeout ->
+ # Simulate timeout exit like DynamicSupervisor.stop/3 does
+ exit(:timeout)
+ end)
+
+ # Update should still succeed even if handle_stop times out
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{jwt_secret: "potato"})
end
- end
- describe "delete_tenant/1" do
- test "deletes the tenant" do
- tenant = tenant_fixture()
- assert {:ok, %Tenant{}} = Api.delete_tenant(tenant)
- assert_raise Ecto.NoResultsError, fn -> Api.get_tenant!(tenant.id) end
+ test "valid data and change to tenant data will refresh cache", %{tenants: [tenant | _]} do
+ expect(Realtime.Tenants.Cache, :global_cache_update, fn tenant ->
+ assert tenant.name == "new_name"
+ end)
+
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{name: "new_name"})
+ end
+
+ test "valid data and no changes to tenant will not refresh cache", %{tenants: [tenant | _]} do
+ reject(&Realtime.Tenants.Cache.global_cache_update/1)
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{name: tenant.name})
+ end
+
+ test "change to max_events_per_second publishes update to respective rate counters", %{tenants: [tenant | _]} do
+ expect(RateCounter, :publish_update, fn key ->
+ assert key == Realtime.Tenants.events_per_second_key(tenant.external_id)
+ end)
+
+ expect(RateCounter, :publish_update, fn key ->
+ assert key == Realtime.Tenants.db_events_per_second_key(tenant.external_id)
+ end)
+
+ reject(&RateCounter.publish_update/1)
+
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{max_events_per_second: 123})
+ end
+
+ test "change to max_joins_per_second publishes update to rate counters", %{tenants: [tenant | _]} do
+ expect(RateCounter, :publish_update, fn key ->
+ assert key == Realtime.Tenants.joins_per_second_key(tenant.external_id)
+ end)
+
+ reject(&RateCounter.publish_update/1)
+
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{max_joins_per_second: 123})
+ end
+
+ test "change to max_presence_events_per_second publishes update to rate counters", %{tenants: [tenant | _]} do
+ expect(RateCounter, :publish_update, fn key ->
+ assert key == Realtime.Tenants.presence_events_per_second_key(tenant.external_id)
+ end)
+
+ reject(&RateCounter.publish_update/1)
+
+ assert {:ok, %Tenant{}} =
+ Api.update_tenant_by_external_id(tenant.external_id, %{max_presence_events_per_second: 123})
+ end
+
+ test "change to extensions publishes update to rate counters", %{tenants: [tenant | _]} do
+ extensions = [
+ %{
+ "type" => "postgres_cdc_rls",
+ "settings" => %{
+ "db_host" => "127.0.0.1",
+ "db_name" => "postgres",
+ "db_user" => "supabase_admin",
+ "db_password" => "postgres",
+ "db_port" => "1234",
+ "poll_interval" => 100,
+ "poll_max_changes" => 100,
+ "poll_max_record_bytes" => 1_048_576,
+ "region" => "us-east-1",
+ "publication" => "supabase_realtime_test",
+ "ssl_enforced" => false
+ }
+ }
+ ]
+
+ expect(RateCounter, :publish_update, fn key ->
+ assert key == Realtime.Tenants.connect_errors_per_second_key(tenant.external_id)
+ end)
+
+ expect(RateCounter, :publish_update, fn key ->
+ assert key == Realtime.Tenants.subscription_errors_per_second_key(tenant.external_id)
+ end)
+
+ expect(RateCounter, :publish_update, fn key ->
+ assert key == Realtime.Tenants.authorization_errors_per_second_key(tenant.external_id)
+ end)
+
+ reject(&RateCounter.publish_update/1)
+
+ assert {:ok, %Tenant{}} = Api.update_tenant_by_external_id(tenant.external_id, %{extensions: extensions})
end
end
@@ -236,11 +378,9 @@ defmodule Realtime.ApiTest do
end
end
- test "list_extensions/1 ", %{tenants: tenants} do
- assert length(Api.list_extensions()) == length(tenants)
- end
-
describe "preload_counters/1" do
+ setup [:create_tenants]
+
test "preloads counters for a given tenant ", %{tenants: [tenant | _]} do
tenant = Repo.reload!(tenant)
assert Api.preload_counters(nil) == nil
@@ -256,6 +396,7 @@ defmodule Realtime.ApiTest do
end
describe "rename_settings_field/2" do
+ @tag skip: "** (Postgrex.Error) ERROR 0A000 (feature_not_supported) cached plan must not change result type"
test "renames setting fields" do
tenant = tenant_fixture()
Api.rename_settings_field("poll_interval_ms", "poll_interval")
@@ -340,4 +481,18 @@ defmodule Realtime.ApiTest do
refute TestRequiresRestartingDbConnection.check(changeset)
end
end
+
+ describe "update_migrations_ran/1" do
+ test "updates migrations_ran to the count of all migrations" do
+ tenant = tenant_fixture(%{migrations_ran: 0})
+
+ expect(Realtime.Tenants.Cache, :global_cache_update, fn tenant ->
+ assert tenant.migrations_ran == 1
+ :ok
+ end)
+
+ assert {:ok, tenant} = Api.update_migrations_ran(tenant.external_id, 1)
+ assert tenant.migrations_ran == 1
+ end
+ end
end
diff --git a/test/realtime/database_distributed_test.exs b/test/realtime/database_distributed_test.exs
new file mode 100644
index 000000000..cb952c861
--- /dev/null
+++ b/test/realtime/database_distributed_test.exs
@@ -0,0 +1,96 @@
+defmodule Realtime.DatabaseDistributedTest do
+ # async: false due to usage of Clustered
+ use Realtime.DataCase, async: false
+
+ import ExUnit.CaptureLog
+
+ alias Realtime.Database
+ alias Realtime.Rpc
+ alias Realtime.Tenants.Connect
+
+ doctest Realtime.Database
+ def handle_telemetry(event, metadata, content, pid: pid), do: send(pid, {event, metadata, content})
+
+ setup do
+ tenant = Containers.checkout_tenant()
+ :telemetry.attach(__MODULE__, [:realtime, :database, :transaction], &__MODULE__.handle_telemetry/4, pid: self())
+
+ on_exit(fn -> :telemetry.detach(__MODULE__) end)
+
+ %{tenant: tenant}
+ end
+
+ @aux_mod (quote do
+ defmodule DatabaseAux do
+ def checker(transaction_conn) do
+ Postgrex.query!(transaction_conn, "SELECT 1", [])
+ end
+
+ def error(transaction_conn) do
+ Postgrex.query!(transaction_conn, "SELECT 1/0", [])
+ end
+
+ def exception(_) do
+ raise RuntimeError, "💣"
+ end
+ end
+ end)
+
+ Code.eval_quoted(@aux_mod)
+
+ describe "transaction/1 in clustered mode" do
+ setup do
+ tenant = Containers.checkout_tenant_unboxed(run_migrations: true)
+ %{distributed_tenant: tenant}
+ end
+
+ test "success call returns output", %{distributed_tenant: tenant} do
+ {:ok, node} = Clustered.start(@aux_mod)
+ {:ok, db_conn} = Rpc.call(node, Connect, :connect, [tenant.external_id, "us-east-1"])
+ assert node(db_conn) == node
+ assert {:ok, %Postgrex.Result{rows: [[1]]}} = Database.transaction(db_conn, &DatabaseAux.checker/1)
+ end
+
+ test "handles database errors", %{distributed_tenant: tenant} do
+ metadata = [external_id: "123", project: "123"]
+ {:ok, node} = Clustered.start(@aux_mod)
+ {:ok, db_conn} = Rpc.call(node, Connect, :connect, [tenant.external_id, "us-east-1"])
+ assert node(db_conn) == node
+
+ assert capture_log(fn ->
+ assert {:error, %Postgrex.Error{}} = Database.transaction(db_conn, &DatabaseAux.error/1, [], metadata)
+ # We have to wait for logs to be relayed to this node
+ Process.sleep(100)
+ end) =~ "project=123 external_id=123 [error] ErrorExecutingTransaction:"
+ end
+
+ test "handles exception", %{distributed_tenant: tenant} do
+ metadata = [external_id: "123", project: "123"]
+ {:ok, node} = Clustered.start(@aux_mod)
+ {:ok, db_conn} = Rpc.call(node, Connect, :connect, [tenant.external_id, "us-east-1"])
+ assert node(db_conn) == node
+
+ assert capture_log(fn ->
+ assert {:error, %RuntimeError{}} = Database.transaction(db_conn, &DatabaseAux.exception/1, [], metadata)
+ # We have to wait for logs to be relayed to this node
+ Process.sleep(100)
+ end) =~ "project=123 external_id=123 [error] ErrorExecutingTransaction:"
+ end
+
+ test "db process is not alive anymore" do
+ metadata = [external_id: "123", project: "123", tenant_id: "123"]
+ {:ok, node} = Clustered.start(@aux_mod)
+
+ pid = Rpc.call(node, :erlang, :self, [])
+ assert node(pid) == node
+
+ assert capture_log(fn ->
+ assert {:error, {:exit, {:noproc, {DBConnection.Holder, :checkout, [^pid, []]}}}} =
+ Database.transaction(pid, &DatabaseAux.checker/1, [], metadata)
+
+ # We have to wait for logs to be relayed to this node
+ Process.sleep(100)
+ end) =~ "project=123 external_id=123 [error] ErrorExecutingTransaction:"
+ end
+ end
+end
diff --git a/test/realtime/database_test.exs b/test/realtime/database_test.exs
index f48de14b6..7d3a85a5e 100644
--- a/test/realtime/database_test.exs
+++ b/test/realtime/database_test.exs
@@ -1,12 +1,9 @@
defmodule Realtime.DatabaseTest do
- # async: false due to usage of Clustered
- use Realtime.DataCase, async: false
+ use Realtime.DataCase, async: true
import ExUnit.CaptureLog
alias Realtime.Database
- alias Realtime.Rpc
- alias Realtime.Tenants.Connect
doctest Realtime.Database
def handle_telemetry(event, metadata, content, pid: pid), do: send(pid, {event, metadata, content})
@@ -42,14 +39,35 @@ defmodule Realtime.DatabaseTest do
%{tenant: tenant}
end
+ test "returns error when tenant is nil" do
+ assert {:error, :tenant_not_found} = Database.check_tenant_connection(nil)
+ end
+
test "connects to a tenant database", %{tenant: tenant} do
- assert {:ok, _} = Database.check_tenant_connection(tenant)
+ assert {:ok, _conn, migrations_ran} = Database.check_tenant_connection(tenant)
+ assert is_integer(migrations_ran)
+ assert migrations_ran >= 0
+ end
+
+ test "returns 0 migrations when realtime.schema_migrations does not exist", %{tenant: tenant} do
+ # by default new containers do not have the schema_migrations table
+ assert {:ok, _conn, 0} = Database.check_tenant_connection(tenant)
+ end
+
+ test "returns migration count when realtime.schema_migrations exists", %{tenant: tenant} do
+ {:ok, conn} = Database.connect(tenant, "realtime_test", :stop)
+
+ Postgrex.query!(conn, "CREATE TABLE IF NOT EXISTS realtime.schema_migrations (version bigint PRIMARY KEY)", [])
+ Postgrex.query!(conn, "INSERT INTO realtime.schema_migrations VALUES (1), (2), (3)", [])
+
+ assert {:ok, check_conn, 3} = Database.check_tenant_connection(tenant)
+ GenServer.stop(check_conn)
+ GenServer.stop(conn)
end
# Connection limit for docker tenant db is 100
@tag db_pool: 50,
- subs_pool_size: 21,
- subcriber_pool_size: 33
+ subs_pool_size: 73
test "restricts connection if tenant database cannot receive more connections based on tenant pool",
%{tenant: tenant} do
assert capture_log(fn ->
@@ -164,6 +182,13 @@ defmodule Realtime.DatabaseTest do
assert log =~ "project=123 external_id=123 [error] ErrorExecutingTransaction"
end
+ test "handles exit signals in transactions", %{db_conn: db_conn} do
+ assert capture_log(fn ->
+ assert {:error, {:exit, _}} =
+ Database.transaction(db_conn, fn _conn -> exit(:test_exit) end)
+ end) =~ "ErrorExecutingTransaction"
+ end
+
test "run call using RPC", %{db_conn: db_conn} do
assert {:ok, %{rows: [[1]]}} =
Realtime.Rpc.enhanced_call(
@@ -215,84 +240,6 @@ defmodule Realtime.DatabaseTest do
end
end
- @aux_mod (quote do
- defmodule DatabaseAux do
- def checker(transaction_conn) do
- Postgrex.query!(transaction_conn, "SELECT 1", [])
- end
-
- def error(transaction_conn) do
- Postgrex.query!(transaction_conn, "SELECT 1/0", [])
- end
-
- def exception(_) do
- raise RuntimeError, "💣"
- end
- end
- end)
-
- Code.eval_quoted(@aux_mod)
-
- describe "transaction/1 in clustered mode" do
- setup do
- Connect.shutdown("dev_tenant")
- # Waiting for :syn to "unregister" if the Connect process was up
- Process.sleep(100)
- :ok
- end
-
- test "success call returns output" do
- {:ok, node} = Clustered.start(@aux_mod)
- {:ok, db_conn} = Rpc.call(node, Connect, :connect, ["dev_tenant", "us-east-1"])
- assert node(db_conn) == node
- assert {:ok, %Postgrex.Result{rows: [[1]]}} = Database.transaction(db_conn, &DatabaseAux.checker/1)
- end
-
- test "handles database errors" do
- metadata = [external_id: "123", project: "123"]
- {:ok, node} = Clustered.start(@aux_mod)
- {:ok, db_conn} = Rpc.call(node, Connect, :connect, ["dev_tenant", "us-east-1"])
- assert node(db_conn) == node
-
- assert capture_log(fn ->
- assert {:error, %Postgrex.Error{}} = Database.transaction(db_conn, &DatabaseAux.error/1, [], metadata)
- # We have to wait for logs to be relayed to this node
- Process.sleep(100)
- end) =~ "project=123 external_id=123 [error] ErrorExecutingTransaction:"
- end
-
- test "handles exception" do
- metadata = [external_id: "123", project: "123"]
- {:ok, node} = Clustered.start(@aux_mod)
- {:ok, db_conn} = Rpc.call(node, Connect, :connect, ["dev_tenant", "us-east-1"])
- assert node(db_conn) == node
-
- assert capture_log(fn ->
- assert {:error, %RuntimeError{}} = Database.transaction(db_conn, &DatabaseAux.exception/1, [], metadata)
- # We have to wait for logs to be relayed to this node
- Process.sleep(100)
- end) =~ "project=123 external_id=123 [error] ErrorExecutingTransaction:"
- end
-
- test "db process is not alive anymore" do
- metadata = [external_id: "123", project: "123", tenant_id: "123"]
- {:ok, node} = Clustered.start(@aux_mod)
- # Grab a remote pid that will not exist. :erpc uses a new process to perform the call.
- # Once it has returned the process is not alive anymore
-
- pid = Rpc.call(node, :erlang, :self, [])
- assert node(pid) == node
-
- assert capture_log(fn ->
- assert {:error, {:exit, {:noproc, {DBConnection.Holder, :checkout, [^pid, []]}}}} =
- Database.transaction(pid, &DatabaseAux.checker/1, [], metadata)
-
- # We have to wait for logs to be relayed to this node
- Process.sleep(100)
- end) =~ "project=123 external_id=123 [error] ErrorExecutingTransaction:"
- end
- end
-
describe "pool_size_by_application_name/2" do
test "returns the number of connections per application name" do
assert Database.pool_size_by_application_name("realtime_connect", %{}) == 1
@@ -360,7 +307,20 @@ defmodule Realtime.DatabaseTest do
end
end
+ describe "from_tenant/3" do
+ test "uses default backoff when not provided", %{tenant: tenant} do
+ settings = Database.from_tenant(tenant, "realtime_test")
+ assert settings.backoff_type == :rand_exp
+ end
+ end
+
describe "from_settings/3" do
+ test "uses default backoff when not provided", %{tenant: tenant} do
+ settings = Realtime.PostgresCdc.filter_settings("postgres_cdc_rls", tenant.extensions)
+ result = Database.from_settings(settings, "realtime_connect")
+ assert result.backoff_type == :rand_exp
+ end
+
test "returns struct with correct setup", %{tenant: tenant} do
application_name = "realtime_connect"
backoff = :stop
@@ -386,6 +346,11 @@ defmodule Realtime.DatabaseTest do
} = settings
end
+ test "defaults ssl to true when ssl_enforced is not set" do
+ assert Database.default_ssl_param(%{}) == true
+ assert Database.default_ssl_param(%{"other" => "value"}) == true
+ end
+
test "handles SSL properties", %{tenant: tenant} do
application_name = "realtime_connect"
backoff = :stop
@@ -409,6 +374,6 @@ defmodule Realtime.DatabaseTest do
put_in(extension, ["settings", "db_port"], db_port)
]
- Realtime.Api.update_tenant(tenant, %{extensions: extensions})
+ Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{extensions: extensions})
end
end
diff --git a/test/realtime/extensions/cdc_rls/cdc_rls_test.exs b/test/realtime/extensions/cdc_rls/cdc_rls_test.exs
index 5f341c134..20c0795af 100644
--- a/test/realtime/extensions/cdc_rls/cdc_rls_test.exs
+++ b/test/realtime/extensions/cdc_rls/cdc_rls_test.exs
@@ -1,7 +1,6 @@
defmodule Realtime.Extensions.CdcRlsTest do
- # async: false due to usage of dev_tenant
- # Also global mimic mock
- use RealtimeWeb.ChannelCase, async: false
+ # async: false due to global mimic mock
+ use Realtime.DataCase, async: false
use Mimic
import ExUnit.CaptureLog
@@ -9,9 +8,9 @@ defmodule Realtime.Extensions.CdcRlsTest do
setup :set_mimic_global
alias Extensions.PostgresCdcRls
+ alias Extensions.PostgresCdcRls.Subscriptions
alias PostgresCdcRls.SubscriptionManager
alias Postgrex
- alias Realtime.Api
alias Realtime.Api.Tenant
alias Realtime.Database
alias Realtime.PostgresCdc
@@ -23,77 +22,39 @@ defmodule Realtime.Extensions.CdcRlsTest do
describe "Postgres extensions" do
setup do
tenant = Containers.checkout_tenant(run_migrations: true)
-
- {:ok, conn} = Database.connect(tenant, "realtime_test")
-
- Database.transaction(conn, fn db_conn ->
- queries = [
- "drop table if exists public.test",
- "drop publication if exists supabase_realtime_test",
- "create sequence if not exists test_id_seq;",
- """
- create table if not exists "public"."test" (
- "id" int4 not null default nextval('test_id_seq'::regclass),
- "details" text,
- primary key ("id"));
- """,
- "grant all on table public.test to anon;",
- "grant all on table public.test to postgres;",
- "grant all on table public.test to authenticated;",
- "create publication supabase_realtime_test for all tables"
- ]
-
- Enum.each(queries, &Postgrex.query!(db_conn, &1, []))
- end)
+ {:ok, conn} = Database.connect(tenant, "realtime_test", :stop)
+ Integrations.setup_postgres_changes(conn)
+ GenServer.stop(conn)
%Tenant{extensions: extensions, external_id: external_id} = tenant
postgres_extension = PostgresCdc.filter_settings("postgres_cdc_rls", extensions)
- args = Map.put(postgres_extension, "id", external_id)
+ args = %{"id" => external_id, "region" => postgres_extension["region"]}
- pg_change_params = [
- %{
- id: UUID.uuid1(),
- params: %{"event" => "*", "schema" => "public"},
- channel_pid: self(),
- claims: %{
- "exp" => System.system_time(:second) + 100_000,
- "iat" => 0,
- "ref" => "127.0.0.1",
- "role" => "anon"
- }
- }
- ]
-
- ids =
- Enum.map(pg_change_params, fn %{id: id, params: params} ->
- {UUID.string_to_binary!(id), :erlang.phash2(params)}
- end)
-
- topic = "realtime:test"
- serializer = Phoenix.Socket.V1.JSONSerializer
-
- subscription_metadata = {:subscriber_fastlane, self(), serializer, ids, topic, external_id, true}
- metadata = [metadata: subscription_metadata]
- :ok = PostgresCdc.subscribe(PostgresCdcRls, pg_change_params, external_id, metadata)
+ pg_change_params = pubsub_subscribe(external_id)
+ RealtimeWeb.Endpoint.subscribe(Realtime.Syn.PostgresCdc.syn_topic(tenant.external_id))
# First time it will return nil
PostgresCdcRls.handle_connect(args)
# Wait for it to start
- Process.sleep(3000)
+ assert_receive %{event: "ready"}, 1000
+
+ on_exit(fn -> PostgresCdcRls.handle_stop(external_id, 10_000) end)
{:ok, response} = PostgresCdcRls.handle_connect(args)
# Now subscribe to the Postgres Changes
- {:ok, _} = PostgresCdcRls.handle_after_connect(response, postgres_extension, pg_change_params)
+ {:ok, _} = PostgresCdcRls.handle_after_connect(response, postgres_extension, pg_change_params, external_id)
- on_exit(fn -> PostgresCdcRls.handle_stop(external_id, 10_000) end)
+ RealtimeWeb.Endpoint.unsubscribe(Realtime.Syn.PostgresCdc.syn_topic(tenant.external_id))
%{tenant: tenant}
end
- @tag skip: "Flaky test. When logger handle_sasl_reports is enabled this test doesn't break"
- test "Check supervisor crash and respawn", %{tenant: tenant} do
+ test "supervisor crash must not respawn", %{tenant: tenant} do
+ scope = Realtime.Syn.PostgresCdc.scope(tenant.external_id)
+
sup =
Enum.reduce_while(1..30, nil, fn _, acc ->
- :syn.lookup(Extensions.PostgresCdcRls, tenant.external_id)
+ scope
+ |> :syn.lookup(tenant.external_id)
|> case do
:undefined ->
Process.sleep(500)
@@ -107,27 +68,22 @@ defmodule Realtime.Extensions.CdcRlsTest do
assert Process.alive?(sup)
Process.monitor(sup)
- RealtimeWeb.Endpoint.subscribe(PostgresCdcRls.syn_topic(tenant.external_id))
+ RealtimeWeb.Endpoint.subscribe(Realtime.Syn.PostgresCdc.syn_topic(tenant.external_id))
Process.exit(sup, :kill)
- assert_receive {:DOWN, _, :process, ^sup, _reason}, 5000
-
- assert_receive %{event: "ready"}, 5000
+ scope_down = Atom.to_string(scope) <> "_down"
- {sup2, _} = :syn.lookup(Extensions.PostgresCdcRls, tenant.external_id)
+ assert_receive {:DOWN, _, :process, ^sup, _reason}, 5000
+ assert_receive %{event: ^scope_down}
+ refute_receive %{event: "ready"}, 1000
- assert(sup != sup2)
- assert Process.alive?(sup2)
+ :undefined = :syn.lookup(Realtime.Syn.PostgresCdc.scope(tenant.external_id), tenant.external_id)
end
test "Subscription manager updates oids", %{tenant: tenant} do
{subscriber_manager_pid, conn} =
Enum.reduce_while(1..25, nil, fn _, acc ->
case PostgresCdcRls.get_manager_conn(tenant.external_id) do
- nil ->
- Process.sleep(200)
- {:cont, acc}
-
{:error, :wait} ->
Process.sleep(200)
{:cont, acc}
@@ -153,7 +109,10 @@ defmodule Realtime.Extensions.CdcRlsTest do
test "Stop tenant supervisor", %{tenant: tenant} do
sup =
Enum.reduce_while(1..10, nil, fn _, acc ->
- case :syn.lookup(Extensions.PostgresCdcRls, tenant.external_id) do
+ tenant.external_id
+ |> Realtime.Syn.PostgresCdc.scope()
+ |> :syn.lookup(tenant.external_id)
+ |> case do
:undefined ->
Process.sleep(500)
{:cont, acc}
@@ -169,16 +128,46 @@ defmodule Realtime.Extensions.CdcRlsTest do
end
end
+ describe "handle_after_connect/4" do
+ setup do
+ tenant = Containers.checkout_tenant(run_migrations: true)
+ %{tenant: tenant}
+ end
+
+ test "subscription error rate limit", %{tenant: tenant} do
+ %Tenant{extensions: extensions, external_id: external_id} = tenant
+ postgres_extension = PostgresCdc.filter_settings("postgres_cdc_rls", extensions)
+
+ stub(Subscriptions, :create, fn _conn, _publication, _subscription_list, _manager, _caller ->
+ {:error, %DBConnection.ConnectionError{}}
+ end)
+
+ # Now try to subscribe to the Postgres Changes
+ for _x <- 1..6 do
+ assert {:error, "Too many database timeouts"} =
+ PostgresCdcRls.handle_after_connect({:manager_pid, self()}, postgres_extension, %{}, external_id)
+ end
+
+ rate = Realtime.Tenants.subscription_errors_per_second_rate(external_id, 4)
+
+ assert {:ok, %RateCounter{id: {:channel, :subscription_errors, ^external_id}, sum: 6, limit: %{triggered: true}}} =
+ RateCounterHelper.tick!(rate)
+
+ # It won't even be called now
+ reject(&Subscriptions.create/5)
+
+ assert {:error, "Too many database timeouts"} =
+ PostgresCdcRls.handle_after_connect({:manager_pid, self()}, postgres_extension, %{}, external_id)
+ end
+ end
+
describe "Region rebalancing" do
setup do
tenant = Containers.checkout_tenant(run_migrations: true)
%Tenant{extensions: extensions, external_id: external_id} = tenant
postgres_extension = PostgresCdc.filter_settings("postgres_cdc_rls", extensions)
- args =
- postgres_extension
- |> Map.put("id", external_id)
- |> Map.put(:check_region_interval, 100)
+ args = %{"id" => external_id, "region" => postgres_extension["region"], check_region_interval: 100}
%{tenant_id: tenant.external_id, args: args}
end
@@ -208,98 +197,63 @@ defmodule Realtime.Extensions.CdcRlsTest do
end
describe "integration" do
- setup do
- tenant = Api.get_tenant_by_external_id("dev_tenant")
- PostgresCdcRls.handle_stop(tenant.external_id, 10_000)
-
- {:ok, conn} = Database.connect(tenant, "realtime_test")
-
- Database.transaction(conn, fn db_conn ->
- queries = [
- "drop table if exists public.test",
- "drop publication if exists supabase_realtime_test",
- "create sequence if not exists test_id_seq;",
- """
- create table if not exists "public"."test" (
- "id" int4 not null default nextval('test_id_seq'::regclass),
- "details" text,
- primary key ("id"));
- """,
- "grant all on table public.test to anon;",
- "grant all on table public.test to postgres;",
- "grant all on table public.test to authenticated;",
- "create publication supabase_realtime_test for all tables"
- ]
-
- Enum.each(queries, &Postgrex.query!(db_conn, &1, []))
- end)
+ setup [:integration]
- RateCounter.stop(tenant.external_id)
-
- %{tenant: tenant, conn: conn}
- end
-
- test "subscribe inserts", %{tenant: tenant, conn: conn} do
+ test "subscribe inserts only", %{tenant: tenant, conn: conn} do
on_exit(fn -> PostgresCdcRls.handle_stop(tenant.external_id, 10_000) end)
%Tenant{extensions: extensions, external_id: external_id} = tenant
postgres_extension = PostgresCdc.filter_settings("postgres_cdc_rls", extensions)
- args = Map.put(postgres_extension, "id", external_id)
-
- pg_change_params = [
- %{
- id: UUID.uuid1(),
- params: %{"event" => "*", "schema" => "public"},
- channel_pid: self(),
- claims: %{
- "exp" => System.system_time(:second) + 100_000,
- "iat" => 0,
- "ref" => "127.0.0.1",
- "role" => "anon"
- }
- }
- ]
+ args = %{"id" => external_id, "region" => postgres_extension["region"]}
- ids =
- Enum.map(pg_change_params, fn %{id: id, params: params} ->
- {UUID.string_to_binary!(id), :erlang.phash2(params)}
- end)
-
- topic = "realtime:test"
- serializer = Phoenix.Socket.V1.JSONSerializer
-
- subscription_metadata = {:subscriber_fastlane, self(), serializer, ids, topic, external_id, true}
- metadata = [metadata: subscription_metadata]
- :ok = PostgresCdc.subscribe(PostgresCdcRls, pg_change_params, external_id, metadata)
+ pg_change_params = pubsub_subscribe(external_id, "INSERT")
# First time it will return nil
PostgresCdcRls.handle_connect(args)
# Wait for it to start
- Process.sleep(3000)
+ assert_receive %{event: "ready"}, 3000
{:ok, response} = PostgresCdcRls.handle_connect(args)
+ assert_receive {
+ :telemetry,
+ [:realtime, :rpc],
+ %{latency: _},
+ %{
+ mechanism: :gen_rpc,
+ success: true
+ }
+ }
+
# Now subscribe to the Postgres Changes
- {:ok, _} = PostgresCdcRls.handle_after_connect(response, postgres_extension, pg_change_params)
- assert %Postgrex.Result{rows: [[1]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ Postgrex.query!(conn, "delete from realtime.subscription", [])
+ {:ok, _} = PostgresCdcRls.handle_after_connect(response, postgres_extension, pg_change_params, external_id)
+
+ assert %Postgrex.Result{num_rows: n} = Postgrex.query!(conn, "select id from realtime.subscription", [])
+ assert n >= 1
+
+ Process.sleep(500)
# Insert a record
%{rows: [[id]]} = Postgrex.query!(conn, "insert into test (details) values ('test') returning id", [])
+ # Delete the record
+ %{num_rows: 1} = Postgrex.query!(conn, "delete from test", [])
assert_receive {:socket_push, :text, data}, 5000
-
- message =
- data
- |> IO.iodata_to_binary()
- |> Jason.decode!()
+ # No DELETE should be received
+ refute_receive {:socket_push, :text, _data}, 1000
assert %{
"event" => "postgres_changes",
"payload" => %{
"data" => %{
- "columns" => [%{"name" => "id", "type" => "int4"}, %{"name" => "details", "type" => "text"}],
+ "columns" => [
+ %{"name" => "id", "type" => "int4"},
+ %{"name" => "details", "type" => "text"},
+ %{"name" => "binary_data", "type" => "bytea"}
+ ],
"commit_timestamp" => _,
"errors" => nil,
- "record" => %{"details" => "test", "id" => ^id},
+ "record" => %{"details" => "test", "id" => ^id, "binary_data" => nil},
"schema" => "public",
"table" => "test",
"type" => "INSERT"
@@ -308,91 +262,140 @@ defmodule Realtime.Extensions.CdcRlsTest do
},
"ref" => nil,
"topic" => "realtime:test"
- } = message
+ } = Jason.decode!(data)
- # Wait for RateCounter to update
- Process.sleep(2000)
+ rate = Realtime.Tenants.db_events_per_second_rate(tenant)
+
+ assert {:ok, %RateCounter{id: {:channel, :db_events, ^external_id}, bucket: bucket}} =
+ RateCounterHelper.tick!(rate)
+
+ assert Enum.sum(bucket) == 1
+
+ assert_receive {
+ :telemetry,
+ [:realtime, :tenants, :payload, :size],
+ %{size: _},
+ %{tenant: ^external_id, message_type: :postgres_changes}
+ }
+ end
+
+ test "db events rate limit works", %{tenant: tenant, conn: conn} do
+ on_exit(fn -> PostgresCdcRls.handle_stop(tenant.external_id, 10_000) end)
+
+ %Tenant{extensions: extensions, external_id: external_id} = tenant
+ postgres_extension = PostgresCdc.filter_settings("postgres_cdc_rls", extensions)
+ args = %{"id" => external_id, "region" => postgres_extension["region"]}
+
+ pg_change_params = pubsub_subscribe(external_id)
+
+ # First time it will return nil
+ PostgresCdcRls.handle_connect(args)
+ # Wait for it to start
+ assert_receive %{event: "ready"}, 1000
+ {:ok, response} = PostgresCdcRls.handle_connect(args)
+
+ # Now subscribe to the Postgres Changes
+ Postgrex.query!(conn, "delete from realtime.subscription", [])
+ {:ok, _} = PostgresCdcRls.handle_after_connect(response, postgres_extension, pg_change_params, external_id)
+ assert %Postgrex.Result{rows: [[n]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ assert n >= 1
rate = Realtime.Tenants.db_events_per_second_rate(tenant)
- assert {:ok, %RateCounter{id: {:channel, :db_events, "dev_tenant"}, bucket: bucket}} = RateCounter.get(rate)
- assert 1 in bucket
+ log =
+ capture_log(fn ->
+ # increment artificially the counter to reach the limit
+ tenant.external_id
+ |> Realtime.Tenants.db_events_per_second_key()
+ |> Realtime.GenCounter.add(100_000_000)
+
+ RateCounterHelper.tick!(rate)
+ end)
+
+ assert log =~ "MessagePerSecondRateLimitReached: Too many postgres changes messages per second"
+
+ # Insert a record
+ %{rows: [[_id]]} = Postgrex.query!(conn, "insert into test (details) values ('test') returning id", [])
+
+ refute_receive {:socket_push, :text, _}, 5000
+
+ assert {:ok, %RateCounter{id: {:channel, :db_events, ^external_id}, bucket: bucket, limit: %{triggered: true}}} =
+ RateCounterHelper.tick!(rate)
+
+ # Nothing has changed
+ assert Enum.sum(bucket) == 100_000_000
end
+ end
- @aux_mod (quote do
- defmodule Subscriber do
- # Start CDC remotely
- def subscribe(tenant) do
- %Tenant{extensions: extensions, external_id: external_id} = tenant
- postgres_extension = PostgresCdc.filter_settings("postgres_cdc_rls", extensions)
- args = Map.put(postgres_extension, "id", external_id)
-
- # Boot it
- PostgresCdcRls.start(args)
- # Wait for it to start
- Process.sleep(3000)
- {:ok, manager, conn} = PostgresCdcRls.get_manager_conn(external_id)
- {:ok, {manager, conn}}
- end
+ @aux_mod (quote do
+ defmodule Subscriber do
+ # Start CDC remotely
+ def subscribe(tenant) do
+ %Tenant{extensions: extensions, external_id: external_id} = tenant
+ postgres_extension = PostgresCdc.filter_settings("postgres_cdc_rls", extensions)
+ args = %{"id" => external_id, "region" => postgres_extension["region"]}
+
+ RealtimeWeb.Endpoint.subscribe(Realtime.Syn.PostgresCdc.syn_topic(tenant.external_id))
+ # First time it will return nil
+ PostgresCdcRls.start(args)
+ # Wait for it to start
+ assert_receive %{event: "ready"}, 3000
+ {:ok, manager, conn} = PostgresCdcRls.get_manager_conn(external_id)
+ {:ok, {manager, conn}}
end
- end)
+ end
+ end)
+ describe "distributed integration" do
+ setup [:distributed_integration]
- test "subscribe inserts distributed mode", %{tenant: tenant, conn: conn} do
+ setup(%{tenant: tenant}) do
{:ok, node} = Clustered.start(@aux_mod)
{:ok, response} = :erpc.call(node, Subscriber, :subscribe, [tenant])
- %Tenant{extensions: extensions, external_id: external_id} = tenant
- postgres_extension = PostgresCdc.filter_settings("postgres_cdc_rls", extensions)
+ on_exit(fn ->
+ try do
+ PostgresCdcRls.handle_stop(tenant.external_id, 5_000)
+ catch
+ _, _ -> :ok
+ end
+ end)
- pg_change_params = [
- %{
- id: UUID.uuid1(),
- params: %{"event" => "*", "schema" => "public"},
- channel_pid: self(),
- claims: %{
- "exp" => System.system_time(:second) + 100_000,
- "iat" => 0,
- "ref" => "127.0.0.1",
- "role" => "anon"
- }
- }
- ]
+ %{node: node, response: response}
+ end
- ids =
- Enum.map(pg_change_params, fn %{id: id, params: params} ->
- {UUID.string_to_binary!(id), :erlang.phash2(params)}
- end)
+ test "subscribe distributed mode", %{tenant: tenant, conn: conn, node: node, response: response} do
+ %Tenant{extensions: extensions, external_id: external_id} = tenant
+ postgres_extension = PostgresCdc.filter_settings("postgres_cdc_rls", extensions)
- # Subscribe to the topic as a websocket client
- topic = "realtime:test"
- serializer = Phoenix.Socket.V1.JSONSerializer
+ pg_change_params = pubsub_subscribe(external_id)
- subscription_metadata = {:subscriber_fastlane, self(), serializer, ids, topic, external_id, true}
- metadata = [metadata: subscription_metadata]
- :ok = PostgresCdc.subscribe(PostgresCdcRls, pg_change_params, external_id, metadata)
+ Postgrex.query!(conn, "delete from realtime.subscription", [])
+ {:ok, _} = PostgresCdcRls.handle_after_connect(response, postgres_extension, pg_change_params, external_id)
+ assert %Postgrex.Result{rows: [[n]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ assert n >= 1
- # Now subscribe to the Postgres Changes
- {:ok, _} = PostgresCdcRls.handle_after_connect(response, postgres_extension, pg_change_params)
- assert %Postgrex.Result{rows: [[1]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ # Wait for subscription to be executing
+ Process.sleep(200)
# Insert a record
%{rows: [[id]]} = Postgrex.query!(conn, "insert into test (details) values ('test') returning id", [])
+ # Delete the record
+ %{num_rows: 1} = Postgrex.query!(conn, "delete from test", [])
assert_receive {:socket_push, :text, data}, 5000
- message =
- data
- |> IO.iodata_to_binary()
- |> Jason.decode!()
-
assert %{
"event" => "postgres_changes",
"payload" => %{
"data" => %{
- "columns" => [%{"name" => "id", "type" => "int4"}, %{"name" => "details", "type" => "text"}],
+ "columns" => [
+ %{"name" => "id", "type" => "int4"},
+ %{"name" => "details", "type" => "text"},
+ %{"name" => "binary_data", "type" => "bytea"}
+ ],
"commit_timestamp" => _,
"errors" => nil,
- "record" => %{"details" => "test", "id" => ^id},
+ "record" => %{"details" => "test", "id" => ^id, "binary_data" => nil},
"schema" => "public",
"table" => "test",
"type" => "INSERT"
@@ -401,17 +404,141 @@ defmodule Realtime.Extensions.CdcRlsTest do
},
"ref" => nil,
"topic" => "realtime:test"
- } = message
+ } = Jason.decode!(data)
- # Wait for RateCounter to update
- Process.sleep(2000)
+ assert_receive {:socket_push, :text, data}, 5000
- rate = Realtime.Tenants.db_events_per_second_rate(tenant)
+ assert %{
+ "event" => "postgres_changes",
+ "payload" => %{
+ "data" => %{
+ "columns" => [
+ %{"name" => "id", "type" => "int4"},
+ %{"name" => "details", "type" => "text"},
+ %{"name" => "binary_data", "type" => "bytea"}
+ ],
+ "commit_timestamp" => _,
+ "errors" => nil,
+ "type" => "DELETE",
+ "old_record" => %{"id" => ^id},
+ "schema" => "public",
+ "table" => "test"
+ },
+ "ids" => _
+ },
+ "ref" => nil,
+ "topic" => "realtime:test"
+ } = Jason.decode!(data)
+
+ assert_receive {
+ :telemetry,
+ [:realtime, :rpc],
+ %{latency: _},
+ %{
+ mechanism: :gen_rpc,
+ origin_node: _,
+ success: true,
+ target_node: ^node
+ }
+ }
+ end
+
+ test "subscription error rate limit", %{tenant: tenant, node: node} do
+ %Tenant{extensions: extensions, external_id: external_id} = tenant
+ postgres_extension = PostgresCdc.filter_settings("postgres_cdc_rls", extensions)
- assert {:ok, %RateCounter{id: {:channel, :db_events, "dev_tenant"}, bucket: bucket}} = RateCounter.get(rate)
- assert 1 in bucket
+ pg_change_params = pubsub_subscribe(external_id)
- :erpc.call(node, PostgresCdcRls, :handle_stop, [tenant.external_id, 10_000])
+ # Grab a process that is not alive to cause subscriptions to error out
+ pid = :erpc.call(node, :erlang, :self, [])
+
+ # Now subscribe to the Postgres Changes multiple times to reach the rate limit
+ for _ <- 1..6 do
+ assert {:error, "Too many database timeouts"} =
+ PostgresCdcRls.handle_after_connect({pid, pid}, postgres_extension, pg_change_params, external_id)
+ end
+
+ rate = Realtime.Tenants.subscription_errors_per_second_rate(external_id, 4)
+
+ assert {:ok, %RateCounter{id: {:channel, :subscription_errors, ^external_id}, sum: 6, limit: %{triggered: true}}} =
+ RateCounterHelper.tick!(rate)
+
+ # It won't even be called now
+ reject(&Realtime.GenRpc.call/5)
+
+ assert {:error, "Too many database timeouts"} =
+ PostgresCdcRls.handle_after_connect({pid, pid}, postgres_extension, pg_change_params, external_id)
end
end
+
+ defp integration(_) do
+ tenant = Containers.checkout_tenant(run_migrations: true)
+ {:ok, conn} = Database.connect(tenant, "realtime_test")
+ Integrations.setup_postgres_changes(conn)
+
+ on_exit(fn -> RateCounterHelper.stop(tenant.external_id) end)
+ on_exit(fn -> :telemetry.detach(__MODULE__) end)
+
+ :telemetry.attach_many(
+ __MODULE__,
+ [[:realtime, :tenants, :payload, :size], [:realtime, :rpc]],
+ &__MODULE__.handle_telemetry/4,
+ pid: self()
+ )
+
+ RealtimeWeb.Endpoint.subscribe(Realtime.Syn.PostgresCdc.syn_topic(tenant.external_id))
+
+ %{tenant: tenant, conn: conn}
+ end
+
+ defp distributed_integration(_) do
+ tenant = Containers.checkout_tenant_unboxed(run_migrations: true)
+ {:ok, conn} = Database.connect(tenant, "realtime_test")
+ Integrations.setup_postgres_changes(conn)
+
+ on_exit(fn -> RateCounterHelper.stop(tenant.external_id) end)
+ on_exit(fn -> :telemetry.detach(__MODULE__) end)
+
+ :telemetry.attach_many(
+ __MODULE__,
+ [[:realtime, :tenants, :payload, :size], [:realtime, :rpc]],
+ &__MODULE__.handle_telemetry/4,
+ pid: self()
+ )
+
+ RealtimeWeb.Endpoint.subscribe(Realtime.Syn.PostgresCdc.syn_topic(tenant.external_id))
+
+ %{tenant: tenant, conn: conn}
+ end
+
+ defp pubsub_subscribe(external_id, event \\ "*") do
+ pg_change_params = [
+ %{
+ id: UUID.uuid1(),
+ params: %{"event" => event, "schema" => "public"},
+ channel_pid: self(),
+ claims: %{
+ "exp" => System.system_time(:second) + 100_000,
+ "iat" => 0,
+ "ref" => "127.0.0.1",
+ "role" => "anon"
+ }
+ }
+ ]
+
+ topic = "realtime:test"
+ serializer = Phoenix.Socket.V1.JSONSerializer
+
+ ids =
+ Enum.map(pg_change_params, fn %{id: id, params: params} ->
+ {UUID.string_to_binary!(id), :erlang.phash2(params)}
+ end)
+
+ subscription_metadata = {:subscriber_fastlane, self(), serializer, ids, topic, true}
+ metadata = [metadata: subscription_metadata]
+ :ok = PostgresCdc.subscribe(PostgresCdcRls, pg_change_params, external_id, metadata)
+ pg_change_params
+ end
+
+ def handle_telemetry(event, measures, metadata, pid: pid), do: send(pid, {:telemetry, event, measures, metadata})
end
diff --git a/test/realtime/extensions/cdc_rls/replication_poller_test.exs b/test/realtime/extensions/cdc_rls/replication_poller_test.exs
index 97d69af62..0fba63a66 100644
--- a/test/realtime/extensions/cdc_rls/replication_poller_test.exs
+++ b/test/realtime/extensions/cdc_rls/replication_poller_test.exs
@@ -1,8 +1,12 @@
-defmodule ReplicationPollerTest do
- use ExUnit.Case, async: false
+defmodule Realtime.Extensions.PostgresCdcRls.ReplicationPollerTest do
+ # Tweaking application env
+ use Realtime.DataCase, async: false
+ use Mimic
+
+ alias Extensions.PostgresCdcRls.MessageDispatcher
alias Extensions.PostgresCdcRls.ReplicationPoller, as: Poller
- import Poller, only: [generate_record: 1]
+ alias Extensions.PostgresCdcRls.Replications
alias Realtime.Adapters.Changes.{
DeletedRecord,
@@ -10,6 +14,284 @@ defmodule ReplicationPollerTest do
UpdatedRecord
}
+ alias Realtime.RateCounter
+
+ alias RealtimeWeb.TenantBroadcaster
+
+ import Poller, only: [generate_record: 1]
+
+ setup :set_mimic_global
+
+ @change_json ~s({"table":"test","type":"INSERT","record":{"id": 34, "details": "test"},"columns":[{"name": "id", "type": "int4"}, {"name": "details", "type": "text"}],"errors":null,"schema":"public","commit_timestamp":"2025-10-13T07:50:28.066Z"})
+
+ describe "poll" do
+ setup do
+ :telemetry.attach(
+ __MODULE__,
+ [:realtime, :replication, :poller, :query, :stop],
+ &__MODULE__.handle_telemetry/4,
+ pid: self()
+ )
+
+ on_exit(fn -> :telemetry.detach(__MODULE__) end)
+
+ tenant = Containers.checkout_tenant(run_migrations: true)
+
+ {:ok, tenant} = Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{"max_events_per_second" => 123})
+
+ subscribers_pids_table = :ets.new(__MODULE__, [:public, :bag])
+ subscribers_nodes_table = :ets.new(__MODULE__, [:public, :set])
+
+ args =
+ hd(tenant.extensions).settings
+ |> Map.put("id", tenant.external_id)
+ |> Map.put("subscribers_pids_table", subscribers_pids_table)
+ |> Map.put("subscribers_nodes_table", subscribers_nodes_table)
+
+ # unless specified it will return empty results
+ empty_results = {:ok, %Postgrex.Result{rows: [], num_rows: 0}}
+ stub(Replications, :list_changes, fn _, _, _, _, _ -> empty_results end)
+
+ %{args: args, tenant: tenant}
+ end
+
+ test "handles no new changes", %{args: args, tenant: tenant} do
+ tenant_id = args["id"]
+ reject(&TenantBroadcaster.pubsub_direct_broadcast/6)
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+ start_link_supervised!({Poller, args})
+
+ assert_receive {
+ :telemetry,
+ [:realtime, :replication, :poller, :query, :stop],
+ %{duration: _},
+ %{tenant: ^tenant_id}
+ },
+ 500
+
+ rate = Realtime.Tenants.db_events_per_second_rate(tenant)
+
+ assert {:ok,
+ %RateCounter{
+ sum: sum,
+ limit: %{
+ value: 123,
+ measurement: :avg,
+ triggered: false
+ }
+ }} = RateCounterHelper.tick!(rate)
+
+ assert sum == 0
+ end
+
+ test "handles new changes with missing ets table", %{args: args, tenant: tenant} do
+ tenant_id = args["id"]
+
+ :ets.delete(args["subscribers_nodes_table"])
+
+ results =
+ build_result([
+ <<71, 36, 83, 212, 168, 9, 17, 240, 165, 186, 118, 202, 193, 157, 232, 187>>,
+ <<251, 188, 190, 118, 168, 119, 17, 240, 188, 87, 118, 202, 193, 157, 232, 187>>
+ ])
+
+ expect(Replications, :list_changes, fn _, _, _, _, _ -> results end)
+ reject(&TenantBroadcaster.pubsub_direct_broadcast/6)
+
+ # Broadcast to the whole cluster due to missing node information
+ expect(TenantBroadcaster, :pubsub_broadcast, fn ^tenant_id,
+ "realtime:postgres:" <> ^tenant_id,
+ {"INSERT", change_json, _sub_ids},
+ MessageDispatcher,
+ :postgres_changes ->
+ assert Jason.decode!(change_json) == Jason.decode!(@change_json)
+ :ok
+ end)
+
+ start_link_supervised!({Poller, args})
+
+ # First poll with changes
+ assert_receive {
+ :telemetry,
+ [:realtime, :replication, :poller, :query, :stop],
+ %{duration: _},
+ %{tenant: ^tenant_id}
+ },
+ 500
+
+ # Second poll without changes
+ assert_receive {
+ :telemetry,
+ [:realtime, :replication, :poller, :query, :stop],
+ %{duration: _},
+ %{tenant: ^tenant_id}
+ },
+ 500
+
+ rate = Realtime.Tenants.db_events_per_second_rate(tenant)
+ assert {:ok, %RateCounter{sum: sum}} = RateCounterHelper.tick!(rate)
+ assert sum == 2
+ end
+
+ test "handles new changes with no subscription nodes", %{args: args, tenant: tenant} do
+ tenant_id = args["id"]
+
+ results =
+ build_result([
+ <<71, 36, 83, 212, 168, 9, 17, 240, 165, 186, 118, 202, 193, 157, 232, 187>>,
+ <<251, 188, 190, 118, 168, 119, 17, 240, 188, 87, 118, 202, 193, 157, 232, 187>>
+ ])
+
+ expect(Replications, :list_changes, fn _, _, _, _, _ -> results end)
+ reject(&TenantBroadcaster.pubsub_direct_broadcast/6)
+
+ # Broadcast to the whole cluster due to missing node information
+ expect(TenantBroadcaster, :pubsub_broadcast, fn ^tenant_id,
+ "realtime:postgres:" <> ^tenant_id,
+ {"INSERT", change_json, _sub_ids},
+ MessageDispatcher,
+ :postgres_changes ->
+ assert Jason.decode!(change_json) == Jason.decode!(@change_json)
+ :ok
+ end)
+
+ start_link_supervised!({Poller, args})
+
+ # First poll with changes
+ assert_receive {
+ :telemetry,
+ [:realtime, :replication, :poller, :query, :stop],
+ %{duration: _},
+ %{tenant: ^tenant_id}
+ },
+ 500
+
+ # Second poll without changes
+ assert_receive {
+ :telemetry,
+ [:realtime, :replication, :poller, :query, :stop],
+ %{duration: _},
+ %{tenant: ^tenant_id}
+ },
+ 500
+
+ rate = Realtime.Tenants.db_events_per_second_rate(tenant)
+ assert {:ok, %RateCounter{sum: sum}} = RateCounterHelper.tick!(rate)
+ assert sum == 2
+ end
+
+ test "handles new changes with missing subscription nodes", %{args: args, tenant: tenant} do
+ tenant_id = args["id"]
+
+ results =
+ build_result([
+ sub1 = <<71, 36, 83, 212, 168, 9, 17, 240, 165, 186, 118, 202, 193, 157, 232, 187>>,
+ <<251, 188, 190, 118, 168, 119, 17, 240, 188, 87, 118, 202, 193, 157, 232, 187>>
+ ])
+
+ # Only one subscription has node information
+ :ets.insert(args["subscribers_nodes_table"], {sub1, node()})
+
+ expect(Replications, :list_changes, fn _, _, _, _, _ -> results end)
+ reject(&TenantBroadcaster.pubsub_direct_broadcast/6)
+
+ # Broadcast to the whole cluster due to missing node information
+ expect(TenantBroadcaster, :pubsub_broadcast, fn ^tenant_id,
+ "realtime:postgres:" <> ^tenant_id,
+ {"INSERT", change_json, _sub_ids},
+ MessageDispatcher,
+ :postgres_changes ->
+ assert Jason.decode!(change_json) == Jason.decode!(@change_json)
+ :ok
+ end)
+
+ start_link_supervised!({Poller, args})
+
+ # First poll with changes
+ assert_receive {
+ :telemetry,
+ [:realtime, :replication, :poller, :query, :stop],
+ %{duration: _},
+ %{tenant: ^tenant_id}
+ },
+ 500
+
+ # Second poll without changes
+ assert_receive {
+ :telemetry,
+ [:realtime, :replication, :poller, :query, :stop],
+ %{duration: _},
+ %{tenant: ^tenant_id}
+ },
+ 500
+
+ rate = Realtime.Tenants.db_events_per_second_rate(tenant)
+ assert {:ok, %RateCounter{sum: sum}} = RateCounterHelper.tick!(rate)
+ assert sum == 2
+ end
+
+ test "handles new changes with subscription nodes information", %{args: args, tenant: tenant} do
+ tenant_id = args["id"]
+
+ results =
+ build_result([
+ sub1 = <<71, 36, 83, 212, 168, 9, 17, 240, 165, 186, 118, 202, 193, 157, 232, 187>>,
+ sub2 = <<251, 188, 190, 118, 168, 119, 17, 240, 188, 87, 118, 202, 193, 157, 232, 187>>,
+ sub3 = <<49, 59, 209, 112, 173, 77, 17, 240, 191, 41, 118, 202, 193, 157, 232, 187>>
+ ])
+
+ # All subscriptions have node information
+ :ets.insert(args["subscribers_nodes_table"], {sub1, node()})
+ :ets.insert(args["subscribers_nodes_table"], {sub2, :"someothernode@127.0.0.1"})
+ :ets.insert(args["subscribers_nodes_table"], {sub3, node()})
+
+ expect(Replications, :list_changes, fn _, _, _, _, _ -> results end)
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+
+ topic = "realtime:postgres:" <> tenant_id
+
+ # Broadcast to the exact nodes only
+ expect(TenantBroadcaster, :pubsub_direct_broadcast, 2, fn
+ _node, ^tenant_id, ^topic, {"INSERT", change_json, _sub_ids}, MessageDispatcher, :postgres_changes ->
+ assert Jason.decode!(change_json) == Jason.decode!(@change_json)
+ :ok
+ end)
+
+ start_link_supervised!({Poller, args})
+
+ # First poll with changes
+ assert_receive {
+ :telemetry,
+ [:realtime, :replication, :poller, :query, :stop],
+ %{duration: _},
+ %{tenant: ^tenant_id}
+ },
+ 500
+
+ # Second poll without changes
+ assert_receive {
+ :telemetry,
+ [:realtime, :replication, :poller, :query, :stop],
+ %{duration: _},
+ %{tenant: ^tenant_id}
+ },
+ 500
+
+ calls = calls(TenantBroadcaster, :pubsub_direct_broadcast, 6)
+
+ assert Enum.count(calls) == 2
+
+ node_subs = Enum.map(calls, fn [node, _, _, {"INSERT", _change_json, sub_ids}, _, _] -> {node, sub_ids} end)
+
+ assert {node(), MapSet.new([sub1, sub3])} in node_subs
+ assert {:"someothernode@127.0.0.1", MapSet.new([sub2])} in node_subs
+
+ rate = Realtime.Tenants.db_events_per_second_rate(tenant)
+ assert {:ok, %RateCounter{sum: sum}} = RateCounterHelper.tick!(rate)
+ assert sum == 3
+ end
+ end
+
@columns [
%{"name" => "id", "type" => "int8"},
%{"name" => "details", "type" => "text"},
@@ -19,272 +301,277 @@ defmodule ReplicationPollerTest do
@ts "2021-11-05T17:20:51.52406+00:00"
@subscription_id "417e76fd-9bc5-4b3e-bd5d-a031389c4a6b"
+ @subscription_ids MapSet.new(["417e76fd-9bc5-4b3e-bd5d-a031389c4a6b"])
+
+ @old_record %{"id" => 12}
+ @record %{"details" => "test", "id" => 12, "user_id" => 1}
describe "generate_record/1" do
test "INSERT" do
- record = [
- {"wal",
- %{
- "columns" => @columns,
- "commit_timestamp" => @ts,
- "record" => %{"details" => "test", "id" => 12, "user_id" => 1},
- "schema" => "public",
- "table" => "todos",
- "type" => "INSERT"
- }},
- {"is_rls_enabled", false},
+ wal_record = [
+ {"type", "INSERT"},
+ {"schema", "public"},
+ {"table", "todos"},
+ {"columns", Jason.encode!(@columns)},
+ {"record", Jason.encode!(@record)},
+ {"old_record", nil},
+ {"commit_timestamp", @ts},
{"subscription_ids", [@subscription_id]},
{"errors", []}
]
- expected = %NewRecord{
- columns: @columns,
- commit_timestamp: @ts,
- schema: "public",
- table: "todos",
- type: "INSERT",
- subscription_ids: MapSet.new([@subscription_id]),
- record: %{"details" => "test", "id" => 12, "user_id" => 1},
- errors: nil
- }
-
- assert expected == generate_record(record)
+ assert %NewRecord{
+ columns: columns,
+ commit_timestamp: @ts,
+ schema: "public",
+ table: "todos",
+ type: "INSERT",
+ subscription_ids: @subscription_ids,
+ record: record,
+ errors: nil
+ } = generate_record(wal_record)
+
+ # Encode then decode to get rid of the fragment
+ assert record |> Jason.encode!() |> Jason.decode!() == @record
+ assert columns |> Jason.encode!() |> Jason.decode!() == @columns
end
test "UPDATE" do
- record = [
- {"wal",
- %{
- "columns" => @columns,
- "commit_timestamp" => @ts,
- "old_record" => %{"id" => 12},
- "record" => %{"details" => "test1", "id" => 12, "user_id" => 1},
- "schema" => "public",
- "table" => "todos",
- "type" => "UPDATE"
- }},
- {"is_rls_enabled", false},
+ wal_record = [
+ {"type", "UPDATE"},
+ {"schema", "public"},
+ {"table", "todos"},
+ {"columns", Jason.encode!(@columns)},
+ {"record", Jason.encode!(@record)},
+ {"old_record", Jason.encode!(@old_record)},
+ {"commit_timestamp", @ts},
{"subscription_ids", [@subscription_id]},
{"errors", []}
]
- expected = %UpdatedRecord{
- columns: @columns,
- commit_timestamp: @ts,
- schema: "public",
- table: "todos",
- type: "UPDATE",
- subscription_ids: MapSet.new([@subscription_id]),
- old_record: %{"id" => 12},
- record: %{"details" => "test1", "id" => 12, "user_id" => 1},
- errors: nil
- }
-
- assert expected == generate_record(record)
+ assert %UpdatedRecord{
+ columns: columns,
+ commit_timestamp: @ts,
+ schema: "public",
+ table: "todos",
+ type: "UPDATE",
+ subscription_ids: @subscription_ids,
+ record: record,
+ old_record: old_record,
+ errors: nil
+ } = generate_record(wal_record)
+
+ # Encode then decode to get rid of the fragment
+ assert record |> Jason.encode!() |> Jason.decode!() == @record
+ assert old_record |> Jason.encode!() |> Jason.decode!() == @old_record
+ assert columns |> Jason.encode!() |> Jason.decode!() == @columns
end
test "DELETE" do
- record = [
- {"wal",
- %{
- "columns" => @columns,
- "commit_timestamp" => @ts,
- "old_record" => %{"id" => 15},
- "schema" => "public",
- "table" => "todos",
- "type" => "DELETE"
- }},
- {"is_rls_enabled", false},
+ wal_record = [
+ {"type", "DELETE"},
+ {"schema", "public"},
+ {"table", "todos"},
+ {"columns", Jason.encode!(@columns)},
+ {"record", nil},
+ {"old_record", Jason.encode!(@old_record)},
+ {"commit_timestamp", @ts},
{"subscription_ids", [@subscription_id]},
{"errors", []}
]
- expected = %DeletedRecord{
- columns: @columns,
- commit_timestamp: @ts,
- schema: "public",
- table: "todos",
- type: "DELETE",
- subscription_ids: MapSet.new([@subscription_id]),
- old_record: %{"id" => 15},
- errors: nil
- }
-
- assert expected == generate_record(record)
+ assert %DeletedRecord{
+ columns: columns,
+ commit_timestamp: @ts,
+ schema: "public",
+ table: "todos",
+ type: "DELETE",
+ subscription_ids: @subscription_ids,
+ old_record: old_record,
+ errors: nil
+ } = generate_record(wal_record)
+
+ # Encode then decode to get rid of the fragment
+ assert old_record |> Jason.encode!() |> Jason.decode!() == @old_record
+ assert columns |> Jason.encode!() |> Jason.decode!() == @columns
end
test "INSERT, large payload error present" do
- record = [
- {"wal",
- %{
- "columns" => @columns,
- "commit_timestamp" => @ts,
- "record" => %{"details" => "test", "id" => 12, "user_id" => 1},
- "schema" => "public",
- "table" => "todos",
- "type" => "INSERT"
- }},
- {"is_rls_enabled", false},
+ wal_record = [
+ {"type", "INSERT"},
+ {"schema", "public"},
+ {"table", "todos"},
+ {"columns", Jason.encode!(@columns)},
+ {"record", Jason.encode!(@record)},
+ {"old_record", nil},
+ {"commit_timestamp", @ts},
{"subscription_ids", [@subscription_id]},
{"errors", ["Error 413: Payload Too Large"]}
]
- expected = %NewRecord{
- columns: @columns,
- commit_timestamp: @ts,
- schema: "public",
- table: "todos",
- type: "INSERT",
- subscription_ids: MapSet.new([@subscription_id]),
- record: %{"details" => "test", "id" => 12, "user_id" => 1},
- errors: ["Error 413: Payload Too Large"]
- }
-
- assert expected == generate_record(record)
+ assert %NewRecord{
+ columns: columns,
+ commit_timestamp: @ts,
+ schema: "public",
+ table: "todos",
+ type: "INSERT",
+ subscription_ids: @subscription_ids,
+ record: record,
+ errors: ["Error 413: Payload Too Large"]
+ } = generate_record(wal_record)
+
+ # Encode then decode to get rid of the fragment
+ assert record |> Jason.encode!() |> Jason.decode!() == @record
+ assert columns |> Jason.encode!() |> Jason.decode!() == @columns
end
test "INSERT, other errors present" do
- record = [
- {"wal",
- %{
- "schema" => "public",
- "table" => "todos",
- "type" => "INSERT"
- }},
- {"is_rls_enabled", false},
+ wal_record = [
+ {"type", "INSERT"},
+ {"schema", "public"},
+ {"table", "todos"},
+ {"columns", Jason.encode!(@columns)},
+ {"record", Jason.encode!(@record)},
+ {"old_record", nil},
+ {"commit_timestamp", @ts},
{"subscription_ids", [@subscription_id]},
{"errors", ["Error..."]}
]
- expected = %NewRecord{
- columns: [],
- commit_timestamp: nil,
- schema: "public",
- table: "todos",
- type: "INSERT",
- subscription_ids: MapSet.new([@subscription_id]),
- record: %{},
- errors: ["Error..."]
- }
-
- assert expected == generate_record(record)
+ assert %NewRecord{
+ columns: columns,
+ commit_timestamp: @ts,
+ schema: "public",
+ table: "todos",
+ type: "INSERT",
+ subscription_ids: @subscription_ids,
+ record: record,
+ errors: ["Error..."]
+ } = generate_record(wal_record)
+
+ # Encode then decode to get rid of the fragment
+ assert record |> Jason.encode!() |> Jason.decode!() == @record
+ assert columns |> Jason.encode!() |> Jason.decode!() == @columns
end
test "UPDATE, large payload error present" do
- record = [
- {"wal",
- %{
- "columns" => @columns,
- "commit_timestamp" => @ts,
- "old_record" => %{"details" => "prev test", "id" => 12, "user_id" => 1},
- "record" => %{"details" => "test", "id" => 12, "user_id" => 1},
- "schema" => "public",
- "table" => "todos",
- "type" => "UPDATE"
- }},
- {"is_rls_enabled", false},
+ wal_record = [
+ {"type", "UPDATE"},
+ {"schema", "public"},
+ {"table", "todos"},
+ {"columns", Jason.encode!(@columns)},
+ {"record", Jason.encode!(@record)},
+ {"old_record", Jason.encode!(@old_record)},
+ {"commit_timestamp", @ts},
{"subscription_ids", [@subscription_id]},
{"errors", ["Error 413: Payload Too Large"]}
]
- expected = %UpdatedRecord{
- columns: @columns,
- commit_timestamp: @ts,
- schema: "public",
- table: "todos",
- type: "UPDATE",
- subscription_ids: MapSet.new([@subscription_id]),
- old_record: %{"details" => "prev test", "id" => 12, "user_id" => 1},
- record: %{"details" => "test", "id" => 12, "user_id" => 1},
- errors: ["Error 413: Payload Too Large"]
- }
-
- assert expected == generate_record(record)
+ assert %UpdatedRecord{
+ columns: columns,
+ commit_timestamp: @ts,
+ schema: "public",
+ table: "todos",
+ type: "UPDATE",
+ subscription_ids: @subscription_ids,
+ record: record,
+ old_record: old_record,
+ errors: ["Error 413: Payload Too Large"]
+ } = generate_record(wal_record)
+
+ # Encode then decode to get rid of the fragment
+ assert record |> Jason.encode!() |> Jason.decode!() == @record
+ assert old_record |> Jason.encode!() |> Jason.decode!() == @old_record
+ assert columns |> Jason.encode!() |> Jason.decode!() == @columns
end
test "UPDATE, other errors present" do
- record = [
- {"wal",
- %{
- "schema" => "public",
- "table" => "todos",
- "type" => "UPDATE"
- }},
- {"is_rls_enabled", false},
+ wal_record = [
+ {"type", "UPDATE"},
+ {"schema", "public"},
+ {"table", "todos"},
+ {"columns", Jason.encode!(@columns)},
+ {"record", Jason.encode!(@record)},
+ {"old_record", Jason.encode!(@old_record)},
+ {"commit_timestamp", @ts},
{"subscription_ids", [@subscription_id]},
{"errors", ["Error..."]}
]
- expected = %UpdatedRecord{
- columns: [],
- commit_timestamp: nil,
- schema: "public",
- table: "todos",
- type: "UPDATE",
- subscription_ids: MapSet.new([@subscription_id]),
- old_record: %{},
- record: %{},
- errors: ["Error..."]
- }
-
- assert expected == generate_record(record)
+ assert %UpdatedRecord{
+ columns: columns,
+ commit_timestamp: @ts,
+ schema: "public",
+ table: "todos",
+ type: "UPDATE",
+ subscription_ids: @subscription_ids,
+ record: record,
+ old_record: old_record,
+ errors: ["Error..."]
+ } = generate_record(wal_record)
+
+ # Encode then decode to get rid of the fragment
+ assert record |> Jason.encode!() |> Jason.decode!() == @record
+ assert old_record |> Jason.encode!() |> Jason.decode!() == @old_record
+ assert columns |> Jason.encode!() |> Jason.decode!() == @columns
end
test "DELETE, large payload error present" do
- record = [
- {"wal",
- %{
- "columns" => @columns,
- "commit_timestamp" => @ts,
- "old_record" => %{"details" => "test", "id" => 12, "user_id" => 1},
- "schema" => "public",
- "table" => "todos",
- "type" => "DELETE"
- }},
- {"is_rls_enabled", false},
+ wal_record = [
+ {"type", "DELETE"},
+ {"schema", "public"},
+ {"table", "todos"},
+ {"columns", Jason.encode!(@columns)},
+ {"record", nil},
+ {"old_record", Jason.encode!(@old_record)},
+ {"commit_timestamp", @ts},
{"subscription_ids", [@subscription_id]},
{"errors", ["Error 413: Payload Too Large"]}
]
- expected = %DeletedRecord{
- columns: @columns,
- commit_timestamp: @ts,
- schema: "public",
- table: "todos",
- type: "DELETE",
- subscription_ids: MapSet.new([@subscription_id]),
- old_record: %{"details" => "test", "id" => 12, "user_id" => 1},
- errors: ["Error 413: Payload Too Large"]
- }
-
- assert expected == generate_record(record)
+ assert %DeletedRecord{
+ columns: columns,
+ commit_timestamp: @ts,
+ schema: "public",
+ table: "todos",
+ type: "DELETE",
+ subscription_ids: @subscription_ids,
+ old_record: old_record,
+ errors: ["Error 413: Payload Too Large"]
+ } = generate_record(wal_record)
+
+ # Encode then decode to get rid of the fragment
+ assert old_record |> Jason.encode!() |> Jason.decode!() == @old_record
+ assert columns |> Jason.encode!() |> Jason.decode!() == @columns
end
test "DELETE, other errors present" do
- record = [
- {"wal",
- %{
- "schema" => "public",
- "table" => "todos",
- "type" => "DELETE"
- }},
- {"is_rls_enabled", false},
+ wal_record = [
+ {"type", "DELETE"},
+ {"schema", "public"},
+ {"table", "todos"},
+ {"columns", Jason.encode!(@columns)},
+ {"record", nil},
+ {"old_record", Jason.encode!(@old_record)},
+ {"commit_timestamp", @ts},
{"subscription_ids", [@subscription_id]},
{"errors", ["Error..."]}
]
- expected = %DeletedRecord{
- columns: [],
- commit_timestamp: nil,
- schema: "public",
- table: "todos",
- type: "DELETE",
- subscription_ids: MapSet.new([@subscription_id]),
- old_record: %{},
- errors: ["Error..."]
- }
-
- assert expected == generate_record(record)
+ assert %DeletedRecord{
+ columns: columns,
+ commit_timestamp: @ts,
+ schema: "public",
+ table: "todos",
+ type: "DELETE",
+ subscription_ids: @subscription_ids,
+ old_record: old_record,
+ errors: ["Error..."]
+ } = generate_record(wal_record)
+
+ # Encode then decode to get rid of the fragment
+ assert old_record |> Jason.encode!() |> Jason.decode!() == @old_record
+ assert columns |> Jason.encode!() |> Jason.decode!() == @columns
end
end
@@ -305,4 +592,40 @@ defmodule ReplicationPollerTest do
assert Poller.slot_name_suffix() == ""
end
end
+
+ def handle_telemetry(event, measures, metadata, pid: pid), do: send(pid, {:telemetry, event, measures, metadata})
+
+ defp build_result(subscription_ids) do
+ {:ok,
+ %Postgrex.Result{
+ command: :select,
+ columns: [
+ "type",
+ "schema",
+ "table",
+ "columns",
+ "record",
+ "old_record",
+ "commit_timestamp",
+ "subscription_ids",
+ "errors"
+ ],
+ rows: [
+ [
+ "INSERT",
+ "public",
+ "test",
+ "[{\"name\": \"id\", \"type\": \"int4\"}, {\"name\": \"details\", \"type\": \"text\"}]",
+ "{\"id\": 34, \"details\": \"test\"}",
+ nil,
+ "2025-10-13T07:50:28.066Z",
+ subscription_ids,
+ []
+ ]
+ ],
+ num_rows: 1,
+ connection_id: 123,
+ messages: []
+ }}
+ end
end
diff --git a/test/realtime/extensions/cdc_rls/subscription_manager_test.exs b/test/realtime/extensions/cdc_rls/subscription_manager_test.exs
new file mode 100644
index 000000000..b7fe94937
--- /dev/null
+++ b/test/realtime/extensions/cdc_rls/subscription_manager_test.exs
@@ -0,0 +1,166 @@
+defmodule Realtime.Extensions.CdcRls.SubscriptionManagerTest do
+ use Realtime.DataCase, async: true
+
+ alias Extensions.PostgresCdcRls
+ alias Extensions.PostgresCdcRls.SubscriptionManager
+ alias Extensions.PostgresCdcRls.Subscriptions
+
+ setup do
+ tenant = Containers.checkout_tenant(run_migrations: true)
+ {:ok, db_conn} = Realtime.Database.connect(tenant, "realtime_test", :stop)
+ Integrations.setup_postgres_changes(db_conn)
+ GenServer.stop(db_conn)
+ Realtime.Tenants.Cache.update_cache(tenant)
+
+ subscribers_pids_table = :ets.new(__MODULE__, [:public, :bag])
+ subscribers_nodes_table = :ets.new(__MODULE__, [:public, :set])
+
+ args = %{
+ "id" => tenant.external_id,
+ "subscribers_nodes_table" => subscribers_nodes_table,
+ "subscribers_pids_table" => subscribers_pids_table
+ }
+
+ publication = "supabase_realtime_test"
+
+ # register this process with syn as if this was the WorkersSupervisor
+
+ scope = Realtime.Syn.PostgresCdc.scope(tenant.external_id)
+ :syn.register(scope, tenant.external_id, self(), %{region: "us-east-1", manager: nil, subs_pool: nil})
+
+ {:ok, pid} = SubscriptionManager.start_link(args)
+    # :sys.get_state/1 blocks until handle_continue has finished
+ :sys.get_state(pid)
+ %{args: args, pid: pid, publication: publication}
+ end
+
+ describe "subscription" do
+ test "subscription", %{pid: pid, args: args, publication: publication} do
+ {:ok, ^pid, conn} = PostgresCdcRls.get_manager_conn(args["id"])
+ {uuid, bin_uuid, pg_change_params} = pg_change_params()
+
+ subscriber = self()
+
+ assert {:ok, [%Postgrex.Result{command: :insert, columns: ["id"], rows: [[1]], num_rows: 1}]} =
+ Subscriptions.create(conn, publication, [pg_change_params], pid, subscriber)
+
+ # Wait for subscription manager to process the :subscribed message
+ :sys.get_state(pid)
+
+ node = node()
+
+ assert [{^subscriber, ^uuid, _ref, ^node}] = :ets.tab2list(args["subscribers_pids_table"])
+
+ assert :ets.tab2list(args["subscribers_nodes_table"]) == [{bin_uuid, node}]
+ end
+
+ test "subscriber died", %{pid: pid, args: args, publication: publication} do
+ {:ok, ^pid, conn} = PostgresCdcRls.get_manager_conn(args["id"])
+ self = self()
+
+ subscriber =
+ spawn(fn ->
+ receive do
+ :stop -> :ok
+ end
+ end)
+
+ {uuid1, bin_uuid1, pg_change_params1} = pg_change_params()
+ {uuid2, bin_uuid2, pg_change_params2} = pg_change_params()
+ {uuid3, bin_uuid3, pg_change_params3} = pg_change_params()
+
+ assert {:ok, _} =
+ Subscriptions.create(conn, publication, [pg_change_params1, pg_change_params2], pid, subscriber)
+
+ assert {:ok, _} = Subscriptions.create(conn, publication, [pg_change_params3], pid, self())
+
+ # Wait for subscription manager to process the :subscribed message
+ :sys.get_state(pid)
+
+ node = node()
+
+ assert :ets.info(args["subscribers_pids_table"], :size) == 3
+
+ assert [{^subscriber, ^uuid1, _, ^node}, {^subscriber, ^uuid2, _, ^node}] =
+ :ets.lookup(args["subscribers_pids_table"], subscriber)
+
+ assert [{^self, ^uuid3, _ref, ^node}] = :ets.lookup(args["subscribers_pids_table"], self)
+
+ assert :ets.info(args["subscribers_nodes_table"], :size) == 3
+ assert [{^bin_uuid1, ^node}] = :ets.lookup(args["subscribers_nodes_table"], bin_uuid1)
+ assert [{^bin_uuid2, ^node}] = :ets.lookup(args["subscribers_nodes_table"], bin_uuid2)
+ assert [{^bin_uuid3, ^node}] = :ets.lookup(args["subscribers_nodes_table"], bin_uuid3)
+
+ send(subscriber, :stop)
+ # Wait for subscription manager to receive the :DOWN message
+ Process.sleep(200)
+
+ # Only the subscription we have not stopped should remain
+
+ assert [{^self, ^uuid3, _ref, ^node}] = :ets.tab2list(args["subscribers_pids_table"])
+ assert [{^bin_uuid3, ^node}] = :ets.tab2list(args["subscribers_nodes_table"])
+ end
+ end
+
+ describe "subscription deletion" do
+ test "subscription is deleted when process goes away", %{pid: pid, args: args, publication: publication} do
+ {:ok, ^pid, conn} = PostgresCdcRls.get_manager_conn(args["id"])
+ {_uuid, _bin_uuid, pg_change_params} = pg_change_params()
+
+ subscriber =
+ spawn(fn ->
+ receive do
+ :stop -> :ok
+ end
+ end)
+
+ assert {:ok, [%Postgrex.Result{command: :insert, columns: ["id"], rows: [[1]], num_rows: 1}]} =
+ Subscriptions.create(conn, publication, [pg_change_params], pid, subscriber)
+
+ # Wait for subscription manager to process the :subscribed message
+ :sys.get_state(pid)
+
+ assert :ets.info(args["subscribers_pids_table"], :size) == 1
+ assert :ets.info(args["subscribers_nodes_table"], :size) == 1
+
+ assert %Postgrex.Result{rows: [[1]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+
+ send(subscriber, :stop)
+ # Wait for subscription manager to receive the :DOWN message
+ Process.sleep(200)
+
+ assert :ets.info(args["subscribers_pids_table"], :size) == 0
+ assert :ets.info(args["subscribers_nodes_table"], :size) == 0
+
+ # Force check delete queue on manager
+ send(pid, :check_delete_queue)
+ Process.sleep(200)
+ end
+ end
+
+ describe "check no users" do
+ test "exit is sent to manager", %{pid: pid} do
+ :sys.replace_state(pid, fn state -> %{state | no_users_ts: 0} end)
+
+ send(pid, :check_no_users)
+
+ assert_receive {:system, {^pid, _}, {:terminate, :shutdown}}
+ end
+ end
+
+ defp pg_change_params do
+ uuid = UUID.uuid1()
+
+ pg_change_params = %{
+ id: uuid,
+ subscription_params: {"*", "public", "*", []},
+ claims: %{
+ "exp" => System.system_time(:second) + 100_000,
+ "iat" => 0,
+ "role" => "anon"
+ }
+ }
+
+ {uuid, UUID.string_to_binary!(uuid), pg_change_params}
+ end
+end
diff --git a/test/realtime/extensions/cdc_rls/subscriptions_checker_distributed_test.exs b/test/realtime/extensions/cdc_rls/subscriptions_checker_distributed_test.exs
new file mode 100644
index 000000000..3b459e6c1
--- /dev/null
+++ b/test/realtime/extensions/cdc_rls/subscriptions_checker_distributed_test.exs
@@ -0,0 +1,66 @@
+defmodule Realtime.Extensions.CdcRls.SubscriptionsCheckerDistributedTest do
+ # Usage of Clustered
+ use ExUnit.Case, async: false
+ import ExUnit.CaptureLog
+
+ alias Extensions.PostgresCdcRls.SubscriptionsChecker, as: Checker
+
+ setup do
+ {:ok, peer, remote_node} = Clustered.start_disconnected()
+ true = Node.connect(remote_node)
+ {:ok, peer: peer, remote_node: remote_node}
+ end
+
+ describe "not_alive_pids_dist/1" do
+ test "returns empty list for all alive PIDs", %{remote_node: remote_node} do
+ assert Checker.not_alive_pids_dist(%{}) == []
+
+ pid1 = spawn(fn -> Process.sleep(5000) end)
+ pid2 = spawn(fn -> Process.sleep(5000) end)
+ pid3 = spawn(fn -> Process.sleep(5000) end)
+ pid4 = Node.spawn(remote_node, Process, :sleep, [5000])
+
+ assert Checker.not_alive_pids_dist(%{node() => MapSet.new([pid1, pid2, pid3]), remote_node => MapSet.new([pid4])}) ==
+ []
+ end
+
+ test "returns list of dead PIDs", %{remote_node: remote_node} do
+ pid1 = spawn(fn -> Process.sleep(5000) end)
+ pid2 = spawn(fn -> Process.sleep(5000) end)
+ pid3 = spawn(fn -> Process.sleep(5000) end)
+ pid4 = Node.spawn(remote_node, Process, :sleep, [5000])
+ pid5 = Node.spawn(remote_node, Process, :sleep, [5000])
+
+ Process.exit(pid2, :kill)
+ Process.exit(pid5, :kill)
+
+ assert Checker.not_alive_pids_dist(%{
+ node() => MapSet.new([pid1, pid2, pid3]),
+ remote_node => MapSet.new([pid4, pid5])
+ }) == [pid2, pid5]
+ end
+
+ test "handles rpc error", %{remote_node: remote_node, peer: peer} do
+ pid1 = spawn(fn -> Process.sleep(5000) end)
+ pid2 = spawn(fn -> Process.sleep(5000) end)
+ pid3 = spawn(fn -> Process.sleep(5000) end)
+ pid4 = Node.spawn(remote_node, Process, :sleep, [5000])
+ pid5 = Node.spawn(remote_node, Process, :sleep, [5000])
+
+ Process.exit(pid2, :kill)
+
+ # Stop the other node
+ :peer.stop(peer)
+
+ log =
+ capture_log(fn ->
+ assert Checker.not_alive_pids_dist(%{
+ node() => MapSet.new([pid1, pid2, pid3]),
+ remote_node => MapSet.new([pid4, pid5])
+ }) == [pid2]
+ end)
+
+ assert log =~ "UnableToCheckProcessesOnRemoteNode"
+ end
+ end
+end
diff --git a/test/realtime/extensions/cdc_rls/subscriptions_checker_test.exs b/test/realtime/extensions/cdc_rls/subscriptions_checker_test.exs
index bfbb4bd7a..db39678ac 100644
--- a/test/realtime/extensions/cdc_rls/subscriptions_checker_test.exs
+++ b/test/realtime/extensions/cdc_rls/subscriptions_checker_test.exs
@@ -1,9 +1,10 @@
-defmodule SubscriptionsCheckerTest do
+defmodule Realtime.Extensions.PostgresCdcRls.SubscriptionsCheckerTest do
use ExUnit.Case, async: true
alias Extensions.PostgresCdcRls.SubscriptionsChecker, as: Checker
+ import UUID, only: [uuid1: 0, string_to_binary!: 1]
test "subscribers_by_node/1" do
- tid = :ets.new(:table, [:public, :bag])
+ subscribers_pids_table = :ets.new(:table, [:public, :bag])
test_data = [
{:pid1, "id1", :ref, :node1},
@@ -11,9 +12,9 @@ defmodule SubscriptionsCheckerTest do
{:pid2, "id2", :ref, :node2}
]
- :ets.insert(tid, test_data)
+ :ets.insert(subscribers_pids_table, test_data)
- assert Checker.subscribers_by_node(tid) == %{
+ assert Checker.subscribers_by_node(subscribers_pids_table) == %{
node1: MapSet.new([:pid1]),
node2: MapSet.new([:pid2])
}
@@ -40,41 +41,66 @@ defmodule SubscriptionsCheckerTest do
end
end
- describe "pop_not_alive_pids/2" do
+ describe "pop_not_alive_pids/4" do
test "one subscription per channel" do
- tid = :ets.new(:table, [:public, :bag])
+ subscribers_pids_table = :ets.new(:table, [:public, :bag])
+ subscribers_nodes_table = :ets.new(:table, [:public, :set])
- uuid1 = UUID.uuid1()
- uuid2 = UUID.uuid1()
+ uuid1 = uuid1()
+ uuid2 = uuid1()
+ uuid3 = uuid1()
- test_data = [
+ pids_test_data = [
{:pid1, uuid1, :ref, :node1},
{:pid1, uuid2, :ref, :node1},
- {:pid2, "uuid", :ref, :node2}
+ {:pid2, uuid3, :ref, :node2}
]
- :ets.insert(tid, test_data)
+ :ets.insert(subscribers_pids_table, pids_test_data)
+
+ nodes_test_data = [
+ {string_to_binary!(uuid1), :node1},
+ {string_to_binary!(uuid2), :node1},
+ {string_to_binary!(uuid3), :node2}
+ ]
- not_alive = Enum.sort(Checker.pop_not_alive_pids([:pid1], tid, "id"))
- expected = Enum.sort([UUID.string_to_binary!(uuid1), UUID.string_to_binary!(uuid2)])
+ :ets.insert(subscribers_nodes_table, nodes_test_data)
+
+ not_alive = Enum.sort(Checker.pop_not_alive_pids([:pid1], subscribers_pids_table, subscribers_nodes_table, "id"))
+ expected = Enum.sort([string_to_binary!(uuid1), string_to_binary!(uuid2)])
assert not_alive == expected
- assert :ets.tab2list(tid) == [{:pid2, "uuid", :ref, :node2}]
+ assert :ets.tab2list(subscribers_pids_table) == [{:pid2, uuid3, :ref, :node2}]
+ assert :ets.tab2list(subscribers_nodes_table) == [{string_to_binary!(uuid3), :node2}]
end
test "two subscriptions per channel" do
- tid = :ets.new(:table, [:public, :bag])
+ subscribers_pids_table = :ets.new(:table, [:public, :bag])
+ subscribers_nodes_table = :ets.new(:table, [:public, :set])
- uuid1 = UUID.uuid1()
+ uuid1 = uuid1()
+ uuid2 = uuid1()
test_data = [
{:pid1, uuid1, :ref, :node1},
- {:pid2, "uuid", :ref, :node2}
+ {:pid2, uuid2, :ref, :node2}
]
- :ets.insert(tid, test_data)
- assert Checker.pop_not_alive_pids([:pid1], tid, "id") == [UUID.string_to_binary!(uuid1)]
- assert :ets.tab2list(tid) == [{:pid2, "uuid", :ref, :node2}]
+ :ets.insert(subscribers_pids_table, test_data)
+
+ nodes_test_data = [
+ {string_to_binary!(uuid1), :node1},
+ {string_to_binary!(uuid2), :node2}
+ ]
+
+ :ets.insert(subscribers_nodes_table, nodes_test_data)
+
+ assert Checker.pop_not_alive_pids([:pid1], subscribers_pids_table, subscribers_nodes_table, "id") == [
+ string_to_binary!(uuid1)
+ ]
+
+ assert :ets.tab2list(subscribers_pids_table) == [{:pid2, uuid2, :ref, :node2}]
+ assert :ets.tab2list(subscribers_nodes_table) == [{string_to_binary!(uuid2), :node2}]
end
end
end
diff --git a/test/realtime/extensions/cdc_rls/subscriptions_test.exs b/test/realtime/extensions/cdc_rls/subscriptions_test.exs
index cb53b72ed..ffa7454eb 100644
--- a/test/realtime/extensions/cdc_rls/subscriptions_test.exs
+++ b/test/realtime/extensions/cdc_rls/subscriptions_test.exs
@@ -1,13 +1,13 @@
-defmodule Realtime.Extensionsubscriptions.CdcRlsSubscriptionsTest do
+defmodule Realtime.Extensions.PostgresCdcRls.SubscriptionsTest do
use RealtimeWeb.ChannelCase, async: true
- doctest Extensions.PostgresCdcRls.Subscriptions
+
+ doctest Extensions.PostgresCdcRls.Subscriptions, import: true
alias Extensions.PostgresCdcRls.Subscriptions
alias Realtime.Database
- alias Realtime.Tenants
setup do
- tenant = Tenants.get_tenant_by_external_id("dev_tenant")
+ tenant = Containers.checkout_tenant(run_migrations: true)
{:ok, conn} =
tenant
@@ -16,106 +16,225 @@ defmodule Realtime.Extensionsubscriptions.CdcRlsSubscriptionsTest do
|> Keyword.new()
|> Postgrex.start_link()
+ Integrations.setup_postgres_changes(conn)
+ Subscriptions.delete_all(conn)
+ assert %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+
%{conn: conn}
end
- test "create", %{conn: conn} do
- Subscriptions.delete_all(conn)
+ describe "create/5" do
+ test "create all tables & all events", %{conn: conn} do
+ {:ok, subscription_params} = Subscriptions.parse_subscription_params(%{"event" => "*", "schema" => "public"})
+ params_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), subscription_params: subscription_params}]
- assert %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ assert {:ok, [%Postgrex.Result{}]} =
+ Subscriptions.create(conn, "supabase_realtime_test", params_list, self(), self())
- params_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), params: %{"event" => "*", "schema" => "public"}}]
+ %Postgrex.Result{rows: [[[], "*"]]} =
+ Postgrex.query!(conn, "select filters, action_filter from realtime.subscription", [])
+ end
- assert {:ok, [%Postgrex.Result{}]} =
- Subscriptions.create(conn, "supabase_realtime_test", params_list, self(), self())
+ test "create all tables & all events on INSERT", %{conn: conn} do
+ {:ok, subscription_params} = Subscriptions.parse_subscription_params(%{"event" => "INSERT", "schema" => "public"})
+ params_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), subscription_params: subscription_params}]
- Process.sleep(500)
+ assert {:ok, [%Postgrex.Result{}]} =
+ Subscriptions.create(conn, "supabase_realtime_test", params_list, self(), self())
- params_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), params: %{"schema" => "public", "table" => "test"}}]
+ %Postgrex.Result{rows: [[[], "INSERT"]]} =
+ Postgrex.query!(conn, "select filters, action_filter from realtime.subscription", [])
+ end
- assert {:ok, [%Postgrex.Result{}]} =
- Subscriptions.create(conn, "supabase_realtime_test", params_list, self(), self())
+ test "create specific table all events", %{conn: conn} do
+ {:ok, subscription_params} = Subscriptions.parse_subscription_params(%{"schema" => "public", "table" => "test"})
- Process.sleep(500)
+ subscription_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), subscription_params: subscription_params}]
- params_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), params: %{}}]
+ assert {:ok, [%Postgrex.Result{}]} =
+ Subscriptions.create(conn, "supabase_realtime_test", subscription_list, self(), self())
- assert {:error,
- "No subscription params provided. Please provide at least a `schema` or `table` to subscribe to: %{}"} =
- Subscriptions.create(conn, "supabase_realtime_test", params_list, self(), self())
+ %Postgrex.Result{rows: [[1]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ end
- Process.sleep(500)
+ test "publication does not exist", %{conn: conn} do
+ {:ok, subscription_params} = Subscriptions.parse_subscription_params(%{"schema" => "public", "table" => "test"})
- params_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), params: %{"user_token" => "potato"}}]
+ subscription_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), subscription_params: subscription_params}]
- assert {:error,
- "No subscription params provided. Please provide at least a `schema` or `table` to subscribe to: "} =
- Subscriptions.create(conn, "supabase_realtime_test", params_list, self(), self())
+ Postgrex.query!(conn, "drop publication if exists supabase_realtime_test", [])
- Process.sleep(500)
+ assert {:error,
+ {:subscription_insert_failed,
+ "Unable to subscribe to changes with given parameters. Please check Realtime is enabled for the given connect parameters: [event: *, schema: public, table: test, filters: []]"}} =
+ Subscriptions.create(conn, "supabase_realtime_test", subscription_list, self(), self())
- params_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), params: %{"auth_token" => "potato"}}]
+ %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ end
- assert {:error,
- "No subscription params provided. Please provide at least a `schema` or `table` to subscribe to: "} =
- Subscriptions.create(conn, "supabase_realtime_test", params_list, self(), self())
+ test "table does not exist", %{conn: conn} do
+ {:ok, subscription_params} =
+ Subscriptions.parse_subscription_params(%{"schema" => "public", "table" => "doesnotexist"})
- Process.sleep(500)
+ subscription_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), subscription_params: subscription_params}]
- %Postgrex.Result{rows: [[num]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
- assert num != 0
- end
+ assert {:error,
+ {:subscription_insert_failed,
+ "Unable to subscribe to changes with given parameters. Please check Realtime is enabled for the given connect parameters: [event: *, schema: public, table: doesnotexist, filters: []]"}} =
+ Subscriptions.create(conn, "supabase_realtime_test", subscription_list, self(), self())
- test "delete_all", %{conn: conn} do
- create_subscriptions(conn, 10)
- assert {:ok, %Postgrex.Result{}} = Subscriptions.delete_all(conn)
- assert %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
- end
+ %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ end
- test "delete", %{conn: conn} do
- Subscriptions.delete_all(conn)
- id = UUID.uuid1()
- bin_id = UUID.string_to_binary!(id)
+ test "column does not exist", %{conn: conn} do
+ {:ok, subscription_params} =
+ Subscriptions.parse_subscription_params(%{
+ "schema" => "public",
+ "table" => "test",
+ "filter" => "subject=eq.hey"
+ })
- params_list = [%{id: id, claims: %{"role" => "anon"}, params: %{"event" => "*"}}]
- Subscriptions.create(conn, "supabase_realtime_test", params_list, self(), self())
- Process.sleep(500)
+ subscription_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), subscription_params: subscription_params}]
- assert {:ok, %Postgrex.Result{}} = Subscriptions.delete(conn, bin_id)
- assert %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ assert {:error,
+ {:subscription_insert_failed,
+ "Unable to subscribe to changes with given parameters. An exception happened so please check your connect parameters: [event: *, schema: public, table: test, filters: [{\"subject\", \"eq\", \"hey\"}]]. Exception: ERROR P0001 (raise_exception) invalid column for filter subject"}} =
+ Subscriptions.create(conn, "supabase_realtime_test", subscription_list, self(), self())
+
+ %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ end
+
+ test "column type is wrong", %{conn: conn} do
+ {:ok, subscription_params} =
+ Subscriptions.parse_subscription_params(%{
+ "schema" => "public",
+ "table" => "test",
+ "filter" => "id=eq.hey"
+ })
+
+ subscription_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), subscription_params: subscription_params}]
+
+ assert {:error,
+ {:subscription_insert_failed,
+ "Unable to subscribe to changes with given parameters. An exception happened so please check your connect parameters: [event: *, schema: public, table: test, filters: [{\"id\", \"eq\", \"hey\"}]]. Exception: ERROR 22P02 (invalid_text_representation) invalid input syntax for type integer: \"hey\""}} =
+ Subscriptions.create(conn, "supabase_realtime_test", subscription_list, self(), self())
+
+ %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ end
+
+ test "connection error" do
+ {:ok, subscription_params} =
+ Subscriptions.parse_subscription_params(%{"schema" => "public", "table" => "test"})
+
+ subscription_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), subscription_params: subscription_params}]
+ conn = spawn(fn -> :ok end)
+
+ assert {:error, {:exit, _}} =
+ Subscriptions.create(conn, "supabase_realtime_test", subscription_list, self(), self())
+ end
+
+ test "timeout", %{conn: conn} do
+ {:ok, subscription_params} = Subscriptions.parse_subscription_params(%{"schema" => "public", "table" => "test"})
+
+ Task.start(fn -> Postgrex.query!(conn, "SELECT pg_sleep(20)", []) end)
+
+ subscription_list = [%{claims: %{"role" => "anon"}, id: UUID.uuid1(), subscription_params: subscription_params}]
+
+ assert {:error, %DBConnection.ConnectionError{reason: :queue_timeout}} =
+ Subscriptions.create(conn, "supabase_realtime_test", subscription_list, self(), self())
+ end
+
+ test "invalid table" do
+ {:error,
+ "No subscription params provided. Please provide at least a `schema` or `table` to subscribe to: %{\"schema\" => \"public\", \"table\" => %{\"actually a\" => \"map\"}}"} =
+ Subscriptions.parse_subscription_params(%{"schema" => "public", "table" => %{"actually a" => "map"}})
+ end
+
+ test "invalid schema" do
+ {:error,
+ "No subscription params provided. Please provide at least a `schema` or `table` to subscribe to: %{\"schema\" => %{\"actually a\" => \"map\"}, \"table\" => \"images\"}"} =
+ Subscriptions.parse_subscription_params(%{"table" => "images", "schema" => %{"actually a" => "map"}})
+ end
+
+ test "invalid filter" do
+ {:error,
+ "No subscription params provided. Please provide at least a `schema` or `table` to subscribe to: %{\"filter\" => ~c\"{\", \"schema\" => \"public\", \"table\" => \"images\"}"} =
+ Subscriptions.parse_subscription_params(%{"schema" => "public", "table" => "images", "filter" => [123]})
+ end
end
- test "delete_multi", %{conn: conn} do
- Subscriptions.delete_all(conn)
- id1 = UUID.uuid1()
- id2 = UUID.uuid1()
+ describe "delete_all/1" do
+ test "delete_all", %{conn: conn} do
+ create_subscriptions(conn, 10)
+ assert {:ok, %Postgrex.Result{}} = Subscriptions.delete_all(conn)
+ assert %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ end
+ end
- bin_id2 = UUID.string_to_binary!(id2)
- bin_id1 = UUID.string_to_binary!(id1)
+ describe "delete/2" do
+ test "delete", %{conn: conn} do
+ id = UUID.uuid1()
+ bin_id = UUID.string_to_binary!(id)
- params_list = [
- %{claims: %{"role" => "anon"}, id: id1, params: %{"event" => "*"}},
- %{claims: %{"role" => "anon"}, id: id2, params: %{"event" => "*"}}
- ]
+ {:ok, subscription_params} =
+ Subscriptions.parse_subscription_params(%{
+ "schema" => "public",
+ "table" => "test",
+ "filter" => "id=eq.hey"
+ })
- Subscriptions.create(conn, "supabase_realtime_test", params_list, self(), self())
- Process.sleep(500)
+ subscription_list = [%{claims: %{"role" => "anon"}, id: id, subscription_params: subscription_params}]
+ Subscriptions.create(conn, "supabase_realtime_test", subscription_list, self(), self())
- assert {:ok, %Postgrex.Result{}} = Subscriptions.delete_multi(conn, [bin_id1, bin_id2])
- assert %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ assert {:ok, %Postgrex.Result{}} = Subscriptions.delete(conn, bin_id)
+ assert %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ end
end
- test "maybe_delete_all", %{conn: conn} do
- Subscriptions.delete_all(conn)
- create_subscriptions(conn, 10)
+ describe "delete_multi/2" do
+ test "delete_multi", %{conn: conn} do
+ Subscriptions.delete_all(conn)
+ id1 = UUID.uuid1()
+ id2 = UUID.uuid1()
+
+ bin_id2 = UUID.string_to_binary!(id2)
+ bin_id1 = UUID.string_to_binary!(id1)
+
+ {:ok, subscription_params} =
+ Subscriptions.parse_subscription_params(%{
+ "schema" => "public",
+ "table" => "test",
+ "filter" => "id=eq.123"
+ })
+
+ subscription_list = [
+ %{claims: %{"role" => "anon"}, id: id1, subscription_params: subscription_params},
+ %{claims: %{"role" => "anon"}, id: id2, subscription_params: subscription_params}
+ ]
+
+ assert {:ok, _} = Subscriptions.create(conn, "supabase_realtime_test", subscription_list, self(), self())
+
+ assert %Postgrex.Result{rows: [[2]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ assert {:ok, %Postgrex.Result{}} = Subscriptions.delete_multi(conn, [bin_id1, bin_id2])
+ assert %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ end
+ end
- assert {:ok, %Postgrex.Result{}} = Subscriptions.maybe_delete_all(conn)
- assert %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ describe "maybe_delete_all/1" do
+ test "maybe_delete_all", %{conn: conn} do
+ Subscriptions.delete_all(conn)
+ create_subscriptions(conn, 10)
+
+ assert {:ok, %Postgrex.Result{}} = Subscriptions.maybe_delete_all(conn)
+ assert %Postgrex.Result{rows: [[0]]} = Postgrex.query!(conn, "select count(*) from realtime.subscription", [])
+ end
end
- test "fetch_publication_tables", %{conn: conn} do
- tables = Subscriptions.fetch_publication_tables(conn, "supabase_realtime_test")
- assert tables[{"*"}] != nil
+ describe "fetch_publication_tables/2" do
+ test "fetch_publication_tables", %{conn: conn} do
+ tables = Subscriptions.fetch_publication_tables(conn, "supabase_realtime_test")
+ assert tables[{"*"}] != nil
+ end
end
defp create_subscriptions(conn, num) do
@@ -131,13 +250,12 @@ defmodule Realtime.Extensionsubscriptions.CdcRlsSubscriptionsTest do
"role" => "anon"
},
id: UUID.uuid1(),
- params: %{"event" => "*", "schema" => "public"}
+ subscription_params: {"*", "public", "*", []}
}
| acc
]
end)
Subscriptions.create(conn, "supabase_realtime_test", params_list, self(), self())
- Process.sleep(500)
end
end
diff --git a/test/realtime/gen_rpc_pub_sub/worker_test.exs b/test/realtime/gen_rpc_pub_sub/worker_test.exs
new file mode 100644
index 000000000..880fa5132
--- /dev/null
+++ b/test/realtime/gen_rpc_pub_sub/worker_test.exs
@@ -0,0 +1,71 @@
+defmodule Realtime.GenRpcPubSub.WorkerTest do
+ use ExUnit.Case, async: true
+ alias Realtime.GenRpcPubSub.Worker
+ alias Realtime.GenRpc
+ alias Realtime.Nodes
+
+ use Mimic
+
+ @topic "test_topic"
+
+ setup do
+ worker = start_link_supervised!({Worker, {Realtime.PubSub, __MODULE__}})
+ %{worker: worker}
+ end
+
+ describe "forward to local" do
+ test "local broadcast", %{worker: worker} do
+ :ok = Phoenix.PubSub.subscribe(Realtime.PubSub, @topic)
+ send(worker, Worker.forward_to_local(@topic, "le message", Phoenix.PubSub))
+
+ assert_receive "le message"
+ refute_receive _any
+ end
+ end
+
+ describe "forward to region" do
+ setup %{worker: worker} do
+ GenRpc
+ |> stub()
+ |> allow(self(), worker)
+
+ Nodes
+ |> stub()
+ |> allow(self(), worker)
+
+ :ok
+ end
+
+ test "local broadcast + forward to other nodes", %{worker: worker} do
+ parent = self()
+ expect(Nodes, :region_nodes, fn "us-east-1" -> [node(), :node_us_2, :node_us_3] end)
+
+ expect(GenRpc, :abcast, fn [:node_us_2, :node_us_3],
+ Realtime.GenRpcPubSub.WorkerTest,
+ {:ftl, "test_topic", "le message", Phoenix.PubSub},
+ [] ->
+ send(parent, :abcast_called)
+ :ok
+ end)
+
+ :ok = Phoenix.PubSub.subscribe(Realtime.PubSub, @topic)
+ send(worker, Worker.forward_to_region(@topic, "le message", Phoenix.PubSub))
+
+ assert_receive "le message"
+ assert_receive :abcast_called
+ refute_receive _any
+ end
+
+ test "local broadcast and no other nodes", %{worker: worker} do
+ expect(Nodes, :region_nodes, fn "us-east-1" -> [node()] end)
+
+ reject(GenRpc, :abcast, 4)
+
+ :ok = Phoenix.PubSub.subscribe(Realtime.PubSub, @topic)
+ send(worker, Worker.forward_to_region(@topic, "le message", Phoenix.PubSub))
+
+ assert_receive "le message"
+ refute_receive _any
+ end
+ end
+end
diff --git a/test/realtime/gen_rpc_pub_sub_test.exs b/test/realtime/gen_rpc_pub_sub_test.exs
new file mode 100644
index 000000000..05c363163
--- /dev/null
+++ b/test/realtime/gen_rpc_pub_sub_test.exs
@@ -0,0 +1,128 @@
+Application.put_env(:phoenix_pubsub, :test_adapter, {Realtime.GenRpcPubSub, []})
+Code.require_file("../../deps/phoenix_pubsub/test/shared/pubsub_test.exs", __DIR__)
+
+defmodule Realtime.GenRpcPubSubTest do
+ # Application env being changed
+ use ExUnit.Case, async: false
+
+ test "it sets off_heap message_queue_data flag on the workers" do
+ assert Realtime.PubSubElixir.Realtime.PubSub.Adapter_1
+ |> Process.whereis()
+ |> Process.info(:message_queue_data) == {:message_queue_data, :off_heap}
+ end
+
+ test "it sets fullsweep_after flag on the workers" do
+ assert Realtime.PubSubElixir.Realtime.PubSub.Adapter_1
+ |> Process.whereis()
+ |> Process.info(:fullsweep_after) == {:fullsweep_after, 20}
+ end
+
+ @aux_mod (quote do
+ defmodule Subscriber do
+ # Relay messages to testing node
+ def subscribe(subscriber, topic) do
+ spawn(fn ->
+ RealtimeWeb.Endpoint.subscribe(topic)
+ 2 = length(Realtime.Nodes.region_nodes("us-east-1"))
+ 2 = length(Realtime.Nodes.region_nodes("ap-southeast-2"))
+ send(subscriber, {:ready, Application.get_env(:realtime, :region)})
+
+ loop = fn f ->
+ receive do
+ msg -> send(subscriber, {:relay, node(), msg})
+ end
+
+ f.(f)
+ end
+
+ loop.(loop)
+ end)
+ end
+ end
+ end)
+
+ Code.eval_quoted(@aux_mod)
+
+ @topic "gen-rpc-pub-sub-test-topic"
+
+ for regional_broadcasting <- [true, false] do
+ describe "regional balancing = #{regional_broadcasting}" do
+ setup do
+ previous_region = Application.get_env(:realtime, :region)
+ Application.put_env(:realtime, :region, "us-east-1")
+ on_exit(fn -> Application.put_env(:realtime, :region, previous_region) end)
+
+ previous_regional_broadcast = Application.get_env(:realtime, :regional_broadcasting)
+ Application.put_env(:realtime, :regional_broadcasting, unquote(regional_broadcasting))
+ on_exit(fn -> Application.put_env(:realtime, :regional_broadcasting, previous_regional_broadcast) end)
+
+ :ok
+ end
+
+ @describetag regional_broadcasting: regional_broadcasting
+
+ test "all messages are received" do
+ # start 1 node in us-east-1 to test my region broadcasting
+ # start 2 nodes in ap-southeast-2 to test other region broadcasting
+
+ us_node = :us_node
+ ap2_nodeX = :ap2_nodeX
+ ap2_nodeY = :ap2_nodeY
+
+ # Avoid port collision
+ gen_rpc_port = Application.fetch_env!(:gen_rpc, :tcp_server_port)
+
+ client_config_per_node = %{
+ node() => gen_rpc_port,
+ :"#{us_node}@127.0.0.1" => 16970,
+ :"#{ap2_nodeX}@127.0.0.1" => 16971,
+ :"#{ap2_nodeY}@127.0.0.1" => 16972
+ }
+
+ extra_config = [{:gen_rpc, :client_config_per_node, {:internal, client_config_per_node}}]
+
+ on_exit(fn -> Application.put_env(:gen_rpc, :client_config_per_node, {:internal, %{}}) end)
+ Application.put_env(:gen_rpc, :client_config_per_node, {:internal, client_config_per_node})
+
+ us_extra_config =
+ [{:realtime, :region, "us-east-1"}, {:gen_rpc, :tcp_server_port, 16970}] ++ extra_config
+
+ {:ok, _} = Clustered.start(@aux_mod, name: us_node, extra_config: us_extra_config, phoenix_port: 4014)
+
+ ap2_nodeX_extra_config =
+ [{:realtime, :region, "ap-southeast-2"}, {:gen_rpc, :tcp_server_port, 16971}] ++ extra_config
+
+ {:ok, _} = Clustered.start(@aux_mod, name: ap2_nodeX, extra_config: ap2_nodeX_extra_config, phoenix_port: 4015)
+
+ ap2_nodeY_extra_config =
+ [{:realtime, :region, "ap-southeast-2"}, {:gen_rpc, :tcp_server_port, 16972}] ++ extra_config
+
+ {:ok, _} = Clustered.start(@aux_mod, name: ap2_nodeY, extra_config: ap2_nodeY_extra_config, phoenix_port: 4016)
+
+ # Ensuring that syn had enough time to propagate to all nodes the group information
+ Process.sleep(3000)
+
+ RealtimeWeb.Endpoint.subscribe(@topic)
+ :erpc.multicall(Node.list(), Subscriber, :subscribe, [self(), @topic])
+
+ assert length(Realtime.Nodes.region_nodes("us-east-1")) == 2
+ assert length(Realtime.Nodes.region_nodes("ap-southeast-2")) == 2
+
+ assert_receive {:ready, "us-east-1"}
+ assert_receive {:ready, "ap-southeast-2"}
+ assert_receive {:ready, "ap-southeast-2"}
+
+ message = %Phoenix.Socket.Broadcast{topic: @topic, event: "an event", payload: ["a", %{"b" => "c"}, 1, 23]}
+ Phoenix.PubSub.broadcast(Realtime.PubSub, @topic, message)
+
+ assert_receive ^message
+
+ # Remote nodes received the broadcast
+ assert_receive {:relay, :"us_node@127.0.0.1", ^message}, 5000
+ assert_receive {:relay, :"ap2_nodeX@127.0.0.1", ^message}, 1000
+ assert_receive {:relay, :"ap2_nodeY@127.0.0.1", ^message}, 1000
+ refute_receive _any
+ end
+ end
+ end
+end
diff --git a/test/realtime/gen_rpc_test.exs b/test/realtime/gen_rpc_test.exs
index dd837aaf8..d26f29649 100644
--- a/test/realtime/gen_rpc_test.exs
+++ b/test/realtime/gen_rpc_test.exs
@@ -28,7 +28,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^current_node,
success: true,
- tenant: "123",
mechanism: :gen_rpc
}}
end
@@ -43,7 +42,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^current_node,
success: false,
- tenant: "123",
mechanism: :gen_rpc
}}
end
@@ -57,7 +55,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^node,
success: true,
- tenant: "123",
mechanism: :gen_rpc
}}
end
@@ -72,7 +69,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^node,
success: false,
- tenant: "123",
mechanism: :gen_rpc
}}
end
@@ -87,14 +83,13 @@ defmodule Realtime.GenRpcTest do
end)
assert log =~
- "project=123 external_id=123 [error] ErrorOnRpcCall: %{error: :timeout, mod: Process, func: :sleep, target: :\"main@127.0.0.1\"}"
+ "project=123 external_id=123 [error] ErrorOnRpcCall: %{error: :timeout, mod: Process, func: :sleep, target: :\"#{current_node}\"}"
assert_receive {[:realtime, :rpc], %{latency: _},
%{
origin_node: ^current_node,
target_node: ^current_node,
success: false,
- tenant: 123,
mechanism: :gen_rpc
}}
end
@@ -116,7 +111,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^node,
success: false,
- tenant: 123,
mechanism: :gen_rpc
}}
end
@@ -131,7 +125,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^current_node,
success: false,
- tenant: "123",
mechanism: :gen_rpc
}}
end
@@ -146,7 +139,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^node,
success: false,
- tenant: "123",
mechanism: :gen_rpc
}}
end
@@ -168,10 +160,101 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^node,
success: false,
- tenant: 123,
mechanism: :gen_rpc
}}
end
+
+ test "bad node" do
+ node = :"unknown@1.1.1.1"
+
+ log =
+ capture_log(fn ->
+ assert GenRpc.call(node, Map, :fetch, [%{a: 1}, :a], tenant_id: 123) == {:error, :rpc_error, :badnode}
+ end)
+
+ assert log =~
+ ~r/project=123 external_id=123 \[error\] ErrorOnRpcCall: %{+error: :badnode, mod: Map, func: :fetch, target: :"#{node}"/
+ end
+ end
+
+ describe "abcast/4" do
+ test "abcast to registered process", %{node: node} do
+ name =
+ System.unique_integer()
+ |> to_string()
+ |> String.to_atom()
+
+ :erlang.register(name, self())
+
+ # Use erpc to make the other node abcast to this one
+ :erpc.call(node, GenRpc, :abcast, [[node()], name, "a message", []])
+
+ assert_receive "a message"
+ refute_receive _any
+ end
+
+ @tag extra_config: [{:gen_rpc, :tcp_server_port, 9999}]
+ test "tcp error" do
+ Logger.put_process_level(self(), :debug)
+
+ log =
+ capture_log(fn ->
+ assert GenRpc.abcast(Node.list(), :some_process_name, "a message", []) == :ok
+ # We have to wait for gen_rpc logs to show up
+ Process.sleep(100)
+ end)
+
+ assert log =~ "[error] event=connect_to_remote_server"
+
+ refute_receive _any
+ end
+ end
+
+ describe "cast/5" do
+ test "apply on a local node" do
+ parent = self()
+
+ assert GenRpc.cast(node(), Kernel, :send, [parent, :sent]) == :ok
+
+ assert_receive :sent
+ refute_receive _any
+ end
+
+ test "apply on a remote node", %{node: node} do
+ parent = self()
+
+ assert GenRpc.cast(node, Kernel, :send, [parent, :sent]) == :ok
+
+ assert_receive :sent
+ refute_receive _any
+ end
+
+ test "bad node does nothing" do
+ node = :"unknown@1.1.1.1"
+
+ parent = self()
+
+ assert GenRpc.cast(node, Kernel, :send, [parent, :sent]) == :ok
+
+ refute_receive _any
+ end
+
+ @tag extra_config: [{:gen_rpc, :tcp_server_port, 9999}]
+ test "tcp error", %{node: node} do
+ parent = self()
+ Logger.put_process_level(self(), :debug)
+
+ log =
+ capture_log(fn ->
+ assert GenRpc.cast(node, Kernel, :send, [parent, :sent]) == :ok
+ # We have to wait for gen_rpc logs to show up
+ Process.sleep(100)
+ end)
+
+ assert log =~ "[error] event=connect_to_remote_server"
+
+ refute_receive _any
+ end
end
describe "multicast/4" do
@@ -214,7 +297,7 @@ defmodule Realtime.GenRpcTest do
current_node = node()
assert GenRpc.multicall(Map, :fetch, [%{a: 1}, :a], tenant_id: "123") == [
- {:"main@127.0.0.1", {:ok, 1}},
+ {current_node, {:ok, 1}},
{node, {:ok, 1}}
]
@@ -223,7 +306,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^node,
success: true,
- tenant: "123",
mechanism: :gen_rpc
}}
@@ -232,7 +314,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^current_node,
success: true,
- tenant: "123",
mechanism: :gen_rpc
}}
end
@@ -243,13 +324,13 @@ defmodule Realtime.GenRpcTest do
log =
capture_log(fn ->
assert GenRpc.multicall(Process, :sleep, [500], timeout: 100, tenant_id: 123) == [
- {:"main@127.0.0.1", {:error, :rpc_error, :timeout}},
+ {current_node, {:error, :rpc_error, :timeout}},
{node, {:error, :rpc_error, :timeout}}
]
end)
assert log =~
- "project=123 external_id=123 [error] ErrorOnRpcCall: %{error: :timeout, mod: Process, func: :sleep, target: :\"main@127.0.0.1\"}"
+ "project=123 external_id=123 [error] ErrorOnRpcCall: %{error: :timeout, mod: Process, func: :sleep, target: :\"#{current_node}\"}"
assert log =~
~r/project=123 external_id=123 \[error\] ErrorOnRpcCall: %{\s+error: :timeout,\s+mod: Process,\s+func: :sleep,\s+target:\s+:"#{node}"/
@@ -259,7 +340,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^node,
success: false,
- tenant: 123,
mechanism: :gen_rpc
}}
@@ -268,7 +348,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^current_node,
success: false,
- tenant: 123,
mechanism: :gen_rpc
}}
end
@@ -280,7 +359,7 @@ defmodule Realtime.GenRpcTest do
log =
capture_log(fn ->
assert GenRpc.multicall(Map, :fetch, [%{a: 1}, :a], tenant_id: 123) == [
- {:"main@127.0.0.1", {:ok, 1}},
+ {node(), {:ok, 1}},
{node, {:error, :rpc_error, :econnrefused}}
]
end)
@@ -293,7 +372,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^node,
success: false,
- tenant: 123,
mechanism: :gen_rpc
}}
@@ -302,7 +380,6 @@ defmodule Realtime.GenRpcTest do
origin_node: ^current_node,
target_node: ^current_node,
success: true,
- tenant: 123,
mechanism: :gen_rpc
}}
end
diff --git a/test/realtime/logs_test.exs b/test/realtime/logs_test.exs
index feee48ac6..3882a6750 100644
--- a/test/realtime/logs_test.exs
+++ b/test/realtime/logs_test.exs
@@ -1,6 +1,52 @@
defmodule Realtime.LogsTest do
use ExUnit.Case
+ import ExUnit.CaptureLog
+
+ alias Realtime.Logs
+
+ describe "to_log/1" do
+ test "returns binary as-is" do
+ assert Logs.to_log("hello") == "hello"
+ end
+
+ test "inspects non-binary values" do
+ assert Logs.to_log(%{key: "value"}) == inspect(%{key: "value"}, pretty: true)
+ assert Logs.to_log(123) == "123"
+ assert Logs.to_log([:a, :b]) == inspect([:a, :b], pretty: true)
+ end
+ end
+
+ describe "log_error/2" do
+ test "logs error with code and message" do
+ defmodule LogErrorTest do
+ use Realtime.Logs
+
+ def do_log do
+ log_error("TestCode", "something broke")
+ end
+ end
+
+ log = capture_log(fn -> LogErrorTest.do_log() end)
+ assert log =~ "TestCode: something broke"
+ end
+ end
+
+ describe "log_warning/2" do
+ test "logs warning with code and message" do
+ defmodule LogWarningTest do
+ use Realtime.Logs
+
+ def do_log do
+ log_warning("WarnCode", "something suspicious")
+ end
+ end
+
+ log = capture_log(fn -> LogWarningTest.do_log() end)
+ assert log =~ "WarnCode: something suspicious"
+ end
+ end
+
describe "Jason.Encoder implementation" do
test "encodes DBConnection.ConnectionError" do
error = %DBConnection.ConnectionError{
@@ -31,5 +77,15 @@ defmodule Realtime.LogsTest do
assert encoded =~ "table: \"users\""
assert encoded =~ "code: \"42P01\""
end
+
+ test "encodes Tuple with error logging" do
+ log =
+ capture_log(fn ->
+ encoded = Jason.encode!({:error, "test"})
+ assert encoded =~ "error: \"unable to parse response\""
+ end)
+
+ assert log =~ "UnableToEncodeJson"
+ end
end
end
diff --git a/test/realtime/messages_test.exs b/test/realtime/messages_test.exs
index 3bef9a5e0..5590adca9 100644
--- a/test/realtime/messages_test.exs
+++ b/test/realtime/messages_test.exs
@@ -1,10 +1,11 @@
defmodule Realtime.MessagesTest do
- use Realtime.DataCase, async: true
+ # usage of Clustered
+ use Realtime.DataCase, async: false
alias Realtime.Api.Message
alias Realtime.Database
alias Realtime.Messages
- alias Realtime.Repo
+ alias Realtime.Tenants.Repo
setup do
tenant = Containers.checkout_tenant(run_migrations: true)
@@ -13,35 +14,248 @@ defmodule Realtime.MessagesTest do
date_start = Date.utc_today() |> Date.add(-10)
date_end = Date.utc_today()
create_messages_partitions(conn, date_start, date_end)
+
+ on_exit(fn -> :telemetry.detach(__MODULE__) end)
+
+ :telemetry.attach(
+ __MODULE__,
+ [:realtime, :tenants, :replay],
+ &__MODULE__.handle_telemetry/4,
+ pid: self()
+ )
+
%{conn: conn, tenant: tenant, date_start: date_start, date_end: date_end}
end
- test "delete_old_messages/1 deletes messages older than 72 hours", %{
- conn: conn,
- tenant: tenant,
- date_start: date_start,
- date_end: date_end
- } do
- utc_now = NaiveDateTime.utc_now()
- limit = NaiveDateTime.add(utc_now, -72, :hour)
-
- messages =
- for date <- Date.range(date_start, date_end) do
- inserted_at = date |> NaiveDateTime.new!(Time.new!(0, 0, 0))
- message_fixture(tenant, %{inserted_at: inserted_at})
+ describe "replay/5" do
+ test "invalid replay params", %{tenant: tenant} do
+ assert Messages.replay(self(), tenant.external_id, "a topic", "not a number", 123) ==
+ {:error, :invalid_replay_params}
+
+ assert Messages.replay(self(), tenant.external_id, "a topic", 123, "not a number") ==
+ {:error, :invalid_replay_params}
+
+ assert Messages.replay(self(), tenant.external_id, "a topic", 253_402_300_800_000, 10) ==
+ {:error, :invalid_replay_params}
+ end
+
+ test "empty replay", %{conn: conn} do
+ assert Messages.replay(conn, "tenant_id", "test", 0, 10) == {:ok, [], MapSet.new()}
+ end
+
+ test "replay respects limit", %{conn: conn, tenant: tenant} do
+ external_id = tenant.external_id
+
+ m1 =
+ message_fixture(tenant, %{
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-1, :minute),
+ "event" => "new",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "private" => true,
+ "payload" => %{"value" => "new"}
+ })
+
+ message_fixture(tenant, %{
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-2, :minute),
+ "event" => "old",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "private" => true,
+ "payload" => %{"value" => "old"}
+ })
+
+ assert Messages.replay(conn, external_id, "test", 0, 1) == {:ok, [m1], MapSet.new([m1.id])}
+
+ assert_receive {
+ :telemetry,
+ [:realtime, :tenants, :replay],
+ %{latency: _},
+ %{tenant: ^external_id}
+ }
+ end
+
+ test "replay private topic only", %{conn: conn, tenant: tenant} do
+ privatem =
+ message_fixture(tenant, %{
+ "private" => true,
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-1, :minute),
+ "event" => "new",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "payload" => %{"value" => "new"}
+ })
+
+ message_fixture(tenant, %{
+ "private" => false,
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-2, :minute),
+ "event" => "old",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "payload" => %{"value" => "old"}
+ })
+
+ assert Messages.replay(conn, tenant.external_id, "test", 0, 10) == {:ok, [privatem], MapSet.new([privatem.id])}
+ end
+
+ test "replay extension=broadcast", %{conn: conn, tenant: tenant} do
+ privatem =
+ message_fixture(tenant, %{
+ "private" => true,
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-1, :minute),
+ "event" => "new",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "payload" => %{"value" => "new"}
+ })
+
+ message_fixture(tenant, %{
+ "private" => true,
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-2, :minute),
+ "event" => "old",
+ "extension" => "presence",
+ "topic" => "test",
+ "payload" => %{"value" => "old"}
+ })
+
+ assert Messages.replay(conn, tenant.external_id, "test", 0, 10) == {:ok, [privatem], MapSet.new([privatem.id])}
+ end
+
+ test "replay respects since", %{conn: conn, tenant: tenant} do
+ m1 =
+ message_fixture(tenant, %{
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-2, :minute),
+ "event" => "first",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "private" => true,
+ "payload" => %{"value" => "first"}
+ })
+
+ m2 =
+ message_fixture(tenant, %{
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-1, :minute),
+ "event" => "second",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "private" => true,
+ "payload" => %{"value" => "second"}
+ })
+
+ message_fixture(tenant, %{
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-10, :minute),
+ "event" => "old",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "private" => true,
+ "payload" => %{"value" => "old"}
+ })
+
+ since = DateTime.utc_now() |> DateTime.add(-3, :minute) |> DateTime.to_unix(:millisecond)
+
+ assert Messages.replay(conn, tenant.external_id, "test", since, 10) == {:ok, [m1, m2], MapSet.new([m1.id, m2.id])}
+ end
+
+ test "replay respects hard max limit of 25", %{conn: conn, tenant: tenant} do
+ for _i <- 1..30 do
+ message_fixture(tenant, %{
+ "inserted_at" => NaiveDateTime.utc_now(),
+ "event" => "event",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "private" => true,
+ "payload" => %{"value" => "message"}
+ })
end
- assert length(messages) == 11
+ assert {:ok, messages, set} = Messages.replay(conn, tenant.external_id, "test", 0, 30)
+ assert length(messages) == 25
+ assert MapSet.size(set) == 25
+ end
+
+ test "replay respects hard min limit of 1", %{conn: conn, tenant: tenant} do
+ message_fixture(tenant, %{
+ "inserted_at" => NaiveDateTime.utc_now(),
+ "event" => "event",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "private" => true,
+ "payload" => %{"value" => "message"}
+ })
+
+ assert {:ok, messages, set} = Messages.replay(conn, tenant.external_id, "test", 0, 0)
+ assert length(messages) == 1
+ assert MapSet.size(set) == 1
+ end
+
+ test "distributed replay", %{conn: conn, tenant: tenant} do
+ m =
+ message_fixture(tenant, %{
+ "inserted_at" => NaiveDateTime.utc_now(),
+ "event" => "event",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "private" => true,
+ "payload" => %{"value" => "message"}
+ })
+
+ {:ok, node} = Clustered.start()
+
+ # Call remote node passing the database connection that is local to this node
+ assert :erpc.call(node, Messages, :replay, [conn, tenant.external_id, "test", 0, 30]) ==
+ {:ok, [m], MapSet.new([m.id])}
+ end
- to_keep =
- Enum.reject(
- messages,
- &(NaiveDateTime.compare(limit, &1.inserted_at) == :gt)
- )
+ test "distributed replay error", %{tenant: tenant} do
+ message_fixture(tenant, %{
+ "inserted_at" => NaiveDateTime.utc_now(),
+ "event" => "event",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "private" => true,
+ "payload" => %{"value" => "message"}
+ })
- assert :ok = Messages.delete_old_messages(conn)
- {:ok, current} = Repo.all(conn, from(m in Message), Message)
+ {:ok, node} = Clustered.start()
- assert Enum.sort(current) == Enum.sort(to_keep)
+ # Call remote node passing the database connection that is local to this node
+ pid = spawn(fn -> :ok end)
+
+ assert :erpc.call(node, Messages, :replay, [pid, tenant.external_id, "test", 0, 30]) ==
+ {:error, :failed_to_replay_messages}
+ end
end
+
+ describe "delete_old_messages/1" do
+ test "delete_old_messages/1 deletes messages older than 72 hours", %{
+ conn: conn,
+ tenant: tenant,
+ date_start: date_start,
+ date_end: date_end
+ } do
+ utc_now = NaiveDateTime.utc_now()
+ limit = NaiveDateTime.add(utc_now, -72, :hour)
+
+ messages =
+ for date <- Date.range(date_start, date_end) do
+ inserted_at = date |> NaiveDateTime.new!(Time.new!(0, 0, 0))
+ message_fixture(tenant, %{inserted_at: inserted_at})
+ end
+
+ assert length(messages) == 11
+
+ to_keep =
+ Enum.reject(
+ messages,
+ &(NaiveDateTime.compare(NaiveDateTime.beginning_of_day(limit), &1.inserted_at) == :gt)
+ )
+
+ assert :ok = Messages.delete_old_messages(conn)
+ {:ok, current} = Repo.all(conn, from(m in Message), Message)
+
+ assert Enum.sort(current) == Enum.sort(to_keep)
+ end
+ end
+
+ def handle_telemetry(event, measures, metadata, pid: pid), do: send(pid, {:telemetry, event, measures, metadata})
end
diff --git a/test/realtime/metrics_cleaner_test.exs b/test/realtime/metrics_cleaner_test.exs
index fbe9d8515..a20e1107e 100644
--- a/test/realtime/metrics_cleaner_test.exs
+++ b/test/realtime/metrics_cleaner_test.exs
@@ -1,45 +1,260 @@
defmodule Realtime.MetricsCleanerTest do
- # async: false due to potentially polluting metrics with other tenant metrics from other tests
- use Realtime.DataCase, async: false
+ use Realtime.DataCase, async: true
alias Realtime.MetricsCleaner
alias Realtime.Tenants.Connect
- setup do
- interval = Application.get_env(:realtime, :metrics_cleaner_schedule_timer_in_ms)
- Application.put_env(:realtime, :metrics_cleaner_schedule_timer_in_ms, 100)
- tenant = Containers.checkout_tenant(run_migrations: true)
+ describe "metrics cleanup - vacant websockets" do
+ test "cleans up metrics for users that have been disconnected" do
+ :telemetry.execute(
+ [:realtime, :connections],
+ %{connected: 1, connected_cluster: 10, limit: 100},
+ %{tenant: "occupied-tenant"}
+ )
+
+ :telemetry.execute(
+ [:realtime, :connections],
+ %{connected: 0, connected_cluster: 20, limit: 100},
+ %{tenant: "vacant-tenant1"}
+ )
+
+ :telemetry.execute(
+ [:realtime, :connections],
+ %{connected: 0, connected_cluster: 20, limit: 100},
+ %{tenant: "vacant-tenant2"}
+ )
+
+ pid1 = spawn_link(fn -> Process.sleep(:infinity) end)
+ pid2 = spawn_link(fn -> Process.sleep(:infinity) end)
+ pid3 = spawn_link(fn -> Process.sleep(:infinity) end)
+
+ Beacon.join(:users, "occupied-tenant", pid1)
+ Beacon.join(:users, "vacant-tenant1", pid2)
+ Beacon.join(:users, "vacant-tenant2", pid3)
+
+ metrics = Realtime.PromEx.get_metrics() |> IO.iodata_to_binary()
+
+ assert String.contains?(metrics, "tenant=\"occupied-tenant\"")
+ assert String.contains?(metrics, "tenant=\"vacant-tenant1\"")
+ assert String.contains?(metrics, "tenant=\"vacant-tenant2\"")
+
+ start_supervised!(
+ {MetricsCleaner, [metrics_cleaner_schedule_timer_in_ms: 100, vacant_metric_threshold_in_seconds: 1]}
+ )
+
+ # Now let's disconnect vacant tenants
+ Beacon.leave(:users, "vacant-tenant1", pid2)
+ Beacon.leave(:users, "vacant-tenant2", pid3)
+
+ # Wait for clean up to run
+ Process.sleep(200)
+
+ # Nothing changes yet (threshold not reached)
+ metrics = Realtime.PromEx.get_metrics() |> IO.iodata_to_binary()
+
+ assert String.contains?(metrics, "tenant=\"occupied-tenant\"")
+ assert String.contains?(metrics, "tenant=\"vacant-tenant1\"")
+ assert String.contains?(metrics, "tenant=\"vacant-tenant2\"")
+
+ # Wait for threshold to pass and cleanup to run
+ Process.sleep(2200)
+
+ # vacant tenant metrics are now gone
+ metrics = Realtime.PromEx.get_metrics() |> IO.iodata_to_binary()
- on_exit(fn ->
- Application.put_env(:realtime, :metrics_cleaner_schedule_timer_in_ms, interval)
- end)
+ assert String.contains?(metrics, "tenant=\"occupied-tenant\"")
+ refute String.contains?(metrics, "tenant=\"vacant-tenant1\"")
+ refute String.contains?(metrics, "tenant=\"vacant-tenant2\"")
+ end
+
+ test "does not clean up metrics if websockets reconnect before threshold" do
+ :telemetry.execute(
+ [:realtime, :connections],
+ %{connected: 1, connected_cluster: 10, limit: 100},
+ %{tenant: "reconnect-tenant"}
+ )
+
+ pid = spawn_link(fn -> Process.sleep(:infinity) end)
+
+ Beacon.join(:users, "reconnect-tenant", pid)
+
+ metrics = Realtime.PromEx.get_metrics() |> IO.iodata_to_binary()
+ assert String.contains?(metrics, "tenant=\"reconnect-tenant\"")
+
+ start_supervised!(
+ {MetricsCleaner, [metrics_cleaner_schedule_timer_in_ms: 100, vacant_metric_threshold_in_seconds: 1]}
+ )
+
+ # Disconnect
+ Beacon.leave(:users, "reconnect-tenant", pid)
+ Process.sleep(500)
+
+ # Reconnect before threshold
+ pid2 = spawn_link(fn -> Process.sleep(:infinity) end)
+ Beacon.join(:users, "reconnect-tenant", pid2)
- %{tenant: tenant}
+ # Wait for cleanup to run
+ Process.sleep(2200)
+
+ # Metrics should still be present
+ metrics = Realtime.PromEx.get_metrics() |> IO.iodata_to_binary()
+ assert String.contains?(metrics, "tenant=\"reconnect-tenant\"")
+ end
end
- describe "metrics cleanup" do
- test "cleans up metrics for users that have been disconnected", %{tenant: %{external_id: external_id}} do
- start_supervised!(MetricsCleaner)
- {:ok, _} = Connect.lookup_or_start_connection(external_id)
- # Wait for promex to collect the metrics
- Process.sleep(6000)
+ describe "metrics cleanup - disconnected tenants" do
+ test "cleans up metrics for tenants that have been unregistered" do
+ :telemetry.execute(
+ [:realtime, :connections],
+ %{connected: 1, connected_cluster: 10, limit: 100},
+ %{tenant: "connected-tenant"}
+ )
+
+ :telemetry.execute(
+ [:realtime, :connections],
+ %{connected: 0, connected_cluster: 20, limit: 100},
+ %{tenant: "disconnected-tenant1"}
+ )
- Realtime.Telemetry.execute(
+ :telemetry.execute(
[:realtime, :connections],
- %{connected: 10, connected_cluster: 10, limit: 100},
- %{tenant: external_id}
+ %{connected: 0, connected_cluster: 20, limit: 100},
+ %{tenant: "disconnected-tenant2"}
)
- assert Realtime.PromEx.Metrics
- |> :ets.select([{{{:_, %{tenant: :"$1"}}, :_}, [], [:"$1"]}])
- |> Enum.any?(&(&1 == external_id))
+ metrics = Realtime.PromEx.get_metrics() |> IO.iodata_to_binary()
+
+ assert String.contains?(metrics, "tenant=\"connected-tenant\"")
+ assert String.contains?(metrics, "tenant=\"disconnected-tenant1\"")
+ assert String.contains?(metrics, "tenant=\"disconnected-tenant2\"")
+
+ start_supervised!(
+ {MetricsCleaner, [metrics_cleaner_schedule_timer_in_ms: 100, vacant_metric_threshold_in_seconds: 1]}
+ )
- Connect.shutdown(external_id)
+ # Simulate tenant registration (connected)
+ :telemetry.execute([:syn, Connect, :registered], %{}, %{name: "connected-tenant"})
+
+ # Simulate tenant unregistration (disconnected)
+ :telemetry.execute([:syn, Connect, :unregistered], %{}, %{name: "disconnected-tenant1"})
+ :telemetry.execute([:syn, Connect, :unregistered], %{}, %{name: "disconnected-tenant2"})
+
+ # Wait for clean up to run
Process.sleep(200)
- refute Realtime.PromEx.Metrics
- |> :ets.select([{{{:_, %{tenant: :"$1"}}, :_}, [], [:"$1"]}])
- |> Enum.any?(&(&1 == external_id))
+ # Nothing changes yet (threshold not reached)
+ metrics = Realtime.PromEx.get_metrics() |> IO.iodata_to_binary()
+
+ assert String.contains?(metrics, "tenant=\"connected-tenant\"")
+ assert String.contains?(metrics, "tenant=\"disconnected-tenant1\"")
+ assert String.contains?(metrics, "tenant=\"disconnected-tenant2\"")
+
+ # Wait for threshold to pass and cleanup to run
+ Process.sleep(2200)
+
+ # disconnected tenant metrics are now gone
+ metrics = Realtime.PromEx.get_metrics() |> IO.iodata_to_binary()
+
+ assert String.contains?(metrics, "tenant=\"connected-tenant\"")
+ refute String.contains?(metrics, "tenant=\"disconnected-tenant1\"")
+ refute String.contains?(metrics, "tenant=\"disconnected-tenant2\"")
+ end
+
+ test "does not clean up metrics if tenant reconnects before threshold" do
+ :telemetry.execute(
+ [:realtime, :connections],
+ %{connected: 1, connected_cluster: 10, limit: 100},
+ %{tenant: "reconnect-tenant"}
+ )
+
+ metrics = Realtime.PromEx.get_metrics() |> IO.iodata_to_binary()
+ assert String.contains?(metrics, "tenant=\"reconnect-tenant\"")
+
+ start_supervised!(
+ {MetricsCleaner, [metrics_cleaner_schedule_timer_in_ms: 100, vacant_metric_threshold_in_seconds: 1]}
+ )
+
+ # Simulate tenant unregistration
+ :telemetry.execute([:syn, Connect, :unregistered], %{}, %{name: "reconnect-tenant"})
+ Process.sleep(500)
+
+ # Re-register before threshold
+ :telemetry.execute([:syn, Connect, :registered], %{}, %{name: "reconnect-tenant"})
+
+ # Wait for cleanup to run
+ Process.sleep(2200)
+
+ # Metrics should still be present
+ metrics = Realtime.PromEx.get_metrics() |> IO.iodata_to_binary()
+ assert String.contains?(metrics, "tenant=\"reconnect-tenant\"")
+ end
+ end
+
+ describe "handle_info/2 unexpected message" do
+ test "logs error for unexpected messages" do
+ import ExUnit.CaptureLog
+
+ pid =
+ start_supervised!(
+ {MetricsCleaner, [metrics_cleaner_schedule_timer_in_ms: 60_000, vacant_metric_threshold_in_seconds: 600]}
+ )
+
+ log =
+ capture_log(fn ->
+ send(pid, :something_unexpected)
+ Process.sleep(100)
+ end)
+
+ assert log =~ "Unexpected message"
+ assert log =~ "something_unexpected"
+ end
+ end
+
+ describe "handle_beacon_event/4" do
+ test "inserts and deletes from ETS table" do
+ table = :ets.new(:test_beacon, [:set, :public])
+
+ MetricsCleaner.handle_beacon_event(
+ [:beacon, :users, :group, :vacant],
+ %{},
+ %{group: "test-tenant"},
+ table
+ )
+
+ assert [{"test-tenant", _timestamp}] = :ets.lookup(table, "test-tenant")
+
+ MetricsCleaner.handle_beacon_event(
+ [:beacon, :users, :group, :occupied],
+ %{},
+ %{group: "test-tenant"},
+ table
+ )
+
+ assert [] = :ets.lookup(table, "test-tenant")
+ end
+ end
+
+ describe "handle_syn_event/4" do
+ test "inserts and deletes from ETS table" do
+ table = :ets.new(:test_syn, [:set, :public])
+
+ MetricsCleaner.handle_syn_event(
+ [:syn, Connect, :unregistered],
+ %{},
+ %{name: "test-tenant"},
+ table
+ )
+
+ assert [{"test-tenant", _timestamp}] = :ets.lookup(table, "test-tenant")
+
+ MetricsCleaner.handle_syn_event(
+ [:syn, Connect, :registered],
+ %{},
+ %{name: "test-tenant"},
+ table
+ )
+
+ assert [] = :ets.lookup(table, "test-tenant")
end
end
end
diff --git a/test/realtime/metrics_pusher_test.exs b/test/realtime/metrics_pusher_test.exs
new file mode 100644
index 000000000..faad4bcbe
--- /dev/null
+++ b/test/realtime/metrics_pusher_test.exs
@@ -0,0 +1,156 @@
+defmodule Realtime.MetricsPusherTest do
+ use Realtime.DataCase, async: true
+ import ExUnit.CaptureLog
+
+ alias Realtime.MetricsPusher
+ alias Plug.Conn
+
+ setup {Req.Test, :verify_on_exit!}
+
+ # Helper function to start MetricsPusher and allow it to use Req.Test
+ defp start_and_allow_pusher(opts) do
+ pid = start_supervised!({MetricsPusher, opts})
+ Req.Test.allow(MetricsPusher, self(), pid)
+ {:ok, pid}
+ end
+
+ describe "start_link/1" do
+ test "does not start when URL is missing" do
+ opts = [enabled: true]
+ assert :ignore = MetricsPusher.start_link(opts)
+ end
+
+ test "sends request successfully" do
+ opts = [
+ url: "https://example.com:8428/api/v1/import/prometheus",
+ auth: "Bearer token",
+ compress: true,
+ interval: 10,
+ timeout: 5000
+ ]
+
+ parent = self()
+
+ Req.Test.expect(MetricsPusher, fn conn ->
+ body = Req.Test.raw_body(conn)
+ assert conn.method == "POST"
+ assert :zlib.gunzip(body) =~ "# HELP beam_stats_run_queue_count"
+ assert conn.scheme == :https
+ assert conn.host == "example.com"
+ assert conn.port == 8428
+ assert conn.request_path == "/api/v1/import/prometheus"
+ assert Conn.get_req_header(conn, "authorization") == ["Bearer token"]
+ assert Conn.get_req_header(conn, "content-encoding") == ["gzip"]
+ assert Conn.get_req_header(conn, "content-type") == ["text/plain"]
+
+ send(parent, :req_called)
+ Req.Test.text(conn, "")
+ end)
+
+ {:ok, _pid} = start_and_allow_pusher(opts)
+ assert_receive :req_called, 100
+ end
+
+ test "sends request successfully without auth header" do
+ opts = [
+ url: "http://localhost:8428/api/v1/import/prometheus",
+ compress: true,
+ interval: 10,
+ timeout: 5000
+ ]
+
+ parent = self()
+
+ Req.Test.expect(MetricsPusher, fn conn ->
+ body = Req.Test.raw_body(conn)
+ assert :zlib.gunzip(body) =~ "# HELP beam_stats_run_queue_count"
+ assert Conn.get_req_header(conn, "authorization") == []
+
+ send(parent, :req_called)
+ Req.Test.text(conn, "")
+ end)
+
+ {:ok, _pid} = start_and_allow_pusher(opts)
+ assert_receive :req_called, 100
+ end
+
+ test "sends request body untouched when compress=false" do
+ opts = [
+ url: "http://localhost:8428/api/v1/import/prometheus",
+ auth: "Bearer token",
+ compress: false,
+ interval: 10,
+ timeout: 5000
+ ]
+
+ parent = self()
+
+ Req.Test.expect(MetricsPusher, fn conn ->
+ body = Req.Test.raw_body(conn)
+ assert body =~ "# HELP beam_stats_run_queue_count"
+ assert Conn.get_req_header(conn, "content-encoding") == []
+ assert Conn.get_req_header(conn, "content-type") == ["text/plain"]
+
+ send(parent, :req_called)
+ Req.Test.text(conn, "")
+ end)
+
+ {:ok, _pid} = start_and_allow_pusher(opts)
+ assert_receive :req_called, 100
+ end
+
+ test "when request receives non 2XX response" do
+ opts = [
+ url: "https://example.com:8428/api/v1/import/prometheus",
+ auth: "Bearer token",
+ compress: true,
+ interval: 10,
+ timeout: 5000
+ ]
+
+ parent = self()
+
+ log =
+ capture_log(fn ->
+ Req.Test.expect(MetricsPusher, fn conn ->
+ send(parent, :req_called)
+ Conn.send_resp(conn, 500, "")
+ end)
+
+ {:ok, pid} = start_and_allow_pusher(opts)
+ assert_receive :req_called, 100
+ assert Process.alive?(pid)
+ # Wait long enough for the log to be captured
+ Process.sleep(100)
+ end)
+
+ assert log =~ "MetricsPusher: Failed to push metrics to"
+ end
+
+ test "when an error is raised" do
+ opts = [
+ url: "https://example.com:8428/api/v1/import/prometheus",
+ interval: 10,
+ timeout: 5000
+ ]
+
+ parent = self()
+
+ log =
+ capture_log(fn ->
+ Req.Test.expect(MetricsPusher, fn conn ->
+ send(parent, :req_called)
+ raise RuntimeError, "unexpected error"
+ end)
+
+ {:ok, pid} = start_and_allow_pusher(opts)
+ assert_receive :req_called, 100
+ assert Process.alive?(pid)
+ # Wait long enough for the log to be captured
+ Process.sleep(100)
+ end)
+
+ assert log =~ "MetricsPusher: Exception during push: %RuntimeError{message: \"unexpected error\"}"
+ end
+ end
+end
diff --git a/test/realtime/monitoring/distributed_metrics_test.exs b/test/realtime/monitoring/distributed_metrics_test.exs
index 491083973..49fe4af6f 100644
--- a/test/realtime/monitoring/distributed_metrics_test.exs
+++ b/test/realtime/monitoring/distributed_metrics_test.exs
@@ -15,7 +15,7 @@ defmodule Realtime.DistributedMetricsTest do
^node => %{
pid: _pid,
port: _port,
- queue_size: {:ok, 0},
+ queue_size: {:ok, _},
state: :up,
inet_stats: [
recv_oct: _,
diff --git a/test/realtime/monitoring/erl_sys_mon_test.exs b/test/realtime/monitoring/erl_sys_mon_test.exs
index b1e122d58..b14f79b58 100644
--- a/test/realtime/monitoring/erl_sys_mon_test.exs
+++ b/test/realtime/monitoring/erl_sys_mon_test.exs
@@ -5,16 +5,53 @@ defmodule Realtime.Monitoring.ErlSysMonTest do
describe "system monitoring" do
test "logs system monitor events" do
- start_supervised!({ErlSysMon, config: [{:long_message_queue, {1, 10}}]})
-
- assert capture_log(fn ->
- Task.async(fn ->
- Enum.map(1..1000, &send(self(), &1))
- # Wait for ErlSysMon to notice
- Process.sleep(4000)
- end)
- |> Task.await()
- end) =~ "Realtime.ErlSysMon message:"
+ start_supervised!({ErlSysMon, config: [{:long_message_queue, {1, 100}}]})
+
+ log =
+ capture_log(fn ->
+ Task.async(fn ->
+ Process.register(self(), TestProcess)
+ Enum.map(1..1000, &send(self(), &1))
+ # Wait for ErlSysMon to notice
+ Process.sleep(4000)
+ end)
+ |> Task.await()
+ end)
+
+ assert log =~ "Realtime.ErlSysMon message:"
+ assert log =~ "$initial_call\", {Realtime.Monitoring.ErlSysMonTest"
+ assert log =~ "ancestors\", [#{inspect(self())}]"
+ assert log =~ "registered_name: TestProcess"
+ assert log =~ "message_queue_len: "
+ assert log =~ "total_heap_size: "
+ end
+
+ test "logs non-pid monitor messages" do
+ {:ok, pid} = ErlSysMon.start_link(config: [])
+
+ log =
+ capture_log(fn ->
+ send(pid, {:unexpected, "message"})
+ Process.sleep(100)
+ end)
+
+ assert log =~ "Realtime.ErlSysMon message:"
+ assert log =~ "unexpected"
+ end
+
+ test "handles monitor event for dead process without crashing" do
+ {:ok, pid} = ErlSysMon.start_link(config: [])
+
+ dead_pid = spawn(fn -> :ok end)
+ Process.sleep(50)
+
+ log =
+ capture_log(fn ->
+ send(pid, {:monitor, dead_pid, :long_gc, %{timeout: 500}})
+ Process.sleep(100)
+ end)
+
+ assert log =~ "Realtime.ErlSysMon message:"
end
end
end
diff --git a/test/realtime/monitoring/peep/partitioned_test.exs b/test/realtime/monitoring/peep/partitioned_test.exs
new file mode 100644
index 000000000..47be695c8
--- /dev/null
+++ b/test/realtime/monitoring/peep/partitioned_test.exs
@@ -0,0 +1,6 @@
+Application.put_env(:peep, :test_storages, [
+ {Realtime.Monitoring.Peep.Partitioned, 3},
+ {Realtime.Monitoring.Peep.Partitioned, 1}
+])
+
+Code.require_file("../../../../deps/peep/test/shared/storage_test.exs", __DIR__)
diff --git a/test/realtime/monitoring/prom_ex/plugins/distributed_test.exs b/test/realtime/monitoring/prom_ex/plugins/distributed_test.exs
index ff4c4f098..731873066 100644
--- a/test/realtime/monitoring/prom_ex/plugins/distributed_test.exs
+++ b/test/realtime/monitoring/prom_ex/plugins/distributed_test.exs
@@ -23,55 +23,41 @@ defmodule Realtime.PromEx.Plugins.DistributedTest do
describe "pooling metrics" do
setup do
- metrics =
- PromEx.get_metrics(MetricsTest)
- |> String.split("\n", trim: true)
-
- %{metrics: metrics}
+ %{metrics: PromEx.get_metrics(MetricsTest)}
end
test "send_pending_bytes", %{metrics: metrics, node: node} do
- pattern = ~r/dist_send_pending_bytes{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?<number>\d+)/
- assert metric_value(metrics, pattern) == 0
+ assert metric_value(metrics, "dist_send_pending_bytes", origin_node: node(), target_node: node) == 0
end
test "send_count", %{metrics: metrics, node: node} do
- pattern = ~r/dist_send_count{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?<number>\d+)/
- assert metric_value(metrics, pattern) > 0
+ value = metric_value(metrics, "dist_send_count", origin_node: node(), target_node: node)
+ assert is_integer(value)
+ assert value > 0
end
test "send_bytes", %{metrics: metrics, node: node} do
- pattern = ~r/dist_send_bytes{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?<number>\d+)/
- assert metric_value(metrics, pattern) > 0
+ value = metric_value(metrics, "dist_send_bytes", origin_node: node(), target_node: node)
+ assert is_integer(value)
+ assert value > 0
end
test "recv_count", %{metrics: metrics, node: node} do
- pattern = ~r/dist_recv_count{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?<number>\d+)/
- assert metric_value(metrics, pattern) > 0
+ value = metric_value(metrics, "dist_recv_count", origin_node: node(), target_node: node)
+ assert is_integer(value)
+ assert value > 0
end
test "recv_bytes", %{metrics: metrics, node: node} do
- pattern = ~r/dist_recv_bytes{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?<number>\d+)/
- assert metric_value(metrics, pattern) > 0
+ value = metric_value(metrics, "dist_recv_bytes", origin_node: node(), target_node: node)
+ assert is_integer(value)
+ assert value > 0
end
test "queue_size", %{metrics: metrics, node: node} do
- pattern = ~r/dist_queue_size{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?<number>\d+)/
- assert is_integer(metric_value(metrics, pattern))
+ assert is_integer(metric_value(metrics, "dist_queue_size", origin_node: node(), target_node: node))
end
end
- defp metric_value(metrics, pattern) do
- metrics
- |> Enum.find_value(
- "0",
- fn item ->
- case Regex.run(pattern, item, capture: ["number"]) do
- [number] -> number
- _ -> false
- end
- end
- )
- |> String.to_integer()
- end
+ defp metric_value(metrics, metric, expected_tags), do: MetricsHelper.search(metrics, metric, expected_tags)
end
diff --git a/test/realtime/monitoring/prom_ex/plugins/gen_rpc_test.exs b/test/realtime/monitoring/prom_ex/plugins/gen_rpc_test.exs
index 25d8fae16..5396aae6b 100644
--- a/test/realtime/monitoring/prom_ex/plugins/gen_rpc_test.exs
+++ b/test/realtime/monitoring/prom_ex/plugins/gen_rpc_test.exs
@@ -23,55 +23,42 @@ defmodule Realtime.PromEx.Plugins.GenRpcTest do
describe "pooling metrics" do
setup do
- metrics =
- PromEx.get_metrics(MetricsTest)
- |> String.split("\n", trim: true)
-
- %{metrics: metrics}
+ %{metrics: PromEx.get_metrics(MetricsTest)}
end
test "send_pending_bytes", %{metrics: metrics, node: node} do
- pattern = ~r/gen_rpc_send_pending_bytes{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?<number>\d+)/
- assert metric_value(metrics, pattern) == 0
+ assert metric_value(metrics, "gen_rpc_send_pending_bytes", origin_node: node(), target_node: node) == 0
end
test "send_count", %{metrics: metrics, node: node} do
- pattern = ~r/gen_rpc_send_count{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?<number>\d+)/
- assert metric_value(metrics, pattern) > 0
+ value = metric_value(metrics, "gen_rpc_send_count", origin_node: node(), target_node: node)
+ assert is_integer(value)
+ assert value > 0
end
test "send_bytes", %{metrics: metrics, node: node} do
- pattern = ~r/gen_rpc_send_bytes{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?<number>\d+)/
- assert metric_value(metrics, pattern) > 0
+ value = metric_value(metrics, "gen_rpc_send_bytes", origin_node: node(), target_node: node)
+ assert is_integer(value)
+ assert value > 0
end
test "recv_count", %{metrics: metrics, node: node} do
- pattern = ~r/gen_rpc_recv_count{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?<number>\d+)/
- assert metric_value(metrics, pattern) > 0
+ value = metric_value(metrics, "gen_rpc_recv_count", origin_node: node(), target_node: node)
+ assert is_integer(value)
+ assert value > 0
end
test "recv_bytes", %{metrics: metrics, node: node} do
- pattern = ~r/gen_rpc_recv_bytes{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?<number>\d+)/
- assert metric_value(metrics, pattern) > 0
+ value = metric_value(metrics, "gen_rpc_recv_bytes", origin_node: node(), target_node: node)
+ assert is_integer(value)
+ assert value > 0
end
test "queue_size", %{metrics: metrics, node: node} do
- pattern = ~r/gen_rpc_queue_size_bytes{origin_node=\"#{node()}\",target_node=\"#{node}\"}\s(?<number>\d+)/
- assert metric_value(metrics, pattern) == 0
+ value = metric_value(metrics, "gen_rpc_queue_size_bytes", origin_node: node(), target_node: node)
+ assert is_integer(value)
end
end
- defp metric_value(metrics, pattern) do
- metrics
- |> Enum.find_value(
- "0",
- fn item ->
- case Regex.run(pattern, item, capture: ["number"]) do
- [number] -> number
- _ -> false
- end
- end
- )
- |> String.to_integer()
- end
+ defp metric_value(metrics, metric, expected_tags), do: MetricsHelper.search(metrics, metric, expected_tags)
end
diff --git a/test/realtime/monitoring/prom_ex/plugins/phoenix_test.exs b/test/realtime/monitoring/prom_ex/plugins/phoenix_test.exs
index a73e6e2f5..8f1d7d5be 100644
--- a/test/realtime/monitoring/prom_ex/plugins/phoenix_test.exs
+++ b/test/realtime/monitoring/prom_ex/plugins/phoenix_test.exs
@@ -1,6 +1,7 @@
defmodule Realtime.PromEx.Plugins.PhoenixTest do
use Realtime.DataCase, async: false
alias Realtime.PromEx.Plugins
+ alias Realtime.Integration.WebsocketClient
defmodule MetricsTest do
use PromEx, otp_app: :realtime_test_phoenix
@@ -10,34 +11,79 @@ defmodule Realtime.PromEx.Plugins.PhoenixTest do
end
end
+ setup_all do
+ start_supervised!(MetricsTest)
+ :ok
+ end
+
+ setup do
+ %{tenant: Containers.checkout_tenant(run_migrations: true)}
+ end
+
describe "pooling metrics" do
- setup do
- start_supervised!(MetricsTest)
- :ok
+ test "number of connections", %{tenant: tenant} do
+ {:ok, token} = token_valid(tenant, "anon", %{})
+
+ {:ok, _} =
+ WebsocketClient.connect(
+ self(),
+ uri(tenant, Phoenix.Socket.V1.JSONSerializer),
+ Phoenix.Socket.V1.JSONSerializer,
+ [{"x-api-key", token}]
+ )
+
+ {:ok, _} =
+ WebsocketClient.connect(
+ self(),
+ uri(tenant, Phoenix.Socket.V1.JSONSerializer),
+ Phoenix.Socket.V1.JSONSerializer,
+ [{"x-api-key", token}]
+ )
+
+ Process.sleep(200)
+ assert metric_value("phoenix_connections_total") >= 2
end
+ end
+
+ describe "event metrics" do
+ test "socket connected", %{tenant: tenant} do
+ {:ok, token} = token_valid(tenant, "anon", %{})
- test "number of connections" do
- # Trigger a connection by making a request to the endpoint
- url = RealtimeWeb.Endpoint.url() <> "/healthcheck"
- Req.get!(url)
+ {:ok, _} =
+ WebsocketClient.connect(
+ self(),
+ uri(tenant, Phoenix.Socket.V1.JSONSerializer),
+ Phoenix.Socket.V1.JSONSerializer,
+ [{"x-api-key", token}]
+ )
+
+ {:ok, _} =
+ WebsocketClient.connect(
+ self(),
+ uri(tenant, RealtimeWeb.Socket.V2Serializer),
+ RealtimeWeb.Socket.V2Serializer,
+ [{"x-api-key", token}]
+ )
Process.sleep(200)
- assert metric_value() > 0
+
+ assert metric_value("phoenix_socket_connected_duration_milliseconds_count",
+ endpoint: "RealtimeWeb.Endpoint",
+ result: "ok",
+ serializer: "Elixir.Phoenix.Socket.V1.JSONSerializer",
+ transport: "websocket"
+ ) >= 1
+
+ assert metric_value("phoenix_socket_connected_duration_milliseconds_count",
+ endpoint: "RealtimeWeb.Endpoint",
+ result: "ok",
+ serializer: "Elixir.RealtimeWeb.Socket.V2Serializer",
+ transport: "websocket"
+ ) >= 1
end
end
- defp metric_value() do
- PromEx.get_metrics(MetricsTest)
- |> String.split("\n", trim: true)
- |> Enum.find_value(
- "0",
- fn item ->
- case Regex.run(~r/phoenix_connections_total\s(?<number>\d+)/, item, capture: ["number"]) do
- [number] -> number
- _ -> false
- end
- end
- )
- |> String.to_integer()
+ defp metric_value(metric, expected_tags \\ nil) do
+ MetricsHelper.search(PromEx.get_metrics(MetricsTest), metric, expected_tags)
end
end
diff --git a/test/realtime/monitoring/prom_ex/plugins/tenant_test.exs b/test/realtime/monitoring/prom_ex/plugins/tenant_test.exs
index 164c8d2eb..fd024e1a5 100644
--- a/test/realtime/monitoring/prom_ex/plugins/tenant_test.exs
+++ b/test/realtime/monitoring/prom_ex/plugins/tenant_test.exs
@@ -1,12 +1,13 @@
defmodule Realtime.PromEx.Plugins.TenantTest do
- alias Realtime.Tenants.Authorization.Policies
use Realtime.DataCase, async: false
alias Realtime.PromEx.Plugins.Tenant
alias Realtime.Rpc
- alias Realtime.UsersCounter
- alias Realtime.Tenants.Authorization.Policies
alias Realtime.Tenants.Authorization
+ alias Realtime.Tenants.Authorization.Policies
+ alias Realtime.Tenants.Authorization.Policies
+ alias Realtime.RateCounter
+ alias Realtime.GenCounter
defmodule MetricsTest do
use PromEx, otp_app: :realtime_test_phoenix
@@ -15,54 +16,67 @@ defmodule Realtime.PromEx.Plugins.TenantTest do
def plugins, do: [{Tenant, poll_rate: 50}]
end
+ setup_all do
+ start_supervised!(MetricsTest)
+ :ok
+ end
+
def handle_telemetry(event, metadata, content, pid: pid), do: send(pid, {event, metadata, content})
@aux_mod (quote do
defmodule FakeUserCounter do
def fake_add(external_id) do
- :ok = UsersCounter.add(spawn(fn -> Process.sleep(2000) end), external_id)
+ pid = spawn(fn -> Process.sleep(2000) end)
+ :ok = Beacon.join(:users, external_id, pid)
end
def fake_db_event(external_id) do
- external_id
- |> Realtime.Tenants.db_events_per_second_rate()
- |> Realtime.RateCounter.new()
+ rate = Realtime.Tenants.db_events_per_second_rate(external_id, 100)
- external_id
- |> Realtime.Tenants.db_events_per_second_key()
- |> Realtime.GenCounter.add()
+ rate
+ |> tap(&RateCounter.new(&1))
+ |> tap(&GenCounter.add(&1.id))
+ |> RateCounterHelper.tick!()
end
def fake_event(external_id) do
- external_id
- |> Realtime.Tenants.events_per_second_rate(123)
- |> Realtime.RateCounter.new()
+ rate = Realtime.Tenants.events_per_second_rate(external_id, 123)
- external_id
- |> Realtime.Tenants.events_per_second_key()
- |> Realtime.GenCounter.add()
+ rate
+ |> tap(&RateCounter.new(&1))
+ |> tap(&GenCounter.add(&1.id))
+ |> RateCounterHelper.tick!()
end
def fake_presence_event(external_id) do
- external_id
- |> Realtime.Tenants.presence_events_per_second_rate(123)
- |> Realtime.RateCounter.new()
+ rate = Realtime.Tenants.presence_events_per_second_rate(external_id, 123)
- external_id
- |> Realtime.Tenants.presence_events_per_second_key()
- |> Realtime.GenCounter.add()
+ rate
+ |> tap(&RateCounter.new(&1))
+ |> tap(&GenCounter.add(&1.id))
+ |> RateCounterHelper.tick!()
end
def fake_broadcast_from_database(external_id) do
Realtime.Telemetry.execute(
[:realtime, :tenants, :broadcast_from_database],
%{
- latency_committed_at: 10,
- latency_inserted_at: 1
+ # millisecond
+ latency_committed_at: 9,
+ # microsecond
+ latency_inserted_at: 9000
},
%{tenant: external_id}
)
end
+
+ def fake_input_bytes(external_id) do
+ Realtime.Telemetry.execute([:realtime, :channel, :input_bytes], %{size: 10}, %{tenant: external_id})
+ end
+
+ def fake_output_bytes(external_id) do
+ Realtime.Telemetry.execute([:realtime, :channel, :output_bytes], %{size: 10}, %{tenant: external_id})
+ end
end
end)
@@ -75,7 +89,8 @@ defmodule Realtime.PromEx.Plugins.TenantTest do
on_exit(fn -> :telemetry.detach(__MODULE__) end)
- {:ok, node} = Clustered.start(@aux_mod)
+ {:ok, _} = Realtime.Tenants.Connect.lookup_or_start_connection(tenant.external_id)
+ {:ok, node} = Clustered.start(@aux_mod, extra_config: [{:realtime, :users_scope_broadcast_interval_in_ms, 50}])
%{tenant: tenant, node: node}
end
@@ -83,18 +98,22 @@ defmodule Realtime.PromEx.Plugins.TenantTest do
tenant: %{external_id: external_id},
node: node
} do
- UsersCounter.add(self(), external_id)
+ :ok = Beacon.join(:users, external_id, self())
# Add bad tenant id
- UsersCounter.add(self(), random_string())
+ bad_tenant_id = random_string()
+ :ok = Beacon.join(:users, bad_tenant_id, self())
_ = Rpc.call(node, FakeUserCounter, :fake_add, [external_id])
+
Process.sleep(500)
Tenant.execute_tenant_metrics()
assert_receive {[:realtime, :connections], %{connected: 1, limit: 200, connected_cluster: 2},
- %{tenant: ^external_id}}
+ %{tenant: ^external_id}},
+ 500
- refute_receive :_
+ refute_receive {[:realtime, :connections], %{connected: 1, limit: 200, connected_cluster: 2},
+ %{tenant: ^bad_tenant_id}}
end
end
@@ -113,47 +132,59 @@ defmodule Realtime.PromEx.Plugins.TenantTest do
role: "anon"
})
- start_supervised!(MetricsTest)
-
%{authorization_context: authorization_context, db_conn: db_conn, tenant: tenant}
end
test "event exists after counter added", %{tenant: %{external_id: external_id}} do
- pattern =
- ~r/realtime_channel_events{tenant="#{external_id}"}\s(?<number>\d+)/
+ metric_value = metric_value("realtime_channel_events", tenant: external_id) || 0
+ FakeUserCounter.fake_event(external_id)
+
+ Process.sleep(100)
+ assert metric_value("realtime_channel_events", tenant: external_id) == metric_value + 1
+ end
+
+ test "global event exists after counter added", %{tenant: %{external_id: external_id}} do
+ metric_value = metric_value("realtime_channel_global_events") || 0
- metric_value = metric_value(pattern)
FakeUserCounter.fake_event(external_id)
- Process.sleep(200)
- assert metric_value(pattern) == metric_value + 1
+ Process.sleep(100)
+ assert metric_value("realtime_channel_global_events") == metric_value + 1
end
test "db_event exists after counter added", %{tenant: %{external_id: external_id}} do
- pattern =
- ~r/realtime_channel_db_events{tenant="#{external_id}"}\s(?<number>\d+)/
+ metric_value = metric_value("realtime_channel_db_events", tenant: external_id) || 0
+ FakeUserCounter.fake_db_event(external_id)
+ Process.sleep(100)
+ assert metric_value("realtime_channel_db_events", tenant: external_id) == metric_value + 1
+ end
+
+ test "global db_event exists after counter added", %{tenant: %{external_id: external_id}} do
+ metric_value = metric_value("realtime_channel_global_db_events") || 0
- metric_value = metric_value(pattern)
FakeUserCounter.fake_db_event(external_id)
- Process.sleep(200)
- assert metric_value(pattern) == metric_value + 1
+ Process.sleep(100)
+ assert metric_value("realtime_channel_global_db_events") == metric_value + 1
end
test "presence_event exists after counter added", %{tenant: %{external_id: external_id}} do
- pattern =
- ~r/realtime_channel_presence_events{tenant="#{external_id}"}\s(?<number>\d+)/
+ metric_value = metric_value("realtime_channel_presence_events", tenant: external_id) || 0
- metric_value = metric_value(pattern)
FakeUserCounter.fake_presence_event(external_id)
- Process.sleep(200)
- assert metric_value(pattern) == metric_value + 1
+ Process.sleep(100)
+ assert metric_value("realtime_channel_presence_events", tenant: external_id) == metric_value + 1
end
- test "metric read_authorization_check exists after check", context do
- pattern =
- ~r/realtime_tenants_read_authorization_check_count{tenant="#{context.tenant.external_id}"}\s(?<number>\d+)/
+ test "global presence_event exists after counter added", %{tenant: %{external_id: external_id}} do
+ metric_value = metric_value("realtime_channel_global_presence_events") || 0
+ FakeUserCounter.fake_presence_event(external_id)
+ Process.sleep(100)
+ assert metric_value("realtime_channel_global_presence_events") == metric_value + 1
+ end
- metric_value = metric_value(pattern)
+ test "metric read_authorization_check exists after check", context do
+ metric = "realtime_tenants_read_authorization_check_count"
+ metric_value = metric_value(metric, tenant: context.tenant.external_id) || 0
{:ok, _} =
Authorization.get_read_authorizations(
@@ -164,19 +195,17 @@ defmodule Realtime.PromEx.Plugins.TenantTest do
Process.sleep(200)
- assert metric_value(pattern) == metric_value + 1
-
- bucket_pattern =
- ~r/realtime_tenants_read_authorization_check_bucket{tenant="#{context.tenant.external_id}",le="250"}\s(?<number>\d+)/
+ assert metric_value(metric, tenant: context.tenant.external_id) == metric_value + 1
- assert metric_value(bucket_pattern) > 0
+ assert metric_value("realtime_tenants_read_authorization_check_bucket",
+ tenant: context.tenant.external_id,
+ le: "250.0"
+ ) > 0
end
test "metric write_authorization_check exists after check", context do
- pattern =
- ~r/realtime_tenants_write_authorization_check_count{tenant="#{context.tenant.external_id}"}\s(?<number>\d+)/
-
- metric_value = metric_value(pattern)
+ metric = "realtime_tenants_write_authorization_check_count"
+ metric_value = metric_value(metric, tenant: context.tenant.external_id) || 0
{:ok, _} =
Authorization.get_write_authorizations(
@@ -188,96 +217,110 @@ defmodule Realtime.PromEx.Plugins.TenantTest do
# Wait enough time for the poll rate to be triggered at least once
Process.sleep(200)
- assert metric_value(pattern) == metric_value + 1
+ assert metric_value(metric, tenant: context.tenant.external_id) == metric_value + 1
- bucket_pattern =
- ~r/realtime_tenants_write_authorization_check_bucket{tenant="#{context.tenant.external_id}",le="250"}\s(?<number>\d+)/
+ assert metric_value("realtime_tenants_write_authorization_check_bucket",
+ tenant: context.tenant.external_id,
+ le: "250.0"
+ ) > 0
+ end
+
+ test "metric replay exists after check", context do
+ external_id = context.tenant.external_id
+ metric = "realtime_tenants_replay_count"
+ metric_value = metric_value(metric, tenant: external_id) || 0
+
+ assert {:ok, _, _} = Realtime.Messages.replay(context.db_conn, external_id, "test", 0, 1)
+
+ # Wait enough time for the poll rate to be triggered at least once
+ Process.sleep(200)
- assert metric_value(bucket_pattern) > 0
+ assert metric_value(metric, tenant: external_id) == metric_value + 1
+
+ assert metric_value("realtime_tenants_replay_bucket", tenant: external_id, le: "250.0") > 0
end
test "metric realtime_tenants_broadcast_from_database_latency_committed_at exists after check", context do
- pattern =
- ~r/realtime_tenants_broadcast_from_database_latency_committed_at_count{tenant="#{context.tenant.external_id}"}\s(?<number>\d+)/
+ external_id = context.tenant.external_id
+ metric = "realtime_tenants_broadcast_from_database_latency_committed_at_count"
+ metric_value = metric_value(metric, tenant: external_id) || 0
- metric_value = metric_value(pattern)
FakeUserCounter.fake_broadcast_from_database(context.tenant.external_id)
Process.sleep(200)
- assert metric_value(pattern) == metric_value + 1
+ assert metric_value(metric, tenant: external_id) == metric_value + 1
- bucket_pattern =
- ~r/realtime_tenants_broadcast_from_database_latency_committed_at_bucket{tenant="#{context.tenant.external_id}",le="10"}\s(?<number>\d+)/
-
- assert metric_value(bucket_pattern) > 0
+ assert metric_value("realtime_tenants_broadcast_from_database_latency_committed_at_bucket",
+ tenant: external_id,
+ le: "10.0"
+ ) > 0
end
test "metric realtime_tenants_broadcast_from_database_latency_inserted_at exists after check", context do
- pattern =
- ~r/realtime_tenants_broadcast_from_database_latency_inserted_at_count{tenant="#{context.tenant.external_id}"}\s(?<number>\d+)/
-
- metric_value = metric_value(pattern)
+ external_id = context.tenant.external_id
+ metric = "realtime_tenants_broadcast_from_database_latency_inserted_at_count"
+ metric_value = metric_value(metric, tenant: external_id) || 0
FakeUserCounter.fake_broadcast_from_database(context.tenant.external_id)
Process.sleep(200)
- assert metric_value(pattern) == metric_value + 1
-
- bucket_pattern =
- ~r/realtime_tenants_broadcast_from_database_latency_inserted_at_bucket{tenant="#{context.tenant.external_id}",le="5"}\s(?<number>\d+)/
+ assert metric_value(metric, tenant: external_id) == metric_value + 1
- assert metric_value(bucket_pattern) > 0
+ assert metric_value("realtime_tenants_broadcast_from_database_latency_inserted_at_bucket",
+ tenant: external_id,
+ le: "10.0"
+ ) > 0
end
test "tenant metric payload size", context do
external_id = context.tenant.external_id
-
- pattern =
- ~r/realtime_tenants_payload_size_count{tenant="#{external_id}"}\s(?<number>\d+)/
-
- metric_value = metric_value(pattern)
+ metric = "realtime_tenants_payload_size_count"
+ metric_value = metric_value(metric, message_type: "presence", tenant: external_id) || 0
message = %{topic: "a topic", event: "an event", payload: ["a", %{"b" => "c"}, 1, 23]}
- RealtimeWeb.TenantBroadcaster.pubsub_broadcast(external_id, "a topic", message, Phoenix.PubSub)
+ RealtimeWeb.TenantBroadcaster.pubsub_broadcast(external_id, "a topic", message, Phoenix.PubSub, :presence)
Process.sleep(200)
- assert metric_value(pattern) == metric_value + 1
-
- bucket_pattern =
- ~r/realtime_tenants_payload_size_bucket{tenant="#{external_id}",le="100"}\s(?<number>\d+)/
+ assert metric_value(metric, message_type: "presence", tenant: external_id) == metric_value + 1
- assert metric_value(bucket_pattern) > 0
+ assert metric_value("realtime_tenants_payload_size_bucket", tenant: external_id, le: "250") > 0
end
test "global metric payload size", context do
external_id = context.tenant.external_id
- pattern = ~r/realtime_payload_size_count\s(?<number>\d+)/
-
- metric_value = metric_value(pattern)
+ metric = "realtime_payload_size_count"
+ metric_value = metric_value(metric, message_type: "broadcast") || 0
message = %{topic: "a topic", event: "an event", payload: ["a", %{"b" => "c"}, 1, 23]}
- RealtimeWeb.TenantBroadcaster.pubsub_broadcast(external_id, "a topic", message, Phoenix.PubSub)
+ RealtimeWeb.TenantBroadcaster.pubsub_broadcast(external_id, "a topic", message, Phoenix.PubSub, :broadcast)
+
+ Process.sleep(200)
+ assert metric_value(metric, message_type: "broadcast") == metric_value + 1
+
+ assert metric_value("realtime_payload_size_bucket", le: "250.0") > 0
+ end
+
+ test "channel input bytes", context do
+ external_id = context.tenant.external_id
+
+ FakeUserCounter.fake_input_bytes(external_id)
+ FakeUserCounter.fake_input_bytes(external_id)
Process.sleep(200)
- assert metric_value(pattern) == metric_value + 1
+ assert metric_value("realtime_channel_input_bytes", tenant: external_id) == 20
+ end
- bucket_pattern = ~r/realtime_payload_size_bucket{le="100"}\s(?<number>\d+)/
+ test "channel output bytes", context do
+ external_id = context.tenant.external_id
+
+ FakeUserCounter.fake_output_bytes(external_id)
+ FakeUserCounter.fake_output_bytes(external_id)
- assert metric_value(bucket_pattern) > 0
+ Process.sleep(200)
+ assert metric_value("realtime_channel_output_bytes", tenant: external_id) == 20
end
end
- defp metric_value(pattern) do
- PromEx.get_metrics(MetricsTest)
- |> String.split("\n", trim: true)
- |> Enum.find_value(
- "0",
- fn item ->
- case Regex.run(pattern, item, capture: ["number"]) do
- [number] -> number
- _ -> false
- end
- end
- )
- |> String.to_integer()
+ defp metric_value(metric, expected_tags \\ nil) do
+ MetricsHelper.search(PromEx.get_metrics(MetricsTest), metric, expected_tags)
end
end
diff --git a/test/realtime/monitoring/prom_ex/plugins/tenants_test.exs b/test/realtime/monitoring/prom_ex/plugins/tenants_test.exs
index 080fd3cfb..f747daac2 100644
--- a/test/realtime/monitoring/prom_ex/plugins/tenants_test.exs
+++ b/test/realtime/monitoring/prom_ex/plugins/tenants_test.exs
@@ -10,7 +10,7 @@ defmodule Realtime.PromEx.Plugins.TenantsTest do
use PromEx, otp_app: :realtime_test_tenants
@impl true
def plugins do
- [{Tenants, poll_rate: 100}]
+ [{Tenants, poll_rate: 50}]
end
end
@@ -20,118 +20,107 @@ defmodule Realtime.PromEx.Plugins.TenantsTest do
def exception, do: raise(RuntimeError)
end
- setup do
- local_tenant = Containers.checkout_tenant(run_migrations: true)
+ setup_all do
start_supervised!(MetricsTest)
- {:ok, %{tenant: local_tenant}}
+ :ok
end
describe "event_metrics erpc" do
- test "success" do
- pattern = ~r/realtime_rpc_count{mechanism=\"erpc\",success="true",tenant="123"}\s(?<number>\d+)/
+ setup do
+ %{tenant: random_string()}
+ end
+
+ test "global success", %{tenant: tenant} do
+ metric = "realtime_global_rpc_count"
# Enough time for the poll rate to be triggered at least once
Process.sleep(200)
- previous_value = metric_value(pattern)
- assert {:ok, "success"} = Rpc.enhanced_call(node(), Test, :success, [], tenant_id: "123")
+ previous_value = metric_value(metric, mechanism: "erpc", success: true) || 0
+ assert {:ok, "success"} = Rpc.enhanced_call(node(), Test, :success, [], tenant_id: tenant)
Process.sleep(200)
- assert metric_value(pattern) == previous_value + 1
+ assert metric_value(metric, mechanism: "erpc", success: true) == previous_value + 1
end
- test "failure" do
- pattern = ~r/realtime_rpc_count{mechanism=\"erpc\",success="false",tenant="123"}\s(?<number>\d+)/
+ test "global failure", %{tenant: tenant} do
+ metric = "realtime_global_rpc_count"
# Enough time for the poll rate to be triggered at least once
Process.sleep(200)
- previous_value = metric_value(pattern)
- assert {:error, "failure"} = Rpc.enhanced_call(node(), Test, :failure, [], tenant_id: "123")
+ previous_value = metric_value(metric, mechanism: "erpc", success: false) || 0
+ assert {:error, "failure"} = Rpc.enhanced_call(node(), Test, :failure, [], tenant_id: tenant)
Process.sleep(200)
- assert metric_value(pattern) == previous_value + 1
+ assert metric_value(metric, mechanism: "erpc", success: false) == previous_value + 1
end
- test "exception" do
- pattern = ~r/realtime_rpc_count{mechanism=\"erpc\",success="false",tenant="123"}\s(?<number>\d+)/
+ test "global exception", %{tenant: tenant} do
+ metric = "realtime_global_rpc_count"
# Enough time for the poll rate to be triggered at least once
Process.sleep(200)
- previous_value = metric_value(pattern)
+ previous_value = metric_value(metric, mechanism: "erpc", success: false) || 0
assert {:error, :rpc_error, %RuntimeError{message: "runtime error"}} =
- Rpc.enhanced_call(node(), Test, :exception, [], tenant_id: "123")
+ Rpc.enhanced_call(node(), Test, :exception, [], tenant_id: tenant)
Process.sleep(200)
- assert metric_value(pattern) == previous_value + 1
+ assert metric_value(metric, mechanism: "erpc", success: false) == previous_value + 1
end
end
- test "event_metrics rpc" do
- pattern = ~r/realtime_rpc_count{mechanism=\"rpc\",success="",tenant="123"}\s(?<number>\d+)/
- # Enough time for the poll rate to be triggered at least once
- Process.sleep(200)
- previous_value = metric_value(pattern)
- assert {:ok, "success"} = Rpc.call(node(), Test, :success, [], tenant_id: "123")
- Process.sleep(200)
- assert metric_value(pattern) == previous_value + 1
- end
-
describe "event_metrics gen_rpc" do
- test "success" do
- pattern = ~r/realtime_rpc_count{mechanism=\"gen_rpc\",success="true",tenant="123"}\s(?<number>\d+)/
+ setup do
+ %{tenant: random_string()}
+ end
+
+ test "global success", %{tenant: tenant} do
+ metric = "realtime_global_rpc_count"
# Enough time for the poll rate to be triggered at least once
Process.sleep(200)
- previous_value = metric_value(pattern)
- assert GenRpc.multicall(Test, :success, [], tenant_id: "123") == [{node(), {:ok, "success"}}]
+ previous_value = metric_value(metric, mechanism: "gen_rpc", success: true) || 0
+ assert GenRpc.multicall(Test, :success, [], tenant_id: tenant) == [{node(), {:ok, "success"}}]
Process.sleep(200)
- assert metric_value(pattern) == previous_value + 1
+ assert metric_value(metric, mechanism: "gen_rpc", success: true) == previous_value + 1
end
- test "failure" do
- pattern = ~r/realtime_rpc_count{mechanism=\"gen_rpc\",success="false",tenant="123"}\s(?<number>\d+)/
+ test "global failure", %{tenant: tenant} do
+ metric = "realtime_global_rpc_count"
# Enough time for the poll rate to be triggered at least once
Process.sleep(200)
- previous_value = metric_value(pattern)
- assert GenRpc.multicall(Test, :failure, [], tenant_id: "123") == [{node(), {:error, "failure"}}]
+ previous_value = metric_value(metric, mechanism: "gen_rpc", success: false) || 0
+ assert GenRpc.multicall(Test, :failure, [], tenant_id: tenant) == [{node(), {:error, "failure"}}]
Process.sleep(200)
- assert metric_value(pattern) == previous_value + 1
+ assert metric_value(metric, mechanism: "gen_rpc", success: false) == previous_value + 1
end
- test "exception" do
- pattern = ~r/realtime_rpc_count{mechanism=\"gen_rpc\",success="false",tenant="123"}\s(?<number>\d+)/
+ test "global exception", %{tenant: tenant} do
+ metric = "realtime_global_rpc_count"
# Enough time for the poll rate to be triggered at least once
Process.sleep(200)
- previous_value = metric_value(pattern)
-
+ previous_value = metric_value(metric, mechanism: "gen_rpc", success: false) || 0
node = node()
assert assert [{^node, {:error, :rpc_error, {:EXIT, {%RuntimeError{message: "runtime error"}, _stacktrace}}}}] =
- GenRpc.multicall(Test, :exception, [], tenant_id: "123")
+ GenRpc.multicall(Test, :exception, [], tenant_id: tenant)
Process.sleep(200)
- assert metric_value(pattern) == previous_value + 1
+ assert metric_value(metric, mechanism: "gen_rpc", success: false) == previous_value + 1
end
end
describe "pooling metrics" do
+ setup do
+ local_tenant = Containers.checkout_tenant(run_migrations: true)
+ {:ok, %{tenant: local_tenant}}
+ end
+
test "conneted based on Connect module information for local node only", %{tenant: tenant} do
- pattern = ~r/realtime_tenants_connected\s(?<number>\d+)/
# Enough time for the poll rate to be triggered at least once
Process.sleep(200)
- previous_value = metric_value(pattern)
+ previous_value = metric_value("realtime_tenants_connected")
{:ok, _} = Connect.lookup_or_start_connection(tenant.external_id)
Process.sleep(200)
- assert metric_value(pattern) == previous_value + 1
+ assert metric_value("realtime_tenants_connected") == previous_value + 1
end
end
- defp metric_value(pattern) do
- PromEx.get_metrics(MetricsTest)
- |> String.split("\n", trim: true)
- |> Enum.find_value(
- "0",
- fn item ->
- case Regex.run(pattern, item, capture: ["number"]) do
- [number] -> number
- _ -> false
- end
- end
- )
- |> String.to_integer()
+ defp metric_value(metric, expected_tags \\ nil) do
+ MetricsHelper.search(PromEx.get_metrics(MetricsTest), metric, expected_tags)
end
end
diff --git a/test/realtime/monitoring/prom_ex_test.exs b/test/realtime/monitoring/prom_ex_test.exs
index 849536543..a466e5efd 100644
--- a/test/realtime/monitoring/prom_ex_test.exs
+++ b/test/realtime/monitoring/prom_ex_test.exs
@@ -5,7 +5,7 @@ defmodule Realtime.PromExTest do
describe "get_metrics/0" do
test "builds metrics in prometheus format which includes host region and id" do
- metrics = PromEx.get_metrics()
+ metrics = PromEx.get_metrics() |> IO.iodata_to_binary()
assert String.contains?(
metrics,
@@ -16,27 +16,7 @@ defmodule Realtime.PromExTest do
assert String.contains?(
metrics,
- "beam_system_schedulers_online_info{host=\"nohost\",region=\"us-east-1\",id=\"nohost\"}"
- )
- end
- end
-
- describe "get_compressed_metrics/0" do
- test "builds metrics compressed using zlib" do
- compressed_metrics = PromEx.get_compressed_metrics()
-
- metrics = :zlib.uncompress(compressed_metrics)
-
- assert String.contains?(
- metrics,
- "# HELP beam_system_schedulers_online_info The number of scheduler threads that are online."
- )
-
- assert String.contains?(metrics, "# TYPE beam_system_schedulers_online_info gauge")
-
- assert String.contains?(
- metrics,
- "beam_system_schedulers_online_info{host=\"nohost\",region=\"us-east-1\",id=\"nohost\"}"
+ "beam_system_schedulers_online_info{host=\"nohost\",id=\"nohost\",region=\"us-east-1\"}"
)
end
end
diff --git a/test/realtime/monitoring/prometheus_test.exs b/test/realtime/monitoring/prometheus_test.exs
new file mode 100644
index 000000000..980fa7d34
--- /dev/null
+++ b/test/realtime/monitoring/prometheus_test.exs
@@ -0,0 +1,434 @@
+# Based on https://github.com/rkallos/peep/blob/708546ed069aebdf78ac1f581130332bd2e8b5b1/test/prometheus_test.exs
+defmodule Realtime.Monitoring.PrometheusTest do
+ use ExUnit.Case, async: true
+
+ alias Realtime.Monitoring.Prometheus
+ alias Telemetry.Metrics
+
+ defmodule StorageCounter do
+ @moduledoc false
+ use Agent
+
+ def start() do
+ Agent.start(fn -> 0 end, name: __MODULE__)
+ end
+
+ def fresh_id() do
+ Agent.get_and_update(__MODULE__, fn i -> {:"#{i}", i + 1} end)
+ end
+ end
+
+ # Test struct that doesn't implement String.Chars
+ defmodule TestError do
+ defstruct [:reason, :code]
+ end
+
+ setup_all do
+ StorageCounter.start()
+ :ok
+ end
+
+ @impls [:default, {Realtime.Monitoring.Peep.Partitioned, 4}, :striped]
+
+ for impl <- @impls do
+ test "#{inspect(impl)} - counter formatting" do
+ counter = Metrics.counter("prometheus.test.counter", description: "a counter")
+ name = StorageCounter.fresh_id()
+
+ opts = [
+ name: name,
+ metrics: [counter],
+ storage: unquote(impl)
+ ]
+
+ {:ok, _pid} = Peep.start_link(opts)
+
+ Peep.insert_metric(name, counter, 1, %{foo: :bar, baz: "quux"})
+
+ expected = [
+ "# HELP prometheus_test_counter a counter",
+ "# TYPE prometheus_test_counter counter",
+ ~s(prometheus_test_counter{baz="quux",foo="bar"} 1)
+ ]
+
+ assert export(name) == lines_to_string(expected)
+ end
+
+ describe "#{inspect(impl)} - sum" do
+ test "sum formatting" do
+ name = StorageCounter.fresh_id()
+ sum = Metrics.sum("prometheus.test.sum", description: "a sum")
+
+ opts = [
+ name: name,
+ metrics: [sum],
+ storage: unquote(impl)
+ ]
+
+ {:ok, _pid} = Peep.start_link(opts)
+
+ Peep.insert_metric(name, sum, 5, %{foo: :bar, baz: "quux"})
+ Peep.insert_metric(name, sum, 3, %{foo: :bar, baz: "quux"})
+
+ expected = [
+ "# HELP prometheus_test_sum a sum",
+ "# TYPE prometheus_test_sum counter",
+ ~s(prometheus_test_sum{baz="quux",foo="bar"} 8)
+ ]
+
+ assert export(name) == lines_to_string(expected)
+ end
+
+ test "custom type" do
+ name = StorageCounter.fresh_id()
+
+ sum =
+ Metrics.sum("prometheus.test.sum",
+ description: "a sum",
+ reporter_options: [prometheus_type: "gauge"]
+ )
+
+ opts = [
+ name: name,
+ metrics: [sum],
+ storage: unquote(impl)
+ ]
+
+ {:ok, _pid} = Peep.start_link(opts)
+
+ Peep.insert_metric(name, sum, 5, %{foo: :bar, baz: "quux"})
+ Peep.insert_metric(name, sum, 3, %{foo: :bar, baz: "quux"})
+
+ expected = [
+ "# HELP prometheus_test_sum a sum",
+ "# TYPE prometheus_test_sum gauge",
+ ~s(prometheus_test_sum{baz="quux",foo="bar"} 8)
+ ]
+
+ assert export(name) == lines_to_string(expected)
+ end
+ end
+
+ describe "#{inspect(impl)} - last_value" do
+ test "formatting" do
+ name = StorageCounter.fresh_id()
+ last_value = Metrics.last_value("prometheus.test.gauge", description: "a last_value")
+
+ opts = [
+ name: name,
+ metrics: [last_value],
+ storage: unquote(impl)
+ ]
+
+ {:ok, _pid} = Peep.start_link(opts)
+
+ Peep.insert_metric(name, last_value, 5, %{blee: :bloo, flee: "floo"})
+
+ expected = [
+ "# HELP prometheus_test_gauge a last_value",
+ "# TYPE prometheus_test_gauge gauge",
+ ~s(prometheus_test_gauge{blee="bloo",flee="floo"} 5)
+ ]
+
+ assert export(name) == lines_to_string(expected)
+ end
+
+ test "custom type" do
+ name = StorageCounter.fresh_id()
+
+ last_value =
+ Metrics.last_value("prometheus.test.gauge",
+ description: "a last_value",
+ reporter_options: [prometheus_type: :sum]
+ )
+
+ opts = [
+ name: name,
+ metrics: [last_value],
+ storage: unquote(impl)
+ ]
+
+ {:ok, _pid} = Peep.start_link(opts)
+
+ Peep.insert_metric(name, last_value, 5, %{blee: :bloo, flee: "floo"})
+
+ expected = [
+ "# HELP prometheus_test_gauge a last_value",
+ "# TYPE prometheus_test_gauge sum",
+ ~s(prometheus_test_gauge{blee="bloo",flee="floo"} 5)
+ ]
+
+ assert export(name) == lines_to_string(expected)
+ end
+ end
+
+ test "#{inspect(impl)} - dist formatting" do
+ name = StorageCounter.fresh_id()
+
+ dist =
+ Metrics.distribution("prometheus.test.distribution",
+ description: "a distribution",
+ reporter_options: [max_value: 1000]
+ )
+
+ opts = [
+ name: name,
+ metrics: [dist],
+ storage: unquote(impl)
+ ]
+
+ {:ok, _pid} = Peep.start_link(opts)
+
+ expected = []
+ assert export(name) == lines_to_string(expected)
+
+ Peep.insert_metric(name, dist, 1, %{glee: :gloo})
+
+ expected = [
+ "# HELP prometheus_test_distribution a distribution",
+ "# TYPE prometheus_test_distribution histogram",
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.222222"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.493827"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.825789"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="2.23152"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="2.727413"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="3.333505"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="4.074283"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="4.97968"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="6.086275"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="7.438781"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="9.091843"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="11.112253"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="13.581642"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="16.599785"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="20.288626"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="24.79721"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="30.307701"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="37.042745"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="45.274466"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="55.335459"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="67.632227"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="82.661611"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="101.030858"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="123.48216"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="150.92264"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="184.461004"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="225.452339"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="275.552858"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="336.786827"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="411.628344"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="503.101309"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="614.9016"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="751.5464"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="918.556711"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1122.680424"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="+Inf"} 1),
+ ~s(prometheus_test_distribution_sum{glee="gloo"} 1),
+ ~s(prometheus_test_distribution_count{glee="gloo"} 1)
+ ]
+
+ assert export(name) == lines_to_string(expected)
+
+ for i <- 2..2000 do
+ Peep.insert_metric(name, dist, i, %{glee: :gloo})
+ end
+
+ expected = [
+ "# HELP prometheus_test_distribution a distribution",
+ "# TYPE prometheus_test_distribution histogram",
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.222222"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.493827"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.825789"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="2.23152"} 2),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="2.727413"} 2),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="3.333505"} 3),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="4.074283"} 4),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="4.97968"} 4),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="6.086275"} 6),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="7.438781"} 7),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="9.091843"} 9),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="11.112253"} 11),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="13.581642"} 13),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="16.599785"} 16),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="20.288626"} 20),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="24.79721"} 24),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="30.307701"} 30),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="37.042745"} 37),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="45.274466"} 45),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="55.335459"} 55),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="67.632227"} 67),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="82.661611"} 82),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="101.030858"} 101),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="123.48216"} 123),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="150.92264"} 150),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="184.461004"} 184),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="225.452339"} 225),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="275.552858"} 275),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="336.786827"} 336),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="411.628344"} 411),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="503.101309"} 503),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="614.9016"} 614),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="751.5464"} 751),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="918.556711"} 918),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1122.680424"} 1122),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="+Inf"} 2000),
+ ~s(prometheus_test_distribution_sum{glee="gloo"} 2001000),
+ ~s(prometheus_test_distribution_count{glee="gloo"} 2000)
+ ]
+
+ assert export(name) == lines_to_string(expected)
+ end
+
+ test "#{inspect(impl)} - dist formatting pow10" do
+ name = StorageCounter.fresh_id()
+
+ dist =
+ Metrics.distribution("prometheus.test.distribution",
+ description: "a distribution",
+ reporter_options: [
+ max_value: 1000,
+ peep_bucket_calculator: Peep.Buckets.PowersOfTen
+ ]
+ )
+
+ opts = [
+ name: name,
+ metrics: [dist],
+ storage: unquote(impl)
+ ]
+
+ {:ok, _pid} = Peep.start_link(opts)
+
+ expected = []
+ assert export(name) == lines_to_string(expected)
+
+ Peep.insert_metric(name, dist, 1, %{glee: :gloo})
+
+ expected = [
+ "# HELP prometheus_test_distribution a distribution",
+ "# TYPE prometheus_test_distribution histogram",
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="10.0"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="100.0"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e3"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e4"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e5"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e6"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e7"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e8"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e9"} 1),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="+Inf"} 1),
+ ~s(prometheus_test_distribution_sum{glee="gloo"} 1),
+ ~s(prometheus_test_distribution_count{glee="gloo"} 1)
+ ]
+
+ assert export(name) == lines_to_string(expected)
+
+ f = fn ->
+ for i <- 1..2000 do
+ Peep.insert_metric(name, dist, i, %{glee: :gloo})
+ end
+ end
+
+ 1..20 |> Enum.map(fn _ -> Task.async(f) end) |> Task.await_many()
+
+ expected =
+ [
+ "# HELP prometheus_test_distribution a distribution",
+ "# TYPE prometheus_test_distribution histogram",
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="10.0"} 181),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="100.0"} 1981),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e3"} 19981),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e4"} 40001),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e5"} 40001),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e6"} 40001),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e7"} 40001),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e8"} 40001),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="1.0e9"} 40001),
+ ~s(prometheus_test_distribution_bucket{glee="gloo",le="+Inf"} 40001),
+ ~s(prometheus_test_distribution_sum{glee="gloo"} 40020001),
+ ~s(prometheus_test_distribution_count{glee="gloo"} 40001)
+ ]
+
+ assert export(name) == lines_to_string(expected)
+ end
+
+ test "#{inspect(impl)} - regression: label escaping" do
+ name = StorageCounter.fresh_id()
+
+ counter =
+ Metrics.counter(
+ "prometheus.test.counter",
+ description: "a counter"
+ )
+
+ opts = [
+ name: name,
+ metrics: [counter],
+ storage: unquote(impl)
+ ]
+
+ {:ok, _pid} = Peep.start_link(opts)
+
+ Peep.insert_metric(name, counter, 1, %{atom: "\"string\""})
+ Peep.insert_metric(name, counter, 1, %{"\"string\"" => :atom})
+ Peep.insert_metric(name, counter, 1, %{"\"string\"" => "\"string\""})
+ Peep.insert_metric(name, counter, 1, %{"string" => "string\n"})
+
+ expected = [
+ "# HELP prometheus_test_counter a counter",
+ "# TYPE prometheus_test_counter counter",
+ ~s(prometheus_test_counter{atom="\\\"string\\\""} 1),
+ ~s(prometheus_test_counter{\"string\"="atom"} 1),
+ ~s(prometheus_test_counter{\"string\"="\\\"string\\\""} 1),
+ ~s(prometheus_test_counter{string="string\\n"} 1)
+ ]
+
+ assert export(name) == lines_to_string(expected)
+ end
+
+ test "#{inspect(impl)} - regression: handle structs without String.Chars" do
+ name = StorageCounter.fresh_id()
+
+ counter =
+ Metrics.counter(
+ "prometheus.test.counter",
+ description: "a counter"
+ )
+
+ opts = [
+ name: name,
+ metrics: [counter],
+ storage: unquote(impl)
+ ]
+
+ {:ok, _pid} = Peep.start_link(opts)
+
+ # Create a struct that doesn't implement String.Chars
+ error_struct = %TestError{reason: :tcp_closed, code: 1001}
+
+ Peep.insert_metric(name, counter, 1, %{error: error_struct})
+
+ result = export(name)
+
+ # Should not crash and should contain the inspected struct representation
+ assert result =~ "prometheus_test_counter"
+ assert result =~ "TestError"
+ assert result =~ "tcp_closed"
+ end
+ end
+
+ defp export(name) do
+ Peep.get_all_metrics(name)
+ |> Prometheus.export()
+ |> IO.iodata_to_binary()
+ end
+
+ defp lines_to_string(lines) do
+ lines
+ |> Enum.map(&[&1, ?\n])
+ |> Enum.concat(["# EOF\n"])
+ |> IO.iodata_to_binary()
+ end
+end
diff --git a/test/realtime/nodes_test.exs b/test/realtime/nodes_test.exs
index ba3b6be0e..72965b2f0 100644
--- a/test/realtime/nodes_test.exs
+++ b/test/realtime/nodes_test.exs
@@ -1,9 +1,81 @@
defmodule Realtime.NodesTest do
- use Realtime.DataCase, async: true
+ use Realtime.DataCase, async: false
use Mimic
alias Realtime.Nodes
alias Realtime.Tenants
+ defp spawn_fake_node(region, node) do
+ parent = self()
+
+ fun = fn ->
+ :syn.join(RegionNodes, region, self(), node: node)
+ send(parent, :joined)
+
+ receive do
+ :ok -> :ok
+ end
+ end
+
+ {:ok, _pid} = start_supervised({Task, fun}, id: {region, node})
+ assert_receive :joined
+ end
+
+ describe "all_node_regions/0" do
+ test "returns all regions with nodes" do
+ spawn_fake_node("us-east-1", :node_1)
+ spawn_fake_node("ap-2", :node_2)
+ spawn_fake_node("ap-2", :node_3)
+
+ assert Nodes.all_node_regions() |> Enum.sort() == ["ap-2", "us-east-1"]
+ end
+
+ test "with no other nodes, returns my region only" do
+ assert Nodes.all_node_regions() == ["us-east-1"]
+ end
+ end
+
+ describe "region_nodes/1" do
+ test "nil region returns empty list" do
+ assert Nodes.region_nodes(nil) == []
+ end
+
+ test "returns nodes from region" do
+ region = "ap-southeast-2"
+ spawn_fake_node(region, :node_1)
+ spawn_fake_node(region, :node_2)
+
+ spawn_fake_node("eu-west-2", :node_3)
+
+ assert Nodes.region_nodes(region) == [:node_1, :node_2]
+ assert Nodes.region_nodes("eu-west-2") == [:node_3]
+ end
+
+ test "on non-existing region, returns empty list" do
+ assert Nodes.region_nodes("non-existing-region") == []
+ end
+ end
+
+ describe "node_from_region/2" do
+ test "nil region returns error" do
+ assert {:error, :not_available} = Nodes.node_from_region(nil, :any_key)
+ end
+
+ test "empty region returns error" do
+ assert {:error, :not_available} = Nodes.node_from_region("empty-region", :any_key)
+ end
+
+ test "returns the same node given the same key" do
+ region = "ap-southeast-3"
+ spawn_fake_node(region, :node_1)
+ spawn_fake_node(region, :node_2)
+
+ spawn_fake_node("eu-west-3", :node_3)
+
+ assert {:ok, :node_2} = Nodes.node_from_region(region, :key1)
+ assert {:ok, :node_2} = Nodes.node_from_region(region, :key1)
+ end
+ end
+
describe "get_node_for_tenant/1" do
setup do
tenant = Containers.checkout_tenant()
@@ -16,7 +88,7 @@ defmodule Realtime.NodesTest do
reject(&:syn.members/2)
end
- test "on existing tenant id, returns the node for the region using syn", %{
+ test "on existing tenant id, returns a node from the region using load-aware picking", %{
tenant: tenant,
region: region
} do
@@ -29,26 +101,24 @@ defmodule Realtime.NodesTest do
]
end)
- index = :erlang.phash2(tenant.external_id, length(expected_nodes))
-
- expected_node = Enum.fetch!(expected_nodes, index)
expected_region = Tenants.region(tenant)
assert {:ok, node, region} = Nodes.get_node_for_tenant(tenant)
- assert node == expected_node
assert region == expected_region
+ assert node in expected_nodes
end
- test "on existing tenant id, and a single node for a given region, returns default", %{
+ test "on existing tenant id, and a single node for a given region, returns single node", %{
tenant: tenant,
region: region
} do
expect(:syn, :members, fn RegionNodes, ^region -> [{self(), [node: :tenant@closest1]}] end)
+
assert {:ok, node, region} = Nodes.get_node_for_tenant(tenant)
expected_region = Tenants.region(tenant)
- assert node == node()
+ assert node == :tenant@closest1
assert region == expected_region
end
@@ -62,4 +132,200 @@ defmodule Realtime.NodesTest do
assert region == expected_region
end
end
+
+ describe "platform_region_translator/1" do
+ test "returns nil for nil input" do
+ assert Nodes.platform_region_translator(nil) == nil
+ end
+
+ test "uses default mapping when no custom mapping configured" do
+ original = Application.get_env(:realtime, :region_mapping)
+ on_exit(fn -> Application.put_env(:realtime, :region_mapping, original) end)
+
+ Application.put_env(:realtime, :region_mapping, nil)
+
+ assert Nodes.platform_region_translator("eu-north-1") == "eu-west-2"
+ assert Nodes.platform_region_translator("us-west-2") == "us-west-1"
+ assert Nodes.platform_region_translator("unknown-region") == nil
+ end
+
+ test "uses custom mapping when configured without falling back to defaults" do
+ original = Application.get_env(:realtime, :region_mapping)
+ on_exit(fn -> Application.put_env(:realtime, :region_mapping, original) end)
+
+ custom_mapping = %{
+ "custom-region-1" => "us-east-1",
+ "eu-north-1" => "custom-target"
+ }
+
+ Application.put_env(:realtime, :region_mapping, custom_mapping)
+
+ # Custom mappings work
+ assert Nodes.platform_region_translator("custom-region-1") == "us-east-1"
+ assert Nodes.platform_region_translator("eu-north-1") == "custom-target"
+
+ # Unmapped regions return nil (no fallback to defaults)
+ assert Nodes.platform_region_translator("us-west-2") == nil
+ end
+ end
+
+ describe "node_load/1" do
+ test "returns {:error, :not_enough_data} for local node with insufficient uptime" do
+ assert {:error, :not_enough_data} = Nodes.node_load(node())
+ end
+ end
+
+ describe "node_load/1 with sufficient uptime" do
+ setup do
+ Application.put_env(:realtime, :node_balance_uptime_threshold_in_ms, 0)
+
+ on_exit(fn ->
+ Application.put_env(:realtime, :node_balance_uptime_threshold_in_ms, 999_999_999_999)
+ end)
+ end
+
+ test "returns cpu load for local node" do
+ load = Nodes.node_load(node())
+
+ assert is_integer(load)
+ assert load >= 0
+ end
+
+ test "returns cpu load for remote node" do
+ {:ok, remote_node} = Clustered.start()
+
+ load = Nodes.node_load(remote_node)
+
+ assert is_integer(load)
+ assert load >= 0
+ end
+
+ test "remote node can also get its own load" do
+ {:ok, remote_node} = Clustered.start()
+
+ load = :rpc.call(remote_node, Nodes, :node_load, [remote_node])
+
+ assert is_integer(load)
+ assert load >= 0
+ end
+ end
+
+ describe "launch_node/3 load-aware but not enough uptime" do
+ test "returns the one node from the region when one node is available" do
+ region = "clustered-test-region"
+ spawn_fake_node(region, :remote_node)
+
+ result = Nodes.launch_node(region, node(), "test-tenant-123")
+
+ assert result == :remote_node
+ end
+
+ test "returns default node when no region nodes available" do
+ result = Nodes.launch_node("empty-region", node(), "test-tenant-123")
+
+ assert result == node()
+ end
+
+ test "same tenant_id picks same nodes" do
+ region = "deterministic-region"
+ spawn_fake_node(region, :node_a)
+ spawn_fake_node(region, :node_b)
+ spawn_fake_node(region, :node_c)
+
+ tenant_id = "test-tenant-456"
+
+ # Call 10 times, should always return same node with hashed tenant ID
+ results = for _ <- 1..10, do: Nodes.launch_node(region, node(), tenant_id)
+
+ assert length(Enum.uniq(results)) == 1
+ end
+
+ test "different tenant_ids distribute across nodes" do
+ region = "distribution-region"
+ spawn_fake_node(region, :node_a)
+ spawn_fake_node(region, :node_b)
+ spawn_fake_node(region, :node_c)
+
+ # Generate 30 different tenant IDs
+ tenant_ids = for i <- 1..30, do: "tenant-#{i}"
+
+ results =
+ Enum.map(tenant_ids, fn id ->
+ Nodes.launch_node(region, node(), id)
+ end)
+
+ # Should distribute across multiple nodes (at least 2) using the hashed tenant IDs
+ assert length(Enum.uniq(results)) >= 2
+ end
+ end
+
+ describe "launch_node/3 with load-aware node picking enabled" do
+ setup do
+ Application.put_env(:realtime, :node_balance_uptime_threshold_in_ms, 0)
+
+ on_exit(fn ->
+ Application.put_env(:realtime, :node_balance_uptime_threshold_in_ms, 999_999_999_999)
+ end)
+ end
+
+ test "picks deterministic node when one node has insufficient data" do
+ region = "uptime-test-region"
+ spawn_fake_node(region, :node_a)
+ spawn_fake_node(region, :node_b)
+
+ stub(Nodes, :node_load, fn
+ :node_a -> {:error, :not_enough_data}
+ :node_b -> 100
+ end)
+
+ results = for _ <- 1..10, do: Nodes.launch_node(region, node(), "test-tenant-123")
+
+ # Deterministic with hashed tenant ID
+ assert length(Enum.uniq(results)) == 1
+ assert Enum.uniq(results) == [:node_b]
+ end
+
+ test "compares load between nodes and picks the least loaded deterministically" do
+ {:ok, remote_node} = Clustered.start(nil, [{:realtime, :node_balance_uptime_threshold_in_ms, 0}])
+
+ region = "load-test-region"
+ spawn_fake_node(region, node())
+ spawn_fake_node(region, remote_node)
+
+ local_load = Nodes.node_load(node())
+ remote_load = Nodes.node_load(remote_node)
+
+ assert is_integer(local_load) and local_load >= 0
+ assert is_integer(remote_load) and remote_load >= 0
+
+ results = for _ <- 1..10, do: Nodes.launch_node(region, node(), "test-tenant-789")
+
+ # Should be deterministic - all same node within time bucket
+ assert length(Enum.uniq(results)) == 1
+ assert Enum.all?(results, &(&1 in [node(), remote_node]))
+ end
+ end
+
+ describe "short_node_id_from_name/1" do
+ test "extracts short ID from fly.io-style IPv6 node name" do
+ assert Nodes.short_node_id_from_name(:"realtime-prod@fdaa:0:cc:a7b:b385:83c3:cfe3:2") ==
+ "83c3cfe3"
+ end
+
+ test "returns full name for localhost node" do
+ assert Nodes.short_node_id_from_name(:"pink@127.0.0.1") == "pink@127.0.0.1"
+ end
+
+ test "returns host for standard domain-style node names" do
+ assert Nodes.short_node_id_from_name(:"realtime@host.name.internal") == "host.name.internal"
+ end
+
+ test "returns host for simple IP node" do
+ assert Nodes.short_node_id_from_name(:"pink@10.0.1.1") == "10.0.1.1"
+ end
+
+ test "returns host for nonode@nohost" do
+ assert Nodes.short_node_id_from_name(:nonode@nohost) == "nohost"
+ end
+ end
end
diff --git a/test/realtime/postgres_decoder_test.exs b/test/realtime/postgres_decoder_test.exs
index 9516e5e9a..bd9a0c579 100644
--- a/test/realtime/postgres_decoder_test.exs
+++ b/test/realtime/postgres_decoder_test.exs
@@ -2,24 +2,23 @@ defmodule Realtime.PostgresDecoderTest do
use ExUnit.Case, async: true
alias Realtime.Adapters.Postgres.Decoder
- alias Decoder.Messages.{
- Begin,
- Commit,
- Origin,
- Relation,
- Relation.Column,
- Insert,
- Update,
- Delete,
- Truncate,
- Type
- }
+ alias Decoder.Messages.Begin
+ alias Decoder.Messages.Commit
+ alias Decoder.Messages.Insert
+ alias Decoder.Messages.Origin
+ alias Decoder.Messages.Relation
+ alias Decoder.Messages.Relation.Column
+ alias Decoder.Messages.Type
+ alias Decoder.Messages.Unsupported
test "decodes begin messages" do
{:ok, expected_dt_no_microseconds, 0} = DateTime.from_iso8601("2019-07-18T17:02:35Z")
expected_dt = DateTime.add(expected_dt_no_microseconds, 726_322, :microsecond)
- assert Decoder.decode_message(<<66, 0, 0, 0, 2, 167, 244, 168, 128, 0, 2, 48, 246, 88, 88, 213, 242, 0, 0, 2, 107>>) ==
+ assert Decoder.decode_message(
+ <<66, 0, 0, 0, 2, 167, 244, 168, 128, 0, 2, 48, 246, 88, 88, 213, 242, 0, 0, 2, 107>>,
+ %{}
+ ) ==
%Begin{commit_timestamp: expected_dt, final_lsn: {2, 2_817_828_992}, xid: 619}
end
@@ -28,7 +27,8 @@ defmodule Realtime.PostgresDecoderTest do
expected_dt = DateTime.add(expected_dt_no_microseconds, 726_322, :microsecond)
assert Decoder.decode_message(
- <<67, 0, 0, 0, 0, 2, 167, 244, 168, 128, 0, 0, 0, 2, 167, 244, 168, 176, 0, 2, 48, 246, 88, 88, 213, 242>>
+ <<67, 0, 0, 0, 0, 2, 167, 244, 168, 128, 0, 0, 0, 2, 167, 244, 168, 176, 0, 2, 48, 246, 88, 88, 213, 242>>,
+ %{}
) == %Commit{
flags: [],
lsn: {2, 2_817_828_992},
@@ -38,7 +38,7 @@ defmodule Realtime.PostgresDecoderTest do
end
test "decodes origin messages" do
- assert Decoder.decode_message(<<79, 0, 0, 0, 2, 167, 244, 168, 128>> <> "Elmer Fud") ==
+ assert Decoder.decode_message(<<79, 0, 0, 0, 2, 167, 244, 168, 128>> <> "Elmer Fud", %{}) ==
%Origin{
origin_commit_lsn: {2, 2_817_828_992},
name: "Elmer Fud"
@@ -48,7 +48,8 @@ defmodule Realtime.PostgresDecoderTest do
test "decodes relation messages" do
assert Decoder.decode_message(
<<82, 0, 0, 96, 0, 112, 117, 98, 108, 105, 99, 0, 102, 111, 111, 0, 100, 0, 2, 0, 98, 97, 114, 0, 0, 0, 0,
- 25, 255, 255, 255, 255, 1, 105, 100, 0, 0, 0, 0, 23, 255, 255, 255, 255>>
+ 25, 255, 255, 255, 255, 1, 105, 100, 0, 0, 0, 0, 23, 255, 255, 255, 255>>,
+ %{}
) == %Relation{
id: 24_576,
namespace: "public",
@@ -74,7 +75,8 @@ defmodule Realtime.PostgresDecoderTest do
test "decodes type messages" do
assert Decoder.decode_message(
<<89, 0, 0, 128, 52, 112, 117, 98, 108, 105, 99, 0, 101, 120, 97, 109, 112, 108, 101, 95, 116, 121, 112,
- 101, 0>>
+ 101, 0>>,
+ %{}
) ==
%Type{
id: 32_820,
@@ -83,110 +85,103 @@ defmodule Realtime.PostgresDecoderTest do
}
end
- describe "truncate messages" do
- test "decodes messages" do
- assert Decoder.decode_message(<<84, 0, 0, 0, 1, 0, 0, 0, 96, 0>>) ==
- %Truncate{
- number_of_relations: 1,
- options: [],
- truncated_relations: [24_576]
- }
- end
-
- test "decodes messages with cascade option" do
- assert Decoder.decode_message(<<84, 0, 0, 0, 1, 1, 0, 0, 96, 0>>) ==
- %Truncate{
- number_of_relations: 1,
- options: [:cascade],
- truncated_relations: [24_576]
- }
- end
-
- test "decodes messages with restart identity option" do
- assert Decoder.decode_message(<<84, 0, 0, 0, 1, 2, 0, 0, 96, 0>>) ==
- %Truncate{
- number_of_relations: 1,
- options: [:restart_identity],
- truncated_relations: [24_576]
- }
- end
- end
-
describe "data message (TupleData) decoder" do
- test "decodes insert messages" do
- assert Decoder.decode_message(
- <<73, 0, 0, 96, 0, 78, 0, 2, 116, 0, 0, 0, 3, 98, 97, 122, 116, 0, 0, 0, 3, 53, 54, 48>>
- ) == %Insert{
- relation_id: 24_576,
- tuple_data: {"baz", "560"}
- }
+ setup do
+ relation = %{
+ id: 24_576,
+ namespace: "public",
+ name: "foo",
+ columns: [
+ %Column{name: "id", type: "uuid"},
+ %Column{name: "bar", type: "text"}
+ ]
+ }
+
+ %{relation: relation}
end
- test "decodes insert messages with null values" do
- assert Decoder.decode_message(<<73, 0, 0, 96, 0, 78, 0, 2, 110, 116, 0, 0, 0, 3, 53, 54, 48>>) == %Insert{
- relation_id: 24_576,
- tuple_data: {nil, "560"}
- }
- end
+ test "decodes insert messages", %{relation: relation} do
+ uuid = UUID.uuid4()
+ string = Generators.random_string()
+
+ data =
+ "I" <>
+          <<relation.id::integer-32>> <>
+ "N" <>
+ <<2::integer-16>> <>
+ "b" <>
+ <<16::integer-32>> <>
+ UUID.string_to_binary!(uuid) <>
+ "b" <>
+          <<byte_size(string)::integer-32>> <>
+ string
- test "decodes insert messages with unchanged toasted values" do
- assert Decoder.decode_message(<<73, 0, 0, 96, 0, 78, 0, 2, 117, 116, 0, 0, 0, 3, 53, 54, 48>>) == %Insert{
- relation_id: 24_576,
- tuple_data: {:unchanged_toast, "560"}
- }
- end
-
- test "decodes update messages with default replica identity setting" do
assert Decoder.decode_message(
- <<85, 0, 0, 96, 0, 78, 0, 2, 116, 0, 0, 0, 7, 101, 120, 97, 109, 112, 108, 101, 116, 0, 0, 0, 3, 53, 54,
- 48>>
- ) == %Update{
- relation_id: 24_576,
- changed_key_tuple_data: nil,
- old_tuple_data: nil,
- tuple_data: {"example", "560"}
+ data,
+ %{relation.id => relation}
+ ) == %Insert{
+ relation_id: relation.id,
+ tuple_data: {uuid, string}
}
end
- test "decodes update messages with FULL replica identity setting" do
- assert Decoder.decode_message(
- <<85, 0, 0, 96, 0, 79, 0, 2, 116, 0, 0, 0, 3, 98, 97, 122, 116, 0, 0, 0, 3, 53, 54, 48, 78, 0, 2, 116, 0,
- 0, 0, 7, 101, 120, 97, 109, 112, 108, 101, 116, 0, 0, 0, 3, 53, 54, 48>>
- ) == %Update{
- relation_id: 24_576,
- changed_key_tuple_data: nil,
- old_tuple_data: {"baz", "560"},
- tuple_data: {"example", "560"}
- }
- end
+ test "ignores unknown relations", %{relation: relation} do
+ uuid = UUID.uuid4()
+ string = Generators.random_string()
+
+ data =
+ "I" <>
+ <<679::integer-32>> <>
+ "N" <>
+ <<2::integer-16>> <>
+ "b" <>
+ <<16::integer-32>> <>
+ UUID.string_to_binary!(uuid) <>
+ "b" <>
+          <<byte_size(string)::integer-32>> <>
+ string
- test "decodes update messages with USING INDEX replica identity setting" do
assert Decoder.decode_message(
- <<85, 0, 0, 96, 0, 75, 0, 2, 116, 0, 0, 0, 3, 98, 97, 122, 110, 78, 0, 2, 116, 0, 0, 0, 7, 101, 120, 97,
- 109, 112, 108, 101, 116, 0, 0, 0, 3, 53, 54, 48>>
- ) == %Update{
- relation_id: 24_576,
- changed_key_tuple_data: {"baz", nil},
- old_tuple_data: nil,
- tuple_data: {"example", "560"}
- }
+ data,
+ %{relation.id => relation}
+ ) == %Unsupported{}
end
- test "decodes DELETE messages with USING INDEX replica identity setting" do
- assert Decoder.decode_message(
- <<68, 0, 0, 96, 0, 75, 0, 2, 116, 0, 0, 0, 7, 101, 120, 97, 109, 112, 108, 101, 110>>
- ) == %Delete{
- relation_id: 24_576,
- changed_key_tuple_data: {"example", nil}
+ test "decodes insert messages with null values", %{relation: relation} do
+ string = Generators.random_string()
+
+ data =
+ "I" <>
+          <<relation.id::integer-32>> <>
+ "N" <>
+ <<2::integer-16>> <>
+ "n" <>
+ "b" <>
+          <<byte_size(string)::integer-32>> <>
+ string
+
+ assert Decoder.decode_message(data, %{relation.id => relation}) == %Insert{
+ relation_id: relation.id,
+ tuple_data: {nil, string}
}
end
- test "decodes DELETE messages with FULL replica identity setting" do
- assert Decoder.decode_message(
- <<68, 0, 0, 96, 0, 79, 0, 2, 116, 0, 0, 0, 3, 98, 97, 122, 116, 0, 0, 0, 3, 53, 54, 48>>
- ) == %Delete{
- relation_id: 24_576,
- old_tuple_data: {"baz", "560"}
+ test "decodes insert messages with unchanged toasted values", %{relation: relation} do
+ string = Generators.random_string()
+
+ data =
+ "I" <>
+          <<relation.id::integer-32>> <>
+ "N" <>
+ <<2::integer-16>> <>
+ "u" <>
+ "b" <>
+          <<byte_size(string)::integer-32>> <>
+ string
+
+ assert Decoder.decode_message(data, %{relation.id => relation}) == %Insert{
+ relation_id: relation.id,
+ tuple_data: {:unchanged_toast, string}
}
end
end
diff --git a/test/realtime/rate_counter/rate_counter_test.exs b/test/realtime/rate_counter/rate_counter_test.exs
index 6d3f57401..829e961c2 100644
--- a/test/realtime/rate_counter/rate_counter_test.exs
+++ b/test/realtime/rate_counter/rate_counter_test.exs
@@ -22,7 +22,7 @@ defmodule Realtime.RateCounterTest do
max_bucket_len: 60,
tick: 1000,
tick_ref: _,
- idle_shutdown: 900_000,
+ idle_shutdown: 600_000,
idle_shutdown_ref: _,
telemetry: %{emit: false},
limit: %{log: false}
@@ -62,7 +62,7 @@ defmodule Realtime.RateCounterTest do
max_bucket_len: 60,
tick: 10,
tick_ref: _,
- idle_shutdown: 900_000,
+ idle_shutdown: 600_000,
idle_shutdown_ref: _,
telemetry: %{
emit: true,
@@ -197,7 +197,7 @@ defmodule Realtime.RateCounterTest do
id: id,
opts: [
tick: 100,
- max_bucket_len: 3,
+ max_bucket_len: 5,
limit: [
value: 49,
measurement: :sum,
@@ -215,7 +215,7 @@ defmodule Realtime.RateCounterTest do
avg: +0.0,
sum: 0,
bucket: _,
- max_bucket_len: 3,
+ max_bucket_len: 5,
telemetry: %{emit: false},
limit: %{
log: true,
@@ -228,7 +228,7 @@ defmodule Realtime.RateCounterTest do
log =
capture_log(fn ->
GenCounter.add(args.id, 100)
- Process.sleep(100)
+ Process.sleep(120)
end)
assert {:ok, %RateCounter{sum: sum, limit: %{triggered: true}}} = RateCounter.get(args)
@@ -239,7 +239,7 @@ defmodule Realtime.RateCounterTest do
# Splitting by the error message returns the error message and the rest of the log only
assert length(String.split(log, "ErrorMessage: Reason")) == 2
- Process.sleep(400)
+ Process.sleep(600)
assert {:ok, %RateCounter{sum: 0, limit: %{triggered: false}}} = RateCounter.get(args)
end
@@ -260,10 +260,10 @@ defmodule Realtime.RateCounterTest do
test "rate counters shut themselves down when no activity occurs on the GenCounter" do
args = %Args{id: {:domain, :metric, Ecto.UUID.generate()}}
- {:ok, pid} = RateCounter.new(args, idle_shutdown: 5)
+ {:ok, pid} = RateCounter.new(args, idle_shutdown: 100)
Process.monitor(pid)
- assert_receive {:DOWN, _ref, :process, ^pid, :normal}, 25
+ assert_receive {:DOWN, _ref, :process, ^pid, :normal}, 200
# Cache has not expired yet
assert {:ok, %RateCounter{}} = Cachex.get(RateCounter, args.id)
Process.sleep(2000)
@@ -301,6 +301,18 @@ defmodule Realtime.RateCounterTest do
end
end
+ describe "publish_update/1" do
+ test "cause shutdown with update message from update topic" do
+ args = %Args{id: {:domain, :metric, Ecto.UUID.generate()}}
+ {:ok, pid} = RateCounter.new(args)
+
+ Process.monitor(pid)
+ RateCounter.publish_update(args.id)
+
+ assert_receive {:DOWN, _ref, :process, ^pid, :normal}
+ end
+ end
+
describe "get/1" do
test "gets the state of a rate counter" do
args = %Args{id: {:domain, :metric, Ecto.UUID.generate()}}
@@ -316,37 +328,5 @@ defmodule Realtime.RateCounterTest do
end
end
- describe "stop/1" do
- test "stops rate counters for a given entity" do
- entity_id = Ecto.UUID.generate()
- fake_terms = Enum.map(1..10, fn _ -> {:domain, :"metric_#{random_string()}", Ecto.UUID.generate()} end)
- terms = Enum.map(1..10, fn _ -> {:domain, :"metric_#{random_string()}", entity_id} end)
-
- for term <- terms do
- args = %Args{id: term}
- {:ok, _} = RateCounter.new(args)
- assert {:ok, %RateCounter{}} = RateCounter.get(args)
- end
-
- for term <- fake_terms do
- args = %Args{id: term}
- {:ok, _} = RateCounter.new(args)
- assert {:ok, %RateCounter{}} = RateCounter.get(args)
- end
-
- assert :ok = RateCounter.stop(entity_id)
- # Wait for processes to shut down and Registry to update
- Process.sleep(100)
-
- for term <- terms do
- assert [] = Registry.lookup(Realtime.Registry.Unique, {RateCounter, :rate_counter, term})
- end
-
- for term <- fake_terms do
- assert [{_pid, _value}] = Registry.lookup(Realtime.Registry.Unique, {RateCounter, :rate_counter, term})
- end
- end
- end
-
def handle_telemetry(event, measures, metadata, pid: pid), do: send(pid, {event, measures, metadata})
end
diff --git a/test/realtime/repo_replica_test.exs b/test/realtime/repo_replica_test.exs
index a3734d31b..e794f060f 100644
--- a/test/realtime/repo_replica_test.exs
+++ b/test/realtime/repo_replica_test.exs
@@ -1,14 +1,23 @@
defmodule Realtime.Repo.ReplicaTest do
- use ExUnit.Case
+ # application env being changed
+ use ExUnit.Case, async: false
alias Realtime.Repo.Replica
setup do
previous_platform = Application.get_env(:realtime, :platform)
previous_region = Application.get_env(:realtime, :region)
+ previous_master_region = Application.get_env(:realtime, :master_region)
+ previous_main_replica = Application.get_env(:realtime, Replica)
on_exit(fn ->
Application.put_env(:realtime, :platform, previous_platform)
Application.put_env(:realtime, :region, previous_region)
+ Application.put_env(:realtime, :master_region, previous_master_region)
+ Application.delete_env(:realtime, Replica)
+
+ if previous_main_replica do
+ Application.put_env(:realtime, Replica, previous_main_replica)
+ end
end)
end
@@ -16,12 +25,20 @@ defmodule Realtime.Repo.ReplicaTest do
for {region, mod} <- Replica.replicas_aws() do
setup do
Application.put_env(:realtime, :platform, :aws)
+ Application.put_env(:realtime, :master_region, "special-region")
+ :ok
end
test "handles #{region} region" do
Application.put_env(:realtime, :region, unquote(region))
replica_asserts(unquote(mod), Replica.replica())
end
+
+ test "defaults to Realtime.Repo if region is equal to master region on #{region}" do
+ Application.put_env(:realtime, :region, unquote(region))
+ Application.put_env(:realtime, :master_region, unquote(region))
+ replica_asserts(Realtime.Repo, Replica.replica())
+ end
end
test "defaults to Realtime.Repo if region is not configured" do
@@ -34,6 +51,8 @@ defmodule Realtime.Repo.ReplicaTest do
for {region, mod} <- Replica.replicas_fly() do
setup do
Application.put_env(:realtime, :platform, :fly)
+ Application.put_env(:realtime, :master_region, "special-region")
+ :ok
end
test "handles #{region} region" do
@@ -48,6 +67,53 @@ defmodule Realtime.Repo.ReplicaTest do
end
end
+ describe "main replica module configuration" do
+ setup do
+ Application.put_env(:realtime, Replica, hostname: "test-replica-host")
+ :ok
+ end
+
+ test "uses main replica module when configured on AWS platform" do
+ Application.put_env(:realtime, :platform, :aws)
+ Application.put_env(:realtime, :region, "us-west-1")
+ Application.put_env(:realtime, :master_region, "us-east-1")
+
+ replica_asserts(Replica, Replica.replica())
+ end
+
+ test "uses main replica module when configured on Fly platform" do
+ Application.put_env(:realtime, :platform, :fly)
+ Application.put_env(:realtime, :region, "sea")
+ Application.put_env(:realtime, :master_region, "sjc")
+
+ replica_asserts(Replica, Replica.replica())
+ end
+
+ test "still defaults to Realtime.Repo when region matches master region" do
+ Application.put_env(:realtime, :platform, :aws)
+ Application.put_env(:realtime, :region, "us-west-1")
+ Application.put_env(:realtime, :master_region, "us-west-1")
+
+ replica_asserts(Realtime.Repo, Replica.replica())
+ end
+
+ test "uses main replica module when region is unknown" do
+ Application.put_env(:realtime, :platform, :aws)
+ Application.put_env(:realtime, :region, "unknown-region")
+ Application.put_env(:realtime, :master_region, "us-east-1")
+
+ replica_asserts(Replica, Replica.replica())
+ end
+
+ test "uses main replica module without platform configuration" do
+ Application.delete_env(:realtime, :platform)
+ Application.put_env(:realtime, :region, "us-west-1")
+ Application.put_env(:realtime, :master_region, "us-east-1")
+
+ replica_asserts(Replica, Replica.replica())
+ end
+ end
+
defp replica_asserts(mod, replica) do
assert mod == replica
assert [Ecto.Repo] == replica.__info__(:attributes) |> Keyword.get(:behaviour)
diff --git a/test/realtime/rpc_test.exs b/test/realtime/rpc_test.exs
index 221cd781b..9c83d7064 100644
--- a/test/realtime/rpc_test.exs
+++ b/test/realtime/rpc_test.exs
@@ -81,8 +81,7 @@ defmodule Realtime.RpcTest do
func: :test_success,
origin_node: ^origin_node,
target_node: ^node,
- success: true,
- tenant: "123"
+ success: true
}}
end
@@ -100,8 +99,7 @@ defmodule Realtime.RpcTest do
func: :test_raise,
origin_node: ^origin_node,
target_node: ^node,
- success: false,
- tenant: "123"
+ success: false
}}
end
diff --git a/test/realtime/signal_handler_test.exs b/test/realtime/signal_handler_test.exs
index e694f0a7a..fe2e5f902 100644
--- a/test/realtime/signal_handler_test.exs
+++ b/test/realtime/signal_handler_test.exs
@@ -4,7 +4,7 @@ defmodule Realtime.SignalHandlerTest do
alias Realtime.SignalHandler
defmodule FakeHandler do
- def handle_event(:sigterm, _state), do: send(self(), :ok)
+ def handle_event(signal, _state), do: send(self(), signal)
end
setup do
@@ -20,7 +20,24 @@ defmodule Realtime.SignalHandlerTest do
assert capture_log(fn -> SignalHandler.handle_event(:sigterm, state) end) =~
"SignalHandler: :sigterm received"
- assert_receive :ok
+ assert_receive :sigterm
+ end
+
+ test "sets shutdown_in_progress on sigterm" do
+ {:ok, state} = SignalHandler.init({%{handler_mod: FakeHandler}, :ok})
+
+ capture_log(fn -> SignalHandler.handle_event(:sigterm, state) end)
+
+ assert Application.get_env(:realtime, :shutdown_in_progress) == true
+ end
+
+ test "does not set shutdown_in_progress on non-sigterm signals" do
+ Application.put_env(:realtime, :shutdown_in_progress, false)
+ {:ok, state} = SignalHandler.init({%{handler_mod: FakeHandler}, :ok})
+
+ capture_log(fn -> SignalHandler.handle_event(:sigusr1, state) end)
+
+ refute Application.get_env(:realtime, :shutdown_in_progress)
end
end
diff --git a/test/realtime/syn_handler_test.exs b/test/realtime/syn_handler_test.exs
index 2b27cf322..35664f178 100644
--- a/test/realtime/syn_handler_test.exs
+++ b/test/realtime/syn_handler_test.exs
@@ -13,8 +13,15 @@ defmodule Realtime.SynHandlerTest do
defmodule FakeConnect do
use GenServer
+ def start_link([tenant_id, region, opts]) do
+ name = {Connect, tenant_id, %{conn: nil, region: region}}
+ gen_opts = [name: {:via, :syn, name}]
+ GenServer.start_link(FakeConnect, [tenant_id, opts], gen_opts)
+ end
+
def init([tenant_id, opts]) do
- :syn.update_registry(Connect, tenant_id, fn _pid, meta -> %{meta | conn: "fake_conn"} end)
+ conn = Keyword.get(opts, :conn, "remote_conn")
+ :syn.update_registry(Connect, tenant_id, fn _pid, meta -> %{meta | conn: conn} end)
if opts[:trap_exit], do: Process.flag(:trap_exit, true)
@@ -28,125 +35,184 @@ defmodule Realtime.SynHandlerTest do
Code.eval_quoted(@aux_mod)
- defp assert_process_down(pid, reason \\ nil, timeout \\ 100) do
- ref = Process.monitor(pid)
+ # > :"main@127.0.0.11" < :"atest@127.0.0.1"
+ # false
+ # iex(2)> :erlang.phash2("tenant123", 2)
+ # 0
+ # iex(3)> :erlang.phash2("tenant999", 2)
+ # 1
+ describe "integration test with a Connect conflict name=atest" do
+ setup do
+ {:ok, pid, node} =
+ Clustered.start_disconnected(@aux_mod, name: :atest, extra_config: [{:realtime, :region, "ap-southeast-2"}])
- if reason do
- assert_receive {:DOWN, ^ref, :process, ^pid, ^reason}, timeout
- else
- assert_receive {:DOWN, ^ref, :process, ^pid, _reason}, timeout
+ %{peer_pid: pid, node: node}
+ end
+
+ @tag tenant_id: "tenant999"
+ test "tenant hash = 1", %{node: node, peer_pid: peer_pid, tenant_id: tenant_id} do
+ assert :erlang.phash2(tenant_id, 2) == 1
+ local_pid = start_supervised!({FakeConnect, [tenant_id, "us-east-1", [conn: "local_conn"]]})
+ {:ok, remote_pid} = :peer.call(peer_pid, FakeConnect, :start_link, [[tenant_id, "ap-southeast-2", []]])
+ on_exit(fn -> Process.exit(remote_pid, :brutal_kill) end)
+
+ log =
+ capture_log(fn ->
+ # Connect to peer node to cause a conflict on syn
+ true = Node.connect(node)
+ # Give some time for the conflict resolution to happen on the other node
+ Process.sleep(500)
+
+ # Both nodes agree
+ assert {^remote_pid, %{region: "ap-southeast-2", conn: "remote_conn"}} =
+ :peer.call(peer_pid, :syn, :lookup, [Connect, tenant_id])
+
+ assert {^remote_pid, %{region: "ap-southeast-2", conn: "remote_conn"}} = :syn.lookup(Connect, tenant_id)
+
+ assert :peer.call(peer_pid, Process, :alive?, [remote_pid])
+
+ refute Process.alive?(local_pid)
+ end)
+
+ assert log =~ "stop local process: #{inspect(local_pid)}"
+ assert log =~ "Successfully stopped #{inspect(local_pid)}"
+
+ assert log =~
+ "Elixir.Realtime.Tenants.Connect terminated due to syn conflict resolution: \"#{tenant_id}\" #{inspect(local_pid)}"
+ end
+
+ @tag tenant_id: "tenant123"
+ test "tenant hash = 0", %{node: node, peer_pid: peer_pid, tenant_id: tenant_id} do
+ assert :erlang.phash2(tenant_id, 2) == 0
+ {:ok, remote_pid} = :peer.call(peer_pid, FakeConnect, :start_link, [[tenant_id, "ap-southeast-2", []]])
+ local_pid = start_supervised!({FakeConnect, [tenant_id, "us-east-1", [conn: "local_conn"]]})
+ on_exit(fn -> Process.exit(remote_pid, :kill) end)
+
+ log =
+ capture_log(fn ->
+ # Connect to peer node to cause a conflict on syn
+ true = Node.connect(node)
+ # Give some time for the conflict resolution to happen on the other node
+ Process.sleep(500)
+
+ # Both nodes agree
+ assert {^local_pid, %{region: "us-east-1", conn: "local_conn"}} = :syn.lookup(Connect, tenant_id)
+
+ assert {^local_pid, %{region: "us-east-1", conn: "local_conn"}} =
+ :peer.call(peer_pid, :syn, :lookup, [Connect, tenant_id])
+
+ refute :peer.call(peer_pid, Process, :alive?, [remote_pid])
+
+ assert Process.alive?(local_pid)
+ end)
+
+ assert log =~ "remote process will be stopped: #{inspect(remote_pid)}"
end
end
- describe "integration test with a Connect conflict" do
+ # > :"main@127.0.0.11" < :"test@127.0.0.1"
+ # true
+ # iex(2)> :erlang.phash2("tenant123", 2)
+ # 0
+ # iex(3)> :erlang.phash2("tenant999", 2)
+ # 1
+ describe "integration test with a Connect conflict name=test" do
setup do
- ensure_connect_down("dev_tenant")
- {:ok, pid, node} = Clustered.start_disconnected(@aux_mod, extra_config: [{:realtime, :region, "ap-southeast-2"}])
- Endpoint.subscribe("connect:dev_tenant")
+ {:ok, pid, node} =
+ Clustered.start_disconnected(@aux_mod, name: :test, extra_config: [{:realtime, :region, "ap-southeast-2"}])
+
%{peer_pid: pid, node: node}
end
- test "local node started first", %{node: node, peer_pid: peer_pid} do
- external_id = "dev_tenant"
- # start connect locally first
- {:ok, db_conn} = Connect.lookup_or_start_connection(external_id)
- assert Connect.ready?(external_id)
- connect = Connect.whereis(external_id)
- assert node(connect) == node()
-
- # Now let's force the remote node to start the fake Connect process
- name = {Connect, external_id, %{conn: nil, region: "ap-southeast-2"}}
- opts = [name: {:via, :syn, name}]
- {:ok, remote_pid} = :peer.call(peer_pid, GenServer, :start_link, [FakeConnect, [external_id, []], opts])
+ @tag tenant_id: "tenant999"
+ test "tenant hash = 1", %{node: node, peer_pid: peer_pid, tenant_id: tenant_id} do
+ assert :erlang.phash2(tenant_id, 2) == 1
+ Endpoint.subscribe("connect:#{tenant_id}")
+ local_pid = start_supervised!({FakeConnect, [tenant_id, "us-east-1", [conn: "local_conn"]]})
+
+ {:ok, remote_pid} = :peer.call(peer_pid, FakeConnect, :start_link, [[tenant_id, "ap-southeast-2", []]])
+
on_exit(fn -> Process.exit(remote_pid, :brutal_kill) end)
log =
capture_log(fn ->
- Endpoint.subscribe("connect:dev_tenant")
# Connect to peer node to cause a conflict on syn
true = Node.connect(node)
# Give some time for the conflict resolution to happen on the other node
Process.sleep(500)
# Both nodes agree
- assert {^connect, %{region: "us-east-1", conn: ^db_conn}} = :syn.lookup(Connect, external_id)
+ assert {^local_pid, %{region: "us-east-1", conn: "local_conn"}} = :syn.lookup(Connect, tenant_id)
- assert {^connect, %{region: "us-east-1", conn: ^db_conn}} =
- :peer.call(peer_pid, :syn, :lookup, [Connect, external_id])
+ assert {^local_pid, %{region: "us-east-1", conn: "local_conn"}} =
+ :peer.call(peer_pid, :syn, :lookup, [Connect, tenant_id])
refute :peer.call(peer_pid, Process, :alive?, [remote_pid])
- assert Process.alive?(connect)
+ assert Process.alive?(local_pid)
end)
assert log =~ "remote process will be stopped: #{inspect(remote_pid)}"
end
- test "remote node started first", %{node: node, peer_pid: peer_pid} do
- external_id = "dev_tenant"
+ @tag tenant_id: "tenant123"
+ test "tenant hash = 0", %{node: node, peer_pid: peer_pid, tenant_id: tenant_id} do
+ assert :erlang.phash2(tenant_id, 2) == 0
# Start remote process first
- name = {Connect, external_id, %{conn: nil, region: "ap-southeast-2"}}
- opts = [name: {:via, :syn, name}]
- {:ok, remote_pid} = :peer.call(peer_pid, GenServer, :start_link, [FakeConnect, [external_id, []], opts])
+ {:ok, remote_pid} = :peer.call(peer_pid, FakeConnect, :start_link, [[tenant_id, "ap-southeast-2", []]])
+
on_exit(fn -> Process.exit(remote_pid, :kill) end)
# start connect locally later
- {:ok, _db_conn} = Connect.lookup_or_start_connection(external_id)
- assert Connect.ready?(external_id)
- connect = Connect.whereis(external_id)
- assert node(connect) == node()
+ local_pid = start_supervised!({FakeConnect, [tenant_id, "us-east-1", [conn: "local_conn"]]})
log =
capture_log(fn ->
# Connect to peer node to cause a conflict on syn
true = Node.connect(node)
- assert_process_down(connect)
- assert_receive %{event: "connect_down"}
+ # Give some time for the conflict resolution to happen on the other node
+ Process.sleep(500)
# Both nodes agree
- assert {^remote_pid, %{region: "ap-southeast-2", conn: "fake_conn"}} =
- :peer.call(peer_pid, :syn, :lookup, [Connect, external_id])
+ assert {^remote_pid, %{region: "ap-southeast-2", conn: "remote_conn"}} =
+ :peer.call(peer_pid, :syn, :lookup, [Connect, tenant_id])
- assert {^remote_pid, %{region: "ap-southeast-2", conn: "fake_conn"}} = :syn.lookup(Connect, external_id)
+ assert {^remote_pid, %{region: "ap-southeast-2", conn: "remote_conn"}} = :syn.lookup(Connect, tenant_id)
assert :peer.call(peer_pid, Process, :alive?, [remote_pid])
- refute Process.alive?(connect)
+ refute Process.alive?(local_pid)
end)
- assert log =~ "stop local process: #{inspect(connect)}"
- assert log =~ "Successfully stopped #{inspect(connect)}"
+ assert log =~ "stop local process: #{inspect(local_pid)}"
+ assert log =~ "Successfully stopped #{inspect(local_pid)}"
assert log =~
- "Elixir.Realtime.Tenants.Connect terminated due to syn conflict resolution: \"dev_tenant\" #{inspect(connect)}"
+ "Elixir.Realtime.Tenants.Connect terminated due to syn conflict resolution: \"#{tenant_id}\" #{inspect(local_pid)}"
end
- test "remote node started first but timed out stopping", %{node: node, peer_pid: peer_pid} do
- external_id = "dev_tenant"
+ @tag tenant_id: "tenant123"
+ test "tenant hash = 0 but timed out stopping", %{node: node, peer_pid: peer_pid, tenant_id: tenant_id} do
+ assert :erlang.phash2(tenant_id, 2) == 0
# Start remote process first
- name = {Connect, external_id, %{conn: nil, region: "ap-southeast-2"}}
- opts = [name: {:via, :syn, name}]
- {:ok, remote_pid} = :peer.call(peer_pid, GenServer, :start_link, [FakeConnect, [external_id, []], opts])
- on_exit(fn -> Process.exit(remote_pid, :brutal_kill) end)
+ {:ok, remote_pid} = :peer.call(peer_pid, FakeConnect, :start_link, [[tenant_id, "ap-southeast-2", []]])
+
+ on_exit(fn -> Process.exit(remote_pid, :kill) end)
- {:ok, local_pid} =
- start_supervised(%{
- id: self(),
- start: {GenServer, :start_link, [FakeConnect, [external_id, [trap_exit: true]], opts]}
- })
+ # start connect locally later
+ local_pid = start_supervised!({FakeConnect, [tenant_id, "us-east-1", [conn: "local_conn", trap_exit: true]]})
log =
capture_log(fn ->
# Connect to peer node to cause a conflict on syn
true = Node.connect(node)
assert_process_down(local_pid, :killed, 6000)
- assert_receive %{event: "connect_down"}
# Both nodes agree
- assert {^remote_pid, %{region: "ap-southeast-2", conn: "fake_conn"}} =
- :peer.call(peer_pid, :syn, :lookup, [Connect, external_id])
+ assert {^remote_pid, %{region: "ap-southeast-2", conn: "remote_conn"}} =
+ :peer.call(peer_pid, :syn, :lookup, [Connect, tenant_id])
- assert {^remote_pid, %{region: "ap-southeast-2", conn: "fake_conn"}} = :syn.lookup(Connect, external_id)
+ assert {^remote_pid, %{region: "ap-southeast-2", conn: "remote_conn"}} = :syn.lookup(Connect, tenant_id)
assert :peer.call(peer_pid, Process, :alive?, [remote_pid])
@@ -157,7 +223,34 @@ defmodule Realtime.SynHandlerTest do
assert log =~ "Timed out while waiting for process #{inspect(local_pid)} to stop. Sending kill exit signal"
assert log =~
- "Elixir.Realtime.Tenants.Connect terminated due to syn conflict resolution: \"dev_tenant\" #{inspect(local_pid)}"
+ "Elixir.Realtime.Tenants.Connect terminated due to syn conflict resolution: \"#{tenant_id}\" #{inspect(local_pid)}"
+ end
+ end
+
+ describe "on_process_registered/5" do
+ test "emits telemetry event for process registration" do
+ pid = self()
+ meta = %{some: :meta}
+ reason = :normal
+
+ # Attach a test handler to capture the telemetry event
+ test_pid = self()
+ handler_id = [:test, :syn_handler, :registered]
+
+ :telemetry.attach(
+ handler_id,
+ [:syn, @mod, :registered],
+ fn event, measurements, metadata, _config ->
+ send(test_pid, {:telemetry_event, event, measurements, metadata})
+ end,
+ nil
+ )
+
+ on_exit(fn -> :telemetry.detach(handler_id) end)
+
+ assert SynHandler.on_process_registered(@mod, @name, pid, meta, reason) == :ok
+
+ assert_receive {:telemetry_event, [:syn, @mod, :registered], %{}, %{name: @name}}
end
end
@@ -166,34 +259,82 @@ defmodule Realtime.SynHandlerTest do
RealtimeWeb.Endpoint.subscribe("#{@topic}:#{@name}")
end
+ test "emits telemetry event for process unregistration" do
+ reason = :normal
+ pid = self()
+
+ # Attach a test handler to capture the telemetry event
+ test_pid = self()
+ handler_id = [:test, :syn_handler, :unregistered]
+
+ :telemetry.attach(
+ handler_id,
+ [:syn, @mod, :unregistered],
+ fn event, measurements, metadata, _config ->
+ send(test_pid, {:telemetry_event, event, measurements, metadata})
+ end,
+ nil
+ )
+
+ on_exit(fn -> :telemetry.detach(handler_id) end)
+
+ capture_log(fn ->
+ assert SynHandler.on_process_unregistered(@mod, @name, pid, %{}, reason) == :ok
+ end)
+
+ assert_receive {:telemetry_event, [:syn, @mod, :unregistered], %{}, %{name: @name}}
+
+ topic = "#{@topic}:#{@name}"
+ event = "#{@topic}_down"
+ assert_receive %Phoenix.Socket.Broadcast{topic: ^topic, event: ^event, payload: %{reason: ^reason, pid: ^pid}}
+ end
+
test "it handles :syn_conflict_resolution reason" do
reason = :syn_conflict_resolution
+ pid = self()
log =
capture_log(fn ->
- assert SynHandler.on_process_unregistered(@mod, @name, self(), %{}, reason) == :ok
+ assert SynHandler.on_process_unregistered(@mod, @name, pid, %{}, reason) == :ok
end)
topic = "#{@topic}:#{@name}"
event = "#{@topic}_down"
assert log =~ "#{@mod} terminated due to syn conflict resolution: #{inspect(@name)} #{inspect(self())}"
- assert_receive %Phoenix.Socket.Broadcast{topic: ^topic, event: ^event, payload: nil}
+ assert_receive %Phoenix.Socket.Broadcast{topic: ^topic, event: ^event, payload: %{reason: ^reason, pid: ^pid}}
end
test "it handles other reasons" do
reason = :other_reason
+ pid = self()
log =
capture_log(fn ->
- assert SynHandler.on_process_unregistered(@mod, @name, self(), %{}, reason) == :ok
+ assert SynHandler.on_process_unregistered(@mod, @name, pid, %{}, reason) == :ok
end)
topic = "#{@topic}:#{@name}"
event = "#{@topic}_down"
refute log =~ "#{@mod} terminated: #{inspect(@name)} #{node()}"
- assert_receive %Phoenix.Socket.Broadcast{topic: ^topic, event: ^event, payload: nil}, 500
+
+ assert_receive %Phoenix.Socket.Broadcast{
+ topic: ^topic,
+ event: ^event,
+ payload: %{reason: ^reason, pid: ^pid}
+ },
+ 500
+ end
+ end
+
+ defp assert_process_down(pid, reason, timeout) do
+ ref = Process.monitor(pid)
+
+ if reason do
+ assert_receive {:DOWN, ^ref, :process, ^pid, ^reason}, timeout
+ else
+ assert_receive {:DOWN, ^ref, :process, ^pid, _reason}, timeout
end
end
end
diff --git a/test/realtime/telemetry/logger_test.exs b/test/realtime/telemetry/logger_test.exs
index 640cfc7e2..28ececf37 100644
--- a/test/realtime/telemetry/logger_test.exs
+++ b/test/realtime/telemetry/logger_test.exs
@@ -26,4 +26,10 @@ defmodule Realtime.Telemetry.LoggerTest do
end) =~ "Billing metrics: [:realtime, :connections]"
end
end
+
+ describe "handle_info/2" do
+ test "ignores unexpected messages" do
+ assert {:noreply, []} = TelemetryLogger.handle_info(:unexpected, [])
+ end
+ end
end
diff --git a/test/realtime/tenants/authorization_remote_test.exs b/test/realtime/tenants/authorization_remote_test.exs
index 53efe44ec..05a79e36c 100644
--- a/test/realtime/tenants/authorization_remote_test.exs
+++ b/test/realtime/tenants/authorization_remote_test.exs
@@ -1,6 +1,5 @@
defmodule Realtime.Tenants.AuthorizationRemoteTest do
# async: false due to usage of Clustered
- # Also using dev_tenant due to distributed test
use RealtimeWeb.ConnCase, async: false
use Mimic
@@ -16,7 +15,7 @@ defmodule Realtime.Tenants.AuthorizationRemoteTest do
alias Realtime.Tenants.Authorization.Policies.PresencePolicies
alias Realtime.Tenants.Connect
- setup [:rls_context]
+ setup [:remote_rls_context]
describe "get_authorizations" do
@tag role: "authenticated",
@@ -78,8 +77,6 @@ defmodule Realtime.Tenants.AuthorizationRemoteTest do
@tag role: "anon",
policies: []
test "db process is down", context do
- # Grab a remote pid that will not exist in the near future. erpc uses a new process to perform the call.
- # Once it has returned the process is not alive anymore
db_conn = :erpc.call(context.node, :erlang, :self, [])
{:error, :increase_connection_pool} =
@@ -100,8 +97,8 @@ defmodule Realtime.Tenants.AuthorizationRemoteTest do
Authorization.get_read_authorizations(%Policies{}, pid, context.authorization_context)
end
- # Waiting for RateCounter to limit
- Process.sleep(1100)
+ rate_counter = Realtime.Tenants.authorization_errors_per_second_rate(context.tenant)
+ RateCounterHelper.tick!(rate_counter)
for _ <- 1..10 do
{:error, :increase_connection_pool} =
@@ -110,9 +107,6 @@ defmodule Realtime.Tenants.AuthorizationRemoteTest do
end)
assert log =~ "IncreaseConnectionPool: Too many database timeouts"
-
- # Only one log message should be emitted
- # Splitting by the error message returns the error message and the rest of the log only
assert length(String.split(log, "IncreaseConnectionPool: Too many database timeouts")) == 2
end
@@ -127,8 +121,8 @@ defmodule Realtime.Tenants.AuthorizationRemoteTest do
Authorization.get_write_authorizations(%Policies{}, pid, context.authorization_context)
end
- # Waiting for RateCounter to limit
- Process.sleep(1100)
+ rate_counter = Realtime.Tenants.authorization_errors_per_second_rate(context.tenant)
+ RateCounterHelper.tick!(rate_counter)
for _ <- 1..10 do
{:error, :increase_connection_pool} =
@@ -137,9 +131,6 @@ defmodule Realtime.Tenants.AuthorizationRemoteTest do
end)
assert log =~ "IncreaseConnectionPool: Too many database timeouts"
-
- # Only one log message should be emitted
- # Splitting by the error message returns the error message and the rest of the log only
assert length(String.split(log, "IncreaseConnectionPool: Too many database timeouts")) == 2
end
end
@@ -184,8 +175,8 @@ defmodule Realtime.Tenants.AuthorizationRemoteTest do
end)
Task.await_many([t1, t2], 20_000)
- # Wait for RateCounter to log
- Process.sleep(1000)
+ rate_counter = Realtime.Tenants.authorization_errors_per_second_rate(context.tenant)
+ RateCounterHelper.tick!(rate_counter)
end)
external_id = context.tenant.external_id
@@ -236,12 +227,8 @@ defmodule Realtime.Tenants.AuthorizationRemoteTest do
end
end
- defp rls_context(context) do
- tenant = Realtime.Tenants.get_tenant_by_external_id("dev_tenant")
- Connect.shutdown("dev_tenant")
- # Waiting for :syn to unregister
- Process.sleep(100)
- Realtime.RateCounter.stop("dev_tenant")
+ defp remote_rls_context(context) do
+ tenant = Containers.checkout_tenant_unboxed(run_migrations: true)
{:ok, local_db_conn} = Database.connect(tenant, "realtime_test", :stop)
topic = random_string()
@@ -249,15 +236,11 @@ defmodule Realtime.Tenants.AuthorizationRemoteTest do
clean_table(local_db_conn, "realtime", "messages")
claims = %{sub: random_string(), role: context.role, exp: Joken.current_time() + 1_000}
- signer = Joken.Signer.create("HS256", "secret")
-
- jwt = Joken.generate_and_sign!(%{}, claims, signer)
authorization_context =
Authorization.build_authorization_params(%{
tenant_id: tenant.external_id,
topic: topic,
- jwt: jwt,
claims: claims,
headers: [{"header-1", "value-1"}],
role: claims.role
@@ -268,7 +251,7 @@ defmodule Realtime.Tenants.AuthorizationRemoteTest do
{:ok, node} = Clustered.start()
region = Tenants.region(tenant)
- {:ok, db_conn} = :erpc.call(node, Connect, :connect, ["dev_tenant", region])
+ {:ok, db_conn} = :erpc.call(node, Connect, :connect, [tenant.external_id, region])
assert node(db_conn) == node
diff --git a/test/realtime/tenants/authorization_test.exs b/test/realtime/tenants/authorization_test.exs
index 724e6e933..cc032e911 100644
--- a/test/realtime/tenants/authorization_test.exs
+++ b/test/realtime/tenants/authorization_test.exs
@@ -8,13 +8,13 @@ defmodule Realtime.Tenants.AuthorizationTest do
alias Realtime.Api.Message
alias Realtime.Database
- alias Realtime.Repo
+ alias Realtime.Tenants.Repo
alias Realtime.Tenants.Authorization
alias Realtime.Tenants.Authorization.Policies
alias Realtime.Tenants.Authorization.Policies.BroadcastPolicies
alias Realtime.Tenants.Authorization.Policies.PresencePolicies
- setup [:rls_context]
+ setup [:checkout_tenant_and_connect, :rls_context]
describe "get_authorizations/3" do
@tag role: "authenticated",
@@ -51,19 +51,34 @@ defmodule Realtime.Tenants.AuthorizationTest do
@tag role: "authenticated",
policies: [:read_matching_user_role]
test "user role is exposed", context do
- # policy role is checking for "authenticated"
- # set_config is setting request.jwt.claim.role to authenticated as well
assert {:ok, %Policies{broadcast: %BroadcastPolicies{read: true, write: nil}}} =
Authorization.get_read_authorizations(%Policies{}, context.db_conn, context.authorization_context)
authorization_context = %{context.authorization_context | role: "anon"}
- # policy role is checking for "authenticated"
- # set_config is setting request.jwt.claim.role to anon
assert {:ok, %Policies{broadcast: %BroadcastPolicies{read: false, write: nil}}} =
Authorization.get_read_authorizations(%Policies{}, context.db_conn, authorization_context)
end
+ @tag role: "authenticated",
+ policies: [:authenticated_read_broadcast, :authenticated_write_broadcast]
+ test "skips presence RLS check when presence is disabled", context do
+ {:ok, policies} =
+ Authorization.get_read_authorizations(%Policies{}, context.db_conn, context.authorization_context,
+ presence_enabled?: false
+ )
+
+ {:ok, policies} =
+ Authorization.get_write_authorizations(policies, context.db_conn, context.authorization_context,
+ presence_enabled?: false
+ )
+
+ assert %Policies{
+ broadcast: %BroadcastPolicies{read: true, write: true},
+ presence: %PresencePolicies{read: false, write: false}
+ } == policies
+ end
+
@tag role: "anon",
policies: [
:authenticated_read_broadcast_and_presence,
@@ -105,9 +120,8 @@ defmodule Realtime.Tenants.AuthorizationTest do
Authorization.get_read_authorizations(%Policies{}, pid, context.authorization_context)
end
- # Waiting for RateCounter to limit
- Process.sleep(1100)
- # The next auth requests will not call the database due to being rate limited
+ rate_counter = Realtime.Tenants.authorization_errors_per_second_rate(context.tenant)
+ RateCounterHelper.tick!(rate_counter)
reject(&Database.transaction/4)
for _ <- 1..10 do
@@ -117,10 +131,7 @@ defmodule Realtime.Tenants.AuthorizationTest do
end)
assert log =~ "IncreaseConnectionPool: Too many database timeouts"
-
- # Only one log message should be emitted
- # Splitting by the error message returns the error message and the rest of the log only
- assert length(String.split(log, "IncreaseConnectionPool: Too many database timeouts")) == 2
+ assert length(String.split(log, "IncreaseConnectionPool: Too many database timeouts")) <= 3
end
@tag role: "anon", policies: []
@@ -135,9 +146,8 @@ defmodule Realtime.Tenants.AuthorizationTest do
Authorization.get_write_authorizations(%Policies{}, pid, context.authorization_context)
end
- # Waiting for RateCounter to limit
- Process.sleep(1100)
- # The next auth requests will not call the database due to being rate limited
+ rate_counter = Realtime.Tenants.authorization_errors_per_second_rate(context.tenant)
+ RateCounterHelper.tick!(rate_counter)
reject(&Database.transaction/4)
for _ <- 1..10 do
@@ -147,9 +157,6 @@ defmodule Realtime.Tenants.AuthorizationTest do
end)
assert log =~ "IncreaseConnectionPool: Too many database timeouts"
-
- # Only one log message should be emitted
- # Splitting by the error message returns the error message and the rest of the log only
assert length(String.split(log, "IncreaseConnectionPool: Too many database timeouts")) == 2
end
end
@@ -192,8 +199,8 @@ defmodule Realtime.Tenants.AuthorizationTest do
end)
Task.await_many([t1, t2], 20_000)
- # Wait for RateCounter log
- Process.sleep(1000)
+ rate_counter = Realtime.Tenants.authorization_errors_per_second_rate(context.tenant)
+ RateCounterHelper.tick!(rate_counter)
end)
external_id = context.tenant.external_id
@@ -277,40 +284,6 @@ defmodule Realtime.Tenants.AuthorizationTest do
end
end
- def rls_context(context) do
- tenant = Containers.checkout_tenant(run_migrations: true)
- # Warm cache to avoid Cachex and Ecto.Sandbox ownership issues
- Cachex.put!(Realtime.Tenants.Cache, {{:get_tenant_by_external_id, 1}, [tenant.external_id]}, {:cached, tenant})
-
- {:ok, db_conn} = Database.connect(tenant, "realtime_test", :stop)
- topic = context[:topic] || random_string()
-
- create_rls_policies(db_conn, context.policies, %{topic: topic, sub: context[:sub], role: context.role})
-
- claims = %{"sub" => context[:sub] || random_string(), "role" => context.role, "exp" => Joken.current_time() + 1_000}
-
- authorization_context =
- Authorization.build_authorization_params(%{
- tenant_id: tenant.external_id,
- topic: topic,
- claims: claims,
- headers: [{"header-1", "value-1"}],
- role: claims["role"],
- sub: claims["sub"]
- })
-
- Realtime.Tenants.Migrations.create_partitions(db_conn)
-
- on_exit(fn -> Process.exit(db_conn, :kill) end)
-
- %{
- tenant: tenant,
- topic: topic,
- db_conn: db_conn,
- authorization_context: authorization_context
- }
- end
-
defp update_db_pool_size(tenant, db_pool) do
extension = hd(tenant.extensions)
@@ -318,9 +291,8 @@ defmodule Realtime.Tenants.AuthorizationTest do
extensions = [Map.from_struct(%{extension | :settings => settings})]
- {:ok, tenant} = Realtime.Api.update_tenant(tenant, %{extensions: extensions})
+ {:ok, tenant} = Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{extensions: extensions})
- # Warm cache to avoid Cachex and Ecto.Sandbox ownership issues
- Cachex.put!(Realtime.Tenants.Cache, {{:get_tenant_by_external_id, 1}, [tenant.external_id]}, {:cached, tenant})
+ Realtime.Tenants.Cache.update_cache(tenant)
end
end
diff --git a/test/realtime/tenants/batch_broadcast_test.exs b/test/realtime/tenants/batch_broadcast_test.exs
new file mode 100644
index 000000000..f5fa42764
--- /dev/null
+++ b/test/realtime/tenants/batch_broadcast_test.exs
@@ -0,0 +1,529 @@
+defmodule Realtime.Tenants.BatchBroadcastTest do
+ use RealtimeWeb.ConnCase, async: true
+ use Mimic
+
+ alias Realtime.Database
+ alias Realtime.GenCounter
+ alias Realtime.RateCounter
+ alias Realtime.Tenants
+ alias Realtime.Tenants.BatchBroadcast
+ alias Realtime.Tenants.Authorization
+ alias Realtime.Tenants.Authorization.Policies
+ alias Realtime.Tenants.Authorization.Policies.BroadcastPolicies
+ alias Realtime.Tenants.Connect
+
+ alias RealtimeWeb.TenantBroadcaster
+
+ setup do
+ tenant = Containers.checkout_tenant(run_migrations: true)
+ Realtime.Tenants.Cache.update_cache(tenant)
+ {:ok, tenant: tenant}
+ end
+
+ describe "public message broadcasting" do
+ test "broadcasts multiple public messages successfully", %{tenant: tenant} do
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+ topic1 = random_string()
+ topic2 = random_string()
+
+ messages = %{
+ messages: [
+ %{topic: topic1, payload: %{"data" => "test1"}, event: "event1"},
+ %{topic: topic2, payload: %{"data" => "test2"}, event: "event2"},
+ %{topic: topic1, payload: %{"data" => "test3"}, event: "event3"}
+ ]
+ }
+
+ expect(GenCounter, :add, 3, fn ^broadcast_events_key -> :ok end)
+ expect(TenantBroadcaster, :pubsub_broadcast, 3, fn _, _, _, _, _ -> :ok end)
+
+ assert :ok = BatchBroadcast.broadcast(nil, tenant, messages, false)
+ end
+
+ test "public messages do not have private prefix in topic", %{tenant: tenant} do
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+ topic = random_string()
+
+ messages = %{
+ messages: [%{topic: topic, payload: %{"data" => "test"}, event: "event1"}]
+ }
+
+ expect(GenCounter, :add, fn ^broadcast_events_key -> :ok end)
+
+ expect(TenantBroadcaster, :pubsub_broadcast, fn _, topic, _, _, _ ->
+ refute String.contains?(topic, "-private")
+ end)
+
+ assert :ok = BatchBroadcast.broadcast(nil, tenant, messages, false)
+ end
+ end
+
+ describe "message ID metadata" do
+ test "includes message ID in metadata when provided", %{tenant: tenant} do
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+ topic = random_string()
+
+ messages = %{
+ messages: [%{id: "msg-123", topic: topic, payload: %{"data" => "test"}, event: "event1"}]
+ }
+
+ expect(GenCounter, :add, fn ^broadcast_events_key -> :ok end)
+
+ expect(TenantBroadcaster, :pubsub_broadcast, fn _, _, broadcast, _, _ ->
+ assert %Phoenix.Socket.Broadcast{
+ payload: %{
+ "payload" => %{"data" => "test"},
+ "event" => "event1",
+ "type" => "broadcast",
+ "meta" => %{"id" => "msg-123"}
+ }
+ } = broadcast
+ end)
+
+ assert :ok = BatchBroadcast.broadcast(nil, tenant, messages, false)
+ end
+ end
+
+ describe "super user broadcasting" do
+ test "bypasses authorization for private messages with super_user flag", %{tenant: tenant} do
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+ topic1 = random_string()
+ topic2 = random_string()
+
+ messages = %{
+ messages: [
+ %{topic: topic1, payload: %{"data" => "test1"}, event: "event1", private: true},
+ %{topic: topic2, payload: %{"data" => "test2"}, event: "event2", private: true}
+ ]
+ }
+
+ expect(GenCounter, :add, 2, fn ^broadcast_events_key -> :ok end)
+ expect(TenantBroadcaster, :pubsub_broadcast, 2, fn _, _, _, _, _ -> :ok end)
+
+ assert :ok = BatchBroadcast.broadcast(nil, tenant, messages, true)
+ end
+
+ test "private messages have private prefix in topic", %{tenant: tenant} do
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+ topic = random_string()
+
+ messages = %{
+ messages: [%{topic: topic, payload: %{"data" => "test"}, event: "event1", private: true}]
+ }
+
+ expect(GenCounter, :add, fn ^broadcast_events_key -> :ok end)
+
+ expect(TenantBroadcaster, :pubsub_broadcast, fn _, topic, _, _, _ ->
+ assert String.contains?(topic, "-private")
+ end)
+
+ assert :ok = BatchBroadcast.broadcast(nil, tenant, messages, true)
+ end
+ end
+
+ describe "private message authorization" do
+ test "broadcasts private messages with valid authorization", %{tenant: tenant} do
+ topic = random_string()
+ sub = random_string()
+ role = "authenticated"
+
+ auth_params = %{
+ tenant_id: tenant.external_id,
+ topic: topic,
+ headers: [{"header-1", "value-1"}],
+ claims: %{"sub" => sub, "role" => role, "exp" => Joken.current_time() + 1_000},
+ role: role,
+ sub: sub
+ }
+
+ messages = %{messages: [%{topic: topic, payload: %{"data" => "test"}, event: "event1", private: true}]}
+
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+
+ expect(GenCounter, :add, 1, fn ^broadcast_events_key -> :ok end)
+
+ Authorization
+ |> expect(:build_authorization_params, fn params -> params end)
+ |> expect(:get_write_authorizations, fn _, _ -> {:ok, %Policies{broadcast: %BroadcastPolicies{write: true}}} end)
+
+ expect(TenantBroadcaster, :pubsub_broadcast, 1, fn _, _, _, _, _ -> :ok end)
+
+ assert :ok = BatchBroadcast.broadcast(auth_params, tenant, messages, false)
+ end
+
+ test "skips private messages without authorization", %{tenant: tenant} do
+ topic = random_string()
+ sub = random_string()
+ role = "anon"
+
+ auth_params = %{
+ tenant_id: tenant.external_id,
+ topic: topic,
+ headers: [{"header-1", "value-1"}],
+ claims: %{"sub" => sub, "role" => role, "exp" => Joken.current_time() + 1_000},
+ role: role,
+ sub: sub
+ }
+
+ Authorization
+ |> expect(:build_authorization_params, 1, fn params -> params end)
+ |> expect(:get_write_authorizations, 1, fn _, _ ->
+ {:ok, %Policies{broadcast: %BroadcastPolicies{write: false}}}
+ end)
+
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+
+ messages = %{
+ messages: [%{topic: topic, payload: %{"data" => "test"}, event: "event1", private: true}]
+ }
+
+ assert :ok = BatchBroadcast.broadcast(auth_params, tenant, messages, false)
+
+ assert calls(&TenantBroadcaster.pubsub_broadcast/5) == []
+ end
+
+ test "broadcasts only authorized topics in mixed authorization batch", %{tenant: tenant} do
+ topic = random_string()
+ sub = random_string()
+ role = "authenticated"
+
+ auth_params = %{
+ tenant_id: tenant.external_id,
+ headers: [{"header-1", "value-1"}],
+ claims: %{"sub" => sub, "role" => role, "exp" => Joken.current_time() + 1_000},
+ role: role,
+ sub: sub
+ }
+
+ messages = %{
+ messages: [
+ %{topic: topic, payload: %{"data" => "test1"}, event: "event1", private: true},
+ %{topic: random_string(), payload: %{"data" => "test2"}, event: "event2", private: true}
+ ]
+ }
+
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+
+ expect(GenCounter, :add, fn ^broadcast_events_key -> :ok end)
+
+ Authorization
+ |> expect(:build_authorization_params, 2, fn params -> params end)
+ |> expect(:get_write_authorizations, 2, fn
+ _, %{topic: ^topic} -> %Policies{broadcast: %BroadcastPolicies{write: true}}
+ _, _ -> %Policies{broadcast: %BroadcastPolicies{write: false}}
+ end)
+
+      # Only one topic will actually be broadcast
+ expect(TenantBroadcaster, :pubsub_broadcast, 1, fn _, _, %Phoenix.Socket.Broadcast{topic: ^topic}, _, _ ->
+ :ok
+ end)
+
+ assert :ok = BatchBroadcast.broadcast(auth_params, tenant, messages, false)
+ end
+
+ test "groups messages by topic and checks authorization once per topic", %{tenant: tenant} do
+ topic_1 = random_string()
+ topic_2 = random_string()
+ sub = random_string()
+ role = "authenticated"
+
+ auth_params = %{
+ tenant_id: tenant.external_id,
+ headers: [{"header-1", "value-1"}],
+ claims: %{"sub" => sub, "role" => role, "exp" => Joken.current_time() + 1_000},
+ role: role,
+ sub: sub
+ }
+
+ messages = %{
+ messages: [
+ %{topic: topic_1, payload: %{"data" => "test1"}, event: "event1", private: true},
+ %{topic: topic_2, payload: %{"data" => "test2"}, event: "event2", private: true},
+ %{topic: topic_1, payload: %{"data" => "test3"}, event: "event3", private: true}
+ ]
+ }
+
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+
+ expect(GenCounter, :add, 3, fn ^broadcast_events_key -> :ok end)
+
+ Authorization
+ |> expect(:build_authorization_params, 2, fn params -> params end)
+ |> expect(:get_write_authorizations, 2, fn _, _ ->
+ {:ok, %Policies{broadcast: %BroadcastPolicies{write: true}}}
+ end)
+
+ expect(TenantBroadcaster, :pubsub_broadcast, 3, fn _, _, _, _, _ -> :ok end)
+
+ assert :ok = BatchBroadcast.broadcast(auth_params, tenant, messages, false)
+ end
+
+ test "handles missing auth params for private messages", %{tenant: tenant} do
+ events_per_second_rate = Tenants.events_per_second_rate(tenant)
+
+ RateCounter
+ |> stub(:new, fn _ -> {:ok, nil} end)
+ |> stub(:get, fn ^events_per_second_rate -> {:ok, %RateCounter{avg: 0}} end)
+
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+ reject(&Connect.lookup_or_start_connection/1)
+
+ messages = %{
+ messages: [%{topic: "topic1", payload: %{"data" => "test"}, event: "event1", private: true}]
+ }
+
+ assert :ok = BatchBroadcast.broadcast(nil, tenant, messages, false)
+
+ assert calls(&TenantBroadcaster.pubsub_broadcast/5) == []
+ end
+ end
+
+ describe "mixed public and private messages" do
+ setup %{tenant: tenant} do
+ {:ok, db_conn} = Database.connect(tenant, "realtime_test", :stop)
+ %{db_conn: db_conn}
+ end
+
+ test "broadcasts both public and private messages together", %{tenant: tenant, db_conn: db_conn} do
+ topic = random_string()
+ sub = random_string()
+ role = "authenticated"
+
+ create_rls_policies(db_conn, [:authenticated_write_broadcast], %{topic: topic})
+
+ auth_params = %{
+ tenant_id: tenant.external_id,
+ topic: topic,
+ headers: [{"header-1", "value-1"}],
+ claims: %{"sub" => sub, "role" => role, "exp" => Joken.current_time() + 1_000},
+ role: role,
+ sub: sub
+ }
+
+ events_per_second_rate = Tenants.events_per_second_rate(tenant)
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+
+ RateCounter
+ |> stub(:new, fn _ -> {:ok, nil} end)
+ |> stub(:get, fn
+ ^events_per_second_rate ->
+ {:ok, %RateCounter{avg: 0}}
+
+ _ ->
+ {:ok,
+ %RateCounter{
+ avg: 0,
+ limit: %{log: true, value: 10, measurement: :sum, triggered: false, log_fn: fn -> :ok end}
+ }}
+ end)
+
+ expect(GenCounter, :add, 3, fn ^broadcast_events_key -> :ok end)
+ expect(Connect, :lookup_or_start_connection, fn _ -> {:ok, db_conn} end)
+
+ Authorization
+ |> expect(:build_authorization_params, fn params -> params end)
+ |> expect(:get_write_authorizations, fn _, _ ->
+ {:ok, %Policies{broadcast: %BroadcastPolicies{write: true}}}
+ end)
+
+ expect(TenantBroadcaster, :pubsub_broadcast, 3, fn _, _, _, _, _ -> :ok end)
+
+ messages = %{
+ messages: [
+ %{topic: "public1", payload: %{"data" => "public"}, event: "event1", private: false},
+ %{topic: topic, payload: %{"data" => "private"}, event: "event2", private: true},
+ %{topic: "public2", payload: %{"data" => "public2"}, event: "event3"}
+ ]
+ }
+
+ assert :ok = BatchBroadcast.broadcast(auth_params, tenant, messages, false)
+
+ broadcast_calls = calls(&TenantBroadcaster.pubsub_broadcast/5)
+ assert length(broadcast_calls) == 3
+ end
+ end
+
+ describe "Plug.Conn integration" do
+ test "accepts and converts Plug.Conn to auth params", %{tenant: tenant} do
+ topic = random_string()
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+ messages = %{messages: [%{topic: topic, payload: %{"data" => "test"}, event: "event1"}]}
+
+ expect(GenCounter, :add, fn ^broadcast_events_key -> :ok end)
+ expect(TenantBroadcaster, :pubsub_broadcast, 1, fn _, _, _, _, _ -> :ok end)
+
+ conn =
+ build_conn()
+ |> Map.put(:assigns, %{
+ claims: %{"sub" => "user123", "role" => "authenticated"},
+ role: "authenticated",
+ sub: "user123"
+ })
+ |> Map.put(:req_headers, [{"authorization", "Bearer token"}])
+
+ assert :ok = BatchBroadcast.broadcast(conn, tenant, messages, false)
+ end
+ end
+
+ describe "message validation" do
+ test "returns changeset error when topic is missing", %{tenant: tenant} do
+ messages = %{messages: [%{payload: %{"data" => "test"}, event: "event1"}]}
+
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+
+ result = BatchBroadcast.broadcast(nil, tenant, messages, false)
+ assert {:error, %Ecto.Changeset{valid?: false}} = result
+ end
+
+ test "returns changeset error when payload is missing", %{tenant: tenant} do
+ topic = random_string()
+ messages = %{messages: [%{topic: topic, event: "event1"}]}
+
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+
+ result = BatchBroadcast.broadcast(nil, tenant, messages, false)
+ assert {:error, %Ecto.Changeset{valid?: false}} = result
+ end
+
+ test "returns changeset error when event is missing", %{tenant: tenant} do
+ topic = random_string()
+ messages = %{messages: [%{topic: topic, payload: %{"data" => "test"}}]}
+
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+ result = BatchBroadcast.broadcast(nil, tenant, messages, false)
+ assert {:error, %Ecto.Changeset{valid?: false}} = result
+ end
+
+ test "returns changeset error when messages array is empty", %{tenant: tenant} do
+ messages = %{messages: []}
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+ result = BatchBroadcast.broadcast(nil, tenant, messages, false)
+ assert {:error, %Ecto.Changeset{valid?: false}} = result
+ end
+ end
+
+ describe "rate limiting" do
+ test "rejects broadcast when rate limit is exceeded", %{tenant: tenant} do
+ events_per_second_rate = Tenants.events_per_second_rate(tenant)
+ topic = random_string()
+ messages = %{messages: [%{topic: topic, payload: %{"data" => "test"}, event: "event1"}]}
+
+ RateCounter
+ |> stub(:new, fn _ -> {:ok, nil} end)
+ |> stub(:get, fn ^events_per_second_rate -> {:ok, %RateCounter{avg: tenant.max_events_per_second + 1}} end)
+
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+
+ result = BatchBroadcast.broadcast(nil, tenant, messages, false)
+ assert {:error, :too_many_requests, "You have exceeded your rate limit"} = result
+ end
+
+ test "rejects broadcast when batch would exceed rate limit", %{tenant: tenant} do
+ events_per_second_rate = Tenants.events_per_second_rate(tenant)
+
+ messages = %{
+ messages:
+ Enum.map(1..10, fn _ ->
+ %{topic: random_string(), payload: %{"data" => "test"}, event: random_string()}
+ end)
+ }
+
+ RateCounter
+ |> stub(:new, fn _ -> {:ok, nil} end)
+ |> stub(:get, fn ^events_per_second_rate ->
+ {:ok, %RateCounter{avg: tenant.max_events_per_second - 5}}
+ end)
+
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+
+ result = BatchBroadcast.broadcast(nil, tenant, messages, false)
+
+ assert {:error, :too_many_requests, "Too many messages to broadcast, please reduce the batch size"} = result
+ end
+
+ test "allows broadcast at rate limit boundary", %{tenant: tenant} do
+ events_per_second_rate = Tenants.events_per_second_rate(tenant)
+ broadcast_events_key = Tenants.events_per_second_key(tenant)
+ current_rate = tenant.max_events_per_second - 2
+
+ messages = %{
+ messages: [
+ %{topic: random_string(), payload: %{"data" => "test1"}, event: "event1"},
+ %{topic: random_string(), payload: %{"data" => "test2"}, event: "event2"}
+ ]
+ }
+
+ RateCounter
+ |> stub(:new, fn _ -> {:ok, nil} end)
+ |> stub(:get, fn ^events_per_second_rate ->
+ {:ok, %RateCounter{avg: current_rate}}
+ end)
+
+ expect(GenCounter, :add, 2, fn ^broadcast_events_key -> :ok end)
+ expect(TenantBroadcaster, :pubsub_broadcast, 2, fn _, _, _, _, _ -> :ok end)
+
+ assert :ok = BatchBroadcast.broadcast(nil, tenant, messages, false)
+ end
+
+ test "rejects broadcast when payload size exceeds tenant limit", %{tenant: tenant} do
+ messages = %{
+ messages: [
+ %{
+ topic: random_string(),
+ payload: %{"data" => random_string(tenant.max_payload_size_in_kb * 1000 + 1)},
+ event: "event1"
+ }
+ ]
+ }
+
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+
+ result = BatchBroadcast.broadcast(nil, tenant, messages, false)
+
+ assert {:error,
+ %Ecto.Changeset{
+ valid?: false,
+ changes: %{messages: [%{errors: [payload: {"Payload size exceeds tenant limit", []}]}]}
+ }} = result
+ end
+ end
+
+ describe "error handling" do
+ test "returns error when tenant is nil" do
+ messages = %{messages: [%{topic: "topic1", payload: %{"data" => "test"}, event: "event1"}]}
+ assert {:error, :tenant_not_found} = BatchBroadcast.broadcast(nil, nil, messages, false)
+ end
+
+ test "gracefully handles database connection errors for private messages", %{tenant: tenant} do
+ topic = random_string()
+ sub = random_string()
+ role = "authenticated"
+
+ auth_params = %{
+ tenant_id: tenant.external_id,
+ headers: [{"header-1", "value-1"}],
+ claims: %{"sub" => sub, "role" => role, "exp" => Joken.current_time() + 1_000},
+ role: role,
+ sub: sub
+ }
+
+ events_per_second_rate = Tenants.events_per_second_rate(tenant)
+
+ RateCounter
+ |> stub(:new, fn _ -> {:ok, nil} end)
+ |> stub(:get, fn ^events_per_second_rate -> {:ok, %RateCounter{avg: 0}} end)
+
+ expect(Connect, :lookup_or_start_connection, fn _ -> {:error, :connection_failed} end)
+
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
+
+ messages = %{
+ messages: [%{topic: topic, payload: %{"data" => "test"}, event: "event1", private: true}]
+ }
+
+ assert :ok = BatchBroadcast.broadcast(auth_params, tenant, messages, false)
+
+ assert calls(&TenantBroadcaster.pubsub_broadcast/5) == []
+ end
+ end
+end
diff --git a/test/realtime/tenants/cache_test.exs b/test/realtime/tenants/cache_test.exs
index 1889c94ef..3c2e05ceb 100644
--- a/test/realtime/tenants/cache_test.exs
+++ b/test/realtime/tenants/cache_test.exs
@@ -1,11 +1,10 @@
defmodule Realtime.Tenants.CacheTest do
- alias Realtime.Rpc
- # async: false due to the usage of dev_realtime tenant
use Realtime.DataCase, async: false
alias Realtime.Api
- alias Realtime.Tenants.Cache
+ alias Realtime.Rpc
alias Realtime.Tenants
+ alias Realtime.Tenants.Cache
setup do
{:ok, tenant: tenant_fixture()}
@@ -15,10 +14,18 @@ defmodule Realtime.Tenants.CacheTest do
test "tenants cache returns a cached result", %{tenant: tenant} do
external_id = tenant.external_id
assert %Api.Tenant{name: "tenant"} = Cache.get_tenant_by_external_id(external_id)
- Api.update_tenant(tenant, %{name: "new name"})
+
+ changeset = Api.Tenant.changeset(tenant, %{name: "new name"})
+ Repo.update!(changeset)
assert %Api.Tenant{name: "new name"} = Tenants.get_tenant_by_external_id(external_id)
assert %Api.Tenant{name: "tenant"} = Cache.get_tenant_by_external_id(external_id)
end
+
+ test "does not cache when tenant is not found" do
+ assert Cache.get_tenant_by_external_id("not found") == nil
+
+ assert Cachex.exists?(Cache, {:get_tenant_by_external_id, "not found"}) == {:ok, false}
+ end
end
describe "invalidate_tenant_cache/1" do
@@ -38,43 +45,130 @@ defmodule Realtime.Tenants.CacheTest do
end
end
+ describe "update_cache/1" do
+ test "updates the cache given a tenant", %{tenant: tenant} do
+ external_id = tenant.external_id
+ assert %Api.Tenant{name: "tenant"} = Cache.get_tenant_by_external_id(external_id)
+ # Update a tenant
+ updated_tenant = %{tenant | name: "updated name"}
+ # Update cache
+ Cache.update_cache(updated_tenant)
+ assert %Api.Tenant{name: "updated name"} = Cache.get_tenant_by_external_id(external_id)
+ end
+ end
+
describe "distributed_invalidate_tenant_cache/1" do
setup do
{:ok, node} = Clustered.start()
- %{node: node}
+
+ tenant =
+ Ecto.Adapters.SQL.Sandbox.unboxed_run(Realtime.Repo, fn ->
+ tenant_fixture()
+ end)
+
+ on_exit(fn ->
+ Ecto.Adapters.SQL.Sandbox.unboxed_run(Realtime.Repo, fn ->
+ Realtime.Api.delete_tenant_by_external_id(tenant.external_id)
+ end)
+ end)
+
+ %{node: node, tenant: tenant}
+ end
+
+ test "invalidates the cache given a tenant_id", %{node: node, tenant: tenant} do
+ external_id = tenant.external_id
+ expected_name = tenant.name
+ dummy_name = random_string()
+ dummy_tenant = %{tenant | name: dummy_name}
+
+ assert {:ok, true} = Cache.update_cache(dummy_tenant)
+
+ assert {:ok, %Api.Tenant{name: ^dummy_name}} =
+ Cachex.get(Cache, {:get_tenant_by_external_id, external_id})
+
+ seed_remote_cache(node, external_id, dummy_tenant)
+
+ assert :ok = Cache.distributed_invalidate_tenant_cache(external_id)
+
+ assert_eventually(fn ->
+ %Api.Tenant{name: ^expected_name} = Cache.get_tenant_by_external_id(external_id)
+
+ %Api.Tenant{name: ^expected_name} =
+ Rpc.enhanced_call(node, Cache, :get_tenant_by_external_id, [external_id])
+ end)
end
+ end
+
+ describe "global_cache_update/1" do
+ setup do
+ {:ok, node} = Clustered.start()
+
+ tenant =
+ Ecto.Adapters.SQL.Sandbox.unboxed_run(Realtime.Repo, fn ->
+ tenant_fixture()
+ end)
+
+ on_exit(fn ->
+ Ecto.Adapters.SQL.Sandbox.unboxed_run(Realtime.Repo, fn ->
+ Realtime.Api.delete_tenant_by_external_id(tenant.external_id)
+ end)
+ end)
- test "invalidates the cache given a tenant_id", %{node: node} do
- external_id = "dev_tenant"
- %Api.Tenant{name: expected_name} = tenant = Tenants.get_tenant_by_external_id(external_id)
+ %{node: node, tenant: tenant}
+ end
+    test "updates the cache given a tenant_id", %{node: node, tenant: tenant} do
+ external_id = tenant.external_id
+ expected_name = tenant.name
dummy_name = random_string()
+ dummy_tenant = %{tenant | name: dummy_name}
- # Ensure cache has the values
- Cachex.put!(
- Realtime.Tenants.Cache,
- {{:get_tenant_by_external_id, 1}, [external_id]},
- {:cached, %{tenant | name: dummy_name}}
- )
+ assert {:ok, true} = Cache.update_cache(dummy_tenant)
- Rpc.enhanced_call(node, Cachex, :put!, [
- Realtime.Tenants.Cache,
- {{:get_tenant_by_external_id, 1}, [external_id]},
- {:cached, %{tenant | name: dummy_name}}
- ])
+ assert {:ok, %Api.Tenant{name: ^dummy_name}} =
+ Cachex.get(Cache, {:get_tenant_by_external_id, external_id})
- # Cache showing old value
- assert %Api.Tenant{name: ^dummy_name} = Cache.get_tenant_by_external_id(external_id)
- assert %Api.Tenant{name: ^dummy_name} = Rpc.enhanced_call(node, Cache, :get_tenant_by_external_id, [external_id])
+ seed_remote_cache(node, external_id, dummy_tenant)
- # Invalidate cache
- assert true = Cache.distributed_invalidate_tenant_cache(external_id)
+ assert :ok = Cache.global_cache_update(tenant)
+
+ assert_eventually(fn ->
+ {:ok, %Api.Tenant{name: ^expected_name}} =
+ Cachex.get(Cache, {:get_tenant_by_external_id, external_id})
+
+ {:ok, %Api.Tenant{name: ^expected_name}} =
+ Rpc.enhanced_call(node, Cachex, :get, [Cache, {:get_tenant_by_external_id, external_id}])
+ end)
+ end
+ end
+
+ defp seed_remote_cache(node, external_id, tenant, attempts \\ 20) do
+ Rpc.enhanced_call(node, Cache, :update_cache, [tenant])
- # Cache showing new value
- assert %Api.Tenant{name: ^expected_name} = Cache.get_tenant_by_external_id(external_id)
+ case Rpc.enhanced_call(node, Cachex, :get, [Cache, {:get_tenant_by_external_id, external_id}]) do
+ {:ok, %Api.Tenant{external_id: ^external_id, name: name}} when name == tenant.name ->
+ :ok
- assert %Api.Tenant{name: ^expected_name} =
- Rpc.enhanced_call(node, Cache, :get_tenant_by_external_id, [external_id])
+ _other when attempts > 0 ->
+ Process.sleep(50)
+ seed_remote_cache(node, external_id, tenant, attempts - 1)
+
+ other ->
+ flunk("Failed to seed remote cache after retries, last result: #{inspect(other)}")
end
end
+
+ defp assert_eventually(fun, attempts \\ 50, interval \\ 100)
+
+ defp assert_eventually(fun, 0, _interval) do
+ fun.()
+ end
+
+ defp assert_eventually(fun, attempts, interval) do
+ fun.()
+ rescue
+ _ ->
+ Process.sleep(interval)
+ assert_eventually(fun, attempts - 1, interval)
+ end
end
diff --git a/test/realtime/tenants/connect/reconcile_migrations_test.exs b/test/realtime/tenants/connect/reconcile_migrations_test.exs
new file mode 100644
index 000000000..19f023802
--- /dev/null
+++ b/test/realtime/tenants/connect/reconcile_migrations_test.exs
@@ -0,0 +1,39 @@
+defmodule Realtime.Tenants.Connect.ReconcileMigrationsTest do
+ use Realtime.DataCase, async: true
+
+ alias Realtime.Tenants.Connect.ReconcileMigrations
+ alias Realtime.Tenants.Migrations
+
+ setup do
+ tenant = Containers.checkout_tenant(run_migrations: true)
+ %{tenant: tenant}
+ end
+
+ describe "run/1" do
+ test "does nothing when migrations_ran matches database count", %{tenant: tenant} do
+ acc = %{tenant: tenant, migrations_ran_on_database: tenant.migrations_ran}
+
+ assert {:ok, %{tenant: returned_tenant}} = ReconcileMigrations.run(acc)
+ assert returned_tenant.migrations_ran == tenant.migrations_ran
+ end
+
+ test "updates tenant when database has fewer migrations than cached count", %{tenant: tenant} do
+ stale_count = tenant.migrations_ran - 5
+ acc = %{tenant: tenant, migrations_ran_on_database: stale_count}
+
+ assert {:ok, %{tenant: updated_tenant}} = ReconcileMigrations.run(acc)
+ assert updated_tenant.migrations_ran == stale_count
+ end
+
+ test "updates tenant when database has more migrations than cached count", %{tenant: tenant} do
+ {:ok, tenant} =
+ Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{migrations_ran: 0})
+
+ total = Enum.count(Migrations.migrations())
+ acc = %{tenant: tenant, migrations_ran_on_database: total}
+
+ assert {:ok, %{tenant: updated_tenant}} = ReconcileMigrations.run(acc)
+ assert updated_tenant.migrations_ran == total
+ end
+ end
+end
diff --git a/test/realtime/tenants/connect/register_process_test.exs b/test/realtime/tenants/connect/register_process_test.exs
index d4227996f..02cc33391 100644
--- a/test/realtime/tenants/connect/register_process_test.exs
+++ b/test/realtime/tenants/connect/register_process_test.exs
@@ -7,7 +7,7 @@ defmodule Realtime.Tenants.Connect.RegisterProcessTest do
setup do
tenant = Containers.checkout_tenant(run_migrations: true)
# Warm cache to avoid Cachex and Ecto.Sandbox ownership issues
- Cachex.put!(Realtime.Tenants.Cache, {{:get_tenant_by_external_id, 1}, [tenant.external_id]}, {:cached, tenant})
+ Realtime.Tenants.Cache.update_cache(tenant)
{:ok, conn} = Database.connect(tenant, "realtime_test")
%{tenant_id: tenant.external_id, db_conn_pid: conn}
end
diff --git a/test/realtime/tenants/connect_test.exs b/test/realtime/tenants/connect_test.exs
index 290fb1c8d..66c04634d 100644
--- a/test/realtime/tenants/connect_test.exs
+++ b/test/realtime/tenants/connect_test.exs
@@ -50,7 +50,50 @@ defmodule Realtime.Tenants.ConnectTest do
end
end
+ describe "list_tenants/0" do
+ test "lists all tenants with active connections", %{tenant: tenant1} do
+ tenant2 = Containers.checkout_tenant(run_migrations: true)
+ assert {:ok, _} = Connect.lookup_or_start_connection(tenant1.external_id)
+ assert {:ok, _} = Connect.lookup_or_start_connection(tenant2.external_id)
+
+ list_tenants = Connect.list_tenants() |> MapSet.new()
+ tenants = MapSet.new([tenant1.external_id, tenant2.external_id])
+
+ assert MapSet.subset?(tenants, list_tenants)
+ end
+ end
+
describe "handle cold start" do
+ test "multiple processes connecting calling Connect.connect", %{tenant: tenant} do
+ parent = self()
+
+ # Let's slow down Connect.connect so that multiple RPC calls are executed
+ stub(Connect, :connect, fn x, y, z ->
+ :timer.sleep(1000)
+ call_original(Connect, :connect, [x, y, z])
+ end)
+
+ connect = fn -> send(parent, Connect.lookup_or_start_connection(tenant.external_id)) end
+ # Let's call enough times to potentially trigger the Connect RateCounter
+
+ for _ <- 1..50, do: spawn(connect)
+
+ assert_receive({:ok, pid}, 2000)
+
+ for _ <- 1..49, do: assert_receive({:ok, ^pid})
+
+ # Does not trigger rate limit as connections eventually succeeded
+
+ {:ok, rate_counter} =
+ tenant.external_id
+ |> Tenants.connect_errors_per_second_rate()
+ |> Realtime.RateCounter.get()
+
+ assert rate_counter.sum == 0
+ assert rate_counter.avg == 0.0
+ assert rate_counter.limit.triggered == false
+ end
+
test "multiple proccesses succeed together", %{tenant: tenant} do
parent = self()
@@ -78,12 +121,55 @@ defmodule Realtime.Tenants.ConnectTest do
assert_receive {:ok, ^pid}
end
- test "more than 5 seconds passed error out", %{tenant: tenant} do
+ test "more than 15 seconds passed error out", %{tenant: tenant} do
parent = self()
# Let's slow down Connect starting
expect(Database, :check_tenant_connection, fn t ->
- :timer.sleep(5500)
+ Process.sleep(15500)
+ call_original(Database, :check_tenant_connection, [t])
+ end)
+
+ connect = fn -> send(parent, Connect.lookup_or_start_connection(tenant.external_id)) end
+
+ spawn(connect)
+ spawn(connect)
+
+ {:error, :initializing} = Connect.lookup_or_start_connection(tenant.external_id)
+ # The above call waited 15 seconds
+ assert_receive {:error, :initializing}
+ assert_receive {:error, :initializing}
+
+ # This one will succeed
+ {:ok, _pid} = Connect.lookup_or_start_connection(tenant.external_id)
+ end
+
+ test "too many db connections", %{tenant: tenant} do
+ extension = %{
+ "type" => "postgres_cdc_rls",
+ "settings" => %{
+ "db_host" => "127.0.0.1",
+ "db_name" => "postgres",
+ "db_user" => "supabase_admin",
+ "db_password" => "postgres",
+ "poll_interval" => 100,
+ "poll_max_changes" => 100,
+ "poll_max_record_bytes" => 1_048_576,
+ "region" => "us-east-1",
+ "ssl_enforced" => false,
+ "db_pool" => 100,
+ "subcriber_pool_size" => 100,
+ "subs_pool_size" => 100
+ }
+ }
+
+ {:ok, tenant} = update_extension(tenant, extension)
+
+ parent = self()
+
+ # Let's slow down Connect starting
+ expect(Database, :check_tenant_connection, fn t ->
+ :timer.sleep(1000)
call_original(Database, :check_tenant_connection, [t])
end)
@@ -97,12 +183,13 @@ defmodule Realtime.Tenants.ConnectTest do
spawn(connect)
spawn(connect)
- {:error, :tenant_database_unavailable} = Connect.lookup_or_start_connection(tenant.external_id)
+ # This call blocks until the first Connect attempt finishes, then returns its error
+ {:error, :tenant_db_too_many_connections} = Connect.lookup_or_start_connection(tenant.external_id)
- # Only one will succeed the others timed out waiting
- assert_receive {:error, :tenant_database_unavailable}
- assert_receive {:error, :tenant_database_unavailable}
- assert_receive {:ok, _pid}, 7000
+ assert_receive {:error, :tenant_db_too_many_connections}
+ assert_receive {:error, :tenant_db_too_many_connections}
+ assert_receive {:error, :tenant_db_too_many_connections}
+ refute_receive _any
end
end
@@ -113,11 +200,9 @@ defmodule Realtime.Tenants.ConnectTest do
log =
capture_log(fn ->
assert {:ok, db_conn} = Connect.lookup_or_start_connection(external_id, check_connect_region_interval: 100)
-
expect(Rebalancer, :check, 1, fn _, _, ^external_id -> {:error, :wrong_region} end)
reject(&Rebalancer.check/3)
-
- assert_process_down(db_conn, 500, {:shutdown, :rebalancing})
+ assert_process_down(db_conn, 1000, {:shutdown, :rebalancing})
end)
assert log =~ "Rebalancing Tenant database connection"
@@ -253,10 +338,9 @@ defmodule Realtime.Tenants.ConnectTest do
{:ok, db_conn} = Connect.lookup_or_start_connection(external_id, check_connected_user_interval: 10)
region = Tenants.region(tenant)
assert {_pid, %{conn: ^db_conn, region: ^region}} = :syn.lookup(Connect, external_id)
+ Beacon.leave(:users, external_id, self())
Process.sleep(1000)
- :syn.leave(:users, external_id, self())
- Process.sleep(1000)
- assert :undefined = :syn.lookup(Connect, external_id)
+ refute Beacon.local_member?(:users, external_id, self())
refute Process.alive?(db_conn)
Connect.shutdown(external_id)
end
@@ -267,6 +351,34 @@ defmodule Realtime.Tenants.ConnectTest do
assert {:error, :tenant_suspended} = Connect.lookup_or_start_connection(tenant.external_id)
end
+ test "tenant not able to connect if database has not enough connections", %{
+ tenant: tenant
+ } do
+ extension = %{
+ "type" => "postgres_cdc_rls",
+ "settings" => %{
+ "db_host" => "127.0.0.1",
+ "db_name" => "postgres",
+ "db_user" => "supabase_admin",
+ "db_password" => "postgres",
+ "poll_interval" => 100,
+ "poll_max_changes" => 100,
+ "poll_max_record_bytes" => 1_048_576,
+ "region" => "us-east-1",
+ "ssl_enforced" => false,
+ "db_pool" => 100,
+ "subcriber_pool_size" => 100,
+ "subs_pool_size" => 100
+ }
+ }
+
+ {:ok, tenant} = update_extension(tenant, extension)
+
+ assert capture_log(fn ->
+ assert {:error, :tenant_db_too_many_connections} = Connect.lookup_or_start_connection(tenant.external_id)
+ end) =~ ~r/Only \d+ available connections\. At least \d+ connections are required/
+ end
+
test "handles tenant suspension and unsuspension in a reactive way", %{tenant: tenant} do
assert {:ok, db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
assert Connect.ready?(tenant.external_id)
@@ -338,6 +450,30 @@ defmodule Realtime.Tenants.ConnectTest do
refute Process.alive?(pid)
end
+ test "reconciles migrations_ran when database count differs from cached value", %{tenant: tenant} do
+ total_migrations = Enum.count(Realtime.Tenants.Migrations.migrations())
+ stale_count = tenant.migrations_ran - 5
+ parent = self()
+
+ expect(Database, :check_tenant_connection, fn t ->
+ {:ok, conn, _actual_count} = call_original(Database, :check_tenant_connection, [t])
+ {:ok, conn, stale_count}
+ end)
+
+ expect(Realtime.Tenants.Migrations, :run_migrations, fn tenant ->
+ send(parent, {:migrations_ran_at_run, tenant.migrations_ran})
+ call_original(Realtime.Tenants.Migrations, :run_migrations, [tenant])
+ end)
+
+ assert {:ok, _db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
+ assert Connect.ready?(tenant.external_id)
+
+ assert_receive {:migrations_ran_at_run, ^stale_count}
+
+ updated_tenant = Tenants.get_tenant_by_external_id(tenant.external_id)
+ assert updated_tenant.migrations_ran == total_migrations
+ end
+
test "starts broadcast handler and does not fail on existing connection", %{tenant: tenant} do
assert {:ok, _db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
assert Connect.ready?(tenant.external_id)
@@ -345,48 +481,97 @@ defmodule Realtime.Tenants.ConnectTest do
replication_connection_before = ReplicationConnection.whereis(tenant.external_id)
assert Process.alive?(replication_connection_before)
+ assert {:ok, replication_conn_pid_before} = Connect.replication_status(tenant.external_id)
+
assert {:ok, _db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
replication_connection_after = ReplicationConnection.whereis(tenant.external_id)
assert Process.alive?(replication_connection_after)
assert replication_connection_before == replication_connection_after
+
+ assert {:ok, replication_conn_pid_after} = Connect.replication_status(tenant.external_id)
+ assert replication_conn_pid_before == replication_conn_pid_after
end
- test "on replication connection postgres pid being stopped, also kills the Connect module", %{tenant: tenant} do
+ test "on replication connection postgres pid being stopped, Connect module recovers it", %{tenant: tenant} do
assert {:ok, db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
assert Connect.ready?(tenant.external_id)
replication_connection_pid = ReplicationConnection.whereis(tenant.external_id)
+ Process.monitor(replication_connection_pid)
+
assert Process.alive?(replication_connection_pid)
pid = Connect.whereis(tenant.external_id)
+ assert {:ok, replication_conn_before} = Connect.replication_status(tenant.external_id)
+
Postgrex.query!(
db_conn,
"SELECT pg_terminate_backend(pid) from pg_stat_activity where application_name='realtime_replication_connection'",
[]
)
- assert_process_down(replication_connection_pid)
- assert_process_down(pid)
+ assert_receive {:DOWN, _, :process, ^replication_connection_pid, _}
+
+ Process.sleep(100)
+ assert {:error, :not_connected} = Connect.replication_status(tenant.external_id)
+
+ new_replication_connection_pid = assert_pid(fn -> ReplicationConnection.whereis(tenant.external_id) end)
+
+ assert replication_connection_pid != new_replication_connection_pid
+ assert Process.alive?(new_replication_connection_pid)
+ assert Process.alive?(pid)
+
+ assert {:ok, replication_conn_after} = assert_replication_status(tenant.external_id)
+ assert replication_conn_before != replication_conn_after
end
- test "on replication connection exit, also kills the Connect module", %{tenant: tenant} do
+ test "on replication connection exit, Connect module recovers it", %{tenant: tenant} do
assert {:ok, _db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
assert Connect.ready?(tenant.external_id)
replication_connection_pid = ReplicationConnection.whereis(tenant.external_id)
+ Process.monitor(replication_connection_pid)
assert Process.alive?(replication_connection_pid)
pid = Connect.whereis(tenant.external_id)
+
+ assert {:ok, replication_conn_before} = Connect.replication_status(tenant.external_id)
+
Process.exit(replication_connection_pid, :kill)
+ assert_receive {:DOWN, _, :process, ^replication_connection_pid, _}
- assert_process_down(replication_connection_pid)
- assert_process_down(pid)
+ Process.sleep(100)
+ assert {:error, :not_connected} = Connect.replication_status(tenant.external_id)
+
+ new_replication_connection_pid = assert_pid(fn -> ReplicationConnection.whereis(tenant.external_id) end)
+
+ assert replication_connection_pid != new_replication_connection_pid
+ assert Process.alive?(new_replication_connection_pid)
+ assert Process.alive?(pid)
+
+ assert {:ok, replication_conn_after} = assert_replication_status(tenant.external_id)
+ assert replication_conn_before != replication_conn_after
+ end
+
+ test "handles replication connection timeout by logging and shutting down", %{tenant: tenant} do
+ expect(ReplicationConnection, :start, fn _tenant, _pid ->
+ {:error, :replication_connection_timeout}
+ end)
+
+ log =
+ capture_log(fn ->
+ assert {:ok, db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
+ assert_process_down(db_conn)
+ end)
+
+ assert log =~ "ReplicationConnectionTimeout"
+ assert log =~ "Replication connection timed out during initialization"
end
test "handles max_wal_senders by logging the correct operational code", %{tenant: tenant} do
opts = tenant |> Database.from_tenant("realtime_test", :stop) |> Database.opts()
+ parent = self()
- # This creates a loop of errors that occupies all WAL senders and lets us test the error handling
pids =
for i <- 0..4 do
replication_slot_opts =
@@ -402,6 +587,7 @@ defmodule Realtime.Tenants.ConnectTest do
spawn(fn ->
{:ok, pid} = PostgresReplication.start_link(replication_slot_opts)
+ send(parent, {:replication_ready, i})
receive do
:stop -> Process.exit(pid, :kill)
@@ -409,6 +595,8 @@ defmodule Realtime.Tenants.ConnectTest do
end)
end
+ for i <- 0..4, do: assert_receive({:replication_ready, ^i}, 5000)
+
on_exit(fn ->
Enum.each(pids, &send(&1, :stop))
Process.sleep(2000)
@@ -429,6 +617,53 @@ defmodule Realtime.Tenants.ConnectTest do
assert capture_log(fn -> assert {:error, :rpc_error, _} = Connect.lookup_or_start_connection("tenant") end) =~
"project=tenant external_id=tenant [error] ErrorOnRpcCall"
end
+
+ test "rate limit connect when too many connections against bad database", %{tenant: tenant} do
+ extension = %{
+ "type" => "postgres_cdc_rls",
+ "settings" => %{
+ "db_host" => "127.0.0.1",
+ "db_name" => "postgres",
+ "db_user" => "supabase_admin",
+ "db_password" => "postgres",
+ "poll_interval" => 100,
+ "poll_max_changes" => 100,
+ "poll_max_record_bytes" => 1_048_576,
+ "region" => "us-east-1",
+ "ssl_enforced" => true
+ }
+ }
+
+ {:ok, tenant} = update_extension(tenant, extension)
+
+ log =
+ capture_log(fn ->
+ res =
+ for _ <- 1..10 do
+ Process.sleep(250)
+ Connect.lookup_or_start_connection(tenant.external_id)
+ end
+
+ assert Enum.any?(res, fn {_, res} -> res == :connect_rate_limit_reached end)
+ end)
+
+ assert log =~ "DatabaseConnectionRateLimitReached: Too many connection attempts against the tenant database"
+ end
+
+ test "rate limit connect will not trigger if connection is successful", %{tenant: tenant} do
+ log =
+ capture_log(fn ->
+ res =
+ for _ <- 1..20 do
+ Process.sleep(500)
+ Connect.lookup_or_start_connection(tenant.external_id)
+ end
+
+ refute Enum.any?(res, fn {_, res} -> res == :tenant_db_too_many_connections end)
+ end)
+
+ refute log =~ "DatabaseConnectionRateLimitReached: Too many connection attempts against the tenant database"
+ end
end
describe "shutdown/1" do
@@ -441,6 +676,9 @@ defmodule Realtime.Tenants.ConnectTest do
assert Process.alive?(connect_pid)
assert Process.alive?(replication_connection_pid)
+ assert {_, %{conn: ^db_conn}} = :syn.lookup(Connect, tenant.external_id)
+ assert {:ok, _replication_conn_pid} = Connect.replication_status(tenant.external_id)
+
Connect.shutdown(tenant.external_id)
assert_process_down(connect_pid)
assert_process_down(replication_connection_pid)
@@ -449,30 +687,6 @@ defmodule Realtime.Tenants.ConnectTest do
test "if tenant does not exist, does nothing" do
assert :ok = Connect.shutdown("none")
end
-
- test "tenant not able to connect if database has not enough connections", %{tenant: tenant} do
- extension = %{
- "type" => "postgres_cdc_rls",
- "settings" => %{
- "db_host" => "127.0.0.1",
- "db_name" => "postgres",
- "db_user" => "supabase_admin",
- "db_password" => "postgres",
- "poll_interval" => 100,
- "poll_max_changes" => 100,
- "poll_max_record_bytes" => 1_048_576,
- "region" => "us-east-1",
- "ssl_enforced" => false,
- "db_pool" => 100,
- "subcriber_pool_size" => 100,
- "subs_pool_size" => 100
- }
- }
-
- {:ok, tenant} = update_extension(tenant, extension)
-
- assert {:error, :tenant_db_too_many_connections} = Connect.lookup_or_start_connection(tenant.external_id)
- end
end
describe "registers into local registry" do
@@ -519,6 +733,36 @@ defmodule Realtime.Tenants.ConnectTest do
put_in(extension, ["settings", "db_port"], db_port)
]
- Realtime.Api.update_tenant(tenant, %{extensions: extensions})
+ Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{extensions: extensions})
+ end
+
+ defp assert_pid(call, attempts \\ 10)
+
+ defp assert_pid(_call, 0) do
+ raise "Timeout waiting for pid"
+ end
+
+ defp assert_pid(call, attempts) do
+ case call.() do
+ pid when is_pid(pid) ->
+ pid
+
+ _ ->
+ Process.sleep(500)
+ assert_pid(call, attempts - 1)
+ end
+ end
+
+ defp assert_replication_status(tenant_id, attempts \\ 20)
+
+ defp assert_replication_status(tenant_id, 0) do
+ Connect.replication_status(tenant_id)
+ end
+
+ defp assert_replication_status(tenant_id, attempts) do
+ case Connect.replication_status(tenant_id) do
+ {:ok, _} = result -> result
+ _ -> Process.sleep(500) && assert_replication_status(tenant_id, attempts - 1)
+ end
end
end
diff --git a/test/realtime/tenants/janitor/maintenance_task_test.exs b/test/realtime/tenants/janitor/maintenance_task_test.exs
index f4c51436e..5d4aea474 100644
--- a/test/realtime/tenants/janitor/maintenance_task_test.exs
+++ b/test/realtime/tenants/janitor/maintenance_task_test.exs
@@ -4,20 +4,26 @@ defmodule Realtime.Tenants.Janitor.MaintenanceTaskTest do
alias Realtime.Tenants.Janitor.MaintenanceTask
alias Realtime.Api.Message
alias Realtime.Database
- alias Realtime.Repo
+ alias Realtime.Tenants.Repo
setup do
tenant = Containers.checkout_tenant(run_migrations: true)
# Warm cache to avoid Cachex and Ecto.Sandbox ownership issues
- Cachex.put!(Realtime.Tenants.Cache, {{:get_tenant_by_external_id, 1}, [tenant.external_id]}, {:cached, tenant})
+ Realtime.Tenants.Cache.update_cache(tenant)
%{tenant: tenant}
end
test "cleans messages older than 72 hours and creates partitions", %{tenant: tenant} do
+ {:ok, conn} = Database.connect(tenant, "realtime_test", :stop)
+
utc_now = NaiveDateTime.utc_now()
limit = NaiveDateTime.add(utc_now, -72, :hour)
+ date_start = Date.utc_today() |> Date.add(-10)
+ date_end = Date.utc_today()
+ create_messages_partitions(conn, date_start, date_end)
+
messages =
for days <- -5..0 do
inserted_at = NaiveDateTime.add(utc_now, days, :day)
@@ -27,12 +33,11 @@ defmodule Realtime.Tenants.Janitor.MaintenanceTaskTest do
to_keep =
messages
- |> Enum.reject(&(NaiveDateTime.compare(limit, &1.inserted_at) == :gt))
+ |> Enum.reject(&(NaiveDateTime.compare(NaiveDateTime.beginning_of_day(limit), &1.inserted_at) == :gt))
|> MapSet.new()
assert MaintenanceTask.run(tenant.external_id) == :ok
- {:ok, conn} = Database.connect(tenant, "realtime_test", :stop)
{:ok, res} = Repo.all(conn, from(m in Message), Message)
verify_partitions(conn)
@@ -63,7 +68,7 @@ defmodule Realtime.Tenants.Janitor.MaintenanceTaskTest do
tenant = tenant_fixture(%{extensions: extensions})
# Warm cache to avoid Cachex and Ecto.Sandbox ownership issues
- Cachex.put!(Realtime.Tenants.Cache, {{:get_tenant_by_external_id, 1}, [tenant.external_id]}, {:cached, tenant})
+ Realtime.Tenants.Cache.update_cache(tenant)
Process.flag(:trap_exit, true)
@@ -80,7 +85,7 @@ defmodule Realtime.Tenants.Janitor.MaintenanceTaskTest do
defp verify_partitions(conn) do
today = Date.utc_today()
- yesterday = Date.add(today, -1)
+ yesterday = Date.add(today, -3)
future = Date.add(today, 3)
dates = Date.range(yesterday, future)
diff --git a/test/realtime/tenants/janitor_test.exs b/test/realtime/tenants/janitor_test.exs
index 4ac1a0eda..aa32b86f8 100644
--- a/test/realtime/tenants/janitor_test.exs
+++ b/test/realtime/tenants/janitor_test.exs
@@ -6,9 +6,9 @@ defmodule Realtime.Tenants.JanitorTest do
alias Realtime.Api.Message
alias Realtime.Database
- alias Realtime.Repo
alias Realtime.Tenants.Janitor
alias Realtime.Tenants.Connect
+ alias Realtime.Tenants.Repo
setup do
:ets.delete_all_objects(Connect)
@@ -24,13 +24,21 @@ defmodule Realtime.Tenants.JanitorTest do
Enum.map(
[tenant1, tenant2],
fn tenant ->
- tenant = Repo.preload(tenant, :extensions)
+ tenant = Realtime.Repo.preload(tenant, :extensions)
Connect.lookup_or_start_connection(tenant.external_id)
Process.sleep(500)
tenant
end
)
+ date_start = Date.utc_today() |> Date.add(-10)
+ date_end = Date.utc_today()
+
+ Enum.map(tenants, fn tenant ->
+ {:ok, conn} = Database.connect(tenant, "realtime_test", :stop)
+ create_messages_partitions(conn, date_start, date_end)
+ end)
+
start_supervised!(
{Task.Supervisor,
name: Realtime.Tenants.Janitor.TaskSupervisor, max_children: 5, max_seconds: 500, max_restarts: 1}
@@ -62,7 +70,7 @@ defmodule Realtime.Tenants.JanitorTest do
to_keep =
messages
- |> Enum.reject(&(NaiveDateTime.compare(limit, &1.inserted_at) == :gt))
+ |> Enum.reject(&(NaiveDateTime.compare(NaiveDateTime.beginning_of_day(limit), &1.inserted_at) == :gt))
|> MapSet.new()
start_supervised!(Janitor)
@@ -105,7 +113,7 @@ defmodule Realtime.Tenants.JanitorTest do
to_keep =
messages
- |> Enum.reject(&(NaiveDateTime.compare(limit, &1.inserted_at) == :gt))
+ |> Enum.reject(&(NaiveDateTime.compare(NaiveDateTime.beginning_of_day(limit), &1.inserted_at) == :gt))
|> MapSet.new()
start_supervised!(Janitor)
@@ -162,7 +170,7 @@ defmodule Realtime.Tenants.JanitorTest do
defp verify_partitions(conn) do
today = Date.utc_today()
- yesterday = Date.add(today, -1)
+ yesterday = Date.add(today, -3)
future = Date.add(today, 3)
dates = Date.range(yesterday, future)
diff --git a/test/realtime/tenants/rebalancer_test.exs b/test/realtime/tenants/rebalancer_test.exs
index ac8e1ea36..d91e7e675 100644
--- a/test/realtime/tenants/rebalancer_test.exs
+++ b/test/realtime/tenants/rebalancer_test.exs
@@ -9,7 +9,7 @@ defmodule Realtime.Tenants.RebalancerTest do
setup do
tenant = Containers.checkout_tenant(run_migrations: true)
# Warm cache to avoid Cachex and Ecto.Sandbox ownership issues
- Cachex.put!(Realtime.Tenants.Cache, {{:get_tenant_by_external_id, 1}, [tenant.external_id]}, {:cached, tenant})
+ Realtime.Tenants.Cache.update_cache(tenant)
%{tenant: tenant}
end
diff --git a/test/realtime/tenants/replication_connection/watchdog_test.exs b/test/realtime/tenants/replication_connection/watchdog_test.exs
new file mode 100644
index 000000000..7c8f0e527
--- /dev/null
+++ b/test/realtime/tenants/replication_connection/watchdog_test.exs
@@ -0,0 +1,149 @@
+defmodule Realtime.Tenants.ReplicationConnection.WatchdogTest do
+ use ExUnit.Case, async: true
+
+ import ExUnit.CaptureLog
+
+ alias Realtime.Tenants.ReplicationConnection.Watchdog
+
+ defmodule FakeReplicationConnection do
+ def child_spec(opts) do
+ %{id: __MODULE__, start: {__MODULE__, :start_link, [opts]}, type: :worker, restart: :temporary, shutdown: 500}
+ end
+
+ def start_link(opts \\ []), do: :gen_statem.start_link(__MODULE__, opts, [])
+
+ def callback_mode, do: :state_functions
+
+ def init(opts) do
+ respond_to_health_checks = Keyword.get(opts, :respond_to_health_checks, true)
+ delay_ms = Keyword.get(opts, :delay_ms, 0)
+
+ data = %{
+ respond_to_health_checks: respond_to_health_checks,
+ delay_ms: delay_ms,
+ health_check_count: 0
+ }
+
+ {:ok, :idle, data}
+ end
+
+ def idle({:call, from}, :health_check, %{respond_to_health_checks: true, delay_ms: delay_ms} = data) do
+ if delay_ms > 0 do
+ Process.sleep(delay_ms)
+ end
+
+ :gen_statem.reply(from, :ok)
+ {:keep_state, %{data | health_check_count: data.health_check_count + 1}}
+ end
+
+ def idle({:call, _from}, :health_check, %{respond_to_health_checks: false} = data) do
+ # Don't reply - this will cause a timeout
+ {:keep_state, %{data | health_check_count: data.health_check_count + 1}}
+ end
+
+ def idle({:call, from}, :get_health_check_count, data) do
+ :gen_statem.reply(from, data.health_check_count)
+ {:keep_state, data}
+ end
+
+ def idle({:call, from}, :set_no_respond, data) do
+ :gen_statem.reply(from, :ok)
+ {:keep_state, %{data | respond_to_health_checks: false}}
+ end
+
+ def get_health_check_count(pid), do: :gen_statem.call(pid, :get_health_check_count)
+
+ def set_no_respond(pid), do: :gen_statem.call(pid, :set_no_respond)
+ end
+
+ test "performs periodic health checks successfully" do
+ fake_pid = start_link_supervised!(FakeReplicationConnection)
+
+ watchdog_pid =
+ start_supervised!(
+ {Watchdog, parent_pid: fake_pid, tenant_id: "test-tenant", watchdog_interval: 50, watchdog_timeout: 100}
+ )
+
+ # Wait for at least 2 health check cycles
+ Process.sleep(150)
+
+ assert Process.alive?(watchdog_pid)
+ assert Process.alive?(fake_pid)
+
+ # Verify health checks were performed
+ count = FakeReplicationConnection.get_health_check_count(fake_pid)
+ assert count >= 2
+ end
+
+ describe "timeout handling" do
+ test "stops when health check times out" do
+ # Create a fake process that doesn't respond to health checks
+ fake_pid = start_supervised!({FakeReplicationConnection, respond_to_health_checks: false})
+
+ logs =
+ capture_log(fn ->
+ watchdog_pid =
+ start_supervised!(
+ {Watchdog, parent_pid: fake_pid, tenant_id: "test-tenant", watchdog_interval: 50, watchdog_timeout: 100}
+ )
+
+ ref = Process.monitor(watchdog_pid)
+
+ # Wait for the first health check to time out
+ assert_receive {:DOWN, ^ref, :process, ^watchdog_pid, :watchdog_timeout}, 500
+ refute Process.alive?(watchdog_pid)
+ end)
+
+ assert logs =~ "ReplicationConnectionWatchdogTimeout"
+ assert logs =~ "ReplicationConnection is not responding"
+ end
+
+ test "stops immediately if health check takes longer than timeout" do
+ # Create a fake process with a 200ms delay
+ fake_pid = start_supervised!({FakeReplicationConnection, delay_ms: 200})
+
+ logs =
+ capture_log(fn ->
+ watchdog_pid =
+ start_supervised!(
+ {Watchdog, parent_pid: fake_pid, tenant_id: "timeout-test", watchdog_interval: 50, watchdog_timeout: 100}
+ )
+
+ ref = Process.monitor(watchdog_pid)
+
+ # Should timeout because delay (200ms) > timeout (100ms)
+ assert_receive {:DOWN, ^ref, :process, ^watchdog_pid, :watchdog_timeout}, 500
+ end)
+
+ assert logs =~ "ReplicationConnectionWatchdogTimeout"
+ end
+ end
+
+ describe "dynamic behavior changes" do
+ test "handles transition from healthy to timeout" do
+ # Start with responding, then stop responding
+ fake_pid = start_supervised!(FakeReplicationConnection)
+
+ watchdog_pid =
+ start_supervised!(
+ {Watchdog, parent_pid: fake_pid, tenant_id: "test-tenant", watchdog_interval: 50, watchdog_timeout: 100}
+ )
+
+ # Wait for first successful health check
+ Process.sleep(80)
+ assert Process.alive?(watchdog_pid)
+
+ ref = Process.monitor(watchdog_pid)
+ # Now make the fake process stop responding
+ FakeReplicationConnection.set_no_respond(fake_pid)
+
+ logs =
+ capture_log(fn ->
+ # Should timeout on next health check
+ assert_receive {:DOWN, ^ref, :process, ^watchdog_pid, :watchdog_timeout}, 500
+ end)
+
+ assert logs =~ "ReplicationConnectionWatchdogTimeout"
+ end
+ end
+end
diff --git a/test/realtime/tenants/replication_connection_test.exs b/test/realtime/tenants/replication_connection_test.exs
index 783270313..8745f32a6 100644
--- a/test/realtime/tenants/replication_connection_test.exs
+++ b/test/realtime/tenants/replication_connection_test.exs
@@ -11,6 +11,9 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
alias Realtime.Tenants
alias Realtime.Tenants.ReplicationConnection
alias RealtimeWeb.Endpoint
+ alias Realtime.Tenants.Repo
+
+ @replication_slot_name "supabase_realtime_messages_replication_slot_test"
setup do
slot = Application.get_env(:realtime, :slot_name_suffix)
@@ -20,11 +23,10 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
tenant = Containers.checkout_tenant(run_migrations: true)
{:ok, db_conn} = Database.connect(tenant, "realtime_test", :stop)
- name = "supabase_realtime_messages_replication_slot_test"
- Postgrex.query(db_conn, "SELECT pg_drop_replication_slot($1)", [name])
- Process.exit(db_conn, :normal)
+ Integrations.setup_postgres_changes(db_conn)
+ Postgrex.query(db_conn, "SELECT pg_drop_replication_slot($1)", [@replication_slot_name])
- %{tenant: tenant}
+ %{tenant: tenant, db_conn: db_conn}
end
describe "temporary process" do
@@ -70,7 +72,7 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
assert {:error, _} = ReplicationConnection.start(tenant, self())
end
- test "starts a handler for the tenant and broadcasts", %{tenant: tenant} do
+ test "starts a handler for the tenant and broadcasts", %{tenant: tenant, db_conn: db_conn} do
start_link_supervised!(
{ReplicationConnection, %ReplicationConnection{tenant_id: tenant.external_id, monitored_pid: self()}},
restart: :transient
@@ -98,8 +100,8 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
payload = %{
"event" => "INSERT",
+ "meta" => %{"id" => row.id},
"payload" => %{
- "id" => row.id,
"value" => value
},
"type" => "broadcast"
@@ -121,8 +123,89 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
})
end
- {:ok, db_conn} = Database.connect(tenant, "realtime_test", :stop)
- {:ok, _} = Realtime.Repo.insert_all_entries(db_conn, messages, Message)
+ {:ok, _} = Repo.insert_all_entries(db_conn, messages, Message)
+
+ messages_received =
+ for _ <- 1..total_messages, into: [] do
+ assert_receive {:socket_push, :text, data}
+ data |> IO.iodata_to_binary() |> Jason.decode!()
+ end
+
+ for row <- messages do
+ assert Enum.count(messages_received, fn message_received ->
+ value = row |> Map.from_struct() |> get_in([:changes, :payload, "value"])
+
+ match?(
+ %{
+ "event" => "broadcast",
+ "payload" => %{
+ "event" => "INSERT",
+ "meta" => %{"id" => _id},
+ "payload" => %{
+ "value" => ^value
+ }
+ },
+ "ref" => nil,
+ "topic" => ^topic
+ },
+ message_received
+ )
+ end) == 1
+ end
+ end
+
+ test "starts a handler for the tenant and broadcasts to public channel", %{tenant: tenant, db_conn: db_conn} do
+ start_link_supervised!(
+ {ReplicationConnection, %ReplicationConnection{tenant_id: tenant.external_id, monitored_pid: self()}},
+ restart: :transient
+ )
+
+ topic = random_string()
+ tenant_topic = Tenants.tenant_topic(tenant.external_id, topic, true)
+ subscribe(tenant_topic, topic)
+
+ total_messages = 5
+ # Works with one insert per transaction
+ for _ <- 1..total_messages do
+ value = random_string()
+
+ row =
+ message_fixture(tenant, %{
+ "topic" => topic,
+ "private" => false,
+ "event" => "INSERT",
+ "payload" => %{"value" => value}
+ })
+
+ assert_receive {:socket_push, :text, data}
+ message = data |> IO.iodata_to_binary() |> Jason.decode!()
+
+ payload = %{
+ "event" => "INSERT",
+ "meta" => %{"id" => row.id},
+ "payload" => %{
+ "value" => value
+ },
+ "type" => "broadcast"
+ }
+
+ assert message == %{"event" => "broadcast", "payload" => payload, "ref" => nil, "topic" => topic}
+ end
+
+ Process.sleep(500)
+ # Works with batch inserts
+ messages =
+ for _ <- 1..total_messages do
+ Message.changeset(%Message{}, %{
+ "topic" => topic,
+ "private" => false,
+ "event" => "INSERT",
+ "extension" => "broadcast",
+ "payload" => %{"value" => random_string()}
+ })
+ end
+
+ {:ok, _} = Repo.insert_all_entries(db_conn, messages, Message)
messages_received =
for _ <- 1..total_messages, into: [] do
@@ -139,8 +222,8 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
"event" => "broadcast",
"payload" => %{
"event" => "INSERT",
+ "meta" => %{"id" => _id},
"payload" => %{
- "id" => _,
"value" => ^value
}
},
@@ -153,6 +236,113 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
end
end
+ test "replicates binary with exactly 16 bytes to test UUID conversion error", %{tenant: tenant} do
+ start_link_supervised!(
+ {ReplicationConnection, %ReplicationConnection{tenant_id: tenant.external_id, monitored_pid: self()}},
+ restart: :transient
+ )
+
+ topic = "db:job_scheduler"
+ tenant_topic = Tenants.tenant_topic(tenant.external_id, topic, false)
+ subscribe(tenant_topic, topic)
+ payload = %{"value" => random_string()}
+
+ row =
+ message_fixture(tenant, %{
+ "topic" => topic,
+ "private" => true,
+ "event" => "UPDATE",
+ "extension" => "broadcast",
+ "payload" => payload
+ })
+
+ row_id = row.id
+
+ assert_receive {:socket_push, :text, data}, 2000
+ message = data |> IO.iodata_to_binary() |> Jason.decode!()
+
+ assert %{
+ "event" => "broadcast",
+ "payload" => %{
+ "event" => "UPDATE",
+ "meta" => %{"id" => ^row_id},
+ "payload" => received_payload,
+ "type" => "broadcast"
+ },
+ "ref" => nil,
+ "topic" => ^topic
+ } = message
+
+ assert received_payload == payload
+ end
+
+ test "should not process unsupported relations", %{tenant: tenant, db_conn: db_conn} do
+ # Create an extra table so it can be added to the publication as an unsupported relation
+ queries = [
+ "DROP TABLE IF EXISTS public.test",
+ """
+ CREATE TABLE "public"."test" (
+ "id" int4 NOT NULL default nextval('test_id_seq'::regclass),
+ "details" text,
+ PRIMARY KEY ("id"));
+ """
+ ]
+
+ Postgrex.transaction(db_conn, fn conn ->
+ Enum.each(queries, &Postgrex.query!(conn, &1, []))
+ end)
+
+ logs =
+ capture_log(fn ->
+ start_link_supervised!(
+ {ReplicationConnection, %ReplicationConnection{tenant_id: tenant.external_id, monitored_pid: self()}},
+ restart: :transient
+ )
+
+ assert_replication_started(db_conn, @replication_slot_name)
+ assert_publication_contains_only_messages(db_conn, "supabase_realtime_messages_publication")
+
+ # Add table to publication to test the error handling
+ Postgrex.query!(db_conn, "ALTER PUBLICATION supabase_realtime_messages_publication ADD TABLE public.test", [])
+ %{rows: [[_id]]} = Postgrex.query!(db_conn, "insert into test (details) values ('test') returning id", [])
+
+ topic = "db:job_scheduler"
+ tenant_topic = Tenants.tenant_topic(tenant.external_id, topic, false)
+ subscribe(tenant_topic, topic)
+ payload = %{"value" => random_string()}
+
+ row =
+ message_fixture(tenant, %{
+ "topic" => topic,
+ "private" => true,
+ "event" => "UPDATE",
+ "extension" => "broadcast",
+ "payload" => payload
+ })
+
+ row_id = row.id
+
+ assert_receive {:socket_push, :text, data}, 2000
+ message = data |> IO.iodata_to_binary() |> Jason.decode!()
+
+ assert %{
+ "event" => "broadcast",
+ "payload" => %{
+ "event" => "UPDATE",
+ "meta" => %{"id" => ^row_id},
+ "payload" => received_payload,
+ "type" => "broadcast"
+ },
+ "ref" => nil,
+ "topic" => ^topic
+ } = message
+
+ assert received_payload == payload
+ end)
+
+ assert logs =~ "Unexpected relation on schema 'public' and table 'test'"
+ end
+
test "monitored pid stopping brings down ReplicationConnection ", %{tenant: tenant} do
monitored_pid =
spawn(fn ->
@@ -204,7 +394,32 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
assert logs =~ "UnableToBroadcastChanges"
end
- test "payload without id", %{tenant: tenant} do
+ test "message that exceeds payload size logs error", %{tenant: tenant} do
+ logs =
+ capture_log(fn ->
+ start_supervised!(
+ {ReplicationConnection, %ReplicationConnection{tenant_id: tenant.external_id, monitored_pid: self()}},
+ restart: :transient
+ )
+
+ topic = random_string()
+ tenant_topic = Tenants.tenant_topic(tenant.external_id, topic, false)
+ assert :ok = Endpoint.subscribe(tenant_topic)
+
+ message_fixture(tenant, %{
+ "event" => random_string(),
+ "topic" => random_string(),
+ "private" => true,
+ "payload" => %{"data" => random_string(tenant.max_payload_size_in_kb * 1000 + 1)}
+ })
+
+ refute_receive %Phoenix.Socket.Broadcast{}, 500
+ end)
+
+ assert logs =~ "UnableToBroadcastChanges: %{messages: [%{payload: [\"Payload size exceeds tenant limit\"]}]}"
+ end
+
+ test "payload without id", %{tenant: tenant, db_conn: db_conn} do
start_link_supervised!(
{ReplicationConnection, %ReplicationConnection{tenant_id: tenant.external_id, monitored_pid: self()}},
restart: :transient
@@ -214,33 +429,39 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
tenant_topic = Tenants.tenant_topic(tenant.external_id, topic, false)
subscribe(tenant_topic, topic)
- fixture =
- message_fixture(tenant, %{
- "topic" => topic,
- "private" => true,
- "event" => "INSERT",
- "payload" => %{"value" => "something"}
- })
+ value = "something"
+ event = "INSERT"
+
+ Postgrex.query!(
+ db_conn,
+ "SELECT realtime.send (json_build_object ('value', $1 :: text)::jsonb, $2 :: text, $3 :: text, TRUE::bool);",
+ [value, event, topic]
+ )
+
+ {:ok, [%{id: id}]} = Repo.all(db_conn, from(m in Message), Message)
assert_receive {:socket_push, :text, data}, 500
message = data |> IO.iodata_to_binary() |> Jason.decode!()
assert %{
"event" => "broadcast",
- "payload" => %{"event" => "INSERT", "payload" => payload, "type" => "broadcast"},
+ "payload" => %{
+ "event" => "INSERT",
+ "meta" => %{"id" => ^id},
+ "payload" => payload,
+ "type" => "broadcast"
+ },
"ref" => nil,
"topic" => ^topic
} = message
- id = fixture.id
-
assert payload == %{
"value" => "something",
"id" => id
}
end
- test "payload including id", %{tenant: tenant} do
+ test "payload including id", %{tenant: tenant, db_conn: db_conn} do
start_link_supervised!(
{ReplicationConnection, %ReplicationConnection{tenant_id: tenant.external_id, monitored_pid: self()}},
restart: :transient
@@ -250,21 +471,29 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
tenant_topic = Tenants.tenant_topic(tenant.external_id, topic, false)
subscribe(tenant_topic, topic)
- payload = %{"value" => "something", "id" => "123456"}
+ id = "123456"
+ value = "something"
+ event = "INSERT"
- message_fixture(tenant, %{
- "topic" => topic,
- "private" => true,
- "event" => "INSERT",
- "payload" => payload
- })
+ Postgrex.query!(
+ db_conn,
+ "SELECT realtime.send (json_build_object ('value', $1 :: text, 'id', $2 :: text)::jsonb, $3 :: text, $4 :: text, TRUE::bool);",
+ [value, id, event, topic]
+ )
+
+ {:ok, [%{id: message_id}]} = Repo.all(db_conn, from(m in Message), Message)
assert_receive {:socket_push, :text, data}, 500
message = data |> IO.iodata_to_binary() |> Jason.decode!()
assert %{
"event" => "broadcast",
- "payload" => %{"event" => "INSERT", "payload" => ^payload, "type" => "broadcast"},
+ "payload" => %{
+ "meta" => %{"id" => ^message_id},
+ "event" => "INSERT",
+ "payload" => %{"value" => "something", "id" => ^id},
+ "type" => "broadcast"
+ },
"ref" => nil,
"topic" => ^topic
} = message
@@ -272,7 +501,7 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
test "fails on existing replication slot", %{tenant: tenant} do
{:ok, db_conn} = Database.connect(tenant, "realtime_test", :stop)
- name = "supabase_realtime_messages_replication_slot_test"
+ name = @replication_slot_name
Postgrex.query!(db_conn, "SELECT pg_create_logical_replication_slot($1, 'test_decoding')", [name])
@@ -288,7 +517,7 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
call_original(ReplicationConnection, :init, [arg])
end)
- {:error, :timeout} = ReplicationConnection.start(tenant, self(), 100)
+ assert {:error, :replication_connection_timeout} = ReplicationConnection.start(tenant, self(), 100)
end
test "handle standby connections exceeds max_wal_senders", %{tenant: tenant} do
@@ -331,6 +560,118 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
assert {:error, :max_wal_senders_reached} = ReplicationConnection.start(tenant, self())
end
+
+ test "handles WAL pressure gracefully", %{tenant: tenant} do
+ {:ok, replication_pid} = ReplicationConnection.start(tenant, self())
+
+ {:ok, conn} = Database.connect(tenant, "realtime_test", :stop)
+ on_exit(fn -> Process.exit(conn, :normal) end)
+
+ large_payload = String.duplicate("x", 10 * 1024 * 1024)
+
+ for i <- 1..5 do
+ message_fixture_with_conn(tenant, conn, %{
+ "topic" => "stress_#{i}",
+ "private" => true,
+ "event" => "INSERT",
+ "payload" => %{"data" => large_payload}
+ })
+ end
+
+ assert Process.alive?(replication_pid)
+ end
+ end
+
+ describe "publication validation steps" do
+ test "if proper tables are included, starts replication", %{tenant: tenant, db_conn: db_conn} do
+ publication_name = "supabase_realtime_messages_publication"
+
+ Postgrex.query!(db_conn, "DROP PUBLICATION IF EXISTS #{publication_name}", [])
+ Postgrex.query!(db_conn, "CREATE PUBLICATION #{publication_name} FOR TABLE realtime.messages", [])
+
+ logs =
+ capture_log(fn ->
+ {:ok, pid} = ReplicationConnection.start(tenant, self())
+
+ assert_replication_started(db_conn, @replication_slot_name)
+ assert Process.alive?(pid)
+ assert_publication_contains_only_messages(db_conn, publication_name)
+
+ Process.exit(pid, :shutdown)
+ end)
+
+ refute logs =~ "Recreating"
+ end
+
+ test "if includes unexpected tables, recreates publication", %{tenant: tenant, db_conn: db_conn} do
+ publication_name = "supabase_realtime_messages_publication"
+
+ Postgrex.query!(db_conn, "DROP PUBLICATION IF EXISTS #{publication_name}", [])
+ Postgrex.query!(db_conn, "CREATE TABLE IF NOT EXISTS public.wrong_table (id int)", [])
+ Postgrex.query!(db_conn, "CREATE PUBLICATION #{publication_name} FOR TABLE public.wrong_table", [])
+
+ logs =
+ capture_log(fn ->
+ {:ok, pid} = ReplicationConnection.start(tenant, self())
+
+ assert_replication_started(db_conn, @replication_slot_name)
+ assert Process.alive?(pid)
+ assert_publication_contains_only_messages(db_conn, publication_name)
+
+ Process.exit(pid, :shutdown)
+ end)
+
+ assert logs =~ "Recreating"
+ end
+
+ test "recreates publication if it has no tables", %{tenant: tenant, db_conn: db_conn} do
+ publication_name = "supabase_realtime_messages_publication"
+
+ Postgrex.query!(db_conn, "DROP PUBLICATION IF EXISTS #{publication_name}", [])
+ Postgrex.query!(db_conn, "CREATE PUBLICATION #{publication_name}", [])
+
+ logs =
+ capture_log(fn ->
+ {:ok, pid} = ReplicationConnection.start(tenant, self())
+
+ assert_replication_started(db_conn, @replication_slot_name)
+ assert Process.alive?(pid)
+ assert_publication_contains_only_messages(db_conn, publication_name)
+
+ Process.exit(pid, :shutdown)
+ end)
+
+ assert logs =~ "Recreating"
+ end
+
+ test "recreates publication if it has expected tables and unexpected tables under same publication", %{
+ tenant: tenant,
+ db_conn: db_conn
+ } do
+ publication_name = "supabase_realtime_messages_publication"
+
+ Postgrex.query!(db_conn, "DROP PUBLICATION IF EXISTS #{publication_name}", [])
+ Postgrex.query!(db_conn, "CREATE TABLE IF NOT EXISTS public.extra_table (id int)", [])
+
+ Postgrex.query!(
+ db_conn,
+ "CREATE PUBLICATION #{publication_name} FOR TABLE realtime.messages, public.extra_table",
+ []
+ )
+
+ logs =
+ capture_log(fn ->
+ {:ok, pid} = ReplicationConnection.start(tenant, self())
+
+ assert_replication_started(db_conn, @replication_slot_name)
+ assert Process.alive?(pid)
+ assert_publication_contains_only_messages(db_conn, publication_name)
+
+ Process.exit(pid, :shutdown)
+ end)
+
+ assert logs =~ "Recreating"
+ end
end
describe "whereis/1" do
@@ -378,7 +719,7 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
"payload" => %{"value" => random_string()}
})
- assert_receive {:socket_push, :text, data}
+ assert_receive {:socket_push, :text, data}, 500
message = data |> IO.iodata_to_binary() |> Jason.decode!()
assert %{"event" => "broadcast", "payload" => _, "ref" => nil, "topic" => ^topic} = message
@@ -409,4 +750,59 @@ defmodule Realtime.Tenants.ReplicationConnectionTest do
ref = Process.monitor(pid)
assert_receive {:DOWN, ^ref, :process, ^pid, _reason}, timeout
end
+
+ defp message_fixture_with_conn(_tenant, conn, override) do
+ create_attrs = %{
+ "topic" => random_string(),
+ "extension" => "broadcast"
+ }
+
+ override = override |> Enum.map(fn {k, v} -> {"#{k}", v} end) |> Map.new()
+
+ {:ok, message} =
+ create_attrs
+ |> Map.merge(override)
+ |> TenantConnection.create_message(conn)
+
+ message
+ end
+
+ defp assert_publication_contains_only_messages(db_conn, publication_name) do
+ %{rows: rows} =
+ Postgrex.query!(
+ db_conn,
+ "SELECT schemaname, tablename FROM pg_publication_tables WHERE pubname = $1",
+ [publication_name]
+ )
+
+ valid_tables =
+ Enum.all?(rows, fn [schema, table] ->
+ schema == "realtime" and (table == "messages" or String.starts_with?(table, "messages_"))
+ end)
+
+ assert valid_tables, "Expected only realtime.messages or its partitions, got: #{inspect(rows)}"
+ end
+
+ defp assert_replication_started(db_conn, slot_name, retries \\ 10, interval_ms \\ 10) do
+ case check_replication_status(db_conn, slot_name, retries, interval_ms) do
+ :ok -> :ok
+ :error -> flunk("Replication slot #{slot_name} did not become active")
+ end
+ end
+
+ defp check_replication_status(_db_conn, _slot_name, 0, _interval_ms), do: :error
+
+ defp check_replication_status(db_conn, slot_name, retries_remaining, interval_ms) do
+ %{rows: rows} =
+ Postgrex.query!(db_conn, "SELECT active FROM pg_replication_slots WHERE slot_name = $1", [slot_name])
+
+ case rows do
+ [[true]] ->
+ :ok
+
+ _ ->
+ Process.sleep(interval_ms)
+ check_replication_status(db_conn, slot_name, retries_remaining - 1, interval_ms)
+ end
+ end
end
diff --git a/test/realtime/repo_test.exs b/test/realtime/tenants/repo_test.exs
similarity index 99%
rename from test/realtime/repo_test.exs
rename to test/realtime/tenants/repo_test.exs
index 7d6841b01..697274494 100644
--- a/test/realtime/repo_test.exs
+++ b/test/realtime/tenants/repo_test.exs
@@ -1,10 +1,10 @@
-defmodule Realtime.RepoTest do
+defmodule Realtime.Tenants.RepoTest do
use Realtime.DataCase, async: true
import Ecto.Query
alias Realtime.Api.Message
- alias Realtime.Repo
+ alias Realtime.Tenants.Repo
alias Realtime.Database
setup do
diff --git a/test/realtime/tenants_test.exs b/test/realtime/tenants_test.exs
index aefe0b86c..202facdb5 100644
--- a/test/realtime/tenants_test.exs
+++ b/test/realtime/tenants_test.exs
@@ -89,15 +89,6 @@ defmodule Realtime.TenantsTest do
end
end
- describe "update_migrations_ran/1" do
- test "updates migrations_ran to the count of all migrations" do
- tenant = tenant_fixture(%{migrations_ran: 0})
- Tenants.update_migrations_ran(tenant.external_id, 1)
- tenant = Repo.reload!(tenant)
- assert tenant.migrations_ran == 1
- end
- end
-
describe "broadcast_operation_event/2" do
setup do
tenant = tenant_fixture()
diff --git a/test/realtime/user_counter_test.exs b/test/realtime/user_counter_test.exs
deleted file mode 100644
index d93529764..000000000
--- a/test/realtime/user_counter_test.exs
+++ /dev/null
@@ -1,74 +0,0 @@
-defmodule Realtime.UsersCounterTest do
- use Realtime.DataCase, async: false
- alias Realtime.UsersCounter
- alias Realtime.Rpc
-
- describe "add/1" do
- test "starts counter for tenant" do
- assert UsersCounter.add(self(), random_string()) == :ok
- end
- end
-
- @aux_mod (quote do
- defmodule Aux do
- def ping(),
- do:
- spawn(fn ->
- Process.sleep(3000)
- :pong
- end)
- end
- end)
-
- Code.eval_quoted(@aux_mod)
-
- describe "tenant_users/1" do
- test "returns count of connected clients for tenant on cluster node" do
- tenant_id = random_string()
- expected = generate_load(tenant_id)
- Process.sleep(1000)
- assert UsersCounter.tenant_users(tenant_id) == expected
- end
- end
-
- describe "tenant_users/2" do
- test "returns count of connected clients for tenant on target cluster" do
- tenant_id = random_string()
- generate_load(tenant_id)
- {:ok, node} = Clustered.start(@aux_mod)
- pid = Rpc.call(node, Aux, :ping, [])
- UsersCounter.add(pid, tenant_id)
- assert UsersCounter.tenant_users(node, tenant_id) == 1
- end
- end
-
- defp generate_load(tenant_id, nodes \\ 2, processes \\ 2) do
- for i <- 1..nodes do
- # Avoid port collision
- extra_config = [
- {:gen_rpc, :tcp_server_port, 15970 + i}
- ]
-
- {:ok, node} = Clustered.start(@aux_mod, extra_config: extra_config, phoenix_port: 4012 + i)
-
- for _ <- 1..processes do
- pid = Rpc.call(node, Aux, :ping, [])
-
- for _ <- 1..10 do
- # replicate same pid added multiple times concurrently
- Task.start(fn ->
- UsersCounter.add(pid, tenant_id)
- end)
-
- # noisy neighbors to test handling of bigger loads on concurrent calls
- Task.start(fn ->
- pid = Rpc.call(node, Aux, :ping, [])
- UsersCounter.add(pid, random_string())
- end)
- end
- end
- end
-
- nodes * processes
- end
-end
diff --git a/test/realtime/users_counter_test.exs b/test/realtime/users_counter_test.exs
new file mode 100644
index 000000000..796fa1567
--- /dev/null
+++ b/test/realtime/users_counter_test.exs
@@ -0,0 +1,155 @@
+defmodule Realtime.UsersCounterTest do
+ use Realtime.DataCase, async: false
+ alias Realtime.UsersCounter
+ alias Realtime.Rpc
+
+ setup_all do
+ tenant_id = random_string()
+ count = generate_load(tenant_id)
+
+ %{tenant_id: tenant_id, count: count, nodes: Node.list()}
+ end
+
+ describe "already_counted?/2" do
+ test "returns true if pid already counted for tenant", %{tenant_id: tenant_id} do
+ pid = self()
+ assert UsersCounter.add(pid, tenant_id) == :ok
+ assert UsersCounter.already_counted?(pid, tenant_id) == true
+ end
+
+ test "returns false if pid not counted for tenant" do
+ assert UsersCounter.already_counted?(self(), random_string()) == false
+ end
+ end
+
+ describe "add/1" do
+ test "starts counter for tenant" do
+ assert UsersCounter.add(self(), random_string()) == :ok
+ end
+ end
+
+ describe "local_tenants/0" do
+ test "returns list of tenant ids with local connections" do
+ tenant_id = random_string()
+ assert UsersCounter.add(self(), tenant_id) == :ok
+
+ tenants = UsersCounter.local_tenants()
+ assert is_list(tenants)
+ assert tenant_id in tenants
+ end
+ end
+
+ @aux_mod (quote do
+ defmodule Aux do
+ def ping() do
+ spawn(fn -> Process.sleep(:infinity) end)
+ end
+
+ def join(pid, group) do
+ UsersCounter.add(pid, group)
+ end
+ end
+ end)
+
+ Code.eval_quoted(@aux_mod)
+
+ describe "tenant_counts/0" do
+ test "map of tenant and number of users", %{tenant_id: tenant_id, count: expected} do
+ assert UsersCounter.add(self(), tenant_id) == :ok
+ Process.sleep(1000)
+ counts = UsersCounter.tenant_counts()
+
+ assert counts[tenant_id] == expected + 1
+ assert map_size(counts) >= 61
+
+ counts = Beacon.local_member_counts(:users)
+
+ assert counts[tenant_id] == 1
+ assert map_size(counts) >= 1
+
+ counts = Beacon.member_counts(:users)
+
+ assert counts[tenant_id] == expected + 1
+ assert map_size(counts) >= 61
+ end
+ end
+
+ describe "local_tenant_counts/0" do
+ test "map of tenant and number of users for local node only", %{tenant_id: tenant_id} do
+ assert UsersCounter.add(self(), tenant_id) == :ok
+
+ my_counts = UsersCounter.local_tenant_counts()
+ # Only one connection from this test process on this node
+ assert my_counts == %{tenant_id => 1}
+ end
+ end
+
+ describe "tenant_users/1" do
+ test "returns count of connected clients for tenant on cluster node", %{tenant_id: tenant_id, count: expected} do
+ Process.sleep(1000)
+ assert UsersCounter.tenant_users(tenant_id) == expected
+ end
+ end
+
+ defp generate_load(tenant_id) do
+ processes = 2
+
+ gen_rpc_port = Application.fetch_env!(:gen_rpc, :tcp_server_port)
+
+ nodes = %{
+ node() => gen_rpc_port,
+ :"us_node@127.0.0.1" => 16980,
+ :"ap2_nodeX@127.0.0.1" => 16981,
+ :"ap2_nodeY@127.0.0.1" => 16982
+ }
+
+ regions = %{
+ :"us_node@127.0.0.1" => "us-east-1",
+ :"ap2_nodeX@127.0.0.1" => "ap-southeast-2",
+ :"ap2_nodeY@127.0.0.1" => "ap-southeast-2"
+ }
+
+ on_exit(fn -> Application.put_env(:gen_rpc, :client_config_per_node, {:internal, %{}}) end)
+ Application.put_env(:gen_rpc, :client_config_per_node, {:internal, nodes})
+
+ nodes
+ |> Enum.filter(fn {node, _port} -> node != Node.self() end)
+ |> Enum.with_index(1)
+ |> Enum.each(fn {{node, gen_rpc_port}, i} ->
+ # Avoid port collision
+ extra_config = [
+ {:gen_rpc, :tcp_server_port, gen_rpc_port},
+ {:gen_rpc, :client_config_per_node, {:internal, nodes}},
+ {:realtime, :users_scope_broadcast_interval_in_ms, 100},
+ {:realtime, :region, regions[node]}
+ ]
+
+ node_name =
+ node
+ |> to_string()
+ |> String.split("@")
+ |> hd()
+ |> String.to_atom()
+
+ {:ok, node} = Clustered.start(@aux_mod, name: node_name, extra_config: extra_config, phoenix_port: 4012 + i)
+
+ for _ <- 1..processes do
+ pid = Rpc.call(node, Aux, :ping, [])
+
+ for _ <- 1..10 do
+ # replicate same pid added multiple times concurrently
+ Task.start(fn ->
+ Rpc.call(node, Aux, :join, [pid, tenant_id])
+ end)
+
+ # noisy neighbors to test handling of bigger loads on concurrent calls
+ Task.start(fn ->
+ Rpc.call(node, Aux, :join, [pid, random_string()])
+ end)
+ end
+ end
+ end)
+
+ 3 * processes
+ end
+end
diff --git a/test/realtime_web/channels/auth/jwt_verification_test.exs b/test/realtime_web/channels/auth/jwt_verification_test.exs
index b6255ee1f..c67edbefa 100644
--- a/test/realtime_web/channels/auth/jwt_verification_test.exs
+++ b/test/realtime_web/channels/auth/jwt_verification_test.exs
@@ -376,5 +376,62 @@ defmodule RealtimeWeb.JwtVerificationTest do
assert {:error, :error_generating_signer} = JwtVerification.verify(token, jwt_secret, jwks)
end
+
+ test "using Ed25519 JWK" do
+ # Generate Ed25519 key pair
+ {pub, priv} = :crypto.generate_key(:eddsa, :ed25519)
+
+ jwk = %{
+ "kty" => "OKP",
+ "crv" => "Ed25519",
+ "x" => Base.url_encode64(pub, padding: false),
+ "d" => Base.url_encode64(priv, padding: false),
+ "kid" => "ed-key-1"
+ }
+
+ jwks = %{"keys" => [jwk]}
+
+ signer = Joken.Signer.create("Ed25519", jwk, %{"kid" => "ed-key-1"})
+
+ Mock.freeze()
+ current_time = Mock.current_time()
+
+ token =
+ Joken.generate_and_sign!(
+ %{"exp" => %Joken.Claim{generate: fn -> current_time + 100 end}},
+ %{},
+ signer
+ )
+
+ assert {:ok, _claims} = JwtVerification.verify(token, @jwt_secret, jwks)
+ end
+
+ test "returns error for unsupported algorithm with kid and jwks" do
+ header = Base.url_encode64(Jason.encode!(%{"alg" => "PS256", "kid" => "key-1"}), padding: false)
+ claims = Base.url_encode64(Jason.encode!(%{"exp" => 9_999_999_999}), padding: false)
+ token = "#{header}.#{claims}.signature"
+
+ jwks = %{"keys" => [%{"kty" => "RSA", "kid" => "key-1"}]}
+
+ assert {:error, _} = JwtVerification.verify(token, @jwt_secret, jwks)
+ end
+
+ test "falls back to jwt_secret when HS256 kid has no matching JWK" do
+ Mock.freeze()
+ current_time = Mock.current_time()
+
+ signer = Joken.Signer.create("HS256", @jwt_secret)
+
+ token =
+ Joken.generate_and_sign!(
+ %{"exp" => %Joken.Claim{generate: fn -> current_time + 100 end}},
+ %{},
+ signer
+ )
+
+ jwks = %{"keys" => [%{"kty" => "oct", "kid" => "wrong-kid"}]}
+
+ assert {:ok, _claims} = JwtVerification.verify(token, @jwt_secret, jwks)
+ end
end
end
diff --git a/test/realtime_web/channels/payloads/flexible_boolean_test.exs b/test/realtime_web/channels/payloads/flexible_boolean_test.exs
new file mode 100644
index 000000000..cb0704ab4
--- /dev/null
+++ b/test/realtime_web/channels/payloads/flexible_boolean_test.exs
@@ -0,0 +1,72 @@
+defmodule RealtimeWeb.Channels.Payloads.FlexibleBooleanTest do
+ use ExUnit.Case, async: true
+
+ alias RealtimeWeb.Channels.Payloads.FlexibleBoolean
+
+ describe "type/0" do
+ test "returns :boolean" do
+ assert FlexibleBoolean.type() == :boolean
+ end
+ end
+
+ describe "cast/1" do
+ test "casts boolean true as-is" do
+ assert FlexibleBoolean.cast(true) == {:ok, true}
+ end
+
+ test "casts boolean false as-is" do
+ assert FlexibleBoolean.cast(false) == {:ok, false}
+ end
+
+ test "casts string 'true' in any case to boolean true" do
+ assert FlexibleBoolean.cast("true") == {:ok, true}
+ assert FlexibleBoolean.cast("True") == {:ok, true}
+ assert FlexibleBoolean.cast("TRUE") == {:ok, true}
+ assert FlexibleBoolean.cast("tRuE") == {:ok, true}
+ end
+
+ test "casts string 'false' in any case to boolean false" do
+ assert FlexibleBoolean.cast("false") == {:ok, false}
+ assert FlexibleBoolean.cast("False") == {:ok, false}
+ assert FlexibleBoolean.cast("FALSE") == {:ok, false}
+ assert FlexibleBoolean.cast("fAlSe") == {:ok, false}
+ end
+
+ test "returns error for invalid string values" do
+ assert FlexibleBoolean.cast("test") == :error
+ assert FlexibleBoolean.cast("yes") == :error
+ assert FlexibleBoolean.cast("no") == :error
+ assert FlexibleBoolean.cast("1") == :error
+ assert FlexibleBoolean.cast("0") == :error
+ assert FlexibleBoolean.cast("") == :error
+ end
+
+ test "returns error for non-boolean, non-string values" do
+ assert FlexibleBoolean.cast(1) == :error
+ assert FlexibleBoolean.cast(0) == :error
+ assert FlexibleBoolean.cast(nil) == :error
+ assert FlexibleBoolean.cast(%{}) == :error
+ assert FlexibleBoolean.cast([]) == :error
+ end
+ end
+
+ describe "load/1" do
+ test "loads boolean values" do
+ assert FlexibleBoolean.load(true) == {:ok, true}
+ assert FlexibleBoolean.load(false) == {:ok, false}
+ end
+ end
+
+ describe "dump/1" do
+ test "dumps boolean values" do
+ assert FlexibleBoolean.dump(true) == {:ok, true}
+ assert FlexibleBoolean.dump(false) == {:ok, false}
+ end
+
+ test "returns error for non-boolean values" do
+ assert FlexibleBoolean.dump("true") == :error
+ assert FlexibleBoolean.dump(1) == :error
+ assert FlexibleBoolean.dump(nil) == :error
+ end
+ end
+end
diff --git a/test/realtime_web/channels/payloads/join_test.exs b/test/realtime_web/channels/payloads/join_test.exs
index 32bf1b397..6c025b9bd 100644
--- a/test/realtime_web/channels/payloads/join_test.exs
+++ b/test/realtime_web/channels/payloads/join_test.exs
@@ -6,6 +6,7 @@ defmodule RealtimeWeb.Channels.Payloads.JoinTest do
alias RealtimeWeb.Channels.Payloads.Join
alias RealtimeWeb.Channels.Payloads.Config
alias RealtimeWeb.Channels.Payloads.Broadcast
+ alias RealtimeWeb.Channels.Payloads.Broadcast.Replay
alias RealtimeWeb.Channels.Payloads.Presence
alias RealtimeWeb.Channels.Payloads.PostgresChange
@@ -17,7 +18,7 @@ defmodule RealtimeWeb.Channels.Payloads.JoinTest do
config = %{
"config" => %{
"private" => false,
- "broadcast" => %{"ack" => false, "self" => false},
+ "broadcast" => %{"ack" => false, "self" => false, "replay" => %{"since" => 1, "limit" => 10}},
"presence" => %{"enabled" => true, "key" => key},
"postgres_changes" => [
%{"event" => "INSERT", "schema" => "public", "table" => "users", "filter" => "id=eq.1"},
@@ -37,8 +38,9 @@ defmodule RealtimeWeb.Channels.Payloads.JoinTest do
postgres_changes: postgres_changes
} = config
- assert %Broadcast{ack: false, self: false} = broadcast
+ assert %Broadcast{ack: false, self: false, replay: replay} = broadcast
assert %Presence{enabled: true, key: ^key} = presence
+ assert %Replay{since: 1, limit: 10} = replay
assert [
%PostgresChange{event: "INSERT", schema: "public", table: "users", filter: "id=eq.1"},
@@ -56,6 +58,25 @@ defmodule RealtimeWeb.Channels.Payloads.JoinTest do
assert is_binary(key)
end
+ test "presence key can be number" do
+ config = %{"config" => %{"presence" => %{"enabled" => true, "key" => 123}}}
+
+ assert {:ok, %Join{config: %Config{presence: %Presence{key: key}}}} = Join.validate(config)
+
+ assert key == 123
+ end
+
+ test "invalid replay" do
+ config = %{"config" => %{"broadcast" => %{"replay" => 123}}}
+
+ assert {
+ :error,
+ :invalid_join_payload,
+ %{config: %{broadcast: %{replay: ["unable to parse, expected a map"]}}}
+ } =
+ Join.validate(config)
+ end
+
test "missing enabled presence defaults to true" do
config = %{"config" => %{"presence" => %{}}}
@@ -92,5 +113,202 @@ defmodule RealtimeWeb.Channels.Payloads.JoinTest do
user_token: ["unable to parse, expected string"]
}
end
+
+ test "handles postgres changes with nil value in array as empty array" do
+ config = %{"config" => %{"postgres_changes" => [nil]}}
+
+ assert {:ok, %Join{config: %Config{postgres_changes: []}}} = Join.validate(config)
+ end
+
+ test "handles postgres changes as nil as empty array" do
+ config = %{"config" => %{"postgres_changes" => nil}}
+
+ assert {:ok, %Join{config: %Config{postgres_changes: []}}} = Join.validate(config)
+ end
+
+ test "accepts string 'true' for boolean fields" do
+ config = %{
+ "config" => %{
+ "private" => "true",
+ "broadcast" => %{"ack" => "true", "self" => "true"},
+ "presence" => %{"enabled" => "true"}
+ }
+ }
+
+ assert {:ok, %Join{config: config_result}} = Join.validate(config)
+
+ assert %Config{
+ private: true,
+ broadcast: %Broadcast{ack: true, self: true},
+ presence: %Presence{enabled: true}
+ } = config_result
+ end
+
+ test "accepts string 'True' for boolean fields" do
+ config = %{
+ "config" => %{
+ "private" => "True",
+ "broadcast" => %{"ack" => "True", "self" => "True"},
+ "presence" => %{"enabled" => "True"}
+ }
+ }
+
+ assert {:ok, %Join{config: config_result}} = Join.validate(config)
+
+ assert %Config{
+ private: true,
+ broadcast: %Broadcast{ack: true, self: true},
+ presence: %Presence{enabled: true}
+ } = config_result
+ end
+
+ test "accepts string 'false' for boolean fields" do
+ config = %{
+ "config" => %{
+ "private" => "false",
+ "broadcast" => %{"ack" => "false", "self" => "false"},
+ "presence" => %{"enabled" => "false"}
+ }
+ }
+
+ assert {:ok, %Join{config: config_result}} = Join.validate(config)
+
+ assert %Config{
+ private: false,
+ broadcast: %Broadcast{ack: false, self: false},
+ presence: %Presence{enabled: false}
+ } = config_result
+ end
+
+ test "accepts string 'False' for boolean fields" do
+ config = %{
+ "config" => %{
+ "private" => "False",
+ "broadcast" => %{"ack" => "False", "self" => "False"},
+ "presence" => %{"enabled" => "False"}
+ }
+ }
+
+ assert {:ok, %Join{config: config_result}} = Join.validate(config)
+
+ assert %Config{
+ private: false,
+ broadcast: %Broadcast{ack: false, self: false},
+ presence: %Presence{enabled: false}
+ } = config_result
+ end
+
+ test "rejects invalid boolean strings" do
+ config = %{
+ "config" => %{
+ "private" => "yes",
+ "broadcast" => %{"ack" => "a", "self" => "b"},
+ "presence" => %{"enabled" => "no"}
+ }
+ }
+
+ assert {:error, :invalid_join_payload, errors} = Join.validate(config)
+
+ assert errors == %{
+ config: %{
+ private: ["unable to parse, expected boolean"],
+ broadcast: %{
+ ack: ["unable to parse, expected boolean"],
+ self: ["unable to parse, expected boolean"]
+ },
+ presence: %{enabled: ["unable to parse, expected boolean"]}
+ }
+ }
+ end
+ end
+
+ describe "presence_enabled?/1" do
+ test "returns enabled value from config" do
+ join = %Join{config: %Config{presence: %Presence{enabled: false}}}
+ refute Join.presence_enabled?(join)
+
+ join = %Join{config: %Config{presence: %Presence{enabled: true}}}
+ assert Join.presence_enabled?(join)
+ end
+
+ test "defaults to true when config is nil" do
+ assert Join.presence_enabled?(%Join{config: nil})
+ end
+
+ test "defaults to true for non-Join struct" do
+ assert Join.presence_enabled?(nil)
+ end
+ end
+
+ describe "presence_key/1" do
+ test "returns UUID when key is empty string" do
+ join = %Join{config: %Config{presence: %Presence{key: ""}}}
+ key = Join.presence_key(join)
+ assert is_binary(key)
+ assert key != ""
+ end
+
+ test "returns the configured key" do
+ join = %Join{config: %Config{presence: %Presence{key: "my_key"}}}
+ assert Join.presence_key(join) == "my_key"
+ end
+
+ test "returns UUID for non-matching struct" do
+ key = Join.presence_key(%Join{config: nil})
+ assert is_binary(key)
+ assert key != ""
+ end
+ end
+
+ describe "ack_broadcast?/1" do
+ test "returns ack value from config" do
+ join = %Join{config: %Config{broadcast: %Broadcast{ack: true}}}
+ assert Join.ack_broadcast?(join)
+
+ join = %Join{config: %Config{broadcast: %Broadcast{ack: false}}}
+ refute Join.ack_broadcast?(join)
+ end
+
+ test "defaults to false when config is nil" do
+ refute Join.ack_broadcast?(%Join{config: nil})
+ end
+ end
+
+ describe "self_broadcast?/1" do
+ test "returns self value from config" do
+ join = %Join{config: %Config{broadcast: %Broadcast{self: true}}}
+ assert Join.self_broadcast?(join)
+
+ join = %Join{config: %Config{broadcast: %Broadcast{self: false}}}
+ refute Join.self_broadcast?(join)
+ end
+
+ test "defaults to false when config is nil" do
+ refute Join.self_broadcast?(%Join{config: nil})
+ end
+ end
+
+ describe "private?/1" do
+ test "returns private value from config" do
+ join = %Join{config: %Config{private: true}}
+ assert Join.private?(join)
+
+ join = %Join{config: %Config{private: false}}
+ refute Join.private?(join)
+ end
+
+ test "defaults to false when config is nil" do
+ refute Join.private?(%Join{config: nil})
+ end
+ end
+
+ describe "error_message/2" do
+ test "returns message with type when type is present" do
+ assert Join.error_message(:field, type: :string) == "unable to parse, expected string"
+ end
+
+ test "returns generic message when type is not present" do
+ assert Join.error_message(:field, []) == "unable to parse"
+ end
end
end
diff --git a/test/realtime_web/channels/realtime_channel/broadcast_handler_test.exs b/test/realtime_web/channels/realtime_channel/broadcast_handler_test.exs
index 2cd7005df..b2aa9b90e 100644
--- a/test/realtime_web/channels/realtime_channel/broadcast_handler_test.exs
+++ b/test/realtime_web/channels/realtime_channel/broadcast_handler_test.exs
@@ -1,5 +1,8 @@
defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
- use Realtime.DataCase, async: true
+ use Realtime.DataCase,
+ async: true,
+ parameterize: [%{serializer: Phoenix.Socket.V1.JSONSerializer}, %{serializer: RealtimeWeb.Socket.V2Serializer}]
+
use Mimic
import Generators
@@ -17,26 +20,27 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
setup [:initiate_tenant]
+ @payload %{"a" => "b"}
+
describe "handle/3" do
- test "with write true policy, user is able to send message", %{topic: topic, tenant: tenant, db_conn: db_conn} do
+ test "with write true policy, user is able to send message",
+ %{topic: topic, tenant: tenant, db_conn: db_conn, serializer: serializer} do
socket = socket_fixture(tenant, topic, policies: %Policies{broadcast: %BroadcastPolicies{write: true}})
for _ <- 1..100, reduce: socket do
socket ->
- {:reply, :ok, socket} = BroadcastHandler.handle(%{"a" => "b"}, db_conn, socket)
+ {:reply, :ok, socket} = BroadcastHandler.handle(@payload, db_conn, socket)
socket
end
- Process.sleep(120)
-
for _ <- 1..100 do
topic = "realtime:#{topic}"
assert_receive {:socket_push, :text, data}
- message = data |> IO.iodata_to_binary() |> Jason.decode!()
- assert message == %{"event" => "broadcast", "payload" => %{"a" => "b"}, "ref" => nil, "topic" => topic}
+
+ assert Jason.decode!(data) == message(serializer, topic, @payload)
end
- {:ok, %{avg: avg, bucket: buckets}} = RateCounter.get(Tenants.events_per_second_rate(tenant))
+ {:ok, %{avg: avg, bucket: buckets}} = RateCounterHelper.tick!(Tenants.events_per_second_rate(tenant))
assert Enum.sum(buckets) == 100
assert avg > 0
end
@@ -50,40 +54,37 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
socket
end
- Process.sleep(120)
-
refute_received _any
- {:ok, %{avg: avg}} = RateCounter.get(Tenants.events_per_second_rate(tenant))
+ {:ok, %{avg: avg}} = RateCounterHelper.tick!(Tenants.events_per_second_rate(tenant))
assert avg == 0.0
end
@tag policies: [:authenticated_read_broadcast, :authenticated_write_broadcast]
- test "with nil policy but valid user, is able to send message", %{topic: topic, tenant: tenant, db_conn: db_conn} do
+ test "with nil policy but valid user, is able to send message",
+ %{topic: topic, tenant: tenant, db_conn: db_conn, serializer: serializer} do
socket = socket_fixture(tenant, topic)
for _ <- 1..100, reduce: socket do
socket ->
- {:reply, :ok, socket} = BroadcastHandler.handle(%{"a" => "b"}, db_conn, socket)
+ {:reply, :ok, socket} = BroadcastHandler.handle(@payload, db_conn, socket)
socket
end
- Process.sleep(120)
-
for _ <- 1..100 do
topic = "realtime:#{topic}"
assert_received {:socket_push, :text, data}
- message = data |> IO.iodata_to_binary() |> Jason.decode!()
- assert message == %{"event" => "broadcast", "payload" => %{"a" => "b"}, "ref" => nil, "topic" => topic}
+ assert Jason.decode!(data) == message(serializer, topic, @payload)
end
- {:ok, %{avg: avg, bucket: buckets}} = RateCounter.get(Tenants.events_per_second_rate(tenant))
+ {:ok, %{avg: avg, bucket: buckets}} = RateCounterHelper.tick!(Tenants.events_per_second_rate(tenant))
assert Enum.sum(buckets) == 100
assert avg > 0.0
end
@tag policies: [:authenticated_read_matching_user_sub, :authenticated_write_matching_user_sub], sub: UUID.generate()
- test "with valid sub, is able to send message", %{topic: topic, tenant: tenant, db_conn: db_conn, sub: sub} do
+ test "with valid sub, is able to send message",
+ %{topic: topic, tenant: tenant, db_conn: db_conn, sub: sub, serializer: serializer} do
socket =
socket_fixture(tenant, topic,
policies: %Policies{broadcast: %BroadcastPolicies{write: nil, read: true}},
@@ -92,17 +93,14 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
for _ <- 1..100, reduce: socket do
socket ->
- {:reply, :ok, socket} = BroadcastHandler.handle(%{"a" => "b"}, db_conn, socket)
+ {:reply, :ok, socket} = BroadcastHandler.handle(@payload, db_conn, socket)
socket
end
- Process.sleep(120)
-
for _ <- 1..100 do
topic = "realtime:#{topic}"
assert_received {:socket_push, :text, data}
- message = data |> IO.iodata_to_binary() |> Jason.decode!()
- assert message == %{"event" => "broadcast", "payload" => %{"a" => "b"}, "ref" => nil, "topic" => topic}
+ assert Jason.decode!(data) == message(serializer, topic, @payload)
end
end
@@ -120,13 +118,12 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
socket
end
- Process.sleep(120)
-
- refute_received {:socket_push, :text, _}
+ refute_receive {:socket_push, :text, _}, 120
end
@tag policies: [:read_matching_user_role, :write_matching_user_role], role: "anon"
- test "with valid role, is able to send message", %{topic: topic, tenant: tenant, db_conn: db_conn} do
+ test "with valid role, is able to send message",
+ %{topic: topic, tenant: tenant, db_conn: db_conn, serializer: serializer} do
socket =
socket_fixture(tenant, topic,
policies: %Policies{broadcast: %BroadcastPolicies{write: nil, read: true}},
@@ -135,17 +132,14 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
for _ <- 1..100, reduce: socket do
socket ->
- {:reply, :ok, socket} = BroadcastHandler.handle(%{"a" => "b"}, db_conn, socket)
+ {:reply, :ok, socket} = BroadcastHandler.handle(@payload, db_conn, socket)
socket
end
- Process.sleep(120)
-
for _ <- 1..100 do
topic = "realtime:#{topic}"
assert_received {:socket_push, :text, data}
- message = data |> IO.iodata_to_binary() |> Jason.decode!()
- assert message == %{"event" => "broadcast", "payload" => %{"a" => "b"}, "ref" => nil, "topic" => topic}
+ assert Jason.decode!(data) == message(serializer, topic, @payload)
end
end
@@ -163,9 +157,7 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
socket
end
- Process.sleep(120)
-
- refute_received {:socket_push, :text, _}
+ refute_receive {:socket_push, :text, _}, 120
end
test "with nil policy and invalid user, won't send message", %{topic: topic, tenant: tenant, db_conn: db_conn} do
@@ -177,16 +169,15 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
socket
end
- Process.sleep(120)
-
refute_received _any
- {:ok, %{avg: avg}} = RateCounter.get(Tenants.events_per_second_rate(tenant))
+ {:ok, %{avg: avg}} = RateCounterHelper.tick!(Tenants.events_per_second_rate(tenant))
assert avg == 0.0
end
@tag policies: [:authenticated_read_broadcast, :authenticated_write_broadcast]
- test "validation only runs once on nil and valid policies", %{topic: topic, tenant: tenant, db_conn: db_conn} do
+ test "validation only runs once on nil and valid policies",
+ %{topic: topic, tenant: tenant, db_conn: db_conn, serializer: serializer} do
socket = socket_fixture(tenant, topic)
expect(Authorization, :get_write_authorizations, 1, fn conn, db_conn, auth_context ->
@@ -197,15 +188,14 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
for _ <- 1..100, reduce: socket do
socket ->
- {:reply, :ok, socket} = BroadcastHandler.handle(%{"a" => "b"}, db_conn, socket)
+ {:reply, :ok, socket} = BroadcastHandler.handle(@payload, db_conn, socket)
socket
end
for _ <- 1..100 do
topic = "realtime:#{topic}"
assert_receive {:socket_push, :text, data}
- message = data |> IO.iodata_to_binary() |> Jason.decode!()
- assert message == %{"event" => "broadcast", "payload" => %{"a" => "b"}, "ref" => nil, "topic" => topic}
+ assert Jason.decode!(data) == message(serializer, topic, @payload)
end
end
@@ -222,12 +212,10 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
socket
end
- Process.sleep(100)
-
- refute_received _
+ refute_receive _, 100
end
- test "no ack still sends message", %{topic: topic, tenant: tenant, db_conn: db_conn} do
+ test "no ack still sends message", %{topic: topic, tenant: tenant, db_conn: db_conn, serializer: serializer} do
socket =
socket_fixture(tenant, topic,
policies: %Policies{broadcast: %BroadcastPolicies{write: true}},
@@ -236,7 +224,7 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
for _ <- 1..100, reduce: socket do
socket ->
- {:noreply, socket} = BroadcastHandler.handle(%{"a" => "b"}, db_conn, socket)
+ {:noreply, socket} = BroadcastHandler.handle(@payload, db_conn, socket)
socket
end
@@ -245,56 +233,128 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
for _ <- 1..100 do
topic = "realtime:#{topic}"
assert_received {:socket_push, :text, data}
- message = data |> IO.iodata_to_binary() |> Jason.decode!()
- assert message == %{"event" => "broadcast", "payload" => %{"a" => "b"}, "ref" => nil, "topic" => topic}
+ assert Jason.decode!(data) == message(serializer, topic, @payload)
end
end
- test "public channels are able to send messages", %{topic: topic, tenant: tenant, db_conn: db_conn} do
+ test "public channels are able to send messages",
+ %{topic: topic, tenant: tenant, db_conn: db_conn, serializer: serializer} do
socket = socket_fixture(tenant, topic, private?: false, policies: nil)
for _ <- 1..100, reduce: socket do
socket ->
- {:reply, :ok, socket} = BroadcastHandler.handle(%{"a" => "b"}, db_conn, socket)
+ {:reply, :ok, socket} = BroadcastHandler.handle(@payload, db_conn, socket)
socket
end
- Process.sleep(120)
-
for _ <- 1..100 do
topic = "realtime:#{topic}"
assert_received {:socket_push, :text, data}
- message = data |> IO.iodata_to_binary() |> Jason.decode!()
- assert message == %{"event" => "broadcast", "payload" => %{"a" => "b"}, "ref" => nil, "topic" => topic}
+ assert Jason.decode!(data) == message(serializer, topic, @payload)
end
- {:ok, %{avg: avg, bucket: buckets}} = RateCounter.get(Tenants.events_per_second_rate(tenant))
+ {:ok, %{avg: avg, bucket: buckets}} = RateCounterHelper.tick!(Tenants.events_per_second_rate(tenant))
assert Enum.sum(buckets) == 100
assert avg > 0.0
end
- test "public channels are able to send messages and ack", %{topic: topic, tenant: tenant, db_conn: db_conn} do
+ test "public channels are able to send messages and ack",
+ %{topic: topic, tenant: tenant, db_conn: db_conn, serializer: serializer} do
socket = socket_fixture(tenant, topic, private?: false, policies: nil)
for _ <- 1..100, reduce: socket do
socket ->
- {:reply, :ok, socket} = BroadcastHandler.handle(%{"a" => "b"}, db_conn, socket)
+ {:reply, :ok, socket} = BroadcastHandler.handle(@payload, db_conn, socket)
socket
end
for _ <- 1..100 do
topic = "realtime:#{topic}"
assert_receive {:socket_push, :text, data}
- message = data |> IO.iodata_to_binary() |> Jason.decode!()
- assert message == %{"event" => "broadcast", "payload" => %{"a" => "b"}, "ref" => nil, "topic" => topic}
+ assert Jason.decode!(data) == message(serializer, topic, @payload)
end
- Process.sleep(120)
- {:ok, %{avg: avg, bucket: buckets}} = RateCounter.get(Tenants.events_per_second_rate(tenant))
+ {:ok, %{avg: avg, bucket: buckets}} = RateCounterHelper.tick!(Tenants.events_per_second_rate(tenant))
assert Enum.sum(buckets) == 100
assert avg > 0.0
end
+ test "V2 json UserBroadcastPush", %{topic: topic, tenant: tenant, db_conn: db_conn, serializer: serializer} do
+ socket = socket_fixture(tenant, topic, private?: false, policies: nil)
+
+ user_broadcast_payload = %{"a" => "b"}
+ json_encoded_user_broadcast_payload = Jason.encode!(user_broadcast_payload)
+
+ {:reply, :ok, _socket} =
+ BroadcastHandler.handle({"event123", :json, json_encoded_user_broadcast_payload, %{}}, db_conn, socket)
+
+ topic = "realtime:#{topic}"
+ assert_receive {:socket_push, code, data}
+
+ if serializer == RealtimeWeb.Socket.V2Serializer do
+ assert code == :binary
+
+ assert data ==
+ <<
+ # user broadcast = 4
+ 4::size(8),
+ # topic_size
+ byte_size(topic),
+ # user_event_size
+ byte_size("event123"),
+ # metadata_size
+ 0,
+ # json encoding
+ 1::size(8),
+ topic::binary,
+ "event123"
+ >> <> json_encoded_user_broadcast_payload
+ else
+ assert code == :text
+
+ assert Jason.decode!(data) ==
+ message(serializer, topic, %{
+ "event" => "event123",
+ "payload" => user_broadcast_payload,
+ "type" => "broadcast"
+ })
+ end
+ end
+
+ test "V2 binary UserBroadcastPush", %{topic: topic, tenant: tenant, db_conn: db_conn, serializer: serializer} do
+ socket = socket_fixture(tenant, topic, private?: false, policies: nil)
+
+ user_broadcast_payload = <<123, 456, 789>>
+
+ {:reply, :ok, _socket} =
+ BroadcastHandler.handle({"event123", :binary, user_broadcast_payload, %{}}, db_conn, socket)
+
+ topic = "realtime:#{topic}"
+
+ if serializer == RealtimeWeb.Socket.V2Serializer do
+ assert_receive {:socket_push, :binary, data}
+
+ assert data ==
+ <<
+ # user broadcast = 4
+ 4::size(8),
+ # topic_size
+ byte_size(topic),
+ # user_event_size
+ byte_size("event123"),
+ # metadata_size
+ 0,
+ # binary encoding
+ 0::size(8),
+ topic::binary,
+ "event123"
+ >> <> user_broadcast_payload
+ else
+ # Can't receive binary payloads on V1 serializer
+ refute_receive {:socket_push, _code, _data}
+ end
+ end
+
@tag policies: [:broken_write_presence]
test "handle failing rls policy", %{topic: topic, tenant: tenant, db_conn: db_conn} do
socket = socket_fixture(tenant, topic)
@@ -303,14 +363,81 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
capture_log(fn ->
{:noreply, _socket} = BroadcastHandler.handle(%{}, db_conn, socket)
- # Enough for the RateCounter to calculate the last bucket
- refute_received _, 1200
+ {:ok, %{avg: avg}} = RateCounterHelper.tick!(Tenants.events_per_second_rate(tenant))
+ assert avg == 0.0
+
+ refute_receive _, 200
end)
assert log =~ "RlsPolicyError"
+ end
- {:ok, %{avg: avg}} = RateCounter.get(Tenants.events_per_second_rate(tenant))
- assert avg == 0.0
+ test "handle payload size exceeding limits in private channels", %{topic: topic, tenant: tenant, db_conn: db_conn} do
+ socket =
+ socket_fixture(tenant, topic,
+ policies: %Policies{broadcast: %BroadcastPolicies{write: true}},
+ ack_broadcast: false
+ )
+
+ assert {:noreply, _} =
+ BroadcastHandler.handle(
+ %{"data" => random_string(tenant.max_payload_size_in_kb * 1000 + 1)},
+ db_conn,
+ socket
+ )
+
+ refute_receive {:socket_push, :text, _}, 120
+ end
+
+ test "handle payload size exceeding limits in public channels", %{topic: topic, tenant: tenant, db_conn: db_conn} do
+ socket = socket_fixture(tenant, topic, ack_broadcast: false, private?: false)
+
+ assert {:noreply, _} =
+ BroadcastHandler.handle(
+ %{"data" => random_string(tenant.max_payload_size_in_kb * 1000 + 1)},
+ db_conn,
+ socket
+ )
+
+ refute_receive {:socket_push, :text, _}, 120
+ end
+
+ test "handle payload size exceeding limits in private channel and if ack it will receive error", %{
+ topic: topic,
+ tenant: tenant,
+ db_conn: db_conn
+ } do
+ socket =
+ socket_fixture(tenant, topic,
+ policies: %Policies{broadcast: %BroadcastPolicies{write: true}},
+ ack_broadcast: true
+ )
+
+ assert {:reply, {:error, :payload_size_exceeded}, _} =
+ BroadcastHandler.handle(
+ %{"data" => random_string(tenant.max_payload_size_in_kb * 1000 + 1)},
+ db_conn,
+ socket
+ )
+
+ refute_receive {:socket_push, :text, _}, 120
+ end
+
+ test "handle payload size exceeding limits in public channels and if ack it will receive error", %{
+ topic: topic,
+ tenant: tenant,
+ db_conn: db_conn
+ } do
+ socket = socket_fixture(tenant, topic, ack_broadcast: true, private?: false)
+
+ assert {:reply, {:error, :payload_size_exceeded}, _} =
+ BroadcastHandler.handle(
+ %{"data" => random_string(tenant.max_payload_size_in_kb * 1000 + 1)},
+ db_conn,
+ socket
+ )
+
+ refute_receive {:socket_push, :text, _}, 120
end
end
@@ -318,7 +445,7 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
tenant = Containers.checkout_tenant(run_migrations: true)
# Warm cache to avoid Cachex and Ecto.Sandbox ownership issues
- Cachex.put!(Realtime.Tenants.Cache, {{:get_tenant_by_external_id, 1}, [tenant.external_id]}, {:cached, tenant})
+ Realtime.Tenants.Cache.update_cache(tenant)
rate = Tenants.events_per_second_rate(tenant)
RateCounter.new(rate, tick: 100)
@@ -331,7 +458,7 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
fastlane =
RealtimeWeb.RealtimeChannel.MessageDispatcher.fastlane_metadata(
self(),
- Phoenix.Socket.V1.JSONSerializer,
+ context.serializer,
"realtime:#{topic}",
:warning,
"tenant_id"
@@ -389,4 +516,10 @@ defmodule RealtimeWeb.RealtimeChannel.BroadcastHandlerTest do
}
}
end
+
+ defp message(RealtimeWeb.Socket.V2Serializer, topic, payload), do: [nil, nil, topic, "broadcast", payload]
+
+ defp message(Phoenix.Socket.V1.JSONSerializer, topic, payload) do
+ %{"event" => "broadcast", "payload" => payload, "ref" => nil, "topic" => topic}
+ end
end
diff --git a/test/realtime_web/channels/realtime_channel/logging_test.exs b/test/realtime_web/channels/realtime_channel/logging_test.exs
index 92634daef..cd131d16e 100644
--- a/test/realtime_web/channels/realtime_channel/logging_test.exs
+++ b/test/realtime_web/channels/realtime_channel/logging_test.exs
@@ -37,6 +37,7 @@ defmodule RealtimeWeb.RealtimeChannel.LoggingTest do
assert log =~ "sub=#{sub}"
assert log =~ "exp=#{exp}"
assert log =~ "iss=#{iss}"
+ assert log =~ "error_code=TestError"
end
end
@@ -57,6 +58,7 @@ defmodule RealtimeWeb.RealtimeChannel.LoggingTest do
assert log =~ "sub=#{sub}"
assert log =~ "exp=#{exp}"
assert log =~ "iss=#{iss}"
+ assert log =~ "error_code=TestWarning"
end
end
@@ -67,10 +69,14 @@ defmodule RealtimeWeb.RealtimeChannel.LoggingTest do
for log_level <- log_levels do
socket = %{assigns: %{log_level: log_level, tenant: random_string(), access_token: "test_token"}}
- assert capture_log(fn ->
- assert Logging.maybe_log_error(socket, "TestCode", "test message") ==
- {:error, %{reason: "TestCode: test message"}}
- end) =~ "TestCode: test message"
+ log =
+ capture_log(fn ->
+ assert Logging.maybe_log_error(socket, "TestCode", "test message") ==
+ {:error, %{reason: "TestCode: test message"}}
+ end)
+
+ assert log =~ "TestCode: test message"
+ assert log =~ "error_code=TestCode"
assert capture_log(fn ->
assert Logging.maybe_log_error(socket, "TestCode", %{a: "b"}) ==
@@ -103,11 +109,14 @@ defmodule RealtimeWeb.RealtimeChannel.LoggingTest do
for log_level <- log_levels do
socket = %{assigns: %{log_level: log_level, tenant: random_string(), access_token: "test_token"}}
- assert capture_log(fn ->
- assert Logging.maybe_log_warning(socket, "TestCode", "test message") ==
- {:error, %{reason: "TestCode: test message"}}
- end) =~
- "TestCode: test message"
+ log =
+ capture_log(fn ->
+ assert Logging.maybe_log_warning(socket, "TestCode", "test message") ==
+ {:error, %{reason: "TestCode: test message"}}
+ end)
+
+ assert log =~ "TestCode: test message"
+ assert log =~ "error_code=TestCode"
assert capture_log(fn ->
assert Logging.maybe_log_warning(socket, "TestCode", %{a: "b"}) ==
diff --git a/test/realtime_web/channels/realtime_channel/message_dispatcher_test.exs b/test/realtime_web/channels/realtime_channel/message_dispatcher_test.exs
index 7a9e2eb25..834cf7ad8 100644
--- a/test/realtime_web/channels/realtime_channel/message_dispatcher_test.exs
+++ b/test/realtime_web/channels/realtime_channel/message_dispatcher_test.exs
@@ -4,7 +4,10 @@ defmodule RealtimeWeb.RealtimeChannel.MessageDispatcherTest do
import ExUnit.CaptureLog
alias Phoenix.Socket.Broadcast
+ alias Phoenix.Socket.V1
alias RealtimeWeb.RealtimeChannel.MessageDispatcher
+ alias RealtimeWeb.Socket.UserBroadcast
+ alias RealtimeWeb.Socket.V2Serializer
defmodule TestSerializer do
def fastlane!(msg) do
@@ -16,18 +19,35 @@ defmodule RealtimeWeb.RealtimeChannel.MessageDispatcherTest do
describe "fastlane_metadata/5" do
test "info level" do
assert MessageDispatcher.fastlane_metadata(self(), Serializer, "realtime:topic", :info, "tenant_id") ==
- {:realtime_channel_fastlane, self(), Serializer, "realtime:topic", {:log, "tenant_id"}}
+ {:rc_fastlane, self(), Serializer, "realtime:topic", :info, "tenant_id", MapSet.new()}
end
test "non-info level" do
assert MessageDispatcher.fastlane_metadata(self(), Serializer, "realtime:topic", :warning, "tenant_id") ==
- {:realtime_channel_fastlane, self(), Serializer, "realtime:topic"}
+ {:rc_fastlane, self(), Serializer, "realtime:topic", :warning, "tenant_id", MapSet.new()}
+ end
+
+ test "replayed message ids" do
+ assert MessageDispatcher.fastlane_metadata(
+ self(),
+ Serializer,
+ "realtime:topic",
+ :warning,
+ "tenant_id",
+ MapSet.new([1])
+ ) ==
+ {:rc_fastlane, self(), Serializer, "realtime:topic", :warning, "tenant_id", MapSet.new([1])}
end
end
describe "dispatch/3" do
setup do
- {:ok, _pid} = Agent.start_link(fn -> 0 end, name: TestSerializer)
+ {:ok, _pid} =
+ start_supervised(%{
+ id: TestSerializer,
+ start: {Agent, :start_link, [fn -> 0 end, [name: TestSerializer]]}
+ })
+
:ok
end
@@ -50,12 +70,11 @@ defmodule RealtimeWeb.RealtimeChannel.MessageDispatcherTest do
from_pid = :erlang.list_to_pid(~c'<0.2.1>')
subscribers = [
- {subscriber_pid, {:realtime_channel_fastlane, self(), TestSerializer, "realtime:topic", {:log, "tenant123"}}},
- {subscriber_pid, {:realtime_channel_fastlane, self(), TestSerializer, "realtime:topic"}}
+ {subscriber_pid, {:rc_fastlane, self(), TestSerializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), TestSerializer, "realtime:topic", :warning, "tenant123", MapSet.new()}}
]
msg = %Broadcast{topic: "some:other:topic", event: "event", payload: %{data: "test"}}
- require Logger
log =
capture_log(fn ->
@@ -75,6 +94,130 @@ defmodule RealtimeWeb.RealtimeChannel.MessageDispatcherTest do
refute_receive _any
end
+ test "dispatches 'presence_diff' messages to fastlane subscribers" do
+ parent = self()
+
+ subscriber_pid =
+ spawn(fn ->
+ loop = fn loop ->
+ receive do
+ msg ->
+ send(parent, {:subscriber, msg})
+ loop.(loop)
+ end
+ end
+
+ loop.(loop)
+ end)
+
+ from_pid = :erlang.list_to_pid(~c'<0.2.1>')
+
+ subscribers = [
+ {subscriber_pid, {:rc_fastlane, self(), TestSerializer, "realtime:topic", :info, "tenant456", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), TestSerializer, "realtime:topic", :warning, "tenant456", MapSet.new()}}
+ ]
+
+ msg = %Broadcast{topic: "some:other:topic", event: "presence_diff", payload: %{data: "test"}}
+
+ log =
+ capture_log(fn ->
+ assert MessageDispatcher.dispatch(subscribers, from_pid, msg) == :ok
+ end)
+
+ assert log =~ "Received message on realtime:topic with payload: #{inspect(msg, pretty: true)}"
+
+ assert_receive {:encoded, %Broadcast{event: "presence_diff", payload: %{data: "test"}, topic: "realtime:topic"}}
+ assert_receive {:encoded, %Broadcast{event: "presence_diff", payload: %{data: "test"}, topic: "realtime:topic"}}
+
+ assert Agent.get(TestSerializer, & &1) == 1
+
+ assert Realtime.GenCounter.get(Realtime.Tenants.presence_events_per_second_key("tenant456")) == 2
+
+ refute_receive _any
+ end
+
+ test "does not dispatch messages to fastlane subscribers if they already replayed it" do
+ parent = self()
+
+ subscriber_pid =
+ spawn(fn ->
+ loop = fn loop ->
+ receive do
+ msg ->
+ send(parent, {:subscriber, msg})
+ loop.(loop)
+ end
+ end
+
+ loop.(loop)
+ end)
+
+ from_pid = :erlang.list_to_pid(~c'<0.2.1>')
+ replayed_message_ids = MapSet.new(["123"])
+
+ subscribers = [
+ {subscriber_pid,
+ {:rc_fastlane, self(), TestSerializer, "realtime:topic", :info, "tenant123", replayed_message_ids}},
+ {subscriber_pid,
+ {:rc_fastlane, self(), TestSerializer, "realtime:topic", :warning, "tenant123", replayed_message_ids}}
+ ]
+
+ msg = %Broadcast{
+ topic: "some:other:topic",
+ event: "event",
+ payload: %{"data" => "test", "meta" => %{"id" => "123"}}
+ }
+
+ assert MessageDispatcher.dispatch(subscribers, from_pid, msg) == :ok
+
+ assert Agent.get(TestSerializer, & &1) == 0
+
+ refute_receive _any
+ end
+
+ test "payload is not a map" do
+ parent = self()
+
+ subscriber_pid =
+ spawn(fn ->
+ loop = fn loop ->
+ receive do
+ msg ->
+ send(parent, {:subscriber, msg})
+ loop.(loop)
+ end
+ end
+
+ loop.(loop)
+ end)
+
+ from_pid = :erlang.list_to_pid(~c'<0.2.1>')
+
+ subscribers = [
+ {subscriber_pid, {:rc_fastlane, self(), TestSerializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), TestSerializer, "realtime:topic", :warning, "tenant123", MapSet.new()}}
+ ]
+
+ msg = %Broadcast{topic: "some:other:topic", event: "event", payload: "not a map"}
+
+ log =
+ capture_log(fn ->
+ assert MessageDispatcher.dispatch(subscribers, from_pid, msg) == :ok
+ end)
+
+ assert log =~ "Received message on realtime:topic with payload: #{inspect(msg, pretty: true)}"
+
+ assert_receive {:encoded, %Broadcast{event: "event", payload: "not a map", topic: "realtime:topic"}}
+ assert_receive {:encoded, %Broadcast{event: "event", payload: "not a map", topic: "realtime:topic"}}
+
+ assert Agent.get(TestSerializer, & &1) == 1
+
+ assert_receive {:subscriber, :update_rate_counter}
+ assert_receive {:subscriber, :update_rate_counter}
+
+ refute_receive _any
+ end
+
test "dispatches messages to non fastlane subscribers" do
from_pid = :erlang.list_to_pid(~c'<0.2.1>')
@@ -93,5 +236,236 @@ defmodule RealtimeWeb.RealtimeChannel.MessageDispatcherTest do
# TestSerializer is not called
assert Agent.get(TestSerializer, & &1) == 0
end
+
+ test "dispatches Broadcast to V1 & V2 Serializers" do
+ parent = self()
+
+ subscriber_pid =
+ spawn(fn ->
+ loop = fn loop ->
+ receive do
+ msg ->
+ send(parent, {:subscriber, msg})
+ loop.(loop)
+ end
+ end
+
+ loop.(loop)
+ end)
+
+ from_pid = :erlang.list_to_pid(~c'<0.2.1>')
+
+ subscribers = [
+ {subscriber_pid, {:rc_fastlane, self(), V1.JSONSerializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), V1.JSONSerializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), V2Serializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), V2Serializer, "realtime:topic", :info, "tenant123", MapSet.new()}}
+ ]
+
+ msg = %Broadcast{topic: "some:other:topic", event: "event", payload: %{data: "test"}}
+
+ log =
+ capture_log(fn ->
+ assert MessageDispatcher.dispatch(subscribers, from_pid, msg) == :ok
+ end)
+
+ assert log =~ "Received message on realtime:topic with payload: #{inspect(msg, pretty: true)}"
+
+ # Receive 2 messages using V1
+ assert_receive {:socket_push, :text, message_v1}
+ assert_receive {:socket_push, :text, ^message_v1}
+
+ assert Jason.decode!(message_v1) == %{
+ "event" => "event",
+ "payload" => %{"data" => "test"},
+ "ref" => nil,
+ "topic" => "realtime:topic"
+ }
+
+ # Receive 2 messages using V2
+ assert_receive {:socket_push, :text, message_v2}
+ assert_receive {:socket_push, :text, ^message_v2}
+
+ # V2 is an array format
+ assert Jason.decode!(message_v2) == [nil, nil, "realtime:topic", "event", %{"data" => "test"}]
+
+ assert_receive {:subscriber, :update_rate_counter}
+ assert_receive {:subscriber, :update_rate_counter}
+ assert_receive {:subscriber, :update_rate_counter}
+ assert_receive {:subscriber, :update_rate_counter}
+
+ refute_receive _any
+ end
+
+ test "dispatches json UserBroadcast to V1 & V2 Serializers" do
+ parent = self()
+
+ subscriber_pid =
+ spawn(fn ->
+ loop = fn loop ->
+ receive do
+ msg ->
+ send(parent, {:subscriber, msg})
+ loop.(loop)
+ end
+ end
+
+ loop.(loop)
+ end)
+
+ from_pid = :erlang.list_to_pid(~c'<0.2.1>')
+
+ subscribers = [
+ {subscriber_pid, {:rc_fastlane, self(), V1.JSONSerializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), V1.JSONSerializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), V2Serializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), V2Serializer, "realtime:topic", :info, "tenant123", MapSet.new()}}
+ ]
+
+ user_payload = Jason.encode!(%{data: "test"})
+
+ msg = %UserBroadcast{
+ topic: "some:other:topic",
+ user_event: "event123",
+ user_payload: user_payload,
+ user_payload_encoding: :json,
+ metadata: %{"id" => "123", "replayed" => true}
+ }
+
+ log =
+ capture_log(fn ->
+ assert MessageDispatcher.dispatch(subscribers, from_pid, msg) == :ok
+ end)
+
+ assert log =~ "Received message on realtime:topic with payload: #{inspect(msg, pretty: true)}"
+
+ # Receive 2 messages using V1
+ assert_receive {:socket_push, :text, message_v1}
+ assert_receive {:socket_push, :text, ^message_v1}
+
+ assert Jason.decode!(message_v1) == %{
+ "event" => "broadcast",
+ "payload" => %{
+ "event" => "event123",
+ "meta" => %{"id" => "123", "replayed" => true},
+ "payload" => %{"data" => "test"},
+ "type" => "broadcast"
+ },
+ "ref" => nil,
+ "topic" => "realtime:topic"
+ }
+
+ # Receive 2 messages using V2
+ assert_receive {:socket_push, :binary, message_v2}
+ assert_receive {:socket_push, :binary, ^message_v2}
+
+ encoded_metadata = Jason.encode!(%{"id" => "123", "replayed" => true})
+ metadata_size = byte_size(encoded_metadata)
+
+ # binary payload structure
+ assert message_v2 ==
+ <<
+ # user broadcast = 4
+ 4::size(8),
+ # topic_size
+ 14,
+ # user_event_size
+ 8,
+ # metadata_size
+ metadata_size,
+ # json encoding
+ 1::size(8),
+ "realtime:topic",
+ "event123"
+ >> <> encoded_metadata <> user_payload
+
+ assert_receive {:subscriber, :update_rate_counter}
+ assert_receive {:subscriber, :update_rate_counter}
+ assert_receive {:subscriber, :update_rate_counter}
+ assert_receive {:subscriber, :update_rate_counter}
+
+ refute_receive _any
+ end
+
+ test "dispatches binary UserBroadcast to V1 & V2 Serializers" do
+ parent = self()
+
+ subscriber_pid =
+ spawn(fn ->
+ loop = fn loop ->
+ receive do
+ msg ->
+ send(parent, {:subscriber, msg})
+ loop.(loop)
+ end
+ end
+
+ loop.(loop)
+ end)
+
+ from_pid = :erlang.list_to_pid(~c'<0.2.1>')
+
+ subscribers = [
+ {subscriber_pid, {:rc_fastlane, self(), V1.JSONSerializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), V1.JSONSerializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), V2Serializer, "realtime:topic", :info, "tenant123", MapSet.new()}},
+ {subscriber_pid, {:rc_fastlane, self(), V2Serializer, "realtime:topic", :info, "tenant123", MapSet.new()}}
+ ]
+
+ user_payload = <<123, 456, 789>>
+
+ msg = %UserBroadcast{
+ topic: "some:other:topic",
+ user_event: "event123",
+ user_payload: user_payload,
+ user_payload_encoding: :binary,
+ metadata: %{"id" => "123", "replayed" => true}
+ }
+
+ log =
+ capture_log(fn ->
+ assert MessageDispatcher.dispatch(subscribers, from_pid, msg) == :ok
+ end)
+
+ assert log =~ "Received message on realtime:topic with payload: #{inspect(msg, pretty: true)}"
+ assert log =~ "User payload encoding is not JSON"
+
+ # Only prints once
+ assert String.split(log, "User payload encoding is not JSON") |> length() == 2
+
+ # No V1 message received as binary payloads are not supported
+ refute_receive {:socket_push, :text, _message_v1}
+
+ # Receive 2 messages using V2
+ assert_receive {:socket_push, :binary, message_v2}
+ assert_receive {:socket_push, :binary, ^message_v2}
+
+ encoded_metadata = Jason.encode!(%{"id" => "123", "replayed" => true})
+ metadata_size = byte_size(encoded_metadata)
+
+ # binary payload structure
+ assert message_v2 ==
+ <<
+ # user broadcast = 4
+ 4::size(8),
+ # topic_size
+ 14,
+ # user_event_size
+ 8,
+ # metadata_size
+ metadata_size,
+ # binary encoding
+ 0::size(8),
+ "realtime:topic",
+ "event123"
+ >> <> encoded_metadata <> user_payload
+
+ assert_receive {:subscriber, :update_rate_counter}
+ assert_receive {:subscriber, :update_rate_counter}
+ assert_receive {:subscriber, :update_rate_counter}
+ assert_receive {:subscriber, :update_rate_counter}
+
+ refute_receive _any
+ end
end
end
diff --git a/test/realtime_web/channels/realtime_channel/presence_handler_test.exs b/test/realtime_web/channels/realtime_channel/presence_handler_test.exs
index e5ecd32ad..3be1ae582 100644
--- a/test/realtime_web/channels/realtime_channel/presence_handler_test.exs
+++ b/test/realtime_web/channels/realtime_channel/presence_handler_test.exs
@@ -99,26 +99,42 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
end
end
- describe "handle/2" do
+ describe "handle/3" do
+ setup %{tenant: tenant} do
+ on_exit(fn -> :telemetry.detach(__MODULE__) end)
+
+ :telemetry.attach(
+ __MODULE__,
+ [:realtime, :tenants, :payload, :size],
+ &__MODULE__.handle_telemetry/4,
+ %{pid: self(), tenant: tenant}
+ )
+ end
+
test "with true policy and is private, user can track their presence and changes", %{
tenant: tenant,
topic: topic,
db_conn: db_conn
} do
+ external_id = tenant.external_id
key = random_string()
policies = %Policies{presence: %PresencePolicies{read: true, write: true}}
socket =
socket_fixture(tenant, topic, key, policies: policies)
- PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
+ PresenceHandler.handle(%{"event" => "track", "payload" => %{"A" => "b", "c" => "b"}}, db_conn, socket)
topic = socket.assigns.tenant_topic
assert_receive %Broadcast{topic: ^topic, event: "presence_diff", payload: %{joins: joins, leaves: %{}}}
assert Map.has_key?(joins, key)
+
+ assert_receive {:telemetry, [:realtime, :tenants, :payload, :size], %{size: 30},
+ %{tenant: ^external_id, message_type: :presence}}
end
test "when tracking already existing user, metadata updated", %{tenant: tenant, topic: topic, db_conn: db_conn} do
+ external_id = tenant.external_id
key = random_string()
policies = %Policies{presence: %PresencePolicies{read: true, write: true}}
socket = socket_fixture(tenant, topic, key, policies: policies)
@@ -134,19 +150,87 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
assert_receive %Broadcast{topic: ^topic, event: "presence_diff", payload: %{joins: joins, leaves: %{}}}
assert Map.has_key?(joins, key)
- refute_receive :_
+
+ assert_receive {:telemetry, [:realtime, :tenants, :payload, :size], %{size: 6},
+ %{tenant: ^external_id, message_type: :presence}}
+
+ assert_receive {:telemetry, [:realtime, :tenants, :payload, :size], %{size: 55},
+ %{tenant: ^external_id, message_type: :presence}}
+
+ refute_receive _
+ end
+
+ test "tracking the same payload does nothing", %{tenant: tenant, topic: topic, db_conn: db_conn} do
+ external_id = tenant.external_id
+ key = random_string()
+ policies = %Policies{presence: %PresencePolicies{read: true, write: true}}
+ socket = socket_fixture(tenant, topic, key, policies: policies)
+
+ assert {:ok, socket} = PresenceHandler.handle(%{"event" => "track", "payload" => %{"a" => "b"}}, db_conn, socket)
+
+ assert_receive {:telemetry, [:realtime, :tenants, :payload, :size], %{size: 18},
+ %{tenant: ^external_id, message_type: :presence}}
+
+ topic = socket.assigns.tenant_topic
+ assert_receive %Broadcast{topic: ^topic, event: "presence_diff", payload: %{joins: joins, leaves: %{}}}
+ assert Map.has_key?(joins, key)
+
+ assert {:ok, _socket} =
+ PresenceHandler.handle(%{"event" => "track", "payload" => %{"a" => "b"}}, db_conn, socket)
+
+ refute_receive _
+ end
+
+ test "tracking, untracking and then tracking the same payload emit events", context do
+ %{tenant: tenant, topic: topic, db_conn: db_conn} = context
+ external_id = tenant.external_id
+ key = random_string()
+ policies = %Policies{presence: %PresencePolicies{read: true, write: true}}
+ socket = socket_fixture(tenant, topic, key, policies: policies)
+
+ assert {:ok, socket} = PresenceHandler.handle(%{"event" => "track", "payload" => %{"a" => "b"}}, db_conn, socket)
+ assert socket.assigns.presence_track_payload == %{"a" => "b"}
+
+ assert_receive {:telemetry, [:realtime, :tenants, :payload, :size], %{size: 18},
+ %{tenant: ^external_id, message_type: :presence}}
+
+ topic = socket.assigns.tenant_topic
+ assert_receive %Broadcast{topic: ^topic, event: "presence_diff", payload: %{joins: joins, leaves: %{}}}
+ assert %{^key => %{metas: [%{:phx_ref => _, "a" => "b"}]}} = joins
+
+ assert {:ok, socket} = PresenceHandler.handle(%{"event" => "untrack"}, db_conn, socket)
+ assert socket.assigns.presence_track_payload == nil
+
+ assert_receive %Broadcast{topic: ^topic, event: "presence_diff", payload: %{joins: %{}, leaves: leaves}}
+ assert %{^key => %{metas: [%{:phx_ref => _, "a" => "b"}]}} = leaves
+
+ assert {:ok, socket} = PresenceHandler.handle(%{"event" => "track", "payload" => %{"a" => "b"}}, db_conn, socket)
+
+ assert socket.assigns.presence_track_payload == %{"a" => "b"}
+
+ assert_receive %Broadcast{topic: ^topic, event: "presence_diff", payload: %{joins: joins, leaves: %{}}}
+ assert %{^key => %{metas: [%{:phx_ref => _, "a" => "b"}]}} = joins
+
+ assert_receive {:telemetry, [:realtime, :tenants, :payload, :size], %{size: 18},
+ %{tenant: ^external_id, message_type: :presence}}
+
+ refute_receive _
end
test "with false policy and is public, user can track their presence and changes", %{tenant: tenant, topic: topic} do
+ external_id = tenant.external_id
key = random_string()
policies = %Policies{presence: %PresencePolicies{read: false, write: false}}
socket = socket_fixture(tenant, topic, key, policies: policies, private?: false)
- assert {:ok, _socket} = PresenceHandler.handle(%{"event" => "track"}, socket)
+ assert {:ok, _socket} = PresenceHandler.handle(%{"event" => "track"}, nil, socket)
topic = socket.assigns.tenant_topic
assert_receive %Broadcast{topic: ^topic, event: "presence_diff", payload: %{joins: joins, leaves: %{}}}
assert Map.has_key?(joins, key)
+
+ assert_receive {:telemetry, [:realtime, :tenants, :payload, :size], %{size: 6},
+ %{tenant: ^external_id, message_type: :presence}}
end
test "user can untrack when they want", %{tenant: tenant, topic: topic, db_conn: db_conn} do
@@ -174,7 +258,9 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
reject(&Authorization.get_write_authorizations/3)
key = random_string()
- socket = socket_fixture(tenant, topic, key)
+ # Use high client rate limit to test tenant-level rate limiting
+ client_rate_limit = %{max_calls: 1000, window_ms: 60_000, counter: 0, reset_at: nil}
+ socket = socket_fixture(tenant, topic, key, client_rate_limit: client_rate_limit)
topic = socket.assigns.tenant_topic
for _ <- 1..300, reduce: socket do
@@ -221,7 +307,12 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
key = random_string()
policies = %Policies{broadcast: %BroadcastPolicies{read: false}}
- socket = socket_fixture(tenant, topic, key, policies: policies, private?: false)
+ # Use high client rate limit to test tenant-level rate limiting
+ client_rate_limit = %{max_calls: 1000, window_ms: 60_000, counter: 0, reset_at: nil}
+
+ socket =
+ socket_fixture(tenant, topic, key, policies: policies, private?: false, client_rate_limit: client_rate_limit)
+
topic = socket.assigns.tenant_topic
for _ <- 1..300, reduce: socket do
@@ -229,6 +320,7 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
assert {:ok, socket} =
PresenceHandler.handle(
%{"event" => "track", "payload" => %{"metadata" => random_string()}},
+ nil,
socket
)
@@ -238,7 +330,13 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
end
test "logs out non recognized events" do
- socket = %Phoenix.Socket{joined: true}
+ tenant = tenant_fixture()
+
+ socket =
+ socket_fixture(tenant, "topic", "presence_key",
+ private?: false,
+ client_rate_limit: %{max_calls: 1000, window_ms: 60_000, counter: 0, reset_at: nil}
+ )
log =
capture_log(fn ->
@@ -248,7 +346,7 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
assert log =~ "UnknownPresenceEvent"
end
- test "socket with presence enabled false will ignore presence events in public channel", %{
+ test "socket with presence enabled false will ignore non-track presence events in public channel", %{
tenant: tenant,
topic: topic
} do
@@ -256,12 +354,12 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
policies = %Policies{presence: %PresencePolicies{read: true, write: true}}
socket = socket_fixture(tenant, topic, key, policies: policies, private?: false, enabled?: false)
- assert {:ok, _socket} = PresenceHandler.handle(%{"event" => "track"}, socket)
+ assert {:ok, _socket} = PresenceHandler.handle(%{"event" => "untrack"}, nil, socket)
topic = socket.assigns.tenant_topic
refute_receive %Broadcast{topic: ^topic, event: "presence_diff"}
end
- test "socket with presence enabled false will ignore presence events in private channel", %{
+ test "socket with presence enabled false will ignore non-track presence events in private channel", %{
tenant: tenant,
topic: topic,
db_conn: db_conn
@@ -270,11 +368,80 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
policies = %Policies{presence: %PresencePolicies{read: true, write: true}}
socket = socket_fixture(tenant, topic, key, policies: policies, private?: false, enabled?: false)
- assert {:ok, _socket} = PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
+ assert {:ok, _socket} = PresenceHandler.handle(%{"event" => "untrack"}, db_conn, socket)
topic = socket.assigns.tenant_topic
refute_receive %Broadcast{topic: ^topic, event: "presence_diff"}
end
+ test "socket with presence disabled will enable presence on track message for public channel", %{
+ tenant: tenant,
+ topic: topic
+ } do
+ key = random_string()
+ policies = %Policies{presence: %PresencePolicies{read: true, write: true}}
+ socket = socket_fixture(tenant, topic, key, policies: policies, private?: false, enabled?: false)
+
+ refute socket.assigns.presence_enabled?
+
+ assert {:ok, updated_socket} = PresenceHandler.handle(%{"event" => "track"}, nil, socket)
+
+ assert updated_socket.assigns.presence_enabled?
+ topic = socket.assigns.tenant_topic
+ assert_receive %Broadcast{topic: ^topic, event: "presence_diff", payload: %{joins: joins, leaves: %{}}}
+ assert Map.has_key?(joins, key)
+ end
+
+ test "socket with presence disabled will enable presence on track message for private channel", %{
+ tenant: tenant,
+ topic: topic,
+ db_conn: db_conn
+ } do
+ key = random_string()
+ policies = %Policies{presence: %PresencePolicies{read: true, write: true}}
+ socket = socket_fixture(tenant, topic, key, policies: policies, private?: true, enabled?: false)
+
+ refute socket.assigns.presence_enabled?
+
+ assert {:ok, updated_socket} = PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
+
+ assert updated_socket.assigns.presence_enabled?
+ topic = socket.assigns.tenant_topic
+ assert_receive %Broadcast{topic: ^topic, event: "presence_diff", payload: %{joins: joins, leaves: %{}}}
+ assert Map.has_key?(joins, key)
+ end
+
+ test "socket with presence disabled will not enable presence on untrack message", %{
+ tenant: tenant,
+ topic: topic,
+ db_conn: db_conn
+ } do
+ key = random_string()
+ policies = %Policies{presence: %PresencePolicies{read: true, write: true}}
+ socket = socket_fixture(tenant, topic, key, policies: policies, enabled?: false)
+
+ refute socket.assigns.presence_enabled?
+
+ assert {:ok, updated_socket} = PresenceHandler.handle(%{"event" => "untrack"}, db_conn, socket)
+
+ refute updated_socket.assigns.presence_enabled?
+ topic = socket.assigns.tenant_topic
+ refute_receive %Broadcast{topic: ^topic, event: "presence_diff"}
+ end
+
+ test "socket with presence disabled will not enable presence on unknown event", %{
+ tenant: tenant,
+ topic: topic,
+ db_conn: db_conn
+ } do
+ key = random_string()
+ policies = %Policies{presence: %PresencePolicies{read: true, write: true}}
+ socket = socket_fixture(tenant, topic, key, policies: policies, enabled?: false)
+
+ refute socket.assigns.presence_enabled?
+
+ assert {:error, :unknown_presence_event} = PresenceHandler.handle(%{"event" => "unknown"}, db_conn, socket)
+ end
+
@tag policies: [:authenticated_read_broadcast_and_presence, :authenticated_write_broadcast_and_presence]
test "rate limit is checked on private channel", %{tenant: tenant, topic: topic, db_conn: db_conn} do
key = random_string()
@@ -284,7 +451,8 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
log =
capture_log(fn ->
for _ <- 1..300, do: PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
- Process.sleep(1100)
+
+ {:ok, _} = RateCounterHelper.tick!(Tenants.presence_events_per_second_rate(tenant))
assert {:error, :rate_limit_exceeded} = PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
end)
@@ -299,13 +467,25 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
log =
capture_log(fn ->
for _ <- 1..300, do: PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
- Process.sleep(1100)
+
+ {:ok, _} = RateCounterHelper.tick!(Tenants.presence_events_per_second_rate(tenant))
assert {:error, :rate_limit_exceeded} = PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
end)
assert log =~ "PresenceRateLimitReached"
end
+
+ test "fails on high payload size", %{tenant: tenant, topic: topic, db_conn: db_conn} do
+ key = random_string()
+ socket = socket_fixture(tenant, topic, key, private?: false)
+ payload_size = tenant.max_payload_size_in_kb * 1000
+
+ payload = %{content: random_string(payload_size)}
+
+ assert {:error, :payload_size_exceeded} =
+ PresenceHandler.handle(%{"event" => "track", "payload" => payload}, db_conn, socket)
+ end
end
describe "sync/1" do
@@ -356,7 +536,8 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
log =
capture_log(fn ->
for _ <- 1..300, do: PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
- Process.sleep(1100)
+
+ {:ok, _} = RateCounterHelper.tick!(Tenants.presence_events_per_second_rate(tenant))
assert {:error, :rate_limit_exceeded} = PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
end)
@@ -372,7 +553,8 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
log =
capture_log(fn ->
for _ <- 1..300, do: PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
- Process.sleep(1100)
+
+ {:ok, _} = RateCounterHelper.tick!(Tenants.presence_events_per_second_rate(tenant))
assert {:error, :rate_limit_exceeded} = PresenceHandler.handle(%{"event" => "track"}, db_conn, socket)
end)
@@ -381,10 +563,186 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
end
end
+ describe "per-client rate limiting" do
+ test "allows calls under the limit", %{tenant: tenant, topic: topic} do
+ client_rate_limit = %{max_calls: 10, window_ms: 60_000, counter: 0, reset_at: nil}
+ socket = socket_fixture(tenant, topic, random_string(), private?: false, client_rate_limit: client_rate_limit)
+
+ # Make 9 calls (under limit of 10)
+ socket =
+ Enum.reduce(1..9, socket, fn _, acc_socket ->
+ {:ok, updated_socket} =
+ PresenceHandler.handle(%{"event" => "track", "payload" => %{"call" => random_string()}}, nil, acc_socket)
+
+ updated_socket
+ end)
+
+ assert %{counter: 9, max_calls: 10, window_ms: 60000, reset_at: _} = socket.assigns.presence_client_rate_limit
+
+ # 10th call should still work
+ assert {:ok, socket} =
+ PresenceHandler.handle(%{"event" => "track", "payload" => %{"call" => random_string()}}, nil, socket)
+
+ assert %{counter: 10, max_calls: 10, window_ms: 60000, reset_at: _} = socket.assigns.presence_client_rate_limit
+ end
+
+ test "blocks calls over the limit", %{tenant: tenant, topic: topic} do
+ client_rate_limit = %{max_calls: 10, window_ms: 60_000, counter: 0, reset_at: nil}
+ socket = socket_fixture(tenant, topic, random_string(), private?: false, client_rate_limit: client_rate_limit)
+
+ # Make 10 calls (at limit)
+ socket =
+ Enum.reduce(1..10, socket, fn _, acc_socket ->
+ {:ok, updated_socket} =
+ PresenceHandler.handle(%{"event" => "track", "payload" => %{"call" => random_string()}}, nil, acc_socket)
+
+ updated_socket
+ end)
+
+ # 11th call should fail
+ assert {:error, :client_rate_limit_exceeded} =
+ PresenceHandler.handle(%{"event" => "track", "payload" => %{"call" => random_string()}}, nil, socket)
+
+ assert %{counter: 10, max_calls: 10, window_ms: 60000, reset_at: _} = socket.assigns.presence_client_rate_limit
+ end
+
+ test "rate limits work independently per socket", %{tenant: tenant, topic: topic} do
+ client_rate_limit = %{max_calls: 10, window_ms: 60_000, counter: 0, reset_at: nil}
+ socket1 = socket_fixture(tenant, topic, random_string(), private?: false, client_rate_limit: client_rate_limit)
+ socket2 = socket_fixture(tenant, topic, random_string(), private?: false, client_rate_limit: client_rate_limit)
+
+ socket1 =
+ Enum.reduce(1..10, socket1, fn _, acc_socket ->
+ {:ok, updated_socket} =
+ PresenceHandler.handle(%{"event" => "track", "payload" => %{"call" => random_string()}}, nil, acc_socket)
+
+ updated_socket
+ end)
+
+ assert {:error, :client_rate_limit_exceeded} =
+ PresenceHandler.handle(%{"event" => "track", "payload" => %{"call" => random_string()}}, nil, socket1)
+
+ # socket2 should still work (independent limit)
+ assert {:ok, _socket} =
+ PresenceHandler.handle(%{"event" => "track", "payload" => %{"call" => random_string()}}, nil, socket2)
+ end
+
+ test "tenant override for max_client_presence_events_per_window is applied", %{tenant: tenant, topic: topic} do
+ {:ok, updated_tenant} =
+ Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{max_client_presence_events_per_window: 3})
+
+ Realtime.Tenants.Cache.update_cache(updated_tenant)
+
+ socket = socket_fixture(updated_tenant, topic, random_string(), private?: false)
+
+ assert %{max_calls: 3} = socket.assigns.presence_client_rate_limit
+
+ socket =
+ Enum.reduce(1..3, socket, fn _, acc_socket ->
+ {:ok, updated_socket} =
+ PresenceHandler.handle(%{"event" => "track", "payload" => %{"call" => random_string()}}, nil, acc_socket)
+
+ updated_socket
+ end)
+
+ assert {:error, :client_rate_limit_exceeded} =
+ PresenceHandler.handle(%{"event" => "track", "payload" => %{"call" => random_string()}}, nil, socket)
+ end
+
+ test "falls back to env config when tenant override is nil", %{tenant: tenant, topic: topic} do
+ assert is_nil(tenant.max_client_presence_events_per_window)
+ assert is_nil(tenant.client_presence_window_ms)
+
+ config = Application.get_env(:realtime, :client_presence_rate_limit)
+ expected_max_calls = config[:max_calls]
+ expected_window_ms = config[:window_ms]
+ socket = socket_fixture(tenant, topic, random_string(), private?: false)
+
+ assert %{max_calls: ^expected_max_calls, window_ms: ^expected_window_ms} =
+ socket.assigns.presence_client_rate_limit
+ end
+
+ test "tenant override for client_presence_window_ms is applied", %{tenant: tenant, topic: topic} do
+ {:ok, updated_tenant} =
+ Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{client_presence_window_ms: 5_000})
+
+ Realtime.Tenants.Cache.update_cache(updated_tenant)
+
+ socket = socket_fixture(updated_tenant, topic, random_string(), private?: false)
+
+ assert %{window_ms: 5_000} = socket.assigns.presence_client_rate_limit
+ end
+
+ test "tenant override for client_presence_window_ms respects the window", %{tenant: tenant, topic: topic} do
+ {:ok, updated_tenant} =
+ Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{
+ max_client_presence_events_per_window: 3,
+ client_presence_window_ms: 100
+ })
+
+ Realtime.Tenants.Cache.update_cache(updated_tenant)
+
+ socket = socket_fixture(updated_tenant, topic, random_string(), private?: false)
+
+ assert %{max_calls: 3, window_ms: 100} = socket.assigns.presence_client_rate_limit
+
+ socket =
+ Enum.reduce(1..3, socket, fn _, acc_socket ->
+ {:ok, updated_socket} =
+ PresenceHandler.handle(%{"event" => "track", "payload" => %{"call" => random_string()}}, nil, acc_socket)
+
+ updated_socket
+ end)
+
+ assert {:error, :client_rate_limit_exceeded} =
+ PresenceHandler.handle(%{"event" => "track", "payload" => %{"call" => random_string()}}, nil, socket)
+
+ Process.sleep(101)
+
+ assert {:ok, _socket} =
+ PresenceHandler.handle(%{"event" => "track", "payload" => %{"call" => random_string()}}, nil, socket)
+ end
+
+ test "rate limit resets after window expires", %{tenant: tenant, topic: topic} do
+      # Create a socket, then override its rate-limit config below with a very short window (100ms)
+ socket = socket_fixture(tenant, topic, random_string(), private?: false)
+
+ # Override the window to be very short for testing
+ short_window_config = %{
+ max_calls: 3,
+ window_ms: 100,
+ counter: 0,
+ reset_at: nil
+ }
+
+ socket = %{socket | assigns: Map.put(socket.assigns, :presence_client_rate_limit, short_window_config)}
+
+ # Make 3 calls (at limit)
+ socket =
+ Enum.reduce(1..3, socket, fn _, acc_socket ->
+ {:ok, updated_socket} =
+ PresenceHandler.handle(%{"event" => "track", "payload" => %{"call" => random_string()}}, nil, acc_socket)
+
+ updated_socket
+ end)
+
+ # 4th call should fail
+ assert {:error, :client_rate_limit_exceeded} =
+ PresenceHandler.handle(%{"event" => "track", "payload" => %{"call" => random_string()}}, nil, socket)
+
+ # Wait for window to expire
+ Process.sleep(101)
+
+ # Should be able to call again after window reset
+ assert {:ok, _socket} =
+ PresenceHandler.handle(%{"event" => "track", "payload" => %{"call" => random_string()}}, nil, socket)
+ end
+ end
+
defp initiate_tenant(context) do
tenant = Containers.checkout_tenant(run_migrations: true)
# Warm cache to avoid Cachex and Ecto.Sandbox ownership issues
- Cachex.put!(Realtime.Tenants.Cache, {{:get_tenant_by_external_id, 1}, [tenant.external_id]}, {:cached, tenant})
+ Realtime.Tenants.Cache.update_cache(tenant)
{:ok, db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
assert Connect.ready?(tenant.external_id)
@@ -427,6 +785,34 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
RateCounter.new(rate)
+ client_rate_limit_override = Keyword.get(opts, :client_rate_limit)
+
+ client_rate_limit =
+ if client_rate_limit_override do
+ client_rate_limit_override
+ else
+ config = Application.get_env(:realtime, :client_presence_rate_limit, max_calls: 10, window_ms: 60_000)
+
+ max_calls =
+ case tenant.max_client_presence_events_per_window do
+ value when is_integer(value) and value > 0 -> value
+ _ -> config[:max_calls]
+ end
+
+ window_ms =
+ case tenant.client_presence_window_ms do
+ value when is_integer(value) and value > 0 -> value
+ _ -> config[:window_ms]
+ end
+
+ %{
+ max_calls: max_calls,
+ window_ms: window_ms,
+ counter: 0,
+ reset_at: nil
+ }
+ end
+
%Phoenix.Socket{
joined: true,
topic: "realtime:#{topic}",
@@ -438,6 +824,7 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
policies: policies,
authorization_context: authorization_context,
presence_rate_counter: rate,
+ presence_client_rate_limit: client_rate_limit,
private?: private?,
presence_key: presence_key,
presence_enabled?: enabled?,
@@ -447,4 +834,10 @@ defmodule RealtimeWeb.RealtimeChannel.PresenceHandlerTest do
}
}
end
+
+ def handle_telemetry(event, measures, metadata, %{pid: pid, tenant: tenant}) do
+ if metadata[:tenant] == tenant.external_id do
+ send(pid, {:telemetry, event, measures, metadata})
+ end
+ end
end
diff --git a/test/realtime_web/channels/realtime_channel/tracker_test.exs b/test/realtime_web/channels/realtime_channel/tracker_test.exs
index 2590b9597..7137256c1 100644
--- a/test/realtime_web/channels/realtime_channel/tracker_test.exs
+++ b/test/realtime_web/channels/realtime_channel/tracker_test.exs
@@ -1,5 +1,7 @@
defmodule RealtimeWeb.RealtimeChannel.TrackerTest do
- use Realtime.DataCase
+  # The Tracker kills websockets that have no open channels, which can
+  # interfere with concurrently running tests, so run this suite serially.
+ use Realtime.DataCase, async: false
alias RealtimeWeb.RealtimeChannel.Tracker
setup do
diff --git a/test/realtime_web/channels/realtime_channel_test.exs b/test/realtime_web/channels/realtime_channel_test.exs
index 2dff83da3..631b85e36 100644
--- a/test/realtime_web/channels/realtime_channel_test.exs
+++ b/test/realtime_web/channels/realtime_channel_test.exs
@@ -1,61 +1,663 @@
defmodule RealtimeWeb.RealtimeChannelTest do
- # Can't run async true because under the hood Cachex is used and it doesn't see Ecto Sandbox
- use RealtimeWeb.ChannelCase, async: false
+ use RealtimeWeb.ChannelCase, async: true
use Mimic
import ExUnit.CaptureLog
- alias Phoenix.Socket
alias Phoenix.Channel.Server
+ alias Phoenix.Socket
alias Realtime.Tenants.Authorization
alias Realtime.Tenants.Connect
alias Realtime.RateCounter
alias RealtimeWeb.UserSocket
- @default_limits %{
- max_concurrent_users: 200,
- max_events_per_second: 100,
- max_joins_per_second: 100,
- max_channels_per_client: 100,
- max_bytes_per_second: 100_000
- }
-
setup do
tenant = Containers.checkout_tenant(run_migrations: true)
+ {:ok, db_conn} = Realtime.Database.connect(tenant, "realtime_test", :stop)
+ Integrations.setup_postgres_changes(db_conn)
+ GenServer.stop(db_conn)
+ Realtime.Tenants.Cache.update_cache(tenant)
{:ok, tenant: tenant}
end
setup :rls_context
+ describe "process flags" do
+ test "max heap size is set for both transport and channel processes", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
+
+ assert Process.info(socket.transport_pid, :max_heap_size) ==
+ {:max_heap_size, %{error_logger: true, include_shared_binaries: false, kill: true, size: 6_250_000}}
+
+ assert {:ok, _, socket} = subscribe_and_join(socket, "realtime:test", %{})
+
+ assert Process.info(socket.channel_pid, :max_heap_size) ==
+ {:max_heap_size, %{error_logger: true, include_shared_binaries: false, kill: true, size: 6_250_000}}
+ end
+
+ # We don't test the socket because on unit tests Phoenix is not setting the fullsweep_after config
+ test "fullsweep_after is set on channel process", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
+
+ assert {:ok, _, socket} = subscribe_and_join(socket, "realtime:test", %{})
+
+ assert Process.info(socket.channel_pid, :fullsweep_after) == {:fullsweep_after, 20}
+ end
+ end
+
+ describe "postgres changes" do
+ test "subscribes to inserts", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
+
+ config = %{
+ "presence" => %{"enabled" => false},
+ "postgres_changes" => [%{"event" => "INSERT", "schema" => "public", "table" => "test"}]
+ }
+
+ assert {:ok, reply, _socket} = subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ assert %{postgres_changes: [%{:id => sub_id, "event" => "INSERT", "schema" => "public", "table" => "test"}]} =
+ reply
+
+ assert_push "system",
+ %{message: "Subscribed to PostgreSQL", status: "ok", extension: "postgres_changes", channel: "test"},
+ 5000
+
+ {:ok, conn} = Connect.lookup_or_start_connection(tenant.external_id)
+ %{rows: [[id]]} = Postgrex.query!(conn, "insert into test (details) values ('test') returning id", [])
+
+ assert_push "postgres_changes", %{data: data, ids: [^sub_id]}, 500
+
+ # we encode and decode because the data is a Jason.Fragment
+ assert %{
+ "table" => "test",
+ "type" => "INSERT",
+ "record" => %{"details" => "test", "id" => ^id, "binary_data" => nil},
+ "columns" => [
+ %{"name" => "id", "type" => "int4"},
+ %{"name" => "details", "type" => "text"},
+ %{"name" => "binary_data", "type" => "bytea"}
+ ],
+ "errors" => nil,
+ "schema" => "public",
+ "commit_timestamp" => _
+ } = Jason.encode!(data) |> Jason.decode!()
+
+ refute_receive %Socket.Message{}
+ refute_receive %Socket.Reply{}
+ end
+
+ test "multiple subscriptions", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
+
+ config = %{
+ "presence" => %{"enabled" => false},
+ "postgres_changes" => [
+ %{"event" => "INSERT", "schema" => "public", "table" => "test"},
+ %{"event" => "DELETE", "schema" => "public", "table" => "test"}
+ ]
+ }
+
+ assert {:ok, reply, _socket} = subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ assert %{
+ postgres_changes: [
+ %{:id => insert_sub_id, "event" => "INSERT", "schema" => "public", "table" => "test"},
+ %{
+ :id => delete_sub_id,
+ "event" => "DELETE",
+ "schema" => "public",
+ "table" => "test"
+ }
+ ]
+ } =
+ reply
+
+ assert_push "system",
+ %{message: "Subscribed to PostgreSQL", status: "ok", extension: "postgres_changes", channel: "test"},
+ 5000
+
+ {:ok, conn} = Connect.lookup_or_start_connection(tenant.external_id)
+ # Insert, update and delete but update should not be received
+ %{rows: [[id]]} = Postgrex.query!(conn, "insert into test (details) values ('test') returning id", [])
+ Postgrex.query!(conn, "update test set details = 'test' where id = $1", [id])
+ Postgrex.query!(conn, "delete from test where id = $1", [id])
+
+ assert_push "postgres_changes", %{data: data, ids: [^insert_sub_id]}, 500
+
+ # we encode and decode because the data is a Jason.Fragment
+ assert %{
+ "table" => "test",
+ "type" => "INSERT",
+ "record" => %{"details" => "test", "id" => ^id},
+ "columns" => [
+ %{"name" => "id", "type" => "int4"},
+ %{"name" => "details", "type" => "text"},
+ %{"name" => "binary_data", "type" => "bytea"}
+ ],
+ "errors" => nil,
+ "schema" => "public",
+ "commit_timestamp" => _
+ } = Jason.encode!(data) |> Jason.decode!()
+
+ assert_push "postgres_changes", %{data: data, ids: [^delete_sub_id]}, 500
+
+ # we encode and decode because the data is a Jason.Fragment
+ assert %{
+ "table" => "test",
+ "type" => "DELETE",
+ "old_record" => %{"id" => ^id},
+ "columns" => [
+ %{"name" => "id", "type" => "int4"},
+ %{"name" => "details", "type" => "text"},
+ %{"name" => "binary_data", "type" => "bytea"}
+ ],
+ "errors" => nil,
+ "schema" => "public",
+ "commit_timestamp" => _
+ } = Jason.encode!(data) |> Jason.decode!()
+
+ refute_receive _any
+ end
+
+ test "malformed subscription params", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
+
+ config = %{
+ "presence" => %{"enabled" => false},
+ "postgres_changes" => [%{"event" => "*", "schema" => "public", "table" => "test", "filter" => "wrong"}]
+ }
+
+ assert {:ok, reply, socket} = subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ assert %{postgres_changes: [%{"event" => "*", "schema" => "public", "table" => "test"}]} = reply
+
+ assert_push "system",
+ %{
+ message: "Error parsing `filter` params: [\"wrong\"]",
+ status: "error",
+ extension: "postgres_changes",
+ channel: "test"
+ },
+ 3000
+
+ socket = Server.socket(socket.channel_pid)
+
+ # It won't re-subscribe
+ assert socket.assigns.pg_sub_ref == nil
+ end
+
+ test "invalid subscription table does not exist", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
+
+ config = %{
+ "presence" => %{"enabled" => false},
+ "postgres_changes" => [%{"event" => "*", "schema" => "public", "table" => "doesnotexist"}]
+ }
+
+ assert {:ok, reply, socket} = subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ assert %{postgres_changes: [%{"event" => "*", "schema" => "public", "table" => "doesnotexist"}]} = reply
+
+ assert_push "system",
+ %{
+ message:
+ "Unable to subscribe to changes with given parameters. Please check Realtime is enabled for the given connect parameters: [event: *, schema: public, table: doesnotexist, filters: []]",
+ status: "error",
+ extension: "postgres_changes",
+ channel: "test"
+ },
+ 5000
+
+ socket = Server.socket(socket.channel_pid)
+
+ # It won't re-subscribe
+ assert socket.assigns.pg_sub_ref == nil
+ end
+
+ test "invalid subscription column does not exist", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
+
+ config = %{
+ "presence" => %{"enabled" => false},
+ "postgres_changes" => [
+ %{"event" => "*", "schema" => "public", "table" => "test", "filter" => "notacolumn=eq.123"}
+ ]
+ }
+
+ assert {:ok, reply, socket} = subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ assert %{postgres_changes: [%{"event" => "*", "schema" => "public", "table" => "test"}]} = reply
+
+ assert_push "system",
+ %{
+ message:
+ "Unable to subscribe to changes with given parameters. An exception happened so please check your connect parameters: [event: *, schema: public, table: test, filters: [{\"notacolumn\", \"eq\", \"123\"}]]. Exception: ERROR P0001 (raise_exception) invalid column for filter notacolumn",
+ status: "error",
+ extension: "postgres_changes",
+ channel: "test"
+ },
+ 5000
+
+ socket = Server.socket(socket.channel_pid)
+
+ # It won't re-subscribe
+ assert socket.assigns.pg_sub_ref == nil
+ end
+
+ test "connection error", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
+
+ config = %{
+ "presence" => %{"enabled" => false},
+ "postgres_changes" => [%{"event" => "*", "schema" => "public", "table" => "test"}]
+ }
+
+ conn = spawn(fn -> :ok end)
+ # Let's set the subscription manager conn to be a pid that is no more
+
+ assert {:ok, reply, socket} = subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ assert %{postgres_changes: [%{"event" => "*", "schema" => "public", "table" => "test"}]} = reply
+
+ assert_push "system",
+ %{
+ message: "Subscribed to PostgreSQL",
+ status: "ok",
+ extension: "postgres_changes",
+ channel: "test"
+ },
+ 5000
+
+ {:ok, manager_pid, _conn} = Extensions.PostgresCdcRls.get_manager_conn(tenant.external_id)
+ Extensions.PostgresCdcRls.update_meta(tenant.external_id, manager_pid, conn)
+
+ assert {:ok, _reply, socket} = subscribe_and_join(socket, "realtime:test_fail", %{"config" => config})
+
+ assert_push "system",
+ %{message: message, status: "error", extension: "postgres_changes", channel: "test_fail"},
+ 5000
+
+ assert message =~ "{:error, \"Too many database timeouts\"}"
+ socket = Server.socket(socket.channel_pid)
+
+ # It will try again in the future
+ assert socket.assigns.pg_sub_ref != nil
+ end
+ end
+
+ describe "broadcast" do
+ @describetag policies: [:authenticated_all_topic_read]
+
+ test "broadcast map payload", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
+
+ config = %{
+ "presence" => %{"enabled" => false},
+ "broadcast" => %{"self" => true}
+ }
+
+ assert {:ok, _, socket} = subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ push(socket, "broadcast", %{"event" => "my_event", "payload" => %{"hello" => "world"}})
+
+ assert_receive %Phoenix.Socket.Message{
+ topic: "realtime:test",
+ event: "broadcast",
+ payload: %{"event" => "my_event", "payload" => %{"hello" => "world"}}
+ }
+ end
+
+ test "broadcast non-map payload", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
+
+ config = %{
+ "presence" => %{"enabled" => false},
+ "broadcast" => %{"self" => true}
+ }
+
+ assert {:ok, _, socket} = subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ push(socket, "broadcast", "not a map")
+
+ assert_receive %Phoenix.Socket.Message{
+ topic: "realtime:test",
+ event: "broadcast",
+ payload: "not a map"
+ }
+ end
+
+ test "wrong replay params", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{"log_level" => "warning"}, conn_opts(tenant, jwt))
+
+ config = %{
+ "private" => true,
+ "broadcast" => %{
+ "replay" => %{"limit" => "not a number", "since" => :erlang.system_time(:millisecond) - 5 * 60000}
+ }
+ }
+
+ assert {:error, %{reason: "UnableToReplayMessages: Replay params are not valid"}} =
+ subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ config = %{
+ "private" => true,
+ "broadcast" => %{
+ "replay" => %{"limit" => 1, "since" => "not a number"}
+ }
+ }
+
+ assert {:error, %{reason: "UnableToReplayMessages: Replay params are not valid"}} =
+ subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ config = %{
+ "private" => true,
+ "broadcast" => %{
+ "replay" => %{}
+ }
+ }
+
+ assert {:error, %{reason: "UnableToReplayMessages: Replay params are not valid"}} =
+ subscribe_and_join(socket, "realtime:test", %{"config" => config})
+ end
+
+ test "failure to replay", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{"log_level" => "warning"}, conn_opts(tenant, jwt))
+
+ config = %{
+ "private" => true,
+ "broadcast" => %{
+ "replay" => %{"limit" => 12, "since" => :erlang.system_time(:millisecond) - 5 * 60000}
+ }
+ }
+
+ Authorization
+ |> expect(:get_read_authorizations, fn _, _, _, _ ->
+ {:ok,
+ %Authorization.Policies{
+ broadcast: %Authorization.Policies.BroadcastPolicies{read: true, write: nil}
+ }}
+ end)
+
+ # Broken database connection
+ conn = spawn(fn -> :ok end)
+ Connect.lookup_or_start_connection(tenant.external_id)
+ {:ok, _} = :syn.update_registry(Connect, tenant.external_id, fn _pid, meta -> %{meta | conn: conn} end)
+
+ assert {:error, %{reason: "UnableToReplayMessages: Realtime was unable to replay messages"}} =
+ subscribe_and_join(socket, "realtime:test", %{"config" => config})
+ end
+
+ test "replay messages on public topic not allowed", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{"log_level" => "warning"}, conn_opts(tenant, jwt))
+
+ config = %{
+ "presence" => %{"enabled" => false},
+ "broadcast" => %{"replay" => %{"limit" => 2, "since" => :erlang.system_time(:millisecond) - 5 * 60000}}
+ }
+
+ assert {
+ :error,
+ %{reason: "UnableToReplayMessages: Replay is not allowed for public channels"}
+ } = subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ refute_receive %Socket.Message{}
+ refute_receive %Socket.Reply{}
+ end
+
+ @tag policies: [:authenticated_all_topic_read]
+ test "replay messages on private topic", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{"log_level" => "warning"}, conn_opts(tenant, jwt))
+
+ # Old message
+ message_fixture(tenant, %{
+ "private" => true,
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-1, :day),
+ "event" => "old",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "payload" => %{"value" => "old"}
+ })
+
+ %{id: message1_id} =
+ message_fixture(tenant, %{
+ "private" => true,
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-1, :minute),
+ "event" => "first",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "payload" => %{"value" => "first"}
+ })
+
+ %{id: message2_id} =
+ message_fixture(tenant, %{
+ "private" => true,
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-2, :minute),
+ "event" => "second",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "payload" => %{"value" => "second"}
+ })
+
+ # This one should not be received because of the limit
+ message_fixture(tenant, %{
+ "private" => true,
+ "inserted_at" => NaiveDateTime.utc_now() |> NaiveDateTime.add(-3, :minute),
+ "event" => "third",
+ "extension" => "broadcast",
+ "topic" => "test",
+ "payload" => %{"value" => "third"}
+ })
+
+ config = %{
+ "private" => true,
+ "presence" => %{"enabled" => false},
+ "broadcast" => %{"replay" => %{"limit" => 2, "since" => :erlang.system_time(:millisecond) - 5 * 60000}}
+ }
+
+ assert {:ok, _, %Socket{}} = subscribe_and_join(socket, "realtime:test", %{"config" => config})
+
+ assert_receive %Socket.Message{
+ topic: "realtime:test",
+ event: "broadcast",
+ payload: %{
+ "event" => "first",
+ "meta" => %{"id" => ^message1_id, "replayed" => true},
+ "payload" => %{"value" => "first"},
+ "type" => "broadcast"
+ }
+ }
+
+ assert_receive %Socket.Message{
+ topic: "realtime:test",
+ event: "broadcast",
+ payload: %{
+ "event" => "second",
+ "meta" => %{"id" => ^message2_id, "replayed" => true},
+ "payload" => %{"value" => "second"},
+ "type" => "broadcast"
+ }
+ }
+
+ refute_receive %Socket.Message{}
+ end
+ end
+
describe "presence" do
- test "events are counted", %{tenant: tenant} do
+ test "presence state event is counted", %{tenant: tenant} do
jwt = Generators.generate_jwt_token(tenant)
{:ok, %Socket{} = socket} = connect(UserSocket, %{"log_level" => "warning"}, conn_opts(tenant, jwt))
assert {:ok, _, %Socket{} = socket} = subscribe_and_join(socket, "realtime:test", %{})
- presence_diff = %Socket.Broadcast{event: "presence_diff", payload: %{joins: %{}, leaves: %{}}}
- send(socket.channel_pid, presence_diff)
+ assert_receive %Socket.Message{topic: "realtime:test", event: "presence_state", payload: %{}}
+
+ tenant_id = tenant.external_id
+
+ assert {:ok, %RateCounter{id: {:channel, :presence_events, ^tenant_id}, bucket: bucket}} =
+ RateCounterHelper.tick!(socket.assigns.presence_rate_counter)
+
+ # presence_state
+ assert Enum.sum(bucket) == 1
+ end
+
+ test "client rate limit blocks calls over the limit and shuts down channel", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{"log_level" => "warning"}, conn_opts(tenant, jwt))
+
+ config = %{"config" => %{"presence" => %{"key" => "user_id"}}}
+ assert {:ok, _, %Socket{channel_pid: channel_pid} = socket} = subscribe_and_join(socket, "realtime:test", config)
assert_receive %Socket.Message{topic: "realtime:test", event: "presence_state", payload: %{}}
+ # Make 5 presence calls (at the default limit)
+ for i <- 1..5 do
+ ref = push(socket, "presence", %{"type" => "presence", "event" => "TRACK", "payload" => %{"call" => i}})
+ assert_receive %Socket.Reply{ref: ^ref, status: :ok}, 500
+ end
+
+ assert capture_log(fn ->
+ # 6th call should cause channel shutdown
+ push(socket, "presence", %{"type" => "presence", "event" => "TRACK", "payload" => %{"call" => 6}})
+
+ assert_receive %Socket.Message{
+ topic: "realtime:test",
+ event: "system",
+ payload: %{
+ message: "Client presence rate limit exceeded",
+ status: "error",
+ extension: "system",
+ channel: "test"
+ }
+ },
+ 500
+ end) =~ "ClientPresenceRateLimitReached"
+
+ assert_process_down(channel_pid)
+ end
+
+ test "client rate limits are independent per connection", %{tenant: tenant} do
+ jwt1 = Generators.generate_jwt_token(tenant)
+ jwt2 = Generators.generate_jwt_token(tenant)
+
+ {:ok, %Socket{} = socket1} = connect(UserSocket, %{"log_level" => "warning"}, conn_opts(tenant, jwt1))
+ {:ok, %Socket{} = socket2} = connect(UserSocket, %{"log_level" => "warning"}, conn_opts(tenant, jwt2))
+
+ config = %{"config" => %{"presence" => %{"key" => "user_id"}}}
+
+ assert {:ok, _, %Socket{channel_pid: channel_pid1} = socket1} =
+ subscribe_and_join(socket1, "realtime:test1", config)
+
+ assert {:ok, _, %Socket{} = socket2} = subscribe_and_join(socket2, "realtime:test2", config)
+
+ assert_receive %Socket.Message{topic: "realtime:test1", event: "presence_state", payload: %{}}
+ assert_receive %Socket.Message{topic: "realtime:test2", event: "presence_state", payload: %{}}
+
+ # Exhaust rate limit for socket1
+ for i <- 1..5 do
+ ref = push(socket1, "presence", %{"type" => "presence", "event" => "TRACK", "payload" => %{"call" => i}})
+ assert_receive %Socket.Reply{ref: ^ref, status: :ok}, 500
+ end
+
+ # socket1's 6th call should cause shutdown
+ push(socket1, "presence", %{"type" => "presence", "event" => "TRACK", "payload" => %{"call" => 6}})
+
assert_receive %Socket.Message{
- topic: "realtime:test",
- event: "presence_diff",
- payload: %{joins: %{}, leaves: %{}}
+ topic: "realtime:test1",
+ event: "system",
+ payload: %{
+ message: "Client presence rate limit exceeded",
+ status: "error",
+ extension: "system",
+ channel: "test1"
+ }
+ },
+ 500
+
+ assert_process_down(channel_pid1)
+
+ # socket2 should still work (independent rate limit)
+ ref = push(socket2, "presence", %{"type" => "presence", "event" => "TRACK", "payload" => %{"call" => 1}})
+ assert_receive %Socket.Reply{ref: ^ref, status: :ok}, 500
+ end
+
+ test "presence track closes on high payload size", %{tenant: tenant} do
+ topic = "realtime:test"
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{"log_level" => "warning"}, conn_opts(tenant, jwt))
+
+ assert {:ok, _, %Socket{} = socket} = subscribe_and_join(socket, topic, %{})
+
+ assert_receive %Phoenix.Socket.Message{topic: "realtime:test", event: "presence_state"}, 500
+
+ payload = %{
+ type: "presence",
+ event: "TRACK",
+ payload: %{name: "realtime_presence_96", t: 1814.7000000029802, content: String.duplicate("a", 3_500_000)}
}
- tenant_id = tenant.external_id
+ push(socket, "presence", payload)
+
+ assert_receive %Phoenix.Socket.Message{
+ event: "system",
+ payload: %{
+ extension: "system",
+ message: "Track message size exceeded",
+ status: "error"
+ },
+ topic: ^topic
+ },
+ 500
+ end
- # Wait for RateCounter to tick
- Process.sleep(1100)
+ test "presence track with same payload does nothing", %{tenant: tenant} do
+ topic = "realtime:test"
+ jwt = Generators.generate_jwt_token(tenant)
+ {:ok, %Socket{} = socket} = connect(UserSocket, %{"log_level" => "warning"}, conn_opts(tenant, jwt))
- assert {:ok, %RateCounter{id: {:channel, :presence_events, ^tenant_id}, bucket: bucket}} =
- RateCounter.get(socket.assigns.presence_rate_counter)
+ assert {:ok, _, %Socket{} = socket} =
+ subscribe_and_join(socket, topic, %{config: %{presence: %{enabled: true, key: "my_key"}}})
+
+ assert_receive %Phoenix.Socket.Message{topic: "realtime:test", event: "presence_state"}, 500
- # presence_state + presence_diff
- assert 2 in bucket
+ payload = %{type: "presence", event: "TRACK", payload: %{"hello" => "world"}}
+
+ push(socket, "presence", payload)
+
+ assert_receive %Socket.Reply{payload: %{}, topic: "realtime:test", status: :ok}, 500
+
+ assert_receive %Socket.Message{
+ payload: %{
+ joins: %{"my_key" => %{metas: [%{:phx_ref => _, "hello" => "world"}]}},
+ leaves: %{}
+ },
+ topic: "realtime:test",
+ event: "presence_diff"
+ },
+ 500
+
+ push(socket, "presence", payload)
+
+ assert_receive %Socket.Reply{payload: %{}, topic: "realtime:test", status: :ok}, 500
+ # no presence_diff this time
+
+ refute_receive %Socket.Message{}
+ refute_receive %Socket.Reply{}
end
end
@@ -78,7 +680,7 @@ defmodule RealtimeWeb.RealtimeChannelTest do
jwt = Generators.generate_jwt_token(tenant)
{:ok, %Socket{} = socket} = connect(UserSocket, %{}, conn_opts(tenant, jwt))
- expect(Authorization, :get_read_authorizations, fn _, _, _ ->
+ expect(Authorization, :get_read_authorizations, fn _, _, _, _ ->
{:error, "Realtime was unable to connect to the project database"}
end)
@@ -94,25 +696,38 @@ defmodule RealtimeWeb.RealtimeChannelTest do
jwt = Generators.generate_jwt_token(tenant)
{:ok, %Socket{} = socket} = connect(UserSocket, %{"log_level" => "warning"}, conn_opts(tenant, jwt))
- socket = Socket.assign(socket, %{limits: %{@default_limits | max_concurrent_users: 1}})
+ Realtime.Tenants.Cache.update_cache(%{tenant | max_concurrent_users: 1})
+
assert {:ok, _, %Socket{}} = subscribe_and_join(socket, "realtime:test", %{})
end
- test "reached", %{tenant: tenant} do
+ test "reached after connecting", %{tenant: tenant} do
jwt = Generators.generate_jwt_token(tenant)
{:ok, %Socket{} = socket} = connect(UserSocket, %{"log_level" => "warning"}, conn_opts(tenant, jwt))
- socket_at_capacity =
- Socket.assign(socket, %{limits: %{@default_limits | max_concurrent_users: 0}})
+ Realtime.Tenants.Cache.update_cache(%{tenant | max_concurrent_users: 1})
- socket_over_capacity =
- Socket.assign(socket, %{limits: %{@default_limits | max_concurrent_users: -1}})
+ pid = spawn_link(fn -> Process.sleep(:infinity) end)
+ Realtime.UsersCounter.add(pid, tenant.external_id)
assert {:error, %{reason: "ConnectionRateLimitReached: Too many connected users"}} =
- subscribe_and_join(socket_at_capacity, "realtime:test", %{})
+ subscribe_and_join(socket, "realtime:test", %{})
+
+ pid = spawn_link(fn -> Process.sleep(:infinity) end)
+ Realtime.UsersCounter.add(pid, tenant.external_id)
assert {:error, %{reason: "ConnectionRateLimitReached: Too many connected users"}} =
- subscribe_and_join(socket_over_capacity, "realtime:test", %{})
+ subscribe_and_join(socket, "realtime:test", %{})
+ end
+
+ test "reached before connecting", %{tenant: tenant} do
+ jwt = Generators.generate_jwt_token(tenant)
+
+ Realtime.Tenants.Cache.update_cache(%{tenant | max_concurrent_users: 1})
+
+ Realtime.UsersCounter.add(self(), tenant.external_id)
+
+ {:error, :too_many_connections} = connect(UserSocket, %{"log_level" => "warning"}, conn_opts(tenant, jwt))
end
end
@@ -762,7 +1377,10 @@ defmodule RealtimeWeb.RealtimeChannelTest do
put_in(extension, ["settings", "db_port"], db_port)
]
- Realtime.Api.update_tenant(tenant, %{extensions: extensions})
+ with {:ok, tenant} <- Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{extensions: extensions}) do
+ Realtime.Tenants.Cache.update_cache(tenant)
+ {:ok, tenant}
+ end
end
defp assert_process_down(pid) do
diff --git a/test/realtime_web/channels/tenant_rate_limiters_test.exs b/test/realtime_web/channels/tenant_rate_limiters_test.exs
new file mode 100644
index 000000000..05d56ec82
--- /dev/null
+++ b/test/realtime_web/channels/tenant_rate_limiters_test.exs
@@ -0,0 +1,31 @@
+defmodule RealtimeWeb.TenantRateLimitersTest do
+ use Realtime.DataCase, async: true
+
+ use Mimic
+ alias RealtimeWeb.TenantRateLimiters
+ alias Realtime.Api.Tenant
+
+ setup do
+ tenant = %Tenant{external_id: random_string(), max_concurrent_users: 1, max_joins_per_second: 1}
+
+ %{tenant: tenant}
+ end
+
+ describe "check_tenant/1" do
+ test "rate is not exceeded", %{tenant: tenant} do
+ assert TenantRateLimiters.check_tenant(tenant) == :ok
+ end
+
+ test "max concurrent users is exceeded", %{tenant: tenant} do
+ Realtime.UsersCounter.add(self(), tenant.external_id)
+
+ assert TenantRateLimiters.check_tenant(tenant) == {:error, :too_many_connections}
+ end
+
+ test "max joins is exceeded", %{tenant: tenant} do
+ expect(Realtime.RateCounter, :get, fn _ -> {:ok, %{limit: %{triggered: true}}} end)
+
+ assert TenantRateLimiters.check_tenant(tenant) == {:error, :too_many_joins}
+ end
+ end
+end
diff --git a/test/realtime_web/controllers/broadcast_controller_test.exs b/test/realtime_web/controllers/broadcast_controller_test.exs
index 9c38d58bd..d1531831e 100644
--- a/test/realtime_web/controllers/broadcast_controller_test.exs
+++ b/test/realtime_web/controllers/broadcast_controller_test.exs
@@ -18,7 +18,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
setup %{conn: conn} do
tenant = Containers.checkout_tenant(run_migrations: true)
# Warm cache to avoid Cachex and Ecto.Sandbox ownership issues
- Cachex.put!(Realtime.Tenants.Cache, {{:get_tenant_by_external_id, 1}, [tenant.external_id]}, {:cached, tenant})
+ Realtime.Tenants.Cache.update_cache(tenant)
conn = generate_conn(conn, tenant)
@@ -141,16 +141,38 @@ defmodule RealtimeWeb.BroadcastControllerTest do
assert conn.status == 422
- # Wait for counters to increment. RateCounter tick is 1 second
- Process.sleep(2000)
- {:ok, rate_counter} = RateCounter.get(Tenants.requests_per_second_rate(tenant))
+ {:ok, rate_counter} = RateCounterHelper.tick!(Tenants.requests_per_second_rate(tenant))
assert rate_counter.avg != 0.0
- {:ok, rate_counter} = RateCounter.get(Tenants.events_per_second_rate(tenant))
+ {:ok, rate_counter} = RateCounterHelper.tick!(Tenants.events_per_second_rate(tenant))
assert rate_counter.avg == 0.0
refute_receive {:socket_push, _, _}
end
+
+ test "returns 422 when batch of messages includes a message that exceeds the tenant payload size", %{
+ conn: conn,
+ tenant: tenant
+ } do
+ sub_topic_1 = "sub_topic_1"
+ sub_topic_2 = "sub_topic_2"
+
+ payload_1 = %{"data" => "data"}
+ payload_2 = %{"data" => random_string(tenant.max_payload_size_in_kb * 1000 + 100)}
+ event_1 = "event_1"
+ event_2 = "event_2"
+
+ conn =
+ post(conn, Routes.broadcast_path(conn, :broadcast), %{
+ "messages" => [
+ %{"topic" => sub_topic_1, "payload" => payload_1, "event" => event_1},
+ %{"topic" => sub_topic_1, "payload" => payload_1, "event" => event_1},
+ %{"topic" => sub_topic_2, "payload" => payload_2, "event" => event_2}
+ ]
+ })
+
+ assert conn.status == 422
+ end
end
describe "too many requests" do
@@ -209,23 +231,25 @@ defmodule RealtimeWeb.BroadcastControllerTest do
end
describe "unauthorized" do
- test "invalid token returns 401", %{conn: conn} do
+ test "invalid token returns 401", %{conn: conn, tenant: tenant} do
conn =
conn
+ |> delete_req_header("authorization")
|> put_req_header("accept", "application/json")
|> put_req_header("x-api-key", "potato")
- |> then(&%{&1 | host: "dev_tenant.supabase.com"})
+ |> then(&%{&1 | host: "#{tenant.external_id}.supabase.com"})
conn = post(conn, Routes.broadcast_path(conn, :broadcast), %{})
assert conn.status == 401
end
- test "expired token returns 401", %{conn: conn} do
+ test "expired token returns 401", %{conn: conn, tenant: tenant} do
conn =
conn
+ |> delete_req_header("authorization")
|> put_req_header("accept", "application/json")
|> put_req_header("x-api-key", @expired_token)
- |> then(&%{&1 | host: "dev_tenant.supabase.com"})
+ |> then(&%{&1 | host: "#{tenant.external_id}.supabase.com"})
conn = post(conn, Routes.broadcast_path(conn, :broadcast), %{})
assert conn.status == 401
@@ -272,7 +296,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
} do
request_events_key = Tenants.requests_per_second_key(tenant)
broadcast_events_key = Tenants.events_per_second_key(tenant)
- expect(TenantBroadcaster, :pubsub_broadcast, 5, fn _, _, _, _ -> :ok end)
+ expect(TenantBroadcaster, :pubsub_broadcast, 5, fn _, _, _, _, _ -> :ok end)
messages_to_send =
Stream.repeatedly(fn -> generate_message_with_policies(db_conn, tenant) end)
@@ -294,7 +318,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
conn = post(conn, Routes.broadcast_path(conn, :broadcast), %{"messages" => messages})
- broadcast_calls = calls(&TenantBroadcaster.pubsub_broadcast/4)
+ broadcast_calls = calls(&TenantBroadcaster.pubsub_broadcast/5)
Enum.each(messages_to_send, fn %{topic: topic} ->
broadcast_topic = Tenants.tenant_topic(tenant, topic, false)
@@ -310,7 +334,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
}
assert Enum.any?(broadcast_calls, fn
- [_, ^broadcast_topic, ^message, RealtimeChannel.MessageDispatcher] -> true
+ [_, ^broadcast_topic, ^message, RealtimeChannel.MessageDispatcher, :broadcast] -> true
_ -> false
end)
end)
@@ -326,7 +350,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
} do
request_events_key = Tenants.requests_per_second_key(tenant)
broadcast_events_key = Tenants.events_per_second_key(tenant)
- expect(TenantBroadcaster, :pubsub_broadcast, 6, fn _, _, _, _ -> :ok end)
+ expect(TenantBroadcaster, :pubsub_broadcast, 6, fn _, _, _, _, _ -> :ok end)
channels =
Stream.repeatedly(fn -> generate_message_with_policies(db_conn, tenant) end)
@@ -358,7 +382,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
conn = post(conn, Routes.broadcast_path(conn, :broadcast), %{"messages" => messages})
- broadcast_calls = calls(&TenantBroadcaster.pubsub_broadcast/4)
+ broadcast_calls = calls(&TenantBroadcaster.pubsub_broadcast/5)
Enum.each(channels, fn %{topic: topic} ->
broadcast_topic = Tenants.tenant_topic(tenant, topic, false)
@@ -374,7 +398,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
}
assert Enum.count(broadcast_calls, fn
- [_, ^broadcast_topic, ^message, RealtimeChannel.MessageDispatcher] -> true
+ [_, ^broadcast_topic, ^message, RealtimeChannel.MessageDispatcher, :broadcast] -> true
_ -> false
end) == 1
end)
@@ -393,7 +417,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
open_channel_topic = Tenants.tenant_topic(tenant, "open_channel", true)
assert Enum.count(broadcast_calls, fn
- [_, ^open_channel_topic, ^message, RealtimeChannel.MessageDispatcher] -> true
+ [_, ^open_channel_topic, ^message, RealtimeChannel.MessageDispatcher, :broadcast] -> true
_ -> false
end) == 1
@@ -408,7 +432,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
} do
request_events_key = Tenants.requests_per_second_key(tenant)
broadcast_events_key = Tenants.events_per_second_key(tenant)
- expect(TenantBroadcaster, :pubsub_broadcast, 5, fn _, _, _, _ -> :ok end)
+ expect(TenantBroadcaster, :pubsub_broadcast, 5, fn _, _, _, _, _ -> :ok end)
messages_to_send =
Stream.repeatedly(fn -> generate_message_with_policies(db_conn, tenant) end)
@@ -428,11 +452,12 @@ defmodule RealtimeWeb.BroadcastControllerTest do
GenCounter
|> expect(:add, fn ^request_events_key -> :ok end)
- |> expect(:add, length(messages_to_send), fn ^broadcast_events_key -> :ok end)
+      # remove the one message that won't be broadcast for this user
+ |> expect(:add, length(messages) - 1, fn ^broadcast_events_key -> :ok end)
conn = post(conn, Routes.broadcast_path(conn, :broadcast), %{"messages" => messages})
- broadcast_calls = calls(&TenantBroadcaster.pubsub_broadcast/4)
+ broadcast_calls = calls(&TenantBroadcaster.pubsub_broadcast/5)
Enum.each(messages_to_send, fn %{topic: topic} ->
broadcast_topic = Tenants.tenant_topic(tenant, topic, false)
@@ -448,7 +473,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
}
assert Enum.count(broadcast_calls, fn
- [_, ^broadcast_topic, ^message, RealtimeChannel.MessageDispatcher] -> true
+ [_, ^broadcast_topic, ^message, RealtimeChannel.MessageDispatcher, :broadcast] -> true
_ -> false
end) == 1
end)
@@ -461,7 +486,7 @@ defmodule RealtimeWeb.BroadcastControllerTest do
@tag role: "anon"
test "user without permission won't broadcast", %{conn: conn, db_conn: db_conn, tenant: tenant} do
request_events_key = Tenants.requests_per_second_key(tenant)
- reject(&TenantBroadcaster.pubsub_broadcast/4)
+ reject(&TenantBroadcaster.pubsub_broadcast/5)
messages =
Stream.repeatedly(fn -> generate_message_with_policies(db_conn, tenant) end)
@@ -482,7 +507,6 @@ defmodule RealtimeWeb.BroadcastControllerTest do
GenCounter
|> expect(:add, fn ^request_events_key -> 1 end)
- |> reject(:add, 1)
conn = post(conn, Routes.broadcast_path(conn, :broadcast), %{"messages" => messages})
diff --git a/test/realtime_web/controllers/fallback_controller_test.exs b/test/realtime_web/controllers/fallback_controller_test.exs
new file mode 100644
index 000000000..3cbf87d2a
--- /dev/null
+++ b/test/realtime_web/controllers/fallback_controller_test.exs
@@ -0,0 +1,52 @@
+defmodule RealtimeWeb.FallbackControllerTest do
+ use RealtimeWeb.ConnCase, async: true
+
+ alias RealtimeWeb.FallbackController
+
+ describe "call/2" do
+ test "returns 404 with not found message", %{conn: conn} do
+ conn = FallbackController.call(conn, {:error, :not_found})
+
+ assert json_response(conn, 404) == %{"message" => "not found"}
+ end
+
+ test "returns 422 with changeset errors", %{conn: conn} do
+ changeset =
+ {%{}, %{name: :string}}
+ |> Ecto.Changeset.cast(%{name: 123}, [:name])
+
+ conn = FallbackController.call(conn, {:error, changeset})
+
+ assert %{"errors" => _} = json_response(conn, 422)
+ end
+
+ test "returns custom status with message", %{conn: conn} do
+ conn = FallbackController.call(conn, {:error, :bad_request, "invalid input"})
+
+ assert json_response(conn, 400) == %{"message" => "invalid input"}
+ end
+
+ test "returns 401 for generic error tuple", %{conn: conn} do
+ conn = FallbackController.call(conn, {:error, "something went wrong"})
+
+ assert json_response(conn, 401) == %{"message" => "Unauthorized"}
+ end
+
+ test "returns 422 for bare invalid changeset", %{conn: conn} do
+ changeset =
+ {%{}, %{name: :string}}
+ |> Ecto.Changeset.cast(%{name: 123}, [:name])
+ |> Map.put(:valid?, false)
+
+ conn = FallbackController.call(conn, changeset)
+
+ assert %{"errors" => _} = json_response(conn, 422)
+ end
+
+ test "returns 422 for unknown error format", %{conn: conn} do
+ conn = FallbackController.call(conn, :unexpected_value)
+
+ assert json_response(conn, 422) == %{"message" => "Unknown error"}
+ end
+ end
+end
diff --git a/test/realtime_web/controllers/metrics_controller_test.exs b/test/realtime_web/controllers/metrics_controller_test.exs
index f16edc83f..52453271c 100644
--- a/test/realtime_web/controllers/metrics_controller_test.exs
+++ b/test/realtime_web/controllers/metrics_controller_test.exs
@@ -2,11 +2,23 @@ defmodule RealtimeWeb.MetricsControllerTest do
# Usage of Clustered
# Also changing Application env
use RealtimeWeb.ConnCase, async: false
+ alias Realtime.GenRpc
import ExUnit.CaptureLog
+ use Mimic
setup_all do
- {:ok, _} = Clustered.start(nil, extra_config: [{:realtime, :region, "ap-southeast-2"}])
+ metrics_tags = %{
+ region: "ap-southeast-2",
+ host: "anothernode@something.com",
+ id: "someid"
+ }
+
+ {:ok, _} =
+ Clustered.start(nil,
+ extra_config: [{:realtime, :region, "ap-southeast-2"}, {:realtime, :metrics_tags, metrics_tags}]
+ )
+
:ok
end
@@ -30,14 +42,18 @@ defmodule RealtimeWeb.MetricsControllerTest do
assert response =~
"# HELP beam_system_schedulers_online_info The number of scheduler threads that are online."
- assert response =~ "region=\"ap-southeast-2"
- assert response =~ "region=\"us-east-1"
+ assert response =~ "region=\"ap-southeast-2\""
+ assert response =~ "region=\"us-east-1\""
end
test "returns 200 and log on timeout", %{conn: conn} do
- current_value = Application.get_env(:realtime, :metrics_rpc_timeout)
- on_exit(fn -> Application.put_env(:realtime, :metrics_rpc_timeout, current_value) end)
- Application.put_env(:realtime, :metrics_rpc_timeout, 0)
+ Mimic.stub(GenRpc, :call, fn node, mod, func, args, opts ->
+ if node != node() do
+ {:error, :rpc_error, :timeout}
+ else
+ call_original(GenRpc, :call, [node, mod, func, args, opts])
+ end
+ end)
log =
capture_log(fn ->
@@ -74,4 +90,64 @@ defmodule RealtimeWeb.MetricsControllerTest do
|> response(403)
end
end
+
+ describe "GET /metrics/:region" do
+ setup %{conn: conn} do
+ # The metrics pipeline requires authentication
+ jwt_secret = Application.fetch_env!(:realtime, :metrics_jwt_secret)
+ token = generate_jwt_token(jwt_secret, %{})
+ authenticated_conn = put_req_header(conn, "authorization", "Bearer #{token}")
+
+ {:ok, conn: authenticated_conn}
+ end
+
+ test "returns 200", %{conn: conn} do
+ assert response =
+ conn
+ |> get(~p"/metrics/ap-southeast-2")
+ |> text_response(200)
+
+ # Check prometheus like metrics
+ assert response =~
+ "# HELP beam_system_schedulers_online_info The number of scheduler threads that are online."
+
+ assert response =~ "region=\"ap-southeast-2\""
+ refute response =~ "region=\"us-east-1\""
+ end
+
+ test "returns 200 and log on timeout", %{conn: conn} do
+ Mimic.stub(GenRpc, :call, fn _node, _mod, _func, _args, _opts ->
+ {:error, :rpc_error, :timeout}
+ end)
+
+ log =
+ capture_log(fn ->
+ assert response =
+ conn
+ |> get(~p"/metrics/ap-southeast-2")
+ |> text_response(200)
+
+ assert response == ""
+ end)
+
+ assert log =~ "Cannot fetch metrics from the node"
+ end
+
+ test "returns 403 when authorization header is missing", %{conn: conn} do
+ assert conn
+ |> delete_req_header("authorization")
+ |> get(~p"/metrics/ap-southeast-2")
+ |> response(403)
+ end
+
+ test "returns 403 when authorization header is wrong", %{conn: conn} do
+ token = generate_jwt_token("bad_secret", %{})
+
+ assert _ =
+ conn
+ |> put_req_header("authorization", "Bearer #{token}")
+ |> get(~p"/metrics/ap-southeast-2")
+ |> response(403)
+ end
+ end
end
diff --git a/test/realtime_web/controllers/page_controller_test.exs b/test/realtime_web/controllers/page_controller_test.exs
index 91fe825b5..581c46970 100644
--- a/test/realtime_web/controllers/page_controller_test.exs
+++ b/test/realtime_web/controllers/page_controller_test.exs
@@ -1,5 +1,7 @@
defmodule RealtimeWeb.PageControllerTest do
- use RealtimeWeb.ConnCase
+ use RealtimeWeb.ConnCase, async: false
+
+ import ExUnit.CaptureLog
test "GET / renders index page", %{conn: conn} do
conn = get(conn, "/")
@@ -10,4 +12,48 @@ defmodule RealtimeWeb.PageControllerTest do
conn = get(conn, "/healthcheck")
assert text_response(conn, 200) == "ok"
end
+
+ describe "GET /healthcheck logging behavior" do
+ setup do
+ original_value = Application.get_env(:realtime, :disable_healthcheck_logging, false)
+ on_exit(fn -> Application.put_env(:realtime, :disable_healthcheck_logging, original_value) end)
+ :ok
+ end
+
+ test "logs request when DISABLE_HEALTHCHECK_LOGGING is false", %{conn: conn} do
+ Application.put_env(:realtime, :disable_healthcheck_logging, false)
+
+ log =
+ capture_log(fn ->
+ conn = get(conn, "/healthcheck")
+ assert text_response(conn, 200) == "ok"
+ end)
+
+ assert log =~ "GET /healthcheck"
+ end
+
+ test "does not log request when DISABLE_HEALTHCHECK_LOGGING is true", %{conn: conn} do
+ Application.put_env(:realtime, :disable_healthcheck_logging, true)
+
+ log =
+ capture_log(fn ->
+ conn = get(conn, "/healthcheck")
+ assert text_response(conn, 200) == "ok"
+ end)
+
+ refute log =~ "GET /healthcheck"
+ end
+
+ test "logs request when DISABLE_HEALTHCHECK_LOGGING is not set (default)", %{conn: conn} do
+ Application.delete_env(:realtime, :disable_healthcheck_logging)
+
+ log =
+ capture_log(fn ->
+ conn = get(conn, "/healthcheck")
+ assert text_response(conn, 200) == "ok"
+ end)
+
+ assert log =~ "GET /healthcheck"
+ end
+ end
end
diff --git a/test/realtime_web/controllers/tenant_controller_test.exs b/test/realtime_web/controllers/tenant_controller_test.exs
index 3974e7e7b..ff46ad10b 100644
--- a/test/realtime_web/controllers/tenant_controller_test.exs
+++ b/test/realtime_web/controllers/tenant_controller_test.exs
@@ -3,6 +3,7 @@ defmodule RealtimeWeb.TenantControllerTest do
# Also using global otel_simple_processor
use RealtimeWeb.ConnCase, async: false
+ import ExUnit.CaptureLog
require OpenTelemetry.Tracer, as: Tracer
alias Realtime.Api.Tenant
@@ -48,7 +49,7 @@ defmodule RealtimeWeb.TenantControllerTest do
test "returns not found on non existing tenant", %{conn: conn} do
conn = get(conn, ~p"/api/tenants/no")
response = json_response(conn, 404)
- assert response == %{"error" => "not found"}
+ assert response == %{"message" => "not found"}
end
test "sets appropriate observability metadata", %{conn: conn, tenant: tenant} do
@@ -147,6 +148,46 @@ defmodule RealtimeWeb.TenantControllerTest do
assert 100 = json_response(conn, 200)["data"]["max_joins_per_second"]
end
+ test "can set max_client_presence_events_per_window", %{conn: conn, tenant: tenant} do
+ external_id = tenant.external_id
+ port = Database.from_tenant(tenant, "realtime_test", :stop).port
+ attrs = default_tenant_attrs(port) |> Map.put("max_client_presence_events_per_window", 42)
+ attrs = Map.put(attrs, "external_id", external_id)
+
+ conn = post(conn, ~p"/api/tenants", tenant: attrs)
+ assert %{"max_client_presence_events_per_window" => 42} = json_response(conn, 200)["data"]
+
+ conn = get(conn, Routes.tenant_path(conn, :show, external_id))
+ assert 42 = json_response(conn, 200)["data"]["max_client_presence_events_per_window"]
+ end
+
+ test "max_client_presence_events_per_window defaults to nil", %{conn: conn, tenant: tenant} do
+ external_id = tenant.external_id
+
+ conn = get(conn, Routes.tenant_path(conn, :show, external_id))
+ assert is_nil(json_response(conn, 200)["data"]["max_client_presence_events_per_window"])
+ end
+
+ test "can set client_presence_window_ms", %{conn: conn, tenant: tenant} do
+ external_id = tenant.external_id
+ port = Database.from_tenant(tenant, "realtime_test", :stop).port
+ attrs = default_tenant_attrs(port) |> Map.put("client_presence_window_ms", 5_000)
+ attrs = Map.put(attrs, "external_id", external_id)
+
+ conn = post(conn, ~p"/api/tenants", tenant: attrs)
+ assert %{"client_presence_window_ms" => 5_000} = json_response(conn, 200)["data"]
+
+ conn = get(conn, Routes.tenant_path(conn, :show, external_id))
+ assert 5_000 = json_response(conn, 200)["data"]["client_presence_window_ms"]
+ end
+
+ test "client_presence_window_ms defaults to nil", %{conn: conn, tenant: tenant} do
+ external_id = tenant.external_id
+
+ conn = get(conn, Routes.tenant_path(conn, :show, external_id))
+ assert is_nil(json_response(conn, 200)["data"]["client_presence_window_ms"])
+ end
+
test "renders errors when data is invalid", %{conn: conn} do
conn = post(conn, ~p"/api/tenants", tenant: @invalid_attrs)
assert json_response(conn, 422)["errors"] != %{}
@@ -178,6 +219,24 @@ defmodule RealtimeWeb.TenantControllerTest do
assert 100 = json_response(conn, 200)["data"]["max_joins_per_second"]
end
+ test "can update max_client_presence_events_per_window", %{tenant: tenant, conn: conn} do
+ external_id = tenant.external_id
+ port = Database.from_tenant(tenant, "realtime_test", :stop).port
+ attrs = default_tenant_attrs(port) |> Map.put("max_client_presence_events_per_window", 99)
+
+ conn = put(conn, ~p"/api/tenants/#{external_id}", tenant: attrs)
+ assert %{"max_client_presence_events_per_window" => 99} = json_response(conn, 200)["data"]
+ end
+
+ test "can update client_presence_window_ms", %{tenant: tenant, conn: conn} do
+ external_id = tenant.external_id
+ port = Database.from_tenant(tenant, "realtime_test", :stop).port
+ attrs = default_tenant_attrs(port) |> Map.put("client_presence_window_ms", 10_000)
+
+ conn = put(conn, ~p"/api/tenants/#{external_id}", tenant: attrs)
+ assert %{"client_presence_window_ms" => 10_000} = json_response(conn, 200)["data"]
+ end
+
test "renders errors when data is invalid", %{conn: conn} do
conn = put(conn, ~p"/api/tenants/#{random_string()}", tenant: @invalid_attrs)
assert json_response(conn, 422)["errors"] != %{}
@@ -330,12 +389,66 @@ defmodule RealtimeWeb.TenantControllerTest do
end
end
+ describe "shutdown Connect module for tenant" do
+ setup [:with_tenant]
+
+ test "shuts down Connect process when tenant exists", %{conn: conn, tenant: %{external_id: external_id}} do
+ Phoenix.PubSub.subscribe(Realtime.PubSub, "realtime:operations:" <> external_id)
+
+ {:ok, connect_pid} = Connect.lookup_or_start_connection(external_id)
+ Process.monitor(connect_pid)
+
+ assert Process.alive?(connect_pid)
+
+ %{status: status} = post(conn, ~p"/api/tenants/#{external_id}/shutdown")
+
+ assert status == 204
+ assert_receive {:DOWN, _, :process, ^connect_pid, _}
+ refute Process.alive?(connect_pid)
+ end
+
+ test "returns 204 when tenant exists but Connect is not running", %{conn: conn, tenant: %{external_id: external_id}} do
+ %{status: status} = post(conn, ~p"/api/tenants/#{external_id}/shutdown")
+ assert status == 204
+ end
+
+ test "returns 404 when tenant does not exist", %{conn: conn} do
+ %{status: status} = post(conn, ~p"/api/tenants/nope/shutdown")
+ assert status == 404
+ end
+
+ test "returns 403 when jwt is invalid", %{conn: conn, tenant: tenant} do
+ conn = put_req_header(conn, "authorization", "Bearer potato")
+ conn = post(conn, ~p"/api/tenants/#{tenant.external_id}/shutdown")
+ assert response(conn, 403) == ""
+ end
+
+ test "sets appropriate observability metadata", %{conn: conn, tenant: tenant} do
+ external_id = tenant.external_id
+
+ Tracer.with_span "test" do
+ Task.async(fn ->
+ post(conn, ~p"/api/tenants/#{tenant.external_id}/shutdown")
+
+ assert Logger.metadata()[:external_id] == external_id
+ assert Logger.metadata()[:project] == external_id
+ end)
+ |> Task.await()
+ end
+
+ assert_receive {:span, span(name: "POST /api/tenants/:tenant_id/shutdown", attributes: attributes)}
+
+ assert attributes(map: %{external_id: ^external_id}) = attributes
+ end
+ end
+
describe "health check tenant" do
setup [:with_tenant]
setup do
+ previous_region = Application.get_env(:realtime, :region)
Application.put_env(:realtime, :region, "us-east-1")
- on_exit(fn -> Application.put_env(:realtime, :region, nil) end)
+ on_exit(fn -> Application.put_env(:realtime, :region, previous_region) end)
end
test "health check when tenant does not exist", %{conn: conn} do
@@ -354,13 +467,14 @@ defmodule RealtimeWeb.TenantControllerTest do
assert %{
"healthy" => true,
"db_connected" => false,
+ "replication_connected" => false,
"connected_cluster" => 0,
"region" => "us-east-1",
"node" => "#{node()}"
} == data
end
- test "unhealthy tenant with 1 client connections", %{
+ test "unhealthy tenant with 1 client connection and no db connection", %{
conn: conn,
tenant: %Tenant{external_id: ext_id}
} do
@@ -374,19 +488,23 @@ defmodule RealtimeWeb.TenantControllerTest do
assert %{
"healthy" => false,
"db_connected" => false,
+ "replication_connected" => false,
"connected_cluster" => 1,
"region" => "us-east-1",
"node" => "#{node()}"
} == data
end
- test "healthy tenant with 1 client connection", %{conn: conn, tenant: %Tenant{external_id: ext_id}} do
+ test "healthy tenant with db connection but no replication connection", %{
+ conn: conn,
+ tenant: %Tenant{external_id: ext_id}
+ } do
{:ok, db_conn} = Connect.lookup_or_start_connection(ext_id)
# Fake adding a connected client here
UsersCounter.add(self(), ext_id)
- # Fake a db connection
- :syn.register(Realtime.Tenants.Connect, ext_id, self(), %{conn: nil})
+ # Fake a db connection without replication (replication_conn: nil)
+ :syn.register(Realtime.Tenants.Connect, ext_id, self(), %{conn: nil, region: "us-east-1", replication_conn: nil})
:syn.update_registry(Realtime.Tenants.Connect, ext_id, fn _pid, meta ->
%{meta | conn: db_conn}
@@ -398,6 +516,32 @@ defmodule RealtimeWeb.TenantControllerTest do
assert %{
"healthy" => true,
"db_connected" => true,
+ "replication_connected" => false,
+ "connected_cluster" => 1,
+ "region" => "us-east-1",
+ "node" => "#{node()}"
+ } == data
+ end
+
+ test "healthy tenant with db and replication connection", %{conn: conn, tenant: %Tenant{external_id: ext_id}} do
+ {:ok, db_conn} = Connect.lookup_or_start_connection(ext_id)
+ # Fake adding a connected client here
+ UsersCounter.add(self(), ext_id)
+
+ # Fake a db connection with replication_conn in syn metadata
+ :syn.register(Realtime.Tenants.Connect, ext_id, self(), %{conn: nil, region: "us-east-1", replication_conn: nil})
+
+ :syn.update_registry(Realtime.Tenants.Connect, ext_id, fn _pid, meta ->
+ %{meta | conn: db_conn, replication_conn: self()}
+ end)
+
+ conn = get(conn, ~p"/api/tenants/#{ext_id}/health")
+ data = json_response(conn, 200)["data"]
+
+ assert %{
+ "healthy" => true,
+ "db_connected" => true,
+ "replication_connected" => true,
"connected_cluster" => 1,
"region" => "us-east-1",
"node" => "#{node()}"
@@ -418,11 +562,12 @@ defmodule RealtimeWeb.TenantControllerTest do
conn = get(conn, ~p"/api/tenants/#{tenant.external_id}/health")
data = json_response(conn, 200)["data"]
- Process.sleep(2000)
+ Process.sleep(1000)
assert {:ok, %{rows: []}} = Postgrex.query(db_conn, "SELECT * FROM realtime.messages", [])
- assert %{"healthy" => true, "db_connected" => false, "connected_cluster" => 0} = data
+ assert %{"healthy" => true, "db_connected" => false, "replication_connected" => false, "connected_cluster" => 0} =
+ data
end
test "sets appropriate observability metadata", %{conn: conn, tenant: tenant} do
@@ -442,6 +587,51 @@ defmodule RealtimeWeb.TenantControllerTest do
assert attributes(map: %{external_id: ^external_id}) = attributes
end
+
+ test "logs request when DISABLE_HEALTHCHECK_LOGGING is false", %{conn: conn, tenant: tenant} do
+ original_value = Application.get_env(:realtime, :disable_healthcheck_logging, false)
+ Application.put_env(:realtime, :disable_healthcheck_logging, false)
+ on_exit(fn -> Application.put_env(:realtime, :disable_healthcheck_logging, original_value) end)
+
+ log =
+ capture_log(fn ->
+ conn = get(conn, ~p"/api/tenants/#{tenant.external_id}/health")
+ assert json_response(conn, 200)
+ end)
+
+ assert log =~ "GET /api/tenants"
+ assert log =~ "/health"
+ end
+
+ test "does not log request when DISABLE_HEALTHCHECK_LOGGING is true", %{conn: conn, tenant: tenant} do
+ original_value = Application.get_env(:realtime, :disable_healthcheck_logging, false)
+ Application.put_env(:realtime, :disable_healthcheck_logging, true)
+ on_exit(fn -> Application.put_env(:realtime, :disable_healthcheck_logging, original_value) end)
+
+ log =
+ capture_log(fn ->
+ conn = get(conn, ~p"/api/tenants/#{tenant.external_id}/health")
+ assert json_response(conn, 200)
+ end)
+
+ refute log =~ "GET /api/tenants"
+ refute log =~ "/health"
+ end
+
+ test "logs request when DISABLE_HEALTHCHECK_LOGGING is not set (default)", %{conn: conn, tenant: tenant} do
+ original_value = Application.get_env(:realtime, :disable_healthcheck_logging, false)
+ Application.delete_env(:realtime, :disable_healthcheck_logging)
+ on_exit(fn -> Application.put_env(:realtime, :disable_healthcheck_logging, original_value) end)
+
+ log =
+ capture_log(fn ->
+ conn = get(conn, ~p"/api/tenants/#{tenant.external_id}/health")
+ assert json_response(conn, 200)
+ end)
+
+ assert log =~ "GET /api/tenants"
+ assert log =~ "/health"
+ end
end
defp default_tenant_attrs(port) do
diff --git a/test/realtime_web/live/status_live/index_test.exs b/test/realtime_web/live/status_live/index_test.exs
new file mode 100644
index 000000000..ae3af0ad0
--- /dev/null
+++ b/test/realtime_web/live/status_live/index_test.exs
@@ -0,0 +1,33 @@
+defmodule RealtimeWeb.StatusLive.IndexTest do
+ use RealtimeWeb.ConnCase
+ import Phoenix.LiveViewTest
+
+ alias Realtime.Latency.Payload
+ alias Realtime.Nodes
+ alias RealtimeWeb.Endpoint
+
+ describe "Status LiveView" do
+ test "renders status page", %{conn: conn} do
+ {:ok, _view, html} = live(conn, ~p"/status")
+
+ assert html =~ "Realtime Status"
+ end
+
+ test "receives broadcast from PubSub", %{conn: conn} do
+ {:ok, view, _html} = live(conn, ~p"/status")
+
+ payload = %Payload{
+ from_node: Nodes.short_node_id_from_name(:"pink@127.0.0.1"),
+ node: Nodes.short_node_id_from_name(:"orange@127.0.0.1"),
+ latency: "42ms",
+ timestamp: DateTime.utc_now()
+ }
+
+ Endpoint.broadcast("admin:cluster", "ping", payload)
+
+ html = render(view)
+ assert html =~ "42ms"
+ assert html =~ "pink@127.0.0.1_orange@127.0.0.1"
+ end
+ end
+end
diff --git a/test/realtime_web/plugs/rate_limiter_test.exs b/test/realtime_web/plugs/rate_limiter_test.exs
index 78b22fc8f..d79aef969 100644
--- a/test/realtime_web/plugs/rate_limiter_test.exs
+++ b/test/realtime_web/plugs/rate_limiter_test.exs
@@ -47,9 +47,7 @@ defmodule RealtimeWeb.Plugs.RateLimiterTest do
end
test "serve a 200 when rate limit is set to 100", %{conn: conn} do
- {:ok, _tenant} =
- Api.get_tenant_by_external_id(@tenant["external_id"])
- |> Api.update_tenant(%{"max_events_per_second" => 100})
+ {:ok, _tenant} = Api.update_tenant_by_external_id(@tenant["external_id"], %{"max_events_per_second" => 100})
conn =
conn
@@ -58,4 +56,23 @@ defmodule RealtimeWeb.Plugs.RateLimiterTest do
assert conn.status == 200
end
+
+ test "passes through when tenant is not in assigns", %{conn: conn} do
+ alias RealtimeWeb.Plugs.RateLimiter
+
+ result = RateLimiter.call(conn, [])
+
+ refute result.halted
+ end
+
+ test "sets rate limit headers on 429 response", %{conn: conn} do
+ conn =
+ conn
+ |> Map.put(:host, "localhost.localhost.com")
+ |> get(Routes.ping_path(conn, :ping))
+
+ assert conn.status == 429
+ assert get_resp_header(conn, "x-rate-limit") == ["0"]
+ assert get_resp_header(conn, "x-rate-rolling") != []
+ end
end
diff --git a/test/realtime_web/socket/v2_serializer_test.exs b/test/realtime_web/socket/v2_serializer_test.exs
new file mode 100644
index 000000000..2d83e1ea1
--- /dev/null
+++ b/test/realtime_web/socket/v2_serializer_test.exs
@@ -0,0 +1,553 @@
+defmodule RealtimeWeb.Socket.V2SerializerTest do
+ use ExUnit.Case, async: true
+
+ alias Phoenix.Socket.{Broadcast, Message, Reply}
+ alias RealtimeWeb.Socket.UserBroadcast
+ alias RealtimeWeb.Socket.V2Serializer
+
+ @serializer V2Serializer
+ @v2_fastlane_json "[null,null,\"t\",\"e\",{\"m\":1}]"
+ @v2_msg_json "[null,null,\"t\",\"e\",{\"m\":1}]"
+
+ @client_push <<
+ # push
+ 0::size(8),
+ # join_ref_size
+ 2,
+ # ref_size
+ 3,
+ # topic_size
+ 5,
+ # event_size
+ 5,
+ "12",
+ "123",
+ "topic",
+ "event",
+ 101,
+ 102,
+ 103
+ >>
+
+ @client_binary_user_broadcast_push <<
+ # user broadcast push
+ 3::size(8),
+ # join_ref_size
+ 2,
+ # ref_size
+ 3,
+ # topic_size
+ 5,
+ # user_event_size
+ 10,
+ # metadata_size
+ 0,
+ # binary encoding
+ 0::size(8),
+ "12",
+ "123",
+ "topic",
+ "user_event",
+ 101,
+ 102,
+ 103
+ >>
+
+ @client_json_user_broadcast_push <<
+ # user broadcast push
+ 3::size(8),
+ # join_ref_size
+ 2,
+ # ref_size
+ 3,
+ # topic_size
+ 5,
+ # user_event_size
+ 10,
+ # metadata_size
+ 0,
+ # json encoding
+ 1::size(8),
+ "12",
+ "123",
+ "topic",
+ "user_event",
+ 123,
+ 34,
+ 97,
+ 34,
+ 58,
+ 34,
+ 98,
+ 34,
+ 125
+ >>
+
+ @client_binary_user_broadcast_push_with_metadata <<
+ # user broadcast push
+ 3::size(8),
+ # join_ref_size
+ 2,
+ # ref_size
+ 3,
+ # topic_size
+ 5,
+ # user_event_size
+ 10,
+ # metadata_size
+ 14,
+ # binary encoding
+ 0::size(8),
+ "12",
+ "123",
+ "topic",
+ "user_event",
+ ~s<{"store":true}>,
+ 101,
+ 102,
+ 103
+ >>
+
+ @reply <<
+ # reply
+ 1::size(8),
+ # join_ref_size
+ 2,
+ # ref_size
+ 3,
+ # topic_size
+ 5,
+ # status_size
+ 2,
+ "12",
+ "123",
+ "topic",
+ "ok",
+ 101,
+ 102,
+ 103
+ >>
+
+ @broadcast <<
+ # broadcast
+ 2::size(8),
+ # topic_size
+ 5,
+ # event_size
+ 5,
+ "topic",
+ "event",
+ 101,
+ 102,
+ 103
+ >>
+
+ @binary_user_broadcast <<
+ # user broadcast
+ 4::size(8),
+ # topic_size
+ 5,
+ # user_event_size
+ 10,
+ # metadata_size
+ 17,
+ # binary encoding
+ 0::size(8),
+ "topic",
+ "user_event",
+ # metadata
+ 123,
+ 34,
+ 114,
+ 101,
+ 112,
+ 108,
+ 97,
+ 121,
+ 101,
+ 100,
+ 34,
+ 58,
+ 116,
+ 114,
+ 117,
+ 101,
+ 125,
+ # payload
+ 101,
+ 102,
+ 103
+ >>
+
+ @binary_user_broadcast_no_metadata <<
+ # user broadcast
+ 4::size(8),
+ # topic_size
+ 5,
+ # user_event_size
+ 10,
+ # metadata_size
+ 0,
+ # binary encoding
+ 0::size(8),
+ "topic",
+ "user_event",
+ # metadata
+ # payload
+ 101,
+ 102,
+ 103
+ >>
+
+ @json_user_broadcast <<
+ # user broadcast
+ 4::size(8),
+ # topic_size
+ 5,
+ # user_event_size
+ 10,
+ # metadata_size
+ 17,
+ # json encoding
+ 1::size(8),
+ "topic",
+ "user_event",
+ # metadata
+ 123,
+ 34,
+ 114,
+ 101,
+ 112,
+ 108,
+ 97,
+ 121,
+ 101,
+ 100,
+ 34,
+ 58,
+ 116,
+ 114,
+ 117,
+ 101,
+ 125,
+ # payload
+ 123,
+ 34,
+ 97,
+ 34,
+ 58,
+ 34,
+ 98,
+ 34,
+ 125
+ >>
+
+ @json_user_broadcast_no_metadata <<
+ # broadcast
+ 4::size(8),
+ # topic_size
+ 5,
+ # user_event_size
+ 10,
+ # metadata_size
+ 0,
+ # json encoding
+ 1::size(8),
+ "topic",
+ "user_event",
+ # metadata
+ # payload
+ 123,
+ 34,
+ 97,
+ 34,
+ 58,
+ 34,
+ 98,
+ 34,
+ 125
+ >>
+
+ defp encode!(serializer, msg) do
+ case serializer.encode!(msg) do
+ {:socket_push, :text, encoded} ->
+ assert is_list(encoded)
+ IO.iodata_to_binary(encoded)
+
+ {:socket_push, :binary, encoded} ->
+ assert is_binary(encoded)
+ encoded
+ end
+ end
+
+ defp decode!(serializer, msg, opts), do: serializer.decode!(msg, opts)
+
+ defp fastlane!(serializer, msg) do
+ case serializer.fastlane!(msg) do
+ {:socket_push, :text, encoded} ->
+ assert is_list(encoded)
+ IO.iodata_to_binary(encoded)
+
+ {:socket_push, :binary, encoded} ->
+ assert is_binary(encoded)
+ encoded
+ end
+ end
+
+ test "encode!/1 encodes `Phoenix.Socket.Message` as JSON" do
+ msg = %Message{topic: "t", event: "e", payload: %{m: 1}}
+ assert encode!(@serializer, msg) == @v2_msg_json
+ end
+
+ test "encode!/1 raises when payload is not a map" do
+ msg = %Message{topic: "t", event: "e", payload: "invalid"}
+ assert_raise ArgumentError, fn -> encode!(@serializer, msg) end
+ end
+
+ test "encode!/1 encodes `Phoenix.Socket.Reply` as JSON" do
+ msg = %Reply{topic: "t", payload: %{m: 1}}
+ encoded = encode!(@serializer, msg)
+
+ assert Jason.decode!(encoded) == [
+ nil,
+ nil,
+ "t",
+ "phx_reply",
+ %{"response" => %{"m" => 1}, "status" => nil}
+ ]
+ end
+
+ test "decode!/2 decodes `Phoenix.Socket.Message` from JSON" do
+ assert %Message{topic: "t", event: "e", payload: %{"m" => 1}} ==
+ decode!(@serializer, @v2_msg_json, opcode: :text)
+ end
+
+ test "fastlane!/1 encodes a broadcast into a message as JSON" do
+ msg = %Broadcast{topic: "t", event: "e", payload: %{m: 1}}
+ assert fastlane!(@serializer, msg) == @v2_fastlane_json
+ end
+
+ test "fastlane!/1 raises when payload is not a map" do
+ msg = %Broadcast{topic: "t", event: "e", payload: "invalid"}
+ assert_raise ArgumentError, fn -> fastlane!(@serializer, msg) end
+ end
+
+ describe "binary encode" do
+ test "general pushed message" do
+ push = <<
+ # push
+ 0::size(8),
+ # join_ref_size
+ 2,
+ # topic_size
+ 5,
+ # event_size
+ 5,
+ "12",
+ "topic",
+ "event",
+ 101,
+ 102,
+ 103
+ >>
+
+ assert encode!(@serializer, %Phoenix.Socket.Message{
+ join_ref: "12",
+ ref: nil,
+ topic: "topic",
+ event: "event",
+ payload: {:binary, <<101, 102, 103>>}
+ }) == push
+ end
+
+ test "encode with oversized headers" do
+ assert_raise ArgumentError, ~r/unable to convert topic to binary/, fn ->
+ encode!(@serializer, %Phoenix.Socket.Message{
+ join_ref: "12",
+ ref: nil,
+ topic: String.duplicate("t", 256),
+ event: "event",
+ payload: {:binary, <<101, 102, 103>>}
+ })
+ end
+
+ assert_raise ArgumentError, ~r/unable to convert event to binary/, fn ->
+ encode!(@serializer, %Phoenix.Socket.Message{
+ join_ref: "12",
+ ref: nil,
+ topic: "topic",
+ event: String.duplicate("e", 256),
+ payload: {:binary, <<101, 102, 103>>}
+ })
+ end
+
+ assert_raise ArgumentError, ~r/unable to convert join_ref to binary/, fn ->
+ encode!(@serializer, %Phoenix.Socket.Message{
+ join_ref: String.duplicate("j", 256),
+ ref: nil,
+ topic: "topic",
+ event: "event",
+ payload: {:binary, <<101, 102, 103>>}
+ })
+ end
+ end
+
+ test "reply" do
+ assert encode!(@serializer, %Phoenix.Socket.Reply{
+ join_ref: "12",
+ ref: "123",
+ topic: "topic",
+ status: :ok,
+ payload: {:binary, <<101, 102, 103>>}
+ }) == @reply
+ end
+
+ test "reply with oversized headers" do
+ assert_raise ArgumentError, ~r/unable to convert ref to binary/, fn ->
+ encode!(@serializer, %Phoenix.Socket.Reply{
+ join_ref: "12",
+ ref: String.duplicate("r", 256),
+ topic: "topic",
+ status: :ok,
+ payload: {:binary, <<101, 102, 103>>}
+ })
+ end
+ end
+
+ test "fastlane binary Broadcast" do
+ assert fastlane!(@serializer, %Broadcast{
+ topic: "topic",
+ event: "event",
+ payload: {:binary, <<101, 102, 103>>}
+ }) == @broadcast
+ end
+
+ test "fastlane binary UserBroadcast" do
+ assert fastlane!(@serializer, %UserBroadcast{
+ topic: "topic",
+ user_event: "user_event",
+ metadata: %{"replayed" => true},
+ user_payload_encoding: :binary,
+ user_payload: <<101, 102, 103>>
+ }) == @binary_user_broadcast
+ end
+
+ test "fastlane binary UserBroadcast no metadata" do
+ assert fastlane!(@serializer, %UserBroadcast{
+ topic: "topic",
+ user_event: "user_event",
+ metadata: nil,
+ user_payload_encoding: :binary,
+ user_payload: <<101, 102, 103>>
+ }) == @binary_user_broadcast_no_metadata
+ end
+
+ test "fastlane json UserBroadcast" do
+ assert fastlane!(@serializer, %UserBroadcast{
+ topic: "topic",
+ user_event: "user_event",
+ metadata: %{"replayed" => true},
+ user_payload_encoding: :json,
+ user_payload: "{\"a\":\"b\"}"
+ }) == @json_user_broadcast
+ end
+
+ test "fastlane json UserBroadcast no metadata" do
+ assert fastlane!(@serializer, %UserBroadcast{
+ topic: "topic",
+ user_event: "user_event",
+ user_payload_encoding: :json,
+ user_payload: "{\"a\":\"b\"}"
+ }) == @json_user_broadcast_no_metadata
+ end
+
+ test "fastlane with oversized headers" do
+ assert_raise ArgumentError, ~r/unable to convert topic to binary/, fn ->
+ fastlane!(@serializer, %Broadcast{
+ topic: String.duplicate("t", 256),
+ event: "event",
+ payload: {:binary, <<101, 102, 103>>}
+ })
+ end
+
+ assert_raise ArgumentError, ~r/unable to convert event to binary/, fn ->
+ fastlane!(@serializer, %Broadcast{
+ topic: "topic",
+ event: String.duplicate("e", 256),
+ payload: {:binary, <<101, 102, 103>>}
+ })
+ end
+
+ assert_raise ArgumentError, ~r/unable to convert topic to binary/, fn ->
+ fastlane!(@serializer, %UserBroadcast{
+ topic: String.duplicate("t", 256),
+ user_event: "user_event",
+ user_payload_encoding: :json,
+ user_payload: "{\"a\":\"b\"}"
+ })
+ end
+
+ assert_raise ArgumentError, ~r/unable to convert user_event to binary/, fn ->
+ fastlane!(@serializer, %UserBroadcast{
+ topic: "topic",
+ user_event: String.duplicate("e", 256),
+ user_payload_encoding: :json,
+ user_payload: "{\"a\":\"b\"}"
+ })
+ end
+
+ assert_raise ArgumentError, ~r/unable to convert metadata to binary/, fn ->
+ fastlane!(@serializer, %UserBroadcast{
+ topic: "topic",
+ user_event: "user_event",
+ metadata: %{k: String.duplicate("e", 256)},
+ user_payload_encoding: :json,
+ user_payload: "{\"a\":\"b\"}"
+ })
+ end
+ end
+ end
+
+ describe "binary decode" do
+ test "pushed message" do
+ assert decode!(@serializer, @client_push, opcode: :binary) == %Phoenix.Socket.Message{
+ join_ref: "12",
+ ref: "123",
+ topic: "topic",
+ event: "event",
+ payload: {:binary, <<101, 102, 103>>}
+ }
+ end
+
+ test "binary user pushed message with metadata" do
+ assert decode!(@serializer, @client_binary_user_broadcast_push_with_metadata, opcode: :binary) ==
+ %Phoenix.Socket.Message{
+ join_ref: "12",
+ ref: "123",
+ topic: "topic",
+ event: "broadcast",
+ payload: {"user_event", :binary, <<101, 102, 103>>, %{"store" => true}}
+ }
+ end
+
+ test "binary user pushed message" do
+ assert decode!(@serializer, @client_binary_user_broadcast_push, opcode: :binary) == %Phoenix.Socket.Message{
+ join_ref: "12",
+ ref: "123",
+ topic: "topic",
+ event: "broadcast",
+ payload: {"user_event", :binary, <<101, 102, 103>>, %{}}
+ }
+ end
+
+ test "json binary user pushed message" do
+ assert decode!(@serializer, @client_json_user_broadcast_push, opcode: :binary) == %Phoenix.Socket.Message{
+ join_ref: "12",
+ ref: "123",
+ topic: "topic",
+ event: "broadcast",
+ payload: {"user_event", :json, "{\"a\":\"b\"}", %{}}
+ }
+ end
+ end
+end
diff --git a/test/realtime_web/tenant_broadcaster_test.exs b/test/realtime_web/tenant_broadcaster_test.exs
index d9afbf641..163a1236b 100644
--- a/test/realtime_web/tenant_broadcaster_test.exs
+++ b/test/realtime_web/tenant_broadcaster_test.exs
@@ -1,5 +1,5 @@
defmodule RealtimeWeb.TenantBroadcasterTest do
- # Usage of Clustered
+ # Usage of Clustered and changing Application env
use Realtime.DataCase, async: false
alias Phoenix.Socket.Broadcast
@@ -33,6 +33,7 @@ defmodule RealtimeWeb.TenantBroadcasterTest do
end
setup context do
+ tenant_id = random_string()
Endpoint.subscribe(@topic)
:erpc.call(context.node, Subscriber, :subscribe, [self(), @topic])
@@ -44,100 +45,208 @@ defmodule RealtimeWeb.TenantBroadcasterTest do
__MODULE__,
[:realtime, :tenants, :payload, :size],
&__MODULE__.handle_telemetry/4,
- pid: self()
+ %{pid: self(), tenant: tenant_id}
)
- :ok
+ original = Application.fetch_env!(:realtime, :pubsub_adapter)
+ on_exit(fn -> Application.put_env(:realtime, :pubsub_adapter, original) end)
+ Application.put_env(:realtime, :pubsub_adapter, context.pubsub_adapter)
+
+ {:ok, tenant_id: tenant_id}
end
- describe "pubsub_broadcast/4" do
- test "pubsub_broadcast", %{node: node} do
- message = %Broadcast{topic: @topic, event: "an event", payload: %{"a" => "b"}}
- TenantBroadcaster.pubsub_broadcast("realtime-dev", @topic, message, Phoenix.PubSub)
+ for pubsub_adapter <- [:gen_rpc, :pg2] do
+ describe "pubsub_broadcast/5 #{pubsub_adapter}" do
+ @describetag pubsub_adapter: pubsub_adapter
- assert_receive ^message
+ test "pubsub_broadcast", %{node: node, tenant_id: tenant_id} do
+ message = %Broadcast{topic: @topic, event: "an event", payload: %{"a" => "b"}}
+ TenantBroadcaster.pubsub_broadcast(tenant_id, @topic, message, Phoenix.PubSub, :broadcast)
- # Remote node received the broadcast
- assert_receive {:relay, ^node, ^message}
+ assert_receive ^message
- assert_receive {
- :telemetry,
- [:realtime, :tenants, :payload, :size],
- %{size: 114},
- %{tenant: "realtime-dev"}
- }
- end
+ # Remote node received the broadcast
+ assert_receive {:relay, ^node, ^message}
+
+ assert_receive {
+ :telemetry,
+ [:realtime, :tenants, :payload, :size],
+ %{size: 114},
+ %{tenant: ^tenant_id, message_type: :broadcast}
+ }
+ end
+
+ test "pubsub_broadcast list payload", %{node: node, tenant_id: tenant_id} do
+ message = %Broadcast{topic: @topic, event: "an event", payload: ["a", %{"b" => "c"}, 1, 23]}
+ TenantBroadcaster.pubsub_broadcast(tenant_id, @topic, message, Phoenix.PubSub, :broadcast)
+
+ assert_receive ^message
- test "pubsub_broadcast list payload", %{node: node} do
- message = %Broadcast{topic: @topic, event: "an event", payload: ["a", %{"b" => "c"}, 1, 23]}
- TenantBroadcaster.pubsub_broadcast("realtime-dev", @topic, message, Phoenix.PubSub)
+ # Remote node received the broadcast
+ assert_receive {:relay, ^node, ^message}
- assert_receive ^message
+ assert_receive {
+ :telemetry,
+ [:realtime, :tenants, :payload, :size],
+ %{size: 130},
+ %{tenant: ^tenant_id, message_type: :broadcast}
+ }
+ end
- # Remote node received the broadcast
- assert_receive {:relay, ^node, ^message}
+ test "pubsub_broadcast string payload", %{node: node, tenant_id: tenant_id} do
+ message = %Broadcast{topic: @topic, event: "an event", payload: "some text payload"}
+ TenantBroadcaster.pubsub_broadcast(tenant_id, @topic, message, Phoenix.PubSub, :broadcast)
- assert_receive {
- :telemetry,
- [:realtime, :tenants, :payload, :size],
- %{size: 130},
- %{tenant: "realtime-dev"}
- }
+ assert_receive ^message
+
+ # Remote node received the broadcast
+ assert_receive {:relay, ^node, ^message}
+
+ assert_receive {
+ :telemetry,
+ [:realtime, :tenants, :payload, :size],
+ %{size: 119},
+ %{tenant: ^tenant_id, message_type: :broadcast}
+ }
+ end
end
- test "pubsub_broadcast string payload", %{node: node} do
- message = %Broadcast{topic: @topic, event: "an event", payload: "some text payload"}
- TenantBroadcaster.pubsub_broadcast("realtime-dev", @topic, message, Phoenix.PubSub)
+ describe "pubsub_broadcast_from/6 #{pubsub_adapter}" do
+ @describetag pubsub_adapter: pubsub_adapter
+
+ test "pubsub_broadcast_from", %{node: node, tenant_id: tenant_id} do
+ parent = self()
+
+ spawn_link(fn ->
+ Endpoint.subscribe(@topic)
+ send(parent, :ready)
- assert_receive ^message
+ receive do
+ msg -> send(parent, {:other_process, msg})
+ end
+ end)
- # Remote node received the broadcast
- assert_receive {:relay, ^node, ^message}
+ assert_receive :ready
- assert_receive {
- :telemetry,
- [:realtime, :tenants, :payload, :size],
- %{size: 119},
- %{tenant: "realtime-dev"}
- }
+ message = %Broadcast{topic: @topic, event: "an event", payload: %{"a" => "b"}}
+
+ TenantBroadcaster.pubsub_broadcast_from(tenant_id, self(), @topic, message, Phoenix.PubSub, :broadcast)
+
+ assert_receive {:other_process, ^message}
+
+ # Remote node received the broadcast
+ assert_receive {:relay, ^node, ^message}
+
+ assert_receive {
+ :telemetry,
+ [:realtime, :tenants, :payload, :size],
+ %{size: 114},
+ %{tenant: ^tenant_id, message_type: :broadcast}
+ }
+
+ # This process does not receive the message
+ refute_receive _any
+ end
end
- end
- describe "pubsub_broadcast_from/5" do
- test "pubsub_broadcast_from", %{node: node} do
- parent = self()
+ describe "pubsub_direct_broadcast/6 #{pubsub_adapter}" do
+ @describetag pubsub_adapter: pubsub_adapter
+
+ test "pubsub_direct_broadcast", %{node: node, tenant_id: tenant_id} do
+ message = %Broadcast{topic: @topic, event: "an event", payload: %{"a" => "b"}}
+
+ TenantBroadcaster.pubsub_direct_broadcast(node(), tenant_id, @topic, message, Phoenix.PubSub, :broadcast)
+ TenantBroadcaster.pubsub_direct_broadcast(node, tenant_id, @topic, message, Phoenix.PubSub, :broadcast)
- spawn_link(fn ->
- Endpoint.subscribe(@topic)
- send(parent, :ready)
+ assert_receive ^message
- receive do
- msg -> send(parent, {:other_process, msg})
- end
- end)
+ # Remote node received the broadcast
+ assert_receive {:relay, ^node, ^message}
- assert_receive :ready
+ assert_receive {
+ :telemetry,
+ [:realtime, :tenants, :payload, :size],
+ %{size: 114},
+ %{tenant: ^tenant_id, message_type: :broadcast}
+ }
+ end
- message = %Broadcast{topic: @topic, event: "an event", payload: %{"a" => "b"}}
+ test "pubsub_direct_broadcast list payload", %{node: node, tenant_id: tenant_id} do
+ message = %Broadcast{topic: @topic, event: "an event", payload: ["a", %{"b" => "c"}, 1, 23]}
- TenantBroadcaster.pubsub_broadcast_from("realtime-dev", self(), @topic, message, Phoenix.PubSub)
+ TenantBroadcaster.pubsub_direct_broadcast(node(), tenant_id, @topic, message, Phoenix.PubSub, :broadcast)
+ TenantBroadcaster.pubsub_direct_broadcast(node, tenant_id, @topic, message, Phoenix.PubSub, :broadcast)
- assert_receive {:other_process, ^message}
+ assert_receive ^message
- # Remote node received the broadcast
- assert_receive {:relay, ^node, ^message}
+ # Remote node received the broadcast
+ assert_receive {:relay, ^node, ^message}
- assert_receive {
- :telemetry,
- [:realtime, :tenants, :payload, :size],
- %{size: 114},
- %{tenant: "realtime-dev"}
- }
+ assert_receive {
+ :telemetry,
+ [:realtime, :tenants, :payload, :size],
+ %{size: 130},
+ %{tenant: ^tenant_id, message_type: :broadcast}
+ }
+ end
- # This process does not receive the message
- refute_receive _any
+ test "pubsub_direct_broadcast string payload", %{node: node, tenant_id: tenant_id} do
+ message = %Broadcast{topic: @topic, event: "an event", payload: "some text payload"}
+
+ TenantBroadcaster.pubsub_direct_broadcast(node(), tenant_id, @topic, message, Phoenix.PubSub, :broadcast)
+ TenantBroadcaster.pubsub_direct_broadcast(node, tenant_id, @topic, message, Phoenix.PubSub, :broadcast)
+
+ assert_receive ^message
+
+ # Remote node received the broadcast
+ assert_receive {:relay, ^node, ^message}
+
+ assert_receive {
+ :telemetry,
+ [:realtime, :tenants, :payload, :size],
+ %{size: 119},
+ %{tenant: ^tenant_id, message_type: :broadcast}
+ }
+ end
end
end
- def handle_telemetry(event, measures, metadata, pid: pid), do: send(pid, {:telemetry, event, measures, metadata})
+ describe "collect_payload_size/3" do
+ @describetag pubsub_adapter: :gen_rpc
+
+ test "emit telemetry for struct", %{tenant_id: tenant_id} do
+ TenantBroadcaster.collect_payload_size(
+ tenant_id,
+ %Phoenix.Socket.Broadcast{event: "broadcast", payload: %{"a" => "b"}},
+ :broadcast
+ )
+
+ assert_receive {:telemetry, [:realtime, :tenants, :payload, :size], %{size: 65},
+ %{tenant: ^tenant_id, message_type: :broadcast}}
+ end
+
+ test "emit telemetry for map", %{tenant_id: tenant_id} do
+ TenantBroadcaster.collect_payload_size(
+ tenant_id,
+ %{event: "broadcast", payload: %{"a" => "b"}},
+ :postgres_changes
+ )
+
+ assert_receive {:telemetry, [:realtime, :tenants, :payload, :size], %{size: 53},
+ %{tenant: ^tenant_id, message_type: :postgres_changes}}
+ end
+
+ test "emit telemetry for non-map", %{tenant_id: tenant_id} do
+ TenantBroadcaster.collect_payload_size(tenant_id, "some blob", :presence)
+
+ assert_receive {:telemetry, [:realtime, :tenants, :payload, :size], %{size: 15},
+ %{tenant: ^tenant_id, message_type: :presence}}
+ end
+ end
+
+ def handle_telemetry(event, measures, metadata, %{pid: pid, tenant: tenant}) do
+ if metadata[:tenant] == tenant do
+ send(pid, {:telemetry, event, measures, metadata})
+ end
+ end
end
diff --git a/test/support/channel_case.ex b/test/support/channel_case.ex
index 8bdab2185..66b6a67a3 100644
--- a/test/support/channel_case.ex
+++ b/test/support/channel_case.ex
@@ -16,7 +16,6 @@ defmodule RealtimeWeb.ChannelCase do
"""
use ExUnit.CaseTemplate
- alias Ecto.Adapters.SQL.Sandbox
using do
quote do
@@ -30,8 +29,6 @@ defmodule RealtimeWeb.ChannelCase do
end
setup tags do
- pid = Sandbox.start_owner!(Realtime.Repo, shared: not tags[:async])
- on_exit(fn -> Sandbox.stop_owner(pid) end)
- :ok
+ Realtime.DataCase.setup_sandbox(tags)
end
end
diff --git a/test/support/clustered.ex b/test/support/clustered.ex
index c7028b79b..6533ca8ee 100644
--- a/test/support/clustered.ex
+++ b/test/support/clustered.ex
@@ -23,25 +23,37 @@ defmodule Clustered do
end
```
"""
- @spec start(any()) :: {:ok, node}
+ @spec start(any(), keyword()) :: {:ok, node}
def start(aux_mod \\ nil, opts \\ []) do
- {:ok, _pid, node} = start_disconnected(aux_mod, opts)
+ {:ok, pid, node} = start_disconnected(aux_mod, opts)
+
+ :ok = wait_for_gen_rpc(pid)
true = Node.connect(node)
+ max_clients = Application.get_env(:realtime, :max_gen_rpc_clients, 5)
+
+ for key <- 1..max_clients do
+ _ = :gen_rpc.call({node, key}, :erlang, :node, [], 5_000)
+ end
+
{:ok, node}
end
@doc """
Similar to `start/2` but the node is not connected automatically
"""
- @spec start_disconnected(any()) :: {:ok, :peer.server_ref(), node}
+ @spec start_disconnected(any(), keyword()) :: {:ok, :peer.server_ref(), node}
def start_disconnected(aux_mod \\ nil, opts \\ []) do
extra_config = Keyword.get(opts, :extra_config, [])
phoenix_port = Keyword.get(opts, :phoenix_port, 4012)
+ name = Keyword.get(opts, :name, :peer.random_name())
+
+ partition = System.get_env("MIX_TEST_PARTITION")
+ node_name = if partition, do: :"main#{partition}@127.0.0.1", else: :"main@127.0.0.1"
:ok =
- case :net_kernel.start([:"main@127.0.0.1"]) do
+ case :net_kernel.start([node_name]) do
{:ok, _} ->
:ok
@@ -53,7 +65,6 @@ defmodule Clustered do
end
true = :erlang.set_cookie(:cookie)
- name = :peer.random_name()
{:ok, pid, node} =
ExUnit.Callbacks.start_supervised(%{
@@ -106,10 +117,12 @@ defmodule Clustered do
:ok = :peer.call(pid, Application, :put_env, [app_name, key, value])
end
+ wait_for_port_free(gen_rpc_tcp_client_port)
+ {:ok, _} = :peer.call(pid, Application, :ensure_all_started, [:gen_rpc])
{:ok, _} = :peer.call(pid, Application, :ensure_all_started, [:mix])
:ok = :peer.call(pid, Mix, :env, [Mix.env()])
- Enum.map(
+ Enum.each(
[:logger, :runtime_tools, :prom_ex, :mix, :os_mon, :realtime],
fn app -> {:ok, _} = :peer.call(pid, Application, :ensure_all_started, [app]) end
)
@@ -121,7 +134,41 @@ defmodule Clustered do
{:ok, pid, node}
end
- def stop() do
- Node.stop()
+ defp wait_for_gen_rpc(pid) do
+ port = :peer.call(pid, Application, :get_env, [:gen_rpc, :tcp_server_port])
+
+ case port do
+ port when is_integer(port) and port > 0 -> wait_for_port({127, 0, 0, 1}, port, 50, 100)
+ _ -> raise "gen_rpc tcp_server_port is not configured: #{inspect(port)}"
+ end
+ end
+
+ defp wait_for_port_free(port, attempts \\ 50, delay_ms \\ 100)
+ defp wait_for_port_free(_port, 0, _delay_ms), do: :ok
+
+ defp wait_for_port_free(port, attempts, delay_ms) do
+ case :gen_tcp.connect({127, 0, 0, 1}, port, [:binary, active: false], 100) do
+ {:ok, socket} ->
+ :gen_tcp.close(socket)
+ Process.sleep(delay_ms)
+ wait_for_port_free(port, attempts - 1, delay_ms)
+
+ {:error, _} ->
+ :ok
+ end
+ end
+
+ defp wait_for_port(_host, _port, 0, _delay_ms), do: raise("gen_rpc tcp server did not start in time")
+
+ defp wait_for_port(host, port, attempts, delay_ms) do
+ case :gen_tcp.connect(host, port, [:binary, active: false], 200) do
+ {:ok, socket} ->
+ :ok = :gen_tcp.close(socket)
+ :ok
+
+ {:error, _reason} ->
+ Process.sleep(delay_ms)
+ wait_for_port(host, port, attempts - 1, delay_ms)
+ end
end
end
diff --git a/test/support/conn_case.ex b/test/support/conn_case.ex
index 9289af1b5..6567d8a62 100644
--- a/test/support/conn_case.ex
+++ b/test/support/conn_case.ex
@@ -16,12 +16,12 @@ defmodule RealtimeWeb.ConnCase do
"""
use ExUnit.CaseTemplate
- alias Ecto.Adapters.SQL.Sandbox
using do
quote do
# Import conveniences for testing with connections
import Generators
+ import Integrations
import TenantConnection
import Phoenix.ConnTest
import Plug.Conn
@@ -38,9 +38,7 @@ defmodule RealtimeWeb.ConnCase do
end
setup tags do
- pid = Sandbox.start_owner!(Realtime.Repo, shared: not tags[:async])
- on_exit(fn -> Sandbox.stop_owner(pid) end)
-
+ Realtime.DataCase.setup_sandbox(tags)
{:ok, conn: Phoenix.ConnTest.build_conn()}
end
end
diff --git a/test/support/containers.ex b/test/support/containers.ex
index cd66f2699..070a2973e 100644
--- a/test/support/containers.ex
+++ b/test/support/containers.ex
@@ -3,12 +3,11 @@ defmodule Containers do
alias Realtime.Tenants.Connect
alias Containers.Container
alias Realtime.Database
- alias Realtime.RateCounter
alias Realtime.Tenants.Migrations
use GenServer
- @image "supabase/postgres:15.8.1.040"
+ @image "supabase/postgres:17.6.1.074"
# Pull image if not available
def pull do
case System.cmd("docker", ["image", "inspect", @image]) do
@@ -37,7 +36,13 @@ defmodule Containers do
def handle_continue({:pool, max_cases}, state) do
{:ok, _pid} =
:poolboy.start_link(
- [name: {:local, Containers.Pool}, size: max_cases + 2, max_overflow: 0, worker_module: Containers.Container],
+ [
+ strategy: :fifo,
+ name: {:local, Containers.Pool},
+ size: max_cases + 2,
+ max_overflow: 0,
+ worker_module: Containers.Container
+ ],
[]
)
@@ -110,68 +115,94 @@ defmodule Containers do
end
end
- # Might be worth changing this to {:ok, tenant}
- def checkout_tenant(opts \\ []) do
+ defp storage_up!(tenant) do
+ settings =
+ Database.from_tenant(tenant, "realtime_test", :stop)
+ |> Map.from_struct()
+ |> Keyword.new()
+
+ case Ecto.Adapters.Postgres.storage_up(settings) do
+ :ok -> :ok
+ {:error, :already_up} -> :ok
+ _ -> raise "Failed to create database"
+ end
+ end
+
+ def checkout_tenant(opts \\ []), do: do_checkout_tenant(opts, :sandbox)
+ def checkout_tenant_unboxed(opts \\ []), do: do_checkout_tenant(opts, :unboxed)
+
+ defp do_checkout_tenant(opts, mode) do
with container when is_pid(container) <- :poolboy.checkout(Containers.Pool, true, 5_000),
port <- Container.port(container) do
- tenant = Generators.tenant_fixture(%{port: port, migrations_ran: 0})
+ tenant = repo_run(mode, fn -> Generators.tenant_fixture(%{port: port, migrations_ran: 0}) end)
+
run_migrations? = Keyword.get(opts, :run_migrations, false)
settings = Database.from_tenant(tenant, "realtime_test", :stop)
settings = %{settings | max_restarts: 0, ssl: false}
{:ok, conn} = Database.connect_db(settings)
- Postgrex.transaction(conn, fn db_conn ->
- Postgrex.query!(db_conn, "DROP SCHEMA IF EXISTS realtime CASCADE", [])
- Postgrex.query!(db_conn, "CREATE SCHEMA IF NOT EXISTS realtime", [])
- end)
+ try do
+ Postgrex.transaction(conn, fn db_conn ->
+ Postgrex.query!(db_conn, "DROP SCHEMA IF EXISTS realtime CASCADE", [])
+ Postgrex.query!(db_conn, "CREATE SCHEMA IF NOT EXISTS realtime", [])
+ end)
- Process.exit(conn, :normal)
+ storage_up!(tenant)
- RateCounter.stop(tenant.external_id)
+ RateCounterHelper.stop(tenant.external_id)
- # Automatically checkin the container at the end of the test
- ExUnit.Callbacks.on_exit(fn ->
- # Clean up database connections if they are set-up
+ ExUnit.Callbacks.on_exit(fn ->
+ if connect_pid = Connect.whereis(tenant.external_id) do
+ supervisor = {:via, PartitionSupervisor, {Realtime.Tenants.Connect.DynamicSupervisor, tenant.external_id}}
- if connect_pid = Connect.whereis(tenant.external_id) do
- supervisor = {:via, PartitionSupervisor, {Realtime.Tenants.Connect.DynamicSupervisor, tenant.external_id}}
+ DynamicSupervisor.terminate_child(supervisor, connect_pid)
+ end
- DynamicSupervisor.terminate_child(supervisor, connect_pid)
- end
+ try do
+ PostgresCdcRls.handle_stop(tenant.external_id, 5_000)
+ catch
+ _, _ -> :ok
+ end
- try do
- PostgresCdcRls.handle_stop(tenant.external_id, 5_000)
- catch
- _, _ -> :ok
- end
+ if mode == :unboxed do
+ repo_run(:unboxed, fn -> Realtime.Api.delete_tenant_by_external_id(tenant.external_id) end)
+ end
- :poolboy.checkin(Containers.Pool, container)
- end)
+ :poolboy.checkin(Containers.Pool, container)
+ end)
- tenant =
if run_migrations? do
case run_migrations(tenant) do
{:ok, count} ->
- # Avoiding to use Tenants.update_migrations_ran/2 because it touches Cachex and it doesn't play well with
- # Ecto Sandbox
:ok = Migrations.create_partitions(conn)
- {:ok, tenant} = Realtime.Api.update_tenant(tenant, %{migrations_ran: count})
+
+ {:ok, tenant} =
+ repo_run(mode, fn ->
+ Realtime.Api.update_tenant_by_external_id(tenant.external_id, %{migrations_ran: count})
+ end)
+
+ if mode == :sandbox, do: Realtime.Tenants.Cache.invalidate_tenant_cache(tenant.external_id)
+
tenant
- _ ->
- raise "Faled to run migrations"
+ error ->
+ raise "Failed to run migrations: #{inspect(error)}"
end
else
tenant
end
-
- tenant
+ after
+ GenServer.stop(conn)
+ end
else
_ -> {:error, "failed to checkout a container"}
end
end
+ defp repo_run(:unboxed, fun), do: Ecto.Adapters.SQL.Sandbox.unboxed_run(Realtime.Repo, fun)
+ defp repo_run(:sandbox, fun), do: fun.()
+
def stop_containers() do
{list, 0} = System.cmd("docker", ["ps", "-a", "--format", "{{.Names}}", "--filter", "name=realtime-test-*"])
names = list |> String.trim() |> String.split("\n")
@@ -267,7 +298,13 @@ defmodule Containers do
@image,
"postgres",
"-c",
- "config_file=/etc/postgresql/postgresql.conf"
+ "config_file=/etc/postgresql/postgresql.conf",
+ "-c",
+ "wal_keep_size=32MB",
+ "-c",
+ "max_wal_size=32MB",
+ "-c",
+ "max_slot_wal_keep_size=32MB"
])
end
end
diff --git a/test/support/data_case.ex b/test/support/data_case.ex
index 3c9cd02b8..afbad155a 100644
--- a/test/support/data_case.ex
+++ b/test/support/data_case.ex
@@ -28,10 +28,14 @@ defmodule Realtime.DataCase do
end
end
- setup tags do
+ def setup_sandbox(tags) do
pid = Sandbox.start_owner!(Realtime.Repo, shared: not tags[:async])
on_exit(fn -> Sandbox.stop_owner(pid) end)
+ :ok
+ end
+ setup tags do
+ setup_sandbox(tags)
{:ok, conn: Phoenix.ConnTest.build_conn()}
end
diff --git a/test/support/generators.ex b/test/support/generators.ex
index 768e3823b..67014816c 100644
--- a/test/support/generators.ex
+++ b/test/support/generators.ex
@@ -283,25 +283,31 @@ defmodule Generators do
jwt
end
- @port 4003
- @serializer Phoenix.Socket.V1.JSONSerializer
-
- def get_connection(
- tenant,
- role \\ "anon",
- claims \\ %{},
- params \\ %{vsn: "1.0.0", log_level: :warning}
- ) do
+ defp test_port do
+ :realtime
+ |> Application.get_env(RealtimeWeb.Endpoint, %{})
+ |> get_in([:http, :port]) || 4002
+ end
+
+ def get_connection(tenant, serializer \\ Phoenix.Socket.V1.JSONSerializer, opts \\ []) do
+ params = Keyword.get(opts, :params, %{log_level: :warning})
+ claims = Keyword.get(opts, :claims, %{})
+ role = Keyword.get(opts, :role, "anon")
+
     params = Enum.reduce(params, "", fn {k, v}, acc -> "#{acc}&#{k}=#{v}" end)
- uri = "#{uri(tenant)}?#{params}"
+    uri = "#{uri(tenant, serializer)}#{params}"
with {:ok, token} <- token_valid(tenant, role, claims),
- {:ok, socket} <- WebsocketClient.connect(self(), uri, @serializer, [{"x-api-key", token}]) do
+ {:ok, socket} <- WebsocketClient.connect(self(), uri, serializer, [{"x-api-key", token}]) do
{socket, token}
end
end
- def uri(tenant, port \\ @port), do: "ws://#{tenant.external_id}.localhost:#{port}/socket/websocket"
+ def uri(tenant, serializer, port \\ nil),
+ do: "ws://#{tenant.external_id}.localhost:#{port || test_port()}/socket/websocket?vsn=#{vsn(serializer)}"
+
+ defp vsn(Phoenix.Socket.V1.JSONSerializer), do: "1.0.0"
+ defp vsn(RealtimeWeb.Socket.V2Serializer), do: "2.0.0"
@spec token_valid(Tenant.t(), binary(), map()) :: {:ok, binary()}
def token_valid(tenant, role, claims \\ %{}), do: generate_token(tenant, Map.put(claims, :role, role))
diff --git a/test/support/integrations.ex b/test/support/integrations.ex
new file mode 100644
index 000000000..d6c3338fd
--- /dev/null
+++ b/test/support/integrations.ex
@@ -0,0 +1,124 @@
+defmodule Integrations do
+ import ExUnit.Assertions
+ import Generators
+
+ alias Realtime.Api.Tenant
+ alias Realtime.Database
+ alias Realtime.Tenants.Authorization
+ alias Realtime.Tenants.Connect
+
+ def checkout_tenant_and_connect(_context \\ %{}) do
+ tenant = Containers.checkout_tenant(run_migrations: true)
+ {:ok, db_conn} = Connect.lookup_or_start_connection(tenant.external_id)
+ assert Connect.ready?(tenant.external_id)
+ %{db_conn: db_conn, tenant: tenant}
+ end
+
+ def rls_context(%{tenant: tenant} = context) do
+ {:ok, db_conn} = Database.connect(tenant, "realtime_test", :stop)
+ clean_table(db_conn, "realtime", "messages")
+ topic = Map.get(context, :topic, random_string())
+ policies = Map.get(context, :policies, nil)
+ role = Map.get(context, :role, nil)
+ sub = Map.get(context, :sub, nil)
+
+ if policies, do: create_rls_policies(db_conn, policies, %{topic: topic, role: role, sub: sub})
+
+ authorization_context =
+ Authorization.build_authorization_params(%{
+ tenant_id: tenant.external_id,
+ topic: topic,
+ headers: [{"header-1", "value-1"}],
+ claims: %{sub: sub, role: role},
+ role: role,
+ sub: sub
+ })
+
+ ExUnit.Callbacks.on_exit(fn ->
+ if Process.alive?(db_conn) do
+ try do
+ GenServer.stop(db_conn, :normal, 1_000)
+ catch
+ :exit, _ -> :ok
+ end
+ end
+ end)
+
+ %{topic: topic, role: role, sub: sub, db_conn: db_conn, authorization_context: authorization_context}
+ end
+
+ def change_tenant_configuration(%Tenant{external_id: external_id}, limit, value) do
+ tenant =
+ external_id
+ |> Realtime.Tenants.get_tenant_by_external_id()
+ |> Tenant.changeset(%{limit => value})
+ |> Realtime.Repo.update!()
+
+ Realtime.Tenants.Cache.update_cache(tenant)
+ end
+
+ def checkout_tenant_connect_and_setup_postgres_changes(_context \\ %{}) do
+ %{db_conn: db_conn} = result = checkout_tenant_and_connect()
+ setup_postgres_changes(db_conn)
+ result
+ end
+
+ def setup_postgres_changes(conn) do
+ publication = "supabase_realtime_test"
+
+ Postgrex.transaction(conn, fn db_conn ->
+ queries = [
+ "DROP TABLE IF EXISTS public.test",
+ "DROP PUBLICATION IF EXISTS #{publication}",
+ "create sequence if not exists test_id_seq;",
+ """
+ create table "public"."test" (
+ "id" int4 not null default nextval('test_id_seq'::regclass),
+ "details" text,
+ "binary_data" bytea,
+ primary key ("id"));
+ """,
+ "grant all on table public.test to anon;",
+ "grant all on table public.test to supabase_admin;",
+ "grant all on table public.test to authenticated;",
+ "create publication #{publication} for all tables",
+ """
+ DO $$
+ DECLARE
+ r RECORD;
+ BEGIN
+ FOR r IN
+ SELECT slot_name, active_pid
+ FROM pg_replication_slots
+ WHERE slot_name LIKE 'supabase_realtime%'
+ LOOP
+ IF r.active_pid IS NOT NULL THEN
+ BEGIN
+          PERFORM pg_terminate_backend(r.active_pid);
+ PERFORM pg_sleep(0.5);
+ EXCEPTION WHEN OTHERS THEN
+ NULL;
+ END;
+ END IF;
+
+ BEGIN
+ IF EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = r.slot_name) THEN
+ PERFORM pg_drop_replication_slot(r.slot_name);
+ END IF;
+ EXCEPTION WHEN OTHERS THEN
+ NULL;
+ END;
+ END LOOP;
+ END$$;
+ """
+ ]
+
+ Enum.each(queries, &Postgrex.query!(db_conn, &1, []))
+ end)
+ end
+
+ def assert_process_down(pid, timeout \\ 1000) do
+ ref = Process.monitor(pid)
+ assert_receive {:DOWN, ^ref, :process, ^pid, _reason}, timeout
+ end
+end
diff --git a/test/support/metrics_helper.ex b/test/support/metrics_helper.ex
new file mode 100644
index 000000000..ca31ad91b
--- /dev/null
+++ b/test/support/metrics_helper.ex
@@ -0,0 +1,53 @@
+defmodule MetricsHelper do
+  @spec search(String.t(), String.t(), map() | keyword() | nil) ::
+          integer() | nil
+ def search(prometheus_metrics, metric_name, expected_tags \\ nil) do
+ # Escape the metric_name to handle any special regex characters
+ escaped_name = Regex.escape(metric_name)
+    regex = ~r/^(?<name>#{escaped_name})\{(?<tags>[^}]+)\}\s+(?<value>\d+(?:\.\d+)?)$/
+
+ prometheus_metrics
+ |> IO.iodata_to_binary()
+ |> String.split("\n", trim: true)
+ |> Enum.find_value(
+ nil,
+ fn item ->
+ case parse(item, regex, expected_tags) do
+ {:ok, value} -> value
+ {:error, _reason} -> false
+ end
+ end
+ )
+ |> case do
+ nil -> nil
+      number -> number |> Integer.parse() |> elem(0)
+ end
+ end
+
+ defp parse(metric_string, regex, expected_tags) do
+ case Regex.named_captures(regex, metric_string) do
+ %{"name" => _name, "tags" => tags_string, "value" => value} ->
+ tags = parse_tags(tags_string)
+
+ if expected_tags && !matching_tags(tags, expected_tags) do
+ {:error, "Tags do not match expected tags"}
+ else
+ {:ok, value}
+ end
+
+ nil ->
+ {:error, "Invalid metric format or metric name mismatch"}
+ end
+ end
+
+ defp parse_tags(tags_string) do
+    ~r/(?<key>[a-zA-Z_][a-zA-Z0-9_]*)="(?<value>[^"]*)"/
+ |> Regex.scan(tags_string, capture: :all_names)
+ |> Enum.map(fn [key, value] -> {key, value} end)
+ |> Map.new()
+ end
+
+ defp matching_tags(tags, expected_tags) do
+ Enum.all?(expected_tags, fn {k, v} -> Map.get(tags, to_string(k)) == to_string(v) end)
+ end
+end
diff --git a/test/support/rate_counter_helper.ex b/test/support/rate_counter_helper.ex
new file mode 100644
index 000000000..660ec422f
--- /dev/null
+++ b/test/support/rate_counter_helper.ex
@@ -0,0 +1,41 @@
+defmodule RateCounterHelper do
+ alias Realtime.RateCounter
+
+ @spec stop(term()) :: :ok
+ def stop(tenant_id) do
+ keys =
+ Registry.select(Realtime.Registry.Unique, [
+ {{{:"$1", :_, {:_, :_, :"$2"}}, :"$3", :_}, [{:==, :"$1", RateCounter}, {:==, :"$2", tenant_id}], [:"$_"]}
+ ])
+
+ Enum.each(keys, fn {{_, _, key}, {pid, _}} ->
+ if Process.alive?(pid), do: GenServer.stop(pid)
+ Realtime.GenCounter.delete(key)
+ Cachex.del!(RateCounter, key)
+ end)
+
+ :ok
+ end
+
+  @spec tick!(RateCounter.Args.t()) :: {:ok, RateCounter.t()}
+ def tick!(args) do
+ [{pid, _}] = Registry.lookup(Realtime.Registry.Unique, {RateCounter, :rate_counter, args.id})
+ send(pid, :tick)
+ {:ok, :sys.get_state(pid)}
+ end
+
+ def tick_tenant_rate_counters!(tenant_id) do
+ keys =
+ Registry.select(Realtime.Registry.Unique, [
+ {{{:"$1", :_, {:_, :_, :"$2"}}, :"$3", :_}, [{:==, :"$1", RateCounter}, {:==, :"$2", tenant_id}], [:"$_"]}
+ ])
+
+ Enum.each(keys, fn {{_, _, _key}, {pid, _}} ->
+ send(pid, :tick)
+ # do a get_state to wait for the tick to be processed
+ :sys.get_state(pid)
+ end)
+
+ :ok
+ end
+end
diff --git a/test/support/tenant_connection.ex b/test/support/tenant_connection.ex
index ce5956b49..77328bdfc 100644
--- a/test/support/tenant_connection.ex
+++ b/test/support/tenant_connection.ex
@@ -4,17 +4,17 @@ defmodule TenantConnection do
"""
alias Realtime.Api.Message
alias Realtime.Database
- alias Realtime.Repo
+ alias Realtime.Tenants.Repo
alias Realtime.Tenants.Connect
alias RealtimeWeb.Endpoint
def create_message(attrs, conn, opts \\ [mode: :savepoint]) do
- channel = Message.changeset(%Message{}, attrs)
+ message = Message.changeset(%Message{}, attrs)
{:ok, result} =
Database.transaction(conn, fn transaction_conn ->
- with {:ok, %Message{} = channel} <- Repo.insert(transaction_conn, channel, Message, opts) do
- channel
+ with {:ok, %Message{} = message} <- Repo.insert(transaction_conn, message, Message, opts) do
+ message
end
end)
diff --git a/test/support/test_endpoint.ex b/test/support/test_endpoint.ex
deleted file mode 100644
index 67c477153..000000000
--- a/test/support/test_endpoint.ex
+++ /dev/null
@@ -1,26 +0,0 @@
-defmodule TestEndpoint do
- use Phoenix.Endpoint, otp_app: :phoenix
-
- @session_config store: :cookie,
- key: "_hello_key",
- signing_salt: "change_me"
-
- socket("/socket", RealtimeWeb.UserSocket,
- websocket: [
- connect_info: [:peer_data, :uri, :x_headers],
- fullsweep_after: 20,
- max_frame_size: 8_000_000
- ]
- )
-
- plug(Plug.Session, @session_config)
- plug(:fetch_session)
- plug(Plug.CSRFProtection)
- plug(:put_session)
-
- defp put_session(conn, _) do
- conn
- |> put_session(:from_session, "123")
- |> send_resp(200, Plug.CSRFProtection.get_csrf_token())
- end
-end
diff --git a/test/test_helper.exs b/test/test_helper.exs
index 435f00ef8..14fb626bc 100644
--- a/test/test_helper.exs
+++ b/test/test_helper.exs
@@ -1,8 +1,8 @@
start_time = :os.system_time(:millisecond)
alias Realtime.Api
-alias Realtime.Database
-ExUnit.start(exclude: [:failing], max_cases: 3, capture_log: true)
+max_cases = String.to_integer(System.get_env("MAX_CASES", "4"))
+ExUnit.start(exclude: [:failing], max_cases: max_cases, capture_log: true)
max_cases = ExUnit.configuration()[:max_cases]
@@ -10,53 +10,27 @@ Containers.pull()
if System.get_env("REUSE_CONTAINERS") != "true" do
Containers.stop_containers()
- Containers.stop_container("dev_tenant")
end
{:ok, _pid} = Containers.start_link(max_cases)
-for tenant <- Api.list_tenants(), do: Api.delete_tenant(tenant)
-
-tenant_name = "dev_tenant"
-tenant = Containers.initialize(tenant_name)
-publication = "supabase_realtime_test"
-
-# Start dev_realtime container to be used in integration tests
-{:ok, conn} = Database.connect(tenant, "realtime_seed", :stop)
-
-Database.transaction(conn, fn db_conn ->
- queries = [
- "DROP TABLE IF EXISTS public.test",
- "DROP PUBLICATION IF EXISTS #{publication}",
- "create sequence if not exists test_id_seq;",
- """
- create table "public"."test" (
- "id" int4 not null default nextval('test_id_seq'::regclass),
- "details" text,
- primary key ("id"));
- """,
- "grant all on table public.test to anon;",
- "grant all on table public.test to postgres;",
- "grant all on table public.test to authenticated;",
- "create publication #{publication} for all tables"
- ]
-
- Enum.each(queries, &Postgrex.query!(db_conn, &1, []))
-end)
+for tenant <- Api.list_tenants(), do: Api.delete_tenant_by_external_id(tenant.external_id)
Ecto.Adapters.SQL.Sandbox.mode(Realtime.Repo, :manual)
-end_time = :os.system_time(:millisecond)
-IO.puts("[test_helper.exs] Time to start tests: #{end_time - start_time} ms")
-
Mimic.copy(:syn)
+Mimic.copy(Extensions.PostgresCdcRls)
+Mimic.copy(Extensions.PostgresCdcRls.Replications)
+Mimic.copy(Extensions.PostgresCdcRls.Subscriptions)
+Mimic.copy(Realtime.Database)
Mimic.copy(Realtime.GenCounter)
+Mimic.copy(Realtime.GenRpc)
Mimic.copy(Realtime.Nodes)
+Mimic.copy(Realtime.Repo.Replica)
Mimic.copy(Realtime.RateCounter)
Mimic.copy(Realtime.Tenants.Authorization)
Mimic.copy(Realtime.Tenants.Cache)
Mimic.copy(Realtime.Tenants.Connect)
-Mimic.copy(Realtime.Database)
Mimic.copy(Realtime.Tenants.Migrations)
Mimic.copy(Realtime.Tenants.Rebalancer)
Mimic.copy(Realtime.Tenants.ReplicationConnection)
@@ -64,3 +38,13 @@ Mimic.copy(RealtimeWeb.ChannelsAuthorization)
Mimic.copy(RealtimeWeb.Endpoint)
Mimic.copy(RealtimeWeb.JwtVerification)
Mimic.copy(RealtimeWeb.TenantBroadcaster)
+
+partition = System.get_env("MIX_TEST_PARTITION")
+node_name = if partition, do: :"main#{partition}@127.0.0.1", else: :"main@127.0.0.1"
+:net_kernel.start([node_name])
+region = Application.get_env(:realtime, :region)
+[{pid, _}] = :syn.members(RegionNodes, region)
+:syn.update_member(RegionNodes, region, pid, fn _ -> [node: node()] end)
+
+end_time = :os.system_time(:millisecond)
+IO.puts("[test_helper.exs] Time to start tests: #{end_time - start_time} ms")